/*
 * OMAP2 McSPI controller driver
 *
 * Copyright (C) 2005, 2006 Nokia Corporation
 * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
 *		Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/spi/spi.h>

#include <plat/dma.h>
#include <plat/clock.h>
#include <plat/mcspi.h>

#define OMAP2_MCSPI_MAX_FREQ		48000000

/* OMAP2 has 3 SPI controllers, while OMAP3 has 4 */
#define OMAP2_MCSPI_MAX_CTRL		4

#define OMAP2_MCSPI_REVISION		0x00
#define OMAP2_MCSPI_SYSSTATUS		0x14
#define OMAP2_MCSPI_IRQSTATUS		0x18
#define OMAP2_MCSPI_IRQENABLE		0x1c
#define OMAP2_MCSPI_WAKEUPENABLE	0x20
#define OMAP2_MCSPI_SYST		0x24
#define OMAP2_MCSPI_MODULCTRL		0x28

/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0		0x2c
#define OMAP2_MCSPI_CHSTAT0		0x30
#define OMAP2_MCSPI_CHCTRL0		0x34
#define OMAP2_MCSPI_TX0			0x38
#define OMAP2_MCSPI_RX0			0x3c

/* per-register bitmasks: */

#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)

#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)

#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)

#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)

#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	int dma_tx_channel;
	int dma_rx_channel;

	int dma_tx_sync_dev;
	int dma_rx_sync_dev;

	struct completion dma_tx_completion;
	struct completion dma_rx_completion;
};

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES			160
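
/*
 * Note: omap2_mcspi_work() compares each transfer's length against
 * DMA_MIN_BYTES, so a transfer of a few register-sized bytes goes through
 * omap2_mcspi_txrx_pio() while a buffer of several hundred bytes takes
 * omap2_mcspi_txrx_dma(); messages that arrive already DMA-mapped always
 * use the DMA path.
 */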

struct omap2_mcspi {
	struct work_struct work;
	/* lock protects queue and registers */
	spinlock_t lock;
	struct list_head msg_queue;
	struct spi_master *master;
	/* Virtual base address of the controller */
	void __iomem *base;
	unsigned long phys;
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma *dma_channels;
	struct device *dev;
	struct workqueue_struct *wq;
};

struct omap2_mcspi_cs {
	void __iomem *base;
	unsigned long phys;
	int word_len;
	struct list_head node;
	/* Context save and restore shadow register */
	u32 chconf0;
};

/* used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;
	u32 wakeupenable;
	struct list_head cs;
};

static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];

#define MOD_REG_BIT(val, mask, set) do { \
	if (set) \
		val |= mask; \
	else \
		val &= ~mask; \
} while (0)

static inline void mcspi_write_reg(struct spi_master *master,
		int idx, u32 val)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	__raw_writel(val, mcspi->base + idx);
}

static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	return __raw_readl(mcspi->base + idx);
}

static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	__raw_writel(val, cs->base + idx);
}

static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return __raw_readl(cs->base + idx);
}
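
/*
 * CHCONF0 is shadowed in cs->chconf0: mcspi_write_chconf0() below updates
 * the cached copy before writing the register, so callers can do
 * read-modify-write on the cache instead of the hardware register, and the
 * same value doubles as the context-restore image used by
 * omap2_mcspi_restore_ctx(). The read-back after the write is presumably
 * there to flush the posted write before the caller continues.
 */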

static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return cs->chconf0;
}

static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}

static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
		int is_read, int enable)
{
	u32 l, rw;

	l = mcspi_cached_chconf0(spi);

	if (is_read) /* 1 is read, 0 write */
		rw = OMAP2_MCSPI_CHCONF_DMAR;
	else
		rw = OMAP2_MCSPI_CHCONF_DMAW;

	MOD_REG_BIT(l, rw, enable);
	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
	u32 l;

	l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
	/* Flush posted writes */
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}

static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 l;

	l = mcspi_cached_chconf0(spi);
	MOD_REG_BIT(l, OMAP2_MCSPI_CHCONF_FORCE, cs_active);
	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
	u32 l;

	/* setup when switching from (reset default) slave mode
	 * to single-channel master mode
	 */
	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_STEST, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_MS, 0);
	MOD_REG_BIT(l, OMAP2_MCSPI_MODULCTRL_SINGLE, 1);
	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);

	omap2_mcspi_ctx[master->bus_num - 1].modulctrl = l;
}

static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
	struct spi_master *spi_cntrl;
	struct omap2_mcspi_cs *cs;
	spi_cntrl = mcspi->master;

	/* McSPI: context restore */
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL,
			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl);

	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE,
			omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable);

	list_for_each_entry(cs, &omap2_mcspi_ctx[spi_cntrl->bus_num - 1].cs,
			node)
		__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}

static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi)
{
	pm_runtime_put_sync(mcspi->dev);
}

static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi)
{
	return pm_runtime_get_sync(mcspi->dev);
}

static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(1000);
	while (!(__raw_readl(reg) & bit)) {
		if (time_after(jiffies, timeout))
			return -1;
		cpu_relax();
	}
	return 0;
}

static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count, c;
	unsigned long base, tx_reg, rx_reg;
	int word_len, data_type, element_count;
	int elements = 0;
	u32 l;
	u8 *rx;
	const u8 *tx;
	void __iomem *chstat_reg;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;

	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	base = cs->phys;
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	if (word_len <= 8) {
		data_type = OMAP_DMA_DATA_TYPE_S8;
		element_count = count;
	} else if (word_len <= 16) {
		data_type = OMAP_DMA_DATA_TYPE_S16;
		element_count = count >> 1;
	} else /* word_len <= 32 */ {
		data_type = OMAP_DMA_DATA_TYPE_S32;
		element_count = count >> 2;
	}

	if (tx != NULL) {
		omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel,
				data_type, element_count, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_tx_sync_dev, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				tx_reg, 0, 0);

		omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->tx_dma, 0, 0);
	}
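
	/*
	 * The RX side below is programmed for one element less than the full
	 * transfer (two less in TURBO mode); the remaining word(s) are read
	 * out by the CPU after the channel has been disabled, apparently so
	 * that the final read from RX0 cannot trigger the controller into
	 * clocking out yet another word in RX_ONLY mode.
	 */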

	if (rx != NULL) {
		elements = element_count - 1;
		if (l & OMAP2_MCSPI_CHCONF_TURBO)
			elements--;

		omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel,
				data_type, elements, 1,
				OMAP_DMA_SYNC_ELEMENT,
				mcspi_dma->dma_rx_sync_dev, 1);

		omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				rx_reg, 0, 0);

		omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				xfer->rx_dma, 0, 0);
	}

	if (tx != NULL) {
		omap_start_dma(mcspi_dma->dma_tx_channel);
		omap2_mcspi_set_dma_req(spi, 0, 1);
	}

	if (rx != NULL) {
		omap_start_dma(mcspi_dma->dma_rx_channel);
		omap2_mcspi_set_dma_req(spi, 1, 1);
	}

	if (tx != NULL) {
		wait_for_completion(&mcspi_dma->dma_tx_completion);
		dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE);

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0)
				dev_err(&spi->dev, "TXS timed out\n");
			else if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_EOT) < 0)
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}

	if (rx != NULL) {
		wait_for_completion(&mcspi_dma->dma_rx_completion);
		dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
		omap2_mcspi_set_enable(spi, 0);

		if (l & OMAP2_MCSPI_CHCONF_TURBO) {

			if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
					& OMAP2_MCSPI_CHSTAT_RXS)) {
				u32 w;

				w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
				if (word_len <= 8)
					((u8 *)xfer->rx_buf)[elements++] = w;
				else if (word_len <= 16)
					((u16 *)xfer->rx_buf)[elements++] = w;
				else /* word_len <= 32 */
					((u32 *)xfer->rx_buf)[elements++] = w;
			} else {
				dev_err(&spi->dev,
					"DMA RX penultimate word empty");
				count -= (word_len <= 8) ? 2 :
					(word_len <= 16) ? 4 :
					/* word_len <= 32 */ 8;
				omap2_mcspi_set_enable(spi, 1);
				return count;
			}
		}

		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				& OMAP2_MCSPI_CHSTAT_RXS)) {
			u32 w;

			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
			if (word_len <= 8)
				((u8 *)xfer->rx_buf)[elements] = w;
			else if (word_len <= 16)
				((u16 *)xfer->rx_buf)[elements] = w;
			else /* word_len <= 32 */
				((u32 *)xfer->rx_buf)[elements] = w;
		} else {
			dev_err(&spi->dev, "DMA RX last word empty");
			count -= (word_len <= 8) ? 1 :
				(word_len <= 16) ? 2 :
				/* word_len <= 32 */ 4;
		}
		omap2_mcspi_set_enable(spi, 1);
	}
	return count;
}
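
/*
 * PIO path, used when a transfer is shorter than DMA_MIN_BYTES and the
 * message was not already DMA-mapped by the caller. In RX_ONLY mode the
 * channel is disabled before the final word is read from RX0, apparently
 * so that emptying the receive register does not start another transfer.
 */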

static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	unsigned int count, c;
	u32 l;
	void __iomem *base = cs->base;
	void __iomem *tx_reg;
	void __iomem *rx_reg;
	void __iomem *chstat_reg;
	int word_len;

	mcspi = spi_master_get_devdata(spi->master);
	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop.
	 */
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	chstat_reg = base + OMAP2_MCSPI_CHSTAT0;

	if (c < (word_len>>3))
		return 0;

	if (word_len <= 8) {
		u8 *rx;
		const u8 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
							word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	} else if (word_len <= 16) {
		u16 *rx;
		const u16 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
							word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32 *rx;
		const u32 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				__raw_writel(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = __raw_readl(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
							word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = __raw_readl(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable the channel to purge RX data received during a
		 * TX_ONLY transfer; otherwise that stale data would corrupt
		 * an immediately following RX_ONLY transfer.
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}

static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
{
	u32 div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
			return div;

	return 15;
}
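
/*
 * The divider above is a power of two: the bus runs at
 * OMAP2_MCSPI_MAX_FREQ >> div. For example, a requested speed of 10 MHz
 * yields div = 3, i.e. an actual clock of 48 MHz / 8 = 6 MHz, the fastest
 * setting that does not exceed the request.
 */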

/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	struct spi_master *spi_cntrl;
	u32 l = 0, div = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_master_get_devdata(spi->master);
	spi_cntrl = mcspi->master;

	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
	div = omap2_mcspi_calc_divisor(speed_hz);

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
	l &= ~(OMAP2_MCSPI_CHCONF_IS|OMAP2_MCSPI_CHCONF_DPE1);
	l |= OMAP2_MCSPI_CHCONF_DPE0;

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= div << 2;

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			OMAP2_MCSPI_MAX_FREQ >> div,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}

static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_rx_completion);

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);
}

static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &(mcspi->dma_channels[spi->chip_select]);

	complete(&mcspi_dma->dma_tx_completion);

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);
}

static int omap2_mcspi_request_dma(struct spi_device *spi)
{
	struct spi_master *master = spi->master;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;

	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;

	if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX",
			omap2_mcspi_dma_rx_callback, spi,
			&mcspi_dma->dma_rx_channel)) {
		dev_err(&spi->dev, "no RX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX",
			omap2_mcspi_dma_tx_callback, spi,
			&mcspi_dma->dma_tx_channel)) {
		omap_free_dma(mcspi_dma->dma_rx_channel);
		mcspi_dma->dma_rx_channel = -1;
		dev_err(&spi->dev, "no TX DMA channel for McSPI\n");
		return -EAGAIN;
	}

	init_completion(&mcspi_dma->dma_rx_completion);
	init_completion(&mcspi_dma->dma_tx_completion);

	return 0;
}

static int omap2_mcspi_setup(struct spi_device *spi)
{
	int ret;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	struct omap2_mcspi_cs *cs = spi->controller_state;

	if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
		dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		cs->base = mcspi->base + spi->chip_select * 0x14;
		cs->phys = mcspi->phys + spi->chip_select * 0x14;
		cs->chconf0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node,
			&omap2_mcspi_ctx[mcspi->master->bus_num - 1].cs);
	}

	if (mcspi_dma->dma_rx_channel == -1
			|| mcspi_dma->dma_tx_channel == -1) {
		ret = omap2_mcspi_request_dma(spi);
		if (ret < 0)
			return ret;
	}

	ret = omap2_mcspi_enable_clocks(mcspi);
	if (ret < 0)
		return ret;

	ret = omap2_mcspi_setup_transfer(spi, NULL);
	omap2_mcspi_disable_clocks(mcspi);

	return ret;
}

static void omap2_mcspi_cleanup(struct spi_device *spi)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	struct omap2_mcspi_cs *cs;

	mcspi = spi_master_get_devdata(spi->master);

	if (spi->controller_state) {
		/* Unlink controller state from context save list */
		cs = spi->controller_state;
		list_del(&cs->node);

		kfree(spi->controller_state);
	}

	if (spi->chip_select < spi->master->num_chipselect) {
		mcspi_dma = &mcspi->dma_channels[spi->chip_select];

		if (mcspi_dma->dma_rx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_rx_channel);
			mcspi_dma->dma_rx_channel = -1;
		}
		if (mcspi_dma->dma_tx_channel != -1) {
			omap_free_dma(mcspi_dma->dma_tx_channel);
			mcspi_dma->dma_tx_channel = -1;
		}
	}
}

static void omap2_mcspi_work(struct work_struct *work)
{
	struct omap2_mcspi *mcspi;

	mcspi = container_of(work, struct omap2_mcspi, work);

	if (omap2_mcspi_enable_clocks(mcspi) < 0)
		return;

	spin_lock_irq(&mcspi->lock);

	/* We only enable one channel at a time -- the one whose message is
	 * at the head of the queue -- although this controller would gladly
	 * arbitrate among multiple channels. This corresponds to "single
	 * channel" master mode. As a side effect, we need to manage the
	 * chipselect with the FORCE bit ... CS != channel enable.
	 */
	while (!list_empty(&mcspi->msg_queue)) {
		struct spi_message *m;
		struct spi_device *spi;
		struct spi_transfer *t = NULL;
		int cs_active = 0;
		struct omap2_mcspi_cs *cs;
		struct omap2_mcspi_device_config *cd;
		int par_override = 0;
		int status = 0;
		u32 chconf;

		m = container_of(mcspi->msg_queue.next, struct spi_message,
				queue);

		list_del_init(&m->queue);
		spin_unlock_irq(&mcspi->lock);

		spi = m->spi;
		cs = spi->controller_state;
		cd = spi->controller_data;

		omap2_mcspi_set_enable(spi, 1);
		list_for_each_entry(t, &m->transfers, transfer_list) {
			if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
				status = -EINVAL;
				break;
			}
			if (par_override || t->speed_hz || t->bits_per_word) {
				par_override = 1;
				status = omap2_mcspi_setup_transfer(spi, t);
				if (status < 0)
					break;
				if (!t->speed_hz && !t->bits_per_word)
					par_override = 0;
			}

			if (!cs_active) {
				omap2_mcspi_force_cs(spi, 1);
				cs_active = 1;
			}

			chconf = mcspi_cached_chconf0(spi);
			chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
			chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

			if (t->tx_buf == NULL)
				chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
			else if (t->rx_buf == NULL)
				chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

			if (cd && cd->turbo_mode && t->tx_buf == NULL) {
				/* Turbo mode is for more than one word */
				if (t->len > ((cs->word_len + 7) >> 3))
					chconf |= OMAP2_MCSPI_CHCONF_TURBO;
			}

			mcspi_write_chconf0(spi, chconf);

			if (t->len) {
				unsigned count;

				/* RX_ONLY mode needs dummy data in TX reg */
				if (t->tx_buf == NULL)
					__raw_writel(0, cs->base
							+ OMAP2_MCSPI_TX0);

				if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)
					count = omap2_mcspi_txrx_dma(spi, t);
				else
					count = omap2_mcspi_txrx_pio(spi, t);
				m->actual_length += count;

				if (count != t->len) {
					status = -EIO;
					break;
				}
			}

			if (t->delay_usecs)
				udelay(t->delay_usecs);

			/* ignore the "leave it on after last xfer" hint */
			if (t->cs_change) {
				omap2_mcspi_force_cs(spi, 0);
				cs_active = 0;
			}
		}

		/* Restore defaults if they were overridden */
		if (par_override) {
			par_override = 0;
			status = omap2_mcspi_setup_transfer(spi, NULL);
		}

		if (cs_active)
			omap2_mcspi_force_cs(spi, 0);

		omap2_mcspi_set_enable(spi, 0);

		m->status = status;
		m->complete(m->context);

		spin_lock_irq(&mcspi->lock);
	}

	spin_unlock_irq(&mcspi->lock);

	omap2_mcspi_disable_clocks(mcspi);
}
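
/*
 * .transfer() entry point: validate the message, DMA-map any buffers that
 * will take the DMA path, then queue the message for omap2_mcspi_work();
 * the actual I/O is deferred to the workqueue.
 *
 * Messages reach this point from devices registered on this bus; per-device
 * McSPI options travel in spi->controller_data as a struct
 * omap2_mcspi_device_config (from <plat/mcspi.h>), of which the worker above
 * only consults turbo_mode. A rough board-file sketch, for a hypothetical
 * device on bus 1, chip select 0:
 *
 *	static struct omap2_mcspi_device_config example_mcspi_config = {
 *		.turbo_mode	= 1,
 *	};
 *
 *	static struct spi_board_info example_board_info __initdata = {
 *		.modalias	= "example-spi-dev",
 *		.bus_num	= 1,
 *		.chip_select	= 0,
 *		.max_speed_hz	= 6000000,
 *		.mode		= SPI_MODE_0,
 *		.controller_data = &example_mcspi_config,
 *	};
 */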

static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct omap2_mcspi *mcspi;
	unsigned long flags;
	struct spi_transfer *t;

	m->actual_length = 0;
	m->status = 0;

	/* reject invalid messages and transfers */
	if (list_empty(&m->transfers) || !m->complete)
		return -EINVAL;
	list_for_each_entry(t, &m->transfers, transfer_list) {
		const void *tx_buf = t->tx_buf;
		void *rx_buf = t->rx_buf;
		unsigned len = t->len;

		if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
				|| (len && !(rx_buf || tx_buf))
				|| (t->bits_per_word &&
					(t->bits_per_word < 4
					|| t->bits_per_word > 32))) {
			dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
					t->speed_hz,
					len,
					tx_buf ? "tx" : "",
					rx_buf ? "rx" : "",
					t->bits_per_word);
			return -EINVAL;
		}
		if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
			dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n",
					t->speed_hz,
					OMAP2_MCSPI_MAX_FREQ >> 15);
			return -EINVAL;
		}

		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
			continue;

		if (tx_buf != NULL) {
			t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
					len, DMA_TO_DEVICE);
			if (dma_mapping_error(&spi->dev, t->tx_dma)) {
				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
						'T', len);
				return -EINVAL;
			}
		}
		if (rx_buf != NULL) {
			t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(&spi->dev, t->rx_dma)) {
				dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
						'R', len);
				if (tx_buf != NULL)
					dma_unmap_single(&spi->dev, t->tx_dma,
							len, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
	}

	mcspi = spi_master_get_devdata(spi->master);

	spin_lock_irqsave(&mcspi->lock, flags);
	list_add_tail(&m->queue, &mcspi->msg_queue);
	queue_work(mcspi->wq, &mcspi->work);
	spin_unlock_irqrestore(&mcspi->lock, flags);

	return 0;
}

static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
	struct spi_master *master = mcspi->master;
	u32 tmp;
	int ret = 0;

	ret = omap2_mcspi_enable_clocks(mcspi);
	if (ret < 0)
		return ret;

	tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp);
	omap2_mcspi_ctx[master->bus_num - 1].wakeupenable = tmp;

	omap2_mcspi_set_master_mode(master);
	omap2_mcspi_disable_clocks(mcspi);
	return 0;
}

static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct omap2_mcspi *mcspi;
	struct spi_master *master;

	master = dev_get_drvdata(dev);
	mcspi = spi_master_get_devdata(master);
	omap2_mcspi_restore_ctx(mcspi);

	return 0;
}

static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
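
/*
 * Device tree nodes bind with compatible = "ti,omap2-mcspi" or
 * "ti,omap4-mcspi"; within this driver the only difference is the register
 * block offset (OMAP4_MCSPI_REG_OFFSET). The optional "ti,spi-num-cs"
 * property overrides the default of one chip select. Non-DT platforms
 * supply the same information through omap2_mcspi_platform_config.
 */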

static int __init omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct omap2_mcspi_platform_config *pdata;
	struct omap2_mcspi *mcspi;
	struct resource *r;
	int status = 0, i;
	char wq_name[20];
	u32 regs_offset = 0;
	static int bus_num = 1;
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;

	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
	if (master == NULL) {
		dev_dbg(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	master->setup = omap2_mcspi_setup;
	master->transfer = omap2_mcspi_transfer;
	master->cleanup = omap2_mcspi_cleanup;
	master->dev.of_node = node;

	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
	if (match) {
		u32 num_cs = 1; /* default number of chipselect */
		pdata = match->data;

		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
		master->num_chipselect = num_cs;
		master->bus_num = bus_num++;
	} else {
		pdata = pdev->dev.platform_data;
		master->num_chipselect = pdata->num_cs;
		if (pdev->id != -1)
			master->bus_num = pdev->id;
	}
	regs_offset = pdata->regs_offset;

	dev_set_drvdata(&pdev->dev, master);

	mcspi = spi_master_get_devdata(master);
	mcspi->master = master;

	sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
	mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
	if (mcspi->wq == NULL) {
		status = -ENOMEM;
		goto free_master;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		status = -ENODEV;
		goto free_master;
	}

	r->start += regs_offset;
	r->end += regs_offset;
	mcspi->phys = r->start;
	if (!request_mem_region(r->start, resource_size(r),
				dev_name(&pdev->dev))) {
		status = -EBUSY;
		goto free_master;
	}

	mcspi->base = ioremap(r->start, resource_size(r));
	if (!mcspi->base) {
		dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
		status = -ENOMEM;
		goto release_region;
	}

	mcspi->dev = &pdev->dev;
	INIT_WORK(&mcspi->work, omap2_mcspi_work);

	spin_lock_init(&mcspi->lock);
	INIT_LIST_HEAD(&mcspi->msg_queue);
	INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs);

	mcspi->dma_channels = kcalloc(master->num_chipselect,
			sizeof(struct omap2_mcspi_dma),
			GFP_KERNEL);

	if (mcspi->dma_channels == NULL)
		goto unmap_io;

	for (i = 0; i < master->num_chipselect; i++) {
		char dma_ch_name[14];
		struct resource *dma_res;

		sprintf(dma_ch_name, "rx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
							dma_ch_name);
		if (!dma_res) {
			dev_dbg(&pdev->dev, "cannot get DMA RX channel\n");
			status = -ENODEV;
			break;
		}

		mcspi->dma_channels[i].dma_rx_channel = -1;
		mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start;
		sprintf(dma_ch_name, "tx%d", i);
		dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
							dma_ch_name);
		if (!dma_res) {
			dev_dbg(&pdev->dev, "cannot get DMA TX channel\n");
			status = -ENODEV;
			break;
		}

		mcspi->dma_channels[i].dma_tx_channel = -1;
		mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
	}
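
	/* a missing DMA resource above leaves status < 0 and unwinds below */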

	if (status < 0)
		goto dma_chnl_free;

	pm_runtime_enable(&pdev->dev);

	if (status || omap2_mcspi_master_setup(mcspi) < 0)
		goto disable_pm;

	status = spi_register_master(master);
	if (status < 0)
		goto err_spi_register;

	return status;

err_spi_register:
	spi_master_put(master);
disable_pm:
	pm_runtime_disable(&pdev->dev);
dma_chnl_free:
	kfree(mcspi->dma_channels);
unmap_io:
	iounmap(mcspi->base);
release_region:
	release_mem_region(r->start, resource_size(r));
free_master:
	kfree(master);
	platform_set_drvdata(pdev, NULL);
	return status;
}

static int __exit omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *dma_channels;
	struct resource *r;
	void __iomem *base;

	master = dev_get_drvdata(&pdev->dev);
	mcspi = spi_master_get_devdata(master);
	dma_channels = mcspi->dma_channels;

	omap2_mcspi_disable_clocks(mcspi);
	pm_runtime_disable(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	base = mcspi->base;
	spi_unregister_master(master);
	iounmap(base);
	kfree(dma_channels);
	destroy_workqueue(mcspi->wq);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");

#ifdef CONFIG_SUSPEND
/*
 * When the McSPI controller wakes up from off-mode, the chip select is in
 * the active state. If it was inactive when the driver was suspended,
 * force it back to the inactive state at wake-up.
 */
static int omap2_mcspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_cs *cs;

	omap2_mcspi_enable_clocks(mcspi);
	list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs,
			node) {
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {

			/*
			 * Toggle the CS state so that the OMAP takes this
			 * change into account.
			 */
			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1);
			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
			MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0);
			__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}
	omap2_mcspi_disable_clocks(mcspi);
	return 0;
}
#else
#define omap2_mcspi_resume	NULL
#endif

static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	.resume = omap2_mcspi_resume,
	.runtime_resume = omap_mcspi_runtime_resume,
};

static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name = "omap2_mcspi",
		.owner = THIS_MODULE,
		.pm = &omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.remove = __exit_p(omap2_mcspi_remove),
};

static int __init omap2_mcspi_init(void)
{
	return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
}
subsys_initcall(omap2_mcspi_init);

static void __exit omap2_mcspi_exit(void)
{
	platform_driver_unregister(&omap2_mcspi_driver);
}
module_exit(omap2_mcspi_exit);

MODULE_LICENSE("GPL");