1 /* 2 * OMAP2 McSPI controller driver 3 * 4 * Copyright (C) 2005, 2006 Nokia Corporation 5 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and 6 * Juha Yrj�l� <juha.yrjola@nokia.com> 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 */ 18 19 #include <linux/kernel.h> 20 #include <linux/interrupt.h> 21 #include <linux/module.h> 22 #include <linux/device.h> 23 #include <linux/delay.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/dmaengine.h> 26 #include <linux/omap-dma.h> 27 #include <linux/pinctrl/consumer.h> 28 #include <linux/platform_device.h> 29 #include <linux/err.h> 30 #include <linux/clk.h> 31 #include <linux/io.h> 32 #include <linux/slab.h> 33 #include <linux/pm_runtime.h> 34 #include <linux/of.h> 35 #include <linux/of_device.h> 36 #include <linux/gcd.h> 37 38 #include <linux/spi/spi.h> 39 #include <linux/gpio.h> 40 41 #include <linux/platform_data/spi-omap2-mcspi.h> 42 43 #define OMAP2_MCSPI_MAX_FREQ 48000000 44 #define OMAP2_MCSPI_MAX_DIVIDER 4096 45 #define OMAP2_MCSPI_MAX_FIFODEPTH 64 46 #define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF 47 #define SPI_AUTOSUSPEND_TIMEOUT 2000 48 49 #define OMAP2_MCSPI_REVISION 0x00 50 #define OMAP2_MCSPI_SYSSTATUS 0x14 51 #define OMAP2_MCSPI_IRQSTATUS 0x18 52 #define OMAP2_MCSPI_IRQENABLE 0x1c 53 #define OMAP2_MCSPI_WAKEUPENABLE 0x20 54 #define OMAP2_MCSPI_SYST 0x24 55 #define OMAP2_MCSPI_MODULCTRL 0x28 56 #define OMAP2_MCSPI_XFERLEVEL 0x7c 57 58 /* per-channel banks, 0x14 bytes each, first is: */ 59 #define OMAP2_MCSPI_CHCONF0 0x2c 60 #define OMAP2_MCSPI_CHSTAT0 0x30 61 #define OMAP2_MCSPI_CHCTRL0 0x34 62 #define OMAP2_MCSPI_TX0 0x38 63 #define OMAP2_MCSPI_RX0 0x3c 64 65 /* per-register bitmasks: */ 66 #define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17) 67 68 #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) 69 #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) 70 #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) 71 72 #define OMAP2_MCSPI_CHCONF_PHA BIT(0) 73 #define OMAP2_MCSPI_CHCONF_POL BIT(1) 74 #define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2) 75 #define OMAP2_MCSPI_CHCONF_EPOL BIT(6) 76 #define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7) 77 #define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12) 78 #define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13) 79 #define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12) 80 #define OMAP2_MCSPI_CHCONF_DMAW BIT(14) 81 #define OMAP2_MCSPI_CHCONF_DMAR BIT(15) 82 #define OMAP2_MCSPI_CHCONF_DPE0 BIT(16) 83 #define OMAP2_MCSPI_CHCONF_DPE1 BIT(17) 84 #define OMAP2_MCSPI_CHCONF_IS BIT(18) 85 #define OMAP2_MCSPI_CHCONF_TURBO BIT(19) 86 #define OMAP2_MCSPI_CHCONF_FORCE BIT(20) 87 #define OMAP2_MCSPI_CHCONF_FFET BIT(27) 88 #define OMAP2_MCSPI_CHCONF_FFER BIT(28) 89 #define OMAP2_MCSPI_CHCONF_CLKG BIT(29) 90 91 #define OMAP2_MCSPI_CHSTAT_RXS BIT(0) 92 #define OMAP2_MCSPI_CHSTAT_TXS BIT(1) 93 #define OMAP2_MCSPI_CHSTAT_EOT BIT(2) 94 #define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3) 95 96 #define OMAP2_MCSPI_CHCTRL_EN BIT(0) 97 #define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8) 98 99 #define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0) 100 101 /* We have 2 DMA channels per CS, one for RX and one for TX */ 102 struct omap2_mcspi_dma { 103 struct dma_chan *dma_tx; 104 struct dma_chan *dma_rx; 105 106 int dma_tx_sync_dev; 107 int dma_rx_sync_dev; 108 109 struct completion dma_tx_completion; 110 struct completion dma_rx_completion; 111 112 char dma_rx_ch_name[14]; 113 char dma_tx_ch_name[14]; 114 }; 115 116 /* use PIO for small transfers, avoiding DMA setup/teardown overhead and 117 * cache operations; better heuristics consider wordsize and bitrate. 118 */ 119 #define DMA_MIN_BYTES 160 120 121 122 /* 123 * Used for context save and restore, structure members to be updated whenever 124 * corresponding registers are modified. 125 */ 126 struct omap2_mcspi_regs { 127 u32 modulctrl; 128 u32 wakeupenable; 129 struct list_head cs; 130 }; 131 132 struct omap2_mcspi { 133 struct spi_master *master; 134 /* Virtual base address of the controller */ 135 void __iomem *base; 136 unsigned long phys; 137 /* SPI1 has 4 channels, while SPI2 has 2 */ 138 struct omap2_mcspi_dma *dma_channels; 139 struct device *dev; 140 struct omap2_mcspi_regs ctx; 141 int fifo_depth; 142 unsigned int pin_dir:1; 143 }; 144 145 struct omap2_mcspi_cs { 146 void __iomem *base; 147 unsigned long phys; 148 int word_len; 149 u16 mode; 150 struct list_head node; 151 /* Context save and restore shadow register */ 152 u32 chconf0, chctrl0; 153 }; 154 155 static inline void mcspi_write_reg(struct spi_master *master, 156 int idx, u32 val) 157 { 158 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 159 160 writel_relaxed(val, mcspi->base + idx); 161 } 162 163 static inline u32 mcspi_read_reg(struct spi_master *master, int idx) 164 { 165 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 166 167 return readl_relaxed(mcspi->base + idx); 168 } 169 170 static inline void mcspi_write_cs_reg(const struct spi_device *spi, 171 int idx, u32 val) 172 { 173 struct omap2_mcspi_cs *cs = spi->controller_state; 174 175 writel_relaxed(val, cs->base + idx); 176 } 177 178 static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx) 179 { 180 struct omap2_mcspi_cs *cs = spi->controller_state; 181 182 return readl_relaxed(cs->base + idx); 183 } 184 185 static inline u32 mcspi_cached_chconf0(const struct spi_device *spi) 186 { 187 struct omap2_mcspi_cs *cs = spi->controller_state; 188 189 return cs->chconf0; 190 } 191 192 static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val) 193 { 194 struct omap2_mcspi_cs *cs = spi->controller_state; 195 196 cs->chconf0 = val; 197 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); 198 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0); 199 } 200 201 static inline int mcspi_bytes_per_word(int word_len) 202 { 203 if (word_len <= 8) 204 return 1; 205 else if (word_len <= 16) 206 return 2; 207 else /* word_len <= 32 */ 208 return 4; 209 } 210 211 static void omap2_mcspi_set_dma_req(const struct spi_device *spi, 212 int is_read, int enable) 213 { 214 u32 l, rw; 215 216 l = mcspi_cached_chconf0(spi); 217 218 if (is_read) /* 1 is read, 0 write */ 219 rw = OMAP2_MCSPI_CHCONF_DMAR; 220 else 221 rw = OMAP2_MCSPI_CHCONF_DMAW; 222 223 if (enable) 224 l |= rw; 225 else 226 l &= ~rw; 227 228 mcspi_write_chconf0(spi, l); 229 } 230 231 static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable) 232 { 233 struct omap2_mcspi_cs *cs = spi->controller_state; 234 u32 l; 235 236 l = cs->chctrl0; 237 if (enable) 238 l |= OMAP2_MCSPI_CHCTRL_EN; 239 else 240 l &= ~OMAP2_MCSPI_CHCTRL_EN; 241 cs->chctrl0 = l; 242 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0); 243 /* Flash post-writes */ 244 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); 245 } 246 247 static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable) 248 { 249 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); 250 u32 l; 251 252 /* The controller handles the inverted chip selects 253 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert 254 * the inversion from the core spi_set_cs function. 255 */ 256 if (spi->mode & SPI_CS_HIGH) 257 enable = !enable; 258 259 if (spi->controller_state) { 260 int err = pm_runtime_get_sync(mcspi->dev); 261 if (err < 0) { 262 dev_err(mcspi->dev, "failed to get sync: %d\n", err); 263 return; 264 } 265 266 l = mcspi_cached_chconf0(spi); 267 268 if (enable) 269 l &= ~OMAP2_MCSPI_CHCONF_FORCE; 270 else 271 l |= OMAP2_MCSPI_CHCONF_FORCE; 272 273 mcspi_write_chconf0(spi, l); 274 275 pm_runtime_mark_last_busy(mcspi->dev); 276 pm_runtime_put_autosuspend(mcspi->dev); 277 } 278 } 279 280 static void omap2_mcspi_set_master_mode(struct spi_master *master) 281 { 282 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 283 struct omap2_mcspi_regs *ctx = &mcspi->ctx; 284 u32 l; 285 286 /* 287 * Setup when switching from (reset default) slave mode 288 * to single-channel master mode 289 */ 290 l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL); 291 l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS); 292 l |= OMAP2_MCSPI_MODULCTRL_SINGLE; 293 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l); 294 295 ctx->modulctrl = l; 296 } 297 298 static void omap2_mcspi_set_fifo(const struct spi_device *spi, 299 struct spi_transfer *t, int enable) 300 { 301 struct spi_master *master = spi->master; 302 struct omap2_mcspi_cs *cs = spi->controller_state; 303 struct omap2_mcspi *mcspi; 304 unsigned int wcnt; 305 int max_fifo_depth, fifo_depth, bytes_per_word; 306 u32 chconf, xferlevel; 307 308 mcspi = spi_master_get_devdata(master); 309 310 chconf = mcspi_cached_chconf0(spi); 311 if (enable) { 312 bytes_per_word = mcspi_bytes_per_word(cs->word_len); 313 if (t->len % bytes_per_word != 0) 314 goto disable_fifo; 315 316 if (t->rx_buf != NULL && t->tx_buf != NULL) 317 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2; 318 else 319 max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH; 320 321 fifo_depth = gcd(t->len, max_fifo_depth); 322 if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0) 323 goto disable_fifo; 324 325 wcnt = t->len / bytes_per_word; 326 if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) 327 goto disable_fifo; 328 329 xferlevel = wcnt << 16; 330 if (t->rx_buf != NULL) { 331 chconf |= OMAP2_MCSPI_CHCONF_FFER; 332 xferlevel |= (fifo_depth - 1) << 8; 333 } 334 if (t->tx_buf != NULL) { 335 chconf |= OMAP2_MCSPI_CHCONF_FFET; 336 xferlevel |= fifo_depth - 1; 337 } 338 339 mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel); 340 mcspi_write_chconf0(spi, chconf); 341 mcspi->fifo_depth = fifo_depth; 342 343 return; 344 } 345 346 disable_fifo: 347 if (t->rx_buf != NULL) 348 chconf &= ~OMAP2_MCSPI_CHCONF_FFER; 349 350 if (t->tx_buf != NULL) 351 chconf &= ~OMAP2_MCSPI_CHCONF_FFET; 352 353 mcspi_write_chconf0(spi, chconf); 354 mcspi->fifo_depth = 0; 355 } 356 357 static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) 358 { 359 struct spi_master *spi_cntrl = mcspi->master; 360 struct omap2_mcspi_regs *ctx = &mcspi->ctx; 361 struct omap2_mcspi_cs *cs; 362 363 /* McSPI: context restore */ 364 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl); 365 mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable); 366 367 list_for_each_entry(cs, &ctx->cs, node) 368 writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); 369 } 370 371 static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) 372 { 373 unsigned long timeout; 374 375 timeout = jiffies + msecs_to_jiffies(1000); 376 while (!(readl_relaxed(reg) & bit)) { 377 if (time_after(jiffies, timeout)) { 378 if (!(readl_relaxed(reg) & bit)) 379 return -ETIMEDOUT; 380 else 381 return 0; 382 } 383 cpu_relax(); 384 } 385 return 0; 386 } 387 388 static void omap2_mcspi_rx_callback(void *data) 389 { 390 struct spi_device *spi = data; 391 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); 392 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 393 394 /* We must disable the DMA RX request */ 395 omap2_mcspi_set_dma_req(spi, 1, 0); 396 397 complete(&mcspi_dma->dma_rx_completion); 398 } 399 400 static void omap2_mcspi_tx_callback(void *data) 401 { 402 struct spi_device *spi = data; 403 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); 404 struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 405 406 /* We must disable the DMA TX request */ 407 omap2_mcspi_set_dma_req(spi, 0, 0); 408 409 complete(&mcspi_dma->dma_tx_completion); 410 } 411 412 static void omap2_mcspi_tx_dma(struct spi_device *spi, 413 struct spi_transfer *xfer, 414 struct dma_slave_config cfg) 415 { 416 struct omap2_mcspi *mcspi; 417 struct omap2_mcspi_dma *mcspi_dma; 418 unsigned int count; 419 420 mcspi = spi_master_get_devdata(spi->master); 421 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 422 count = xfer->len; 423 424 if (mcspi_dma->dma_tx) { 425 struct dma_async_tx_descriptor *tx; 426 struct scatterlist sg; 427 428 dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); 429 430 sg_init_table(&sg, 1); 431 sg_dma_address(&sg) = xfer->tx_dma; 432 sg_dma_len(&sg) = xfer->len; 433 434 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, 435 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 436 if (tx) { 437 tx->callback = omap2_mcspi_tx_callback; 438 tx->callback_param = spi; 439 dmaengine_submit(tx); 440 } else { 441 /* FIXME: fall back to PIO? */ 442 } 443 } 444 dma_async_issue_pending(mcspi_dma->dma_tx); 445 omap2_mcspi_set_dma_req(spi, 0, 1); 446 447 } 448 449 static unsigned 450 omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, 451 struct dma_slave_config cfg, 452 unsigned es) 453 { 454 struct omap2_mcspi *mcspi; 455 struct omap2_mcspi_dma *mcspi_dma; 456 unsigned int count, dma_count; 457 u32 l; 458 int elements = 0; 459 int word_len, element_count; 460 struct omap2_mcspi_cs *cs = spi->controller_state; 461 mcspi = spi_master_get_devdata(spi->master); 462 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 463 count = xfer->len; 464 dma_count = xfer->len; 465 466 if (mcspi->fifo_depth == 0) 467 dma_count -= es; 468 469 word_len = cs->word_len; 470 l = mcspi_cached_chconf0(spi); 471 472 if (word_len <= 8) 473 element_count = count; 474 else if (word_len <= 16) 475 element_count = count >> 1; 476 else /* word_len <= 32 */ 477 element_count = count >> 2; 478 479 if (mcspi_dma->dma_rx) { 480 struct dma_async_tx_descriptor *tx; 481 struct scatterlist sg; 482 483 dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); 484 485 if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) 486 dma_count -= es; 487 488 sg_init_table(&sg, 1); 489 sg_dma_address(&sg) = xfer->rx_dma; 490 sg_dma_len(&sg) = dma_count; 491 492 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, 493 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | 494 DMA_CTRL_ACK); 495 if (tx) { 496 tx->callback = omap2_mcspi_rx_callback; 497 tx->callback_param = spi; 498 dmaengine_submit(tx); 499 } else { 500 /* FIXME: fall back to PIO? */ 501 } 502 } 503 504 dma_async_issue_pending(mcspi_dma->dma_rx); 505 omap2_mcspi_set_dma_req(spi, 1, 1); 506 507 wait_for_completion(&mcspi_dma->dma_rx_completion); 508 dma_unmap_single(mcspi->dev, xfer->rx_dma, count, 509 DMA_FROM_DEVICE); 510 511 if (mcspi->fifo_depth > 0) 512 return count; 513 514 omap2_mcspi_set_enable(spi, 0); 515 516 elements = element_count - 1; 517 518 if (l & OMAP2_MCSPI_CHCONF_TURBO) { 519 elements--; 520 521 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) 522 & OMAP2_MCSPI_CHSTAT_RXS)) { 523 u32 w; 524 525 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); 526 if (word_len <= 8) 527 ((u8 *)xfer->rx_buf)[elements++] = w; 528 else if (word_len <= 16) 529 ((u16 *)xfer->rx_buf)[elements++] = w; 530 else /* word_len <= 32 */ 531 ((u32 *)xfer->rx_buf)[elements++] = w; 532 } else { 533 int bytes_per_word = mcspi_bytes_per_word(word_len); 534 dev_err(&spi->dev, "DMA RX penultimate word empty\n"); 535 count -= (bytes_per_word << 1); 536 omap2_mcspi_set_enable(spi, 1); 537 return count; 538 } 539 } 540 if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) 541 & OMAP2_MCSPI_CHSTAT_RXS)) { 542 u32 w; 543 544 w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); 545 if (word_len <= 8) 546 ((u8 *)xfer->rx_buf)[elements] = w; 547 else if (word_len <= 16) 548 ((u16 *)xfer->rx_buf)[elements] = w; 549 else /* word_len <= 32 */ 550 ((u32 *)xfer->rx_buf)[elements] = w; 551 } else { 552 dev_err(&spi->dev, "DMA RX last word empty\n"); 553 count -= mcspi_bytes_per_word(word_len); 554 } 555 omap2_mcspi_set_enable(spi, 1); 556 return count; 557 } 558 559 static unsigned 560 omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) 561 { 562 struct omap2_mcspi *mcspi; 563 struct omap2_mcspi_cs *cs = spi->controller_state; 564 struct omap2_mcspi_dma *mcspi_dma; 565 unsigned int count; 566 u32 l; 567 u8 *rx; 568 const u8 *tx; 569 struct dma_slave_config cfg; 570 enum dma_slave_buswidth width; 571 unsigned es; 572 u32 burst; 573 void __iomem *chstat_reg; 574 void __iomem *irqstat_reg; 575 int wait_res; 576 577 mcspi = spi_master_get_devdata(spi->master); 578 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 579 l = mcspi_cached_chconf0(spi); 580 581 582 if (cs->word_len <= 8) { 583 width = DMA_SLAVE_BUSWIDTH_1_BYTE; 584 es = 1; 585 } else if (cs->word_len <= 16) { 586 width = DMA_SLAVE_BUSWIDTH_2_BYTES; 587 es = 2; 588 } else { 589 width = DMA_SLAVE_BUSWIDTH_4_BYTES; 590 es = 4; 591 } 592 593 count = xfer->len; 594 burst = 1; 595 596 if (mcspi->fifo_depth > 0) { 597 if (count > mcspi->fifo_depth) 598 burst = mcspi->fifo_depth / es; 599 else 600 burst = count / es; 601 } 602 603 memset(&cfg, 0, sizeof(cfg)); 604 cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; 605 cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; 606 cfg.src_addr_width = width; 607 cfg.dst_addr_width = width; 608 cfg.src_maxburst = burst; 609 cfg.dst_maxburst = burst; 610 611 rx = xfer->rx_buf; 612 tx = xfer->tx_buf; 613 614 if (tx != NULL) 615 omap2_mcspi_tx_dma(spi, xfer, cfg); 616 617 if (rx != NULL) 618 count = omap2_mcspi_rx_dma(spi, xfer, cfg, es); 619 620 if (tx != NULL) { 621 wait_for_completion(&mcspi_dma->dma_tx_completion); 622 dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len, 623 DMA_TO_DEVICE); 624 625 if (mcspi->fifo_depth > 0) { 626 irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS; 627 628 if (mcspi_wait_for_reg_bit(irqstat_reg, 629 OMAP2_MCSPI_IRQSTATUS_EOW) < 0) 630 dev_err(&spi->dev, "EOW timed out\n"); 631 632 mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS, 633 OMAP2_MCSPI_IRQSTATUS_EOW); 634 } 635 636 /* for TX_ONLY mode, be sure all words have shifted out */ 637 if (rx == NULL) { 638 chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; 639 if (mcspi->fifo_depth > 0) { 640 wait_res = mcspi_wait_for_reg_bit(chstat_reg, 641 OMAP2_MCSPI_CHSTAT_TXFFE); 642 if (wait_res < 0) 643 dev_err(&spi->dev, "TXFFE timed out\n"); 644 } else { 645 wait_res = mcspi_wait_for_reg_bit(chstat_reg, 646 OMAP2_MCSPI_CHSTAT_TXS); 647 if (wait_res < 0) 648 dev_err(&spi->dev, "TXS timed out\n"); 649 } 650 if (wait_res >= 0 && 651 (mcspi_wait_for_reg_bit(chstat_reg, 652 OMAP2_MCSPI_CHSTAT_EOT) < 0)) 653 dev_err(&spi->dev, "EOT timed out\n"); 654 } 655 } 656 return count; 657 } 658 659 static unsigned 660 omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) 661 { 662 struct omap2_mcspi *mcspi; 663 struct omap2_mcspi_cs *cs = spi->controller_state; 664 unsigned int count, c; 665 u32 l; 666 void __iomem *base = cs->base; 667 void __iomem *tx_reg; 668 void __iomem *rx_reg; 669 void __iomem *chstat_reg; 670 int word_len; 671 672 mcspi = spi_master_get_devdata(spi->master); 673 count = xfer->len; 674 c = count; 675 word_len = cs->word_len; 676 677 l = mcspi_cached_chconf0(spi); 678 679 /* We store the pre-calculated register addresses on stack to speed 680 * up the transfer loop. */ 681 tx_reg = base + OMAP2_MCSPI_TX0; 682 rx_reg = base + OMAP2_MCSPI_RX0; 683 chstat_reg = base + OMAP2_MCSPI_CHSTAT0; 684 685 if (c < (word_len>>3)) 686 return 0; 687 688 if (word_len <= 8) { 689 u8 *rx; 690 const u8 *tx; 691 692 rx = xfer->rx_buf; 693 tx = xfer->tx_buf; 694 695 do { 696 c -= 1; 697 if (tx != NULL) { 698 if (mcspi_wait_for_reg_bit(chstat_reg, 699 OMAP2_MCSPI_CHSTAT_TXS) < 0) { 700 dev_err(&spi->dev, "TXS timed out\n"); 701 goto out; 702 } 703 dev_vdbg(&spi->dev, "write-%d %02x\n", 704 word_len, *tx); 705 writel_relaxed(*tx++, tx_reg); 706 } 707 if (rx != NULL) { 708 if (mcspi_wait_for_reg_bit(chstat_reg, 709 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 710 dev_err(&spi->dev, "RXS timed out\n"); 711 goto out; 712 } 713 714 if (c == 1 && tx == NULL && 715 (l & OMAP2_MCSPI_CHCONF_TURBO)) { 716 omap2_mcspi_set_enable(spi, 0); 717 *rx++ = readl_relaxed(rx_reg); 718 dev_vdbg(&spi->dev, "read-%d %02x\n", 719 word_len, *(rx - 1)); 720 if (mcspi_wait_for_reg_bit(chstat_reg, 721 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 722 dev_err(&spi->dev, 723 "RXS timed out\n"); 724 goto out; 725 } 726 c = 0; 727 } else if (c == 0 && tx == NULL) { 728 omap2_mcspi_set_enable(spi, 0); 729 } 730 731 *rx++ = readl_relaxed(rx_reg); 732 dev_vdbg(&spi->dev, "read-%d %02x\n", 733 word_len, *(rx - 1)); 734 } 735 } while (c); 736 } else if (word_len <= 16) { 737 u16 *rx; 738 const u16 *tx; 739 740 rx = xfer->rx_buf; 741 tx = xfer->tx_buf; 742 do { 743 c -= 2; 744 if (tx != NULL) { 745 if (mcspi_wait_for_reg_bit(chstat_reg, 746 OMAP2_MCSPI_CHSTAT_TXS) < 0) { 747 dev_err(&spi->dev, "TXS timed out\n"); 748 goto out; 749 } 750 dev_vdbg(&spi->dev, "write-%d %04x\n", 751 word_len, *tx); 752 writel_relaxed(*tx++, tx_reg); 753 } 754 if (rx != NULL) { 755 if (mcspi_wait_for_reg_bit(chstat_reg, 756 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 757 dev_err(&spi->dev, "RXS timed out\n"); 758 goto out; 759 } 760 761 if (c == 2 && tx == NULL && 762 (l & OMAP2_MCSPI_CHCONF_TURBO)) { 763 omap2_mcspi_set_enable(spi, 0); 764 *rx++ = readl_relaxed(rx_reg); 765 dev_vdbg(&spi->dev, "read-%d %04x\n", 766 word_len, *(rx - 1)); 767 if (mcspi_wait_for_reg_bit(chstat_reg, 768 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 769 dev_err(&spi->dev, 770 "RXS timed out\n"); 771 goto out; 772 } 773 c = 0; 774 } else if (c == 0 && tx == NULL) { 775 omap2_mcspi_set_enable(spi, 0); 776 } 777 778 *rx++ = readl_relaxed(rx_reg); 779 dev_vdbg(&spi->dev, "read-%d %04x\n", 780 word_len, *(rx - 1)); 781 } 782 } while (c >= 2); 783 } else if (word_len <= 32) { 784 u32 *rx; 785 const u32 *tx; 786 787 rx = xfer->rx_buf; 788 tx = xfer->tx_buf; 789 do { 790 c -= 4; 791 if (tx != NULL) { 792 if (mcspi_wait_for_reg_bit(chstat_reg, 793 OMAP2_MCSPI_CHSTAT_TXS) < 0) { 794 dev_err(&spi->dev, "TXS timed out\n"); 795 goto out; 796 } 797 dev_vdbg(&spi->dev, "write-%d %08x\n", 798 word_len, *tx); 799 writel_relaxed(*tx++, tx_reg); 800 } 801 if (rx != NULL) { 802 if (mcspi_wait_for_reg_bit(chstat_reg, 803 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 804 dev_err(&spi->dev, "RXS timed out\n"); 805 goto out; 806 } 807 808 if (c == 4 && tx == NULL && 809 (l & OMAP2_MCSPI_CHCONF_TURBO)) { 810 omap2_mcspi_set_enable(spi, 0); 811 *rx++ = readl_relaxed(rx_reg); 812 dev_vdbg(&spi->dev, "read-%d %08x\n", 813 word_len, *(rx - 1)); 814 if (mcspi_wait_for_reg_bit(chstat_reg, 815 OMAP2_MCSPI_CHSTAT_RXS) < 0) { 816 dev_err(&spi->dev, 817 "RXS timed out\n"); 818 goto out; 819 } 820 c = 0; 821 } else if (c == 0 && tx == NULL) { 822 omap2_mcspi_set_enable(spi, 0); 823 } 824 825 *rx++ = readl_relaxed(rx_reg); 826 dev_vdbg(&spi->dev, "read-%d %08x\n", 827 word_len, *(rx - 1)); 828 } 829 } while (c >= 4); 830 } 831 832 /* for TX_ONLY mode, be sure all words have shifted out */ 833 if (xfer->rx_buf == NULL) { 834 if (mcspi_wait_for_reg_bit(chstat_reg, 835 OMAP2_MCSPI_CHSTAT_TXS) < 0) { 836 dev_err(&spi->dev, "TXS timed out\n"); 837 } else if (mcspi_wait_for_reg_bit(chstat_reg, 838 OMAP2_MCSPI_CHSTAT_EOT) < 0) 839 dev_err(&spi->dev, "EOT timed out\n"); 840 841 /* disable chan to purge rx datas received in TX_ONLY transfer, 842 * otherwise these rx datas will affect the direct following 843 * RX_ONLY transfer. 844 */ 845 omap2_mcspi_set_enable(spi, 0); 846 } 847 out: 848 omap2_mcspi_set_enable(spi, 1); 849 return count - c; 850 } 851 852 static u32 omap2_mcspi_calc_divisor(u32 speed_hz) 853 { 854 u32 div; 855 856 for (div = 0; div < 15; div++) 857 if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div)) 858 return div; 859 860 return 15; 861 } 862 863 /* called only when no transfer is active to this device */ 864 static int omap2_mcspi_setup_transfer(struct spi_device *spi, 865 struct spi_transfer *t) 866 { 867 struct omap2_mcspi_cs *cs = spi->controller_state; 868 struct omap2_mcspi *mcspi; 869 struct spi_master *spi_cntrl; 870 u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0; 871 u8 word_len = spi->bits_per_word; 872 u32 speed_hz = spi->max_speed_hz; 873 874 mcspi = spi_master_get_devdata(spi->master); 875 spi_cntrl = mcspi->master; 876 877 if (t != NULL && t->bits_per_word) 878 word_len = t->bits_per_word; 879 880 cs->word_len = word_len; 881 882 if (t && t->speed_hz) 883 speed_hz = t->speed_hz; 884 885 speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ); 886 if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) { 887 clkd = omap2_mcspi_calc_divisor(speed_hz); 888 speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd; 889 clkg = 0; 890 } else { 891 div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz; 892 speed_hz = OMAP2_MCSPI_MAX_FREQ / div; 893 clkd = (div - 1) & 0xf; 894 extclk = (div - 1) >> 4; 895 clkg = OMAP2_MCSPI_CHCONF_CLKG; 896 } 897 898 l = mcspi_cached_chconf0(spi); 899 900 /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS 901 * REVISIT: this controller could support SPI_3WIRE mode. 902 */ 903 if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) { 904 l &= ~OMAP2_MCSPI_CHCONF_IS; 905 l &= ~OMAP2_MCSPI_CHCONF_DPE1; 906 l |= OMAP2_MCSPI_CHCONF_DPE0; 907 } else { 908 l |= OMAP2_MCSPI_CHCONF_IS; 909 l |= OMAP2_MCSPI_CHCONF_DPE1; 910 l &= ~OMAP2_MCSPI_CHCONF_DPE0; 911 } 912 913 /* wordlength */ 914 l &= ~OMAP2_MCSPI_CHCONF_WL_MASK; 915 l |= (word_len - 1) << 7; 916 917 /* set chipselect polarity; manage with FORCE */ 918 if (!(spi->mode & SPI_CS_HIGH)) 919 l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */ 920 else 921 l &= ~OMAP2_MCSPI_CHCONF_EPOL; 922 923 /* set clock divisor */ 924 l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK; 925 l |= clkd << 2; 926 927 /* set clock granularity */ 928 l &= ~OMAP2_MCSPI_CHCONF_CLKG; 929 l |= clkg; 930 if (clkg) { 931 cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK; 932 cs->chctrl0 |= extclk << 8; 933 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0); 934 } 935 936 /* set SPI mode 0..3 */ 937 if (spi->mode & SPI_CPOL) 938 l |= OMAP2_MCSPI_CHCONF_POL; 939 else 940 l &= ~OMAP2_MCSPI_CHCONF_POL; 941 if (spi->mode & SPI_CPHA) 942 l |= OMAP2_MCSPI_CHCONF_PHA; 943 else 944 l &= ~OMAP2_MCSPI_CHCONF_PHA; 945 946 mcspi_write_chconf0(spi, l); 947 948 cs->mode = spi->mode; 949 950 dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", 951 speed_hz, 952 (spi->mode & SPI_CPHA) ? "trailing" : "leading", 953 (spi->mode & SPI_CPOL) ? "inverted" : "normal"); 954 955 return 0; 956 } 957 958 /* 959 * Note that we currently allow DMA only if we get a channel 960 * for both rx and tx. Otherwise we'll do PIO for both rx and tx. 961 */ 962 static int omap2_mcspi_request_dma(struct spi_device *spi) 963 { 964 struct spi_master *master = spi->master; 965 struct omap2_mcspi *mcspi; 966 struct omap2_mcspi_dma *mcspi_dma; 967 dma_cap_mask_t mask; 968 unsigned sig; 969 970 mcspi = spi_master_get_devdata(master); 971 mcspi_dma = mcspi->dma_channels + spi->chip_select; 972 973 init_completion(&mcspi_dma->dma_rx_completion); 974 init_completion(&mcspi_dma->dma_tx_completion); 975 976 dma_cap_zero(mask); 977 dma_cap_set(DMA_SLAVE, mask); 978 sig = mcspi_dma->dma_rx_sync_dev; 979 980 mcspi_dma->dma_rx = 981 dma_request_slave_channel_compat(mask, omap_dma_filter_fn, 982 &sig, &master->dev, 983 mcspi_dma->dma_rx_ch_name); 984 if (!mcspi_dma->dma_rx) 985 goto no_dma; 986 987 sig = mcspi_dma->dma_tx_sync_dev; 988 mcspi_dma->dma_tx = 989 dma_request_slave_channel_compat(mask, omap_dma_filter_fn, 990 &sig, &master->dev, 991 mcspi_dma->dma_tx_ch_name); 992 993 if (!mcspi_dma->dma_tx) { 994 dma_release_channel(mcspi_dma->dma_rx); 995 mcspi_dma->dma_rx = NULL; 996 goto no_dma; 997 } 998 999 return 0; 1000 1001 no_dma: 1002 dev_warn(&spi->dev, "not using DMA for McSPI\n"); 1003 return -EAGAIN; 1004 } 1005 1006 static int omap2_mcspi_setup(struct spi_device *spi) 1007 { 1008 int ret; 1009 struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); 1010 struct omap2_mcspi_regs *ctx = &mcspi->ctx; 1011 struct omap2_mcspi_dma *mcspi_dma; 1012 struct omap2_mcspi_cs *cs = spi->controller_state; 1013 1014 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 1015 1016 if (!cs) { 1017 cs = kzalloc(sizeof *cs, GFP_KERNEL); 1018 if (!cs) 1019 return -ENOMEM; 1020 cs->base = mcspi->base + spi->chip_select * 0x14; 1021 cs->phys = mcspi->phys + spi->chip_select * 0x14; 1022 cs->mode = 0; 1023 cs->chconf0 = 0; 1024 cs->chctrl0 = 0; 1025 spi->controller_state = cs; 1026 /* Link this to context save list */ 1027 list_add_tail(&cs->node, &ctx->cs); 1028 1029 if (gpio_is_valid(spi->cs_gpio)) { 1030 ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); 1031 if (ret) { 1032 dev_err(&spi->dev, "failed to request gpio\n"); 1033 return ret; 1034 } 1035 gpio_direction_output(spi->cs_gpio, 1036 !(spi->mode & SPI_CS_HIGH)); 1037 } 1038 } 1039 1040 if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { 1041 ret = omap2_mcspi_request_dma(spi); 1042 if (ret < 0 && ret != -EAGAIN) 1043 return ret; 1044 } 1045 1046 ret = pm_runtime_get_sync(mcspi->dev); 1047 if (ret < 0) 1048 return ret; 1049 1050 ret = omap2_mcspi_setup_transfer(spi, NULL); 1051 pm_runtime_mark_last_busy(mcspi->dev); 1052 pm_runtime_put_autosuspend(mcspi->dev); 1053 1054 return ret; 1055 } 1056 1057 static void omap2_mcspi_cleanup(struct spi_device *spi) 1058 { 1059 struct omap2_mcspi *mcspi; 1060 struct omap2_mcspi_dma *mcspi_dma; 1061 struct omap2_mcspi_cs *cs; 1062 1063 mcspi = spi_master_get_devdata(spi->master); 1064 1065 if (spi->controller_state) { 1066 /* Unlink controller state from context save list */ 1067 cs = spi->controller_state; 1068 list_del(&cs->node); 1069 1070 kfree(cs); 1071 } 1072 1073 if (spi->chip_select < spi->master->num_chipselect) { 1074 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 1075 1076 if (mcspi_dma->dma_rx) { 1077 dma_release_channel(mcspi_dma->dma_rx); 1078 mcspi_dma->dma_rx = NULL; 1079 } 1080 if (mcspi_dma->dma_tx) { 1081 dma_release_channel(mcspi_dma->dma_tx); 1082 mcspi_dma->dma_tx = NULL; 1083 } 1084 } 1085 1086 if (gpio_is_valid(spi->cs_gpio)) 1087 gpio_free(spi->cs_gpio); 1088 } 1089 1090 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi, 1091 struct spi_device *spi, struct spi_transfer *t) 1092 { 1093 1094 /* We only enable one channel at a time -- the one whose message is 1095 * -- although this controller would gladly 1096 * arbitrate among multiple channels. This corresponds to "single 1097 * channel" master mode. As a side effect, we need to manage the 1098 * chipselect with the FORCE bit ... CS != channel enable. 1099 */ 1100 1101 struct spi_master *master; 1102 struct omap2_mcspi_dma *mcspi_dma; 1103 struct omap2_mcspi_cs *cs; 1104 struct omap2_mcspi_device_config *cd; 1105 int par_override = 0; 1106 int status = 0; 1107 u32 chconf; 1108 1109 master = spi->master; 1110 mcspi_dma = mcspi->dma_channels + spi->chip_select; 1111 cs = spi->controller_state; 1112 cd = spi->controller_data; 1113 1114 /* 1115 * The slave driver could have changed spi->mode in which case 1116 * it will be different from cs->mode (the current hardware setup). 1117 * If so, set par_override (even though its not a parity issue) so 1118 * omap2_mcspi_setup_transfer will be called to configure the hardware 1119 * with the correct mode on the first iteration of the loop below. 1120 */ 1121 if (spi->mode != cs->mode) 1122 par_override = 1; 1123 1124 omap2_mcspi_set_enable(spi, 0); 1125 1126 if (gpio_is_valid(spi->cs_gpio)) 1127 omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH); 1128 1129 if (par_override || 1130 (t->speed_hz != spi->max_speed_hz) || 1131 (t->bits_per_word != spi->bits_per_word)) { 1132 par_override = 1; 1133 status = omap2_mcspi_setup_transfer(spi, t); 1134 if (status < 0) 1135 goto out; 1136 if (t->speed_hz == spi->max_speed_hz && 1137 t->bits_per_word == spi->bits_per_word) 1138 par_override = 0; 1139 } 1140 if (cd && cd->cs_per_word) { 1141 chconf = mcspi->ctx.modulctrl; 1142 chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE; 1143 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf); 1144 mcspi->ctx.modulctrl = 1145 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); 1146 } 1147 1148 chconf = mcspi_cached_chconf0(spi); 1149 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; 1150 chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; 1151 1152 if (t->tx_buf == NULL) 1153 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; 1154 else if (t->rx_buf == NULL) 1155 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; 1156 1157 if (cd && cd->turbo_mode && t->tx_buf == NULL) { 1158 /* Turbo mode is for more than one word */ 1159 if (t->len > ((cs->word_len + 7) >> 3)) 1160 chconf |= OMAP2_MCSPI_CHCONF_TURBO; 1161 } 1162 1163 mcspi_write_chconf0(spi, chconf); 1164 1165 if (t->len) { 1166 unsigned count; 1167 1168 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1169 (t->len >= DMA_MIN_BYTES)) 1170 omap2_mcspi_set_fifo(spi, t, 1); 1171 1172 omap2_mcspi_set_enable(spi, 1); 1173 1174 /* RX_ONLY mode needs dummy data in TX reg */ 1175 if (t->tx_buf == NULL) 1176 writel_relaxed(0, cs->base 1177 + OMAP2_MCSPI_TX0); 1178 1179 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1180 (t->len >= DMA_MIN_BYTES)) 1181 count = omap2_mcspi_txrx_dma(spi, t); 1182 else 1183 count = omap2_mcspi_txrx_pio(spi, t); 1184 1185 if (count != t->len) { 1186 status = -EIO; 1187 goto out; 1188 } 1189 } 1190 1191 omap2_mcspi_set_enable(spi, 0); 1192 1193 if (mcspi->fifo_depth > 0) 1194 omap2_mcspi_set_fifo(spi, t, 0); 1195 1196 out: 1197 /* Restore defaults if they were overriden */ 1198 if (par_override) { 1199 par_override = 0; 1200 status = omap2_mcspi_setup_transfer(spi, NULL); 1201 } 1202 1203 if (cd && cd->cs_per_word) { 1204 chconf = mcspi->ctx.modulctrl; 1205 chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE; 1206 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf); 1207 mcspi->ctx.modulctrl = 1208 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); 1209 } 1210 1211 omap2_mcspi_set_enable(spi, 0); 1212 1213 if (gpio_is_valid(spi->cs_gpio)) 1214 omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH)); 1215 1216 if (mcspi->fifo_depth > 0 && t) 1217 omap2_mcspi_set_fifo(spi, t, 0); 1218 1219 return status; 1220 } 1221 1222 static int omap2_mcspi_prepare_message(struct spi_master *master, 1223 struct spi_message *msg) 1224 { 1225 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1226 struct omap2_mcspi_regs *ctx = &mcspi->ctx; 1227 struct omap2_mcspi_cs *cs; 1228 1229 /* Only a single channel can have the FORCE bit enabled 1230 * in its chconf0 register. 1231 * Scan all channels and disable them except the current one. 1232 * A FORCE can remain from a last transfer having cs_change enabled 1233 */ 1234 list_for_each_entry(cs, &ctx->cs, node) { 1235 if (msg->spi->controller_state == cs) 1236 continue; 1237 1238 if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE)) { 1239 cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE; 1240 writel_relaxed(cs->chconf0, 1241 cs->base + OMAP2_MCSPI_CHCONF0); 1242 readl_relaxed(cs->base + OMAP2_MCSPI_CHCONF0); 1243 } 1244 } 1245 1246 return 0; 1247 } 1248 1249 static int omap2_mcspi_transfer_one(struct spi_master *master, 1250 struct spi_device *spi, struct spi_transfer *t) 1251 { 1252 struct omap2_mcspi *mcspi; 1253 struct omap2_mcspi_dma *mcspi_dma; 1254 const void *tx_buf = t->tx_buf; 1255 void *rx_buf = t->rx_buf; 1256 unsigned len = t->len; 1257 1258 mcspi = spi_master_get_devdata(master); 1259 mcspi_dma = mcspi->dma_channels + spi->chip_select; 1260 1261 if ((len && !(rx_buf || tx_buf))) { 1262 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n", 1263 t->speed_hz, 1264 len, 1265 tx_buf ? "tx" : "", 1266 rx_buf ? "rx" : "", 1267 t->bits_per_word); 1268 return -EINVAL; 1269 } 1270 1271 if (len < DMA_MIN_BYTES) 1272 goto skip_dma_map; 1273 1274 if (mcspi_dma->dma_tx && tx_buf != NULL) { 1275 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf, 1276 len, DMA_TO_DEVICE); 1277 if (dma_mapping_error(mcspi->dev, t->tx_dma)) { 1278 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", 1279 'T', len); 1280 return -EINVAL; 1281 } 1282 } 1283 if (mcspi_dma->dma_rx && rx_buf != NULL) { 1284 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len, 1285 DMA_FROM_DEVICE); 1286 if (dma_mapping_error(mcspi->dev, t->rx_dma)) { 1287 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", 1288 'R', len); 1289 if (tx_buf != NULL) 1290 dma_unmap_single(mcspi->dev, t->tx_dma, 1291 len, DMA_TO_DEVICE); 1292 return -EINVAL; 1293 } 1294 } 1295 1296 skip_dma_map: 1297 return omap2_mcspi_work_one(mcspi, spi, t); 1298 } 1299 1300 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) 1301 { 1302 struct spi_master *master = mcspi->master; 1303 struct omap2_mcspi_regs *ctx = &mcspi->ctx; 1304 int ret = 0; 1305 1306 ret = pm_runtime_get_sync(mcspi->dev); 1307 if (ret < 0) 1308 return ret; 1309 1310 mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, 1311 OMAP2_MCSPI_WAKEUPENABLE_WKEN); 1312 ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN; 1313 1314 omap2_mcspi_set_master_mode(master); 1315 pm_runtime_mark_last_busy(mcspi->dev); 1316 pm_runtime_put_autosuspend(mcspi->dev); 1317 return 0; 1318 } 1319 1320 static int omap_mcspi_runtime_resume(struct device *dev) 1321 { 1322 struct omap2_mcspi *mcspi; 1323 struct spi_master *master; 1324 1325 master = dev_get_drvdata(dev); 1326 mcspi = spi_master_get_devdata(master); 1327 omap2_mcspi_restore_ctx(mcspi); 1328 1329 return 0; 1330 } 1331 1332 static struct omap2_mcspi_platform_config omap2_pdata = { 1333 .regs_offset = 0, 1334 }; 1335 1336 static struct omap2_mcspi_platform_config omap4_pdata = { 1337 .regs_offset = OMAP4_MCSPI_REG_OFFSET, 1338 }; 1339 1340 static const struct of_device_id omap_mcspi_of_match[] = { 1341 { 1342 .compatible = "ti,omap2-mcspi", 1343 .data = &omap2_pdata, 1344 }, 1345 { 1346 .compatible = "ti,omap4-mcspi", 1347 .data = &omap4_pdata, 1348 }, 1349 { }, 1350 }; 1351 MODULE_DEVICE_TABLE(of, omap_mcspi_of_match); 1352 1353 static int omap2_mcspi_probe(struct platform_device *pdev) 1354 { 1355 struct spi_master *master; 1356 const struct omap2_mcspi_platform_config *pdata; 1357 struct omap2_mcspi *mcspi; 1358 struct resource *r; 1359 int status = 0, i; 1360 u32 regs_offset = 0; 1361 static int bus_num = 1; 1362 struct device_node *node = pdev->dev.of_node; 1363 const struct of_device_id *match; 1364 1365 master = spi_alloc_master(&pdev->dev, sizeof *mcspi); 1366 if (master == NULL) { 1367 dev_dbg(&pdev->dev, "master allocation failed\n"); 1368 return -ENOMEM; 1369 } 1370 1371 /* the spi->mode bits understood by this driver: */ 1372 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1373 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1374 master->setup = omap2_mcspi_setup; 1375 master->auto_runtime_pm = true; 1376 master->prepare_message = omap2_mcspi_prepare_message; 1377 master->transfer_one = omap2_mcspi_transfer_one; 1378 master->set_cs = omap2_mcspi_set_cs; 1379 master->cleanup = omap2_mcspi_cleanup; 1380 master->dev.of_node = node; 1381 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; 1382 master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; 1383 1384 platform_set_drvdata(pdev, master); 1385 1386 mcspi = spi_master_get_devdata(master); 1387 mcspi->master = master; 1388 1389 match = of_match_device(omap_mcspi_of_match, &pdev->dev); 1390 if (match) { 1391 u32 num_cs = 1; /* default number of chipselect */ 1392 pdata = match->data; 1393 1394 of_property_read_u32(node, "ti,spi-num-cs", &num_cs); 1395 master->num_chipselect = num_cs; 1396 master->bus_num = bus_num++; 1397 if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL)) 1398 mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN; 1399 } else { 1400 pdata = dev_get_platdata(&pdev->dev); 1401 master->num_chipselect = pdata->num_cs; 1402 if (pdev->id != -1) 1403 master->bus_num = pdev->id; 1404 mcspi->pin_dir = pdata->pin_dir; 1405 } 1406 regs_offset = pdata->regs_offset; 1407 1408 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1409 if (r == NULL) { 1410 status = -ENODEV; 1411 goto free_master; 1412 } 1413 1414 r->start += regs_offset; 1415 r->end += regs_offset; 1416 mcspi->phys = r->start; 1417 1418 mcspi->base = devm_ioremap_resource(&pdev->dev, r); 1419 if (IS_ERR(mcspi->base)) { 1420 status = PTR_ERR(mcspi->base); 1421 goto free_master; 1422 } 1423 1424 mcspi->dev = &pdev->dev; 1425 1426 INIT_LIST_HEAD(&mcspi->ctx.cs); 1427 1428 mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect, 1429 sizeof(struct omap2_mcspi_dma), 1430 GFP_KERNEL); 1431 if (mcspi->dma_channels == NULL) { 1432 status = -ENOMEM; 1433 goto free_master; 1434 } 1435 1436 for (i = 0; i < master->num_chipselect; i++) { 1437 char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name; 1438 char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name; 1439 struct resource *dma_res; 1440 1441 sprintf(dma_rx_ch_name, "rx%d", i); 1442 if (!pdev->dev.of_node) { 1443 dma_res = 1444 platform_get_resource_byname(pdev, 1445 IORESOURCE_DMA, 1446 dma_rx_ch_name); 1447 if (!dma_res) { 1448 dev_dbg(&pdev->dev, 1449 "cannot get DMA RX channel\n"); 1450 status = -ENODEV; 1451 break; 1452 } 1453 1454 mcspi->dma_channels[i].dma_rx_sync_dev = 1455 dma_res->start; 1456 } 1457 sprintf(dma_tx_ch_name, "tx%d", i); 1458 if (!pdev->dev.of_node) { 1459 dma_res = 1460 platform_get_resource_byname(pdev, 1461 IORESOURCE_DMA, 1462 dma_tx_ch_name); 1463 if (!dma_res) { 1464 dev_dbg(&pdev->dev, 1465 "cannot get DMA TX channel\n"); 1466 status = -ENODEV; 1467 break; 1468 } 1469 1470 mcspi->dma_channels[i].dma_tx_sync_dev = 1471 dma_res->start; 1472 } 1473 } 1474 1475 if (status < 0) 1476 goto free_master; 1477 1478 pm_runtime_use_autosuspend(&pdev->dev); 1479 pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); 1480 pm_runtime_enable(&pdev->dev); 1481 1482 status = omap2_mcspi_master_setup(mcspi); 1483 if (status < 0) 1484 goto disable_pm; 1485 1486 status = devm_spi_register_master(&pdev->dev, master); 1487 if (status < 0) 1488 goto disable_pm; 1489 1490 return status; 1491 1492 disable_pm: 1493 pm_runtime_disable(&pdev->dev); 1494 free_master: 1495 spi_master_put(master); 1496 return status; 1497 } 1498 1499 static int omap2_mcspi_remove(struct platform_device *pdev) 1500 { 1501 struct spi_master *master = platform_get_drvdata(pdev); 1502 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1503 1504 pm_runtime_put_sync(mcspi->dev); 1505 pm_runtime_disable(&pdev->dev); 1506 1507 return 0; 1508 } 1509 1510 /* work with hotplug and coldplug */ 1511 MODULE_ALIAS("platform:omap2_mcspi"); 1512 1513 #ifdef CONFIG_SUSPEND 1514 /* 1515 * When SPI wake up from off-mode, CS is in activate state. If it was in 1516 * unactive state when driver was suspend, then force it to unactive state at 1517 * wake up. 1518 */ 1519 static int omap2_mcspi_resume(struct device *dev) 1520 { 1521 struct spi_master *master = dev_get_drvdata(dev); 1522 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1523 struct omap2_mcspi_regs *ctx = &mcspi->ctx; 1524 struct omap2_mcspi_cs *cs; 1525 1526 pm_runtime_get_sync(mcspi->dev); 1527 list_for_each_entry(cs, &ctx->cs, node) { 1528 if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { 1529 /* 1530 * We need to toggle CS state for OMAP take this 1531 * change in account. 1532 */ 1533 cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE; 1534 writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); 1535 cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE; 1536 writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); 1537 } 1538 } 1539 pm_runtime_mark_last_busy(mcspi->dev); 1540 pm_runtime_put_autosuspend(mcspi->dev); 1541 1542 return pinctrl_pm_select_default_state(dev); 1543 } 1544 1545 static int omap2_mcspi_suspend(struct device *dev) 1546 { 1547 return pinctrl_pm_select_sleep_state(dev); 1548 } 1549 1550 #else 1551 #define omap2_mcspi_suspend NULL 1552 #define omap2_mcspi_resume NULL 1553 #endif 1554 1555 static const struct dev_pm_ops omap2_mcspi_pm_ops = { 1556 .resume = omap2_mcspi_resume, 1557 .suspend = omap2_mcspi_suspend, 1558 .runtime_resume = omap_mcspi_runtime_resume, 1559 }; 1560 1561 static struct platform_driver omap2_mcspi_driver = { 1562 .driver = { 1563 .name = "omap2_mcspi", 1564 .pm = &omap2_mcspi_pm_ops, 1565 .of_match_table = omap_mcspi_of_match, 1566 }, 1567 .probe = omap2_mcspi_probe, 1568 .remove = omap2_mcspi_remove, 1569 }; 1570 1571 module_platform_driver(omap2_mcspi_driver); 1572 MODULE_LICENSE("GPL"); 1573