/*
 * OMAP2 McSPI controller driver
 *
 * Copyright (C) 2005, 2006 Nokia Corporation
 * Author:	Samuel Ortiz <samuel.ortiz@nokia.com> and
 *		Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>

#include <linux/spi/spi.h>

#include <linux/platform_data/spi-omap2-mcspi.h>

#define OMAP2_MCSPI_MAX_FREQ		48000000
#define OMAP2_MCSPI_MAX_DIVIDER		4096
#define OMAP2_MCSPI_MAX_FIFODEPTH	64
#define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT		2000

#define OMAP2_MCSPI_REVISION		0x00
#define OMAP2_MCSPI_SYSSTATUS		0x14
#define OMAP2_MCSPI_IRQSTATUS		0x18
#define OMAP2_MCSPI_IRQENABLE		0x1c
#define OMAP2_MCSPI_WAKEUPENABLE	0x20
#define OMAP2_MCSPI_SYST		0x24
#define OMAP2_MCSPI_MODULCTRL		0x28
#define OMAP2_MCSPI_XFERLEVEL		0x7c

/* per-channel banks, 0x14 bytes each, first is: */
#define OMAP2_MCSPI_CHCONF0		0x2c
#define OMAP2_MCSPI_CHSTAT0		0x30
#define OMAP2_MCSPI_CHCTRL0		0x34
#define OMAP2_MCSPI_TX0			0x38
#define OMAP2_MCSPI_RX0			0x3c

/* per-register bitmasks: */
#define OMAP2_MCSPI_IRQSTATUS_EOW	BIT(17)

#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)

#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
#define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
#define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
#define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)

#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
#define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)

#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)

#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)

/* We have 2 DMA channels per CS, one for RX and one for TX */
struct omap2_mcspi_dma {
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;

	int dma_tx_sync_dev;
	int dma_rx_sync_dev;

	struct completion dma_tx_completion;
	struct completion dma_rx_completion;

	char dma_rx_ch_name[14];
	char dma_tx_ch_name[14];
};

/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
 * cache operations; better heuristics consider wordsize and bitrate.
 */
#define DMA_MIN_BYTES			160

/*
 * Used for context save and restore, structure members to be updated whenever
 * corresponding registers are modified.
 */
struct omap2_mcspi_regs {
	u32 modulctrl;
	u32 wakeupenable;
	struct list_head cs;
};

struct omap2_mcspi {
	struct spi_master	*master;
	/* Virtual base address of the controller */
	void __iomem		*base;
	unsigned long		phys;
	/* SPI1 has 4 channels, while SPI2 has 2 */
	struct omap2_mcspi_dma	*dma_channels;
	struct device		*dev;
	struct omap2_mcspi_regs	ctx;
	int			fifo_depth;
	unsigned int		pin_dir:1;
};

struct omap2_mcspi_cs {
	void __iomem		*base;
	unsigned long		phys;
	int			word_len;
	u16			mode;
	struct list_head	node;
	/* Context save and restore shadow register */
	u32			chconf0, chctrl0;
};

static inline void mcspi_write_reg(struct spi_master *master,
		int idx, u32 val)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	writel_relaxed(val, mcspi->base + idx);
}

static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	return readl_relaxed(mcspi->base + idx);
}

static inline void mcspi_write_cs_reg(const struct spi_device *spi,
		int idx, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	writel_relaxed(val, cs->base + idx);
}

static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return readl_relaxed(cs->base + idx);
}

static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	return cs->chconf0;
}

static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;

	cs->chconf0 = val;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
}

static inline int mcspi_bytes_per_word(int word_len)
{
	if (word_len <= 8)
		return 1;
	else if (word_len <= 16)
		return 2;
	else /* word_len <= 32 */
		return 4;
}

static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
		int is_read, int enable)
{
	u32 l, rw;

	l = mcspi_cached_chconf0(spi);

	if (is_read) /* 1 is read, 0 write */
		rw = OMAP2_MCSPI_CHCONF_DMAR;
	else
		rw = OMAP2_MCSPI_CHCONF_DMAW;

	if (enable)
		l |= rw;
	else
		l &= ~rw;

	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	u32 l;

	l = cs->chctrl0;
	if (enable)
		l |= OMAP2_MCSPI_CHCTRL_EN;
	else
		l &= ~OMAP2_MCSPI_CHCTRL_EN;
	cs->chctrl0 = l;
	mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
	/* Flush posted writes */
	mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
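
/*
 * The controller is run in single-channel master mode (see
 * omap2_mcspi_set_master_mode() below), so the chip select is not tied to
 * the channel enable bit; the driver asserts and releases it explicitly
 * through the FORCE bit in CHCONF.
 */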
static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
{
	u32 l;

	l = mcspi_cached_chconf0(spi);
	if (cs_active)
		l |= OMAP2_MCSPI_CHCONF_FORCE;
	else
		l &= ~OMAP2_MCSPI_CHCONF_FORCE;

	mcspi_write_chconf0(spi, l);
}

static void omap2_mcspi_set_master_mode(struct spi_master *master)
{
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	u32 l;

	/*
	 * Setup when switching from (reset default) slave mode
	 * to single-channel master mode
	 */
	l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
	mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);

	ctx->modulctrl = l;
}

static void omap2_mcspi_set_fifo(const struct spi_device *spi,
				struct spi_transfer *t, int enable)
{
	struct spi_master *master = spi->master;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	unsigned int wcnt;
	int max_fifo_depth, fifo_depth, bytes_per_word;
	u32 chconf, xferlevel;

	mcspi = spi_master_get_devdata(master);

	chconf = mcspi_cached_chconf0(spi);
	if (enable) {
		bytes_per_word = mcspi_bytes_per_word(cs->word_len);
		if (t->len % bytes_per_word != 0)
			goto disable_fifo;

		if (t->rx_buf != NULL && t->tx_buf != NULL)
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
		else
			max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;

		fifo_depth = gcd(t->len, max_fifo_depth);
		if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
			goto disable_fifo;

		wcnt = t->len / bytes_per_word;
		if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
			goto disable_fifo;

		xferlevel = wcnt << 16;
		if (t->rx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFER;
			xferlevel |= (fifo_depth - 1) << 8;
		}
		if (t->tx_buf != NULL) {
			chconf |= OMAP2_MCSPI_CHCONF_FFET;
			xferlevel |= fifo_depth - 1;
		}

		mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
		mcspi_write_chconf0(spi, chconf);
		mcspi->fifo_depth = fifo_depth;

		return;
	}

disable_fifo:
	if (t->rx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFER;

	if (t->tx_buf != NULL)
		chconf &= ~OMAP2_MCSPI_CHCONF_FFET;

	mcspi_write_chconf0(spi, chconf);
	mcspi->fifo_depth = 0;
}

static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
{
	struct spi_master *spi_cntrl = mcspi->master;
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;

	/* McSPI: context restore */
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
	mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);

	list_for_each_entry(cs, &ctx->cs, node)
		writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}

static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(1000);
	while (!(readl_relaxed(reg) & bit)) {
		if (time_after(jiffies, timeout)) {
			if (!(readl_relaxed(reg) & bit))
				return -ETIMEDOUT;
			else
				return 0;
		}
		cpu_relax();
	}
	return 0;
}

static void omap2_mcspi_rx_callback(void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	/* We must disable the DMA RX request */
	omap2_mcspi_set_dma_req(spi, 1, 0);

	complete(&mcspi_dma->dma_rx_completion);
}

static void omap2_mcspi_tx_callback(void *data)
{
	struct spi_device *spi = data;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	/* We must disable the DMA TX request */
	omap2_mcspi_set_dma_req(spi, 0, 0);

	complete(&mcspi_dma->dma_tx_completion);
}

static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);
}

static unsigned
omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
		struct dma_slave_config cfg,
		unsigned es)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count, dma_count;
	u32 l;
	int elements = 0;
	int word_len, element_count;
	struct omap2_mcspi_cs *cs = spi->controller_state;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;
	dma_count = xfer->len;

	if (mcspi->fifo_depth == 0)
		dma_count -= es;

	word_len = cs->word_len;
	l = mcspi_cached_chconf0(spi);

	if (word_len <= 8)
		element_count = count;
	else if (word_len <= 16)
		element_count = count >> 1;
	else /* word_len <= 32 */
		element_count = count >> 2;

	if (mcspi_dma->dma_rx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);

		if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
			dma_count -= es;

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->rx_dma;
		sg_dma_len(&sg) = dma_count;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
				DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_rx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
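
	/*
	 * Kick the queued RX descriptor and raise the controller's DMA read
	 * request; omap2_mcspi_rx_callback() clears the request again and
	 * signals dma_rx_completion when the dmaengine transfer finishes.
	 */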
	dma_async_issue_pending(mcspi_dma->dma_rx);
	omap2_mcspi_set_dma_req(spi, 1, 1);

	wait_for_completion(&mcspi_dma->dma_rx_completion);
	dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
			 DMA_FROM_DEVICE);

	if (mcspi->fifo_depth > 0)
		return count;

	omap2_mcspi_set_enable(spi, 0);

	elements = element_count - 1;

	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
		elements--;

		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
				& OMAP2_MCSPI_CHSTAT_RXS)) {
			u32 w;

			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
			if (word_len <= 8)
				((u8 *)xfer->rx_buf)[elements++] = w;
			else if (word_len <= 16)
				((u16 *)xfer->rx_buf)[elements++] = w;
			else /* word_len <= 32 */
				((u32 *)xfer->rx_buf)[elements++] = w;
		} else {
			int bytes_per_word = mcspi_bytes_per_word(word_len);

			dev_err(&spi->dev, "DMA RX penultimate word empty\n");
			count -= (bytes_per_word << 1);
			omap2_mcspi_set_enable(spi, 1);
			return count;
		}
	}
	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
			& OMAP2_MCSPI_CHSTAT_RXS)) {
		u32 w;

		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
		if (word_len <= 8)
			((u8 *)xfer->rx_buf)[elements] = w;
		else if (word_len <= 16)
			((u16 *)xfer->rx_buf)[elements] = w;
		else /* word_len <= 32 */
			((u32 *)xfer->rx_buf)[elements] = w;
	} else {
		dev_err(&spi->dev, "DMA RX last word empty\n");
		count -= mcspi_bytes_per_word(word_len);
	}
	omap2_mcspi_set_enable(spi, 1);
	return count;
}

static unsigned
omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi_dma *mcspi_dma;
	unsigned int count;
	u32 l;
	u8 *rx;
	const u8 *tx;
	struct dma_slave_config cfg;
	enum dma_slave_buswidth width;
	unsigned es;
	u32 burst;
	void __iomem *chstat_reg;
	void __iomem *irqstat_reg;
	int wait_res;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	l = mcspi_cached_chconf0(spi);

	if (cs->word_len <= 8) {
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		es = 1;
	} else if (cs->word_len <= 16) {
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		es = 2;
	} else {
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		es = 4;
	}

	count = xfer->len;
	burst = 1;

	if (mcspi->fifo_depth > 0) {
		if (count > mcspi->fifo_depth)
			burst = mcspi->fifo_depth / es;
		else
			burst = count / es;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
	cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
	cfg.src_addr_width = width;
	cfg.dst_addr_width = width;
	cfg.src_maxburst = burst;
	cfg.dst_maxburst = burst;

	rx = xfer->rx_buf;
	tx = xfer->tx_buf;

	if (tx != NULL)
		omap2_mcspi_tx_dma(spi, xfer, cfg);

	if (rx != NULL)
		count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);

	if (tx != NULL) {
		wait_for_completion(&mcspi_dma->dma_tx_completion);
		dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
				 DMA_TO_DEVICE);

		if (mcspi->fifo_depth > 0) {
			irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;

			if (mcspi_wait_for_reg_bit(irqstat_reg,
					OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
				dev_err(&spi->dev, "EOW timed out\n");

			mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
					OMAP2_MCSPI_IRQSTATUS_EOW);
		}

		/* for TX_ONLY mode, be sure all words have shifted out */
		if (rx == NULL) {
			chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
			if (mcspi->fifo_depth > 0) {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXFFE);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXFFE timed out\n");
			} else {
				wait_res = mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS);
				if (wait_res < 0)
					dev_err(&spi->dev, "TXS timed out\n");
			}
			if (wait_res >= 0 &&
			    (mcspi_wait_for_reg_bit(chstat_reg,
					OMAP2_MCSPI_CHSTAT_EOT) < 0))
				dev_err(&spi->dev, "EOT timed out\n");
		}
	}
	return count;
}

static unsigned
omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_cs *cs = spi->controller_state;
	unsigned int count, c;
	u32 l;
	void __iomem *base = cs->base;
	void __iomem *tx_reg;
	void __iomem *rx_reg;
	void __iomem *chstat_reg;
	int word_len;

	mcspi = spi_master_get_devdata(spi->master);
	count = xfer->len;
	c = count;
	word_len = cs->word_len;

	l = mcspi_cached_chconf0(spi);

	/* We store the pre-calculated register addresses on stack to speed
	 * up the transfer loop. */
	tx_reg = base + OMAP2_MCSPI_TX0;
	rx_reg = base + OMAP2_MCSPI_RX0;
	chstat_reg = base + OMAP2_MCSPI_CHSTAT0;

	if (c < (word_len >> 3))
		return 0;

	if (word_len <= 8) {
		u8 *rx;
		const u8 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;

		do {
			c -= 1;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %02x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 1 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %02x\n",
							word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
							OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %02x\n",
						word_len, *(rx - 1));
			}
		} while (c);
	} else if (word_len <= 16) {
		u16 *rx;
		const u16 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 2;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %04x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 2 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %04x\n",
							word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
							OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %04x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 2);
	} else if (word_len <= 32) {
		u32 *rx;
		const u32 *tx;

		rx = xfer->rx_buf;
		tx = xfer->tx_buf;
		do {
			c -= 4;
			if (tx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_TXS) < 0) {
					dev_err(&spi->dev, "TXS timed out\n");
					goto out;
				}
				dev_vdbg(&spi->dev, "write-%d %08x\n",
						word_len, *tx);
				writel_relaxed(*tx++, tx_reg);
			}
			if (rx != NULL) {
				if (mcspi_wait_for_reg_bit(chstat_reg,
						OMAP2_MCSPI_CHSTAT_RXS) < 0) {
					dev_err(&spi->dev, "RXS timed out\n");
					goto out;
				}

				if (c == 4 && tx == NULL &&
				    (l & OMAP2_MCSPI_CHCONF_TURBO)) {
					omap2_mcspi_set_enable(spi, 0);
					*rx++ = readl_relaxed(rx_reg);
					dev_vdbg(&spi->dev, "read-%d %08x\n",
							word_len, *(rx - 1));
					if (mcspi_wait_for_reg_bit(chstat_reg,
							OMAP2_MCSPI_CHSTAT_RXS) < 0) {
						dev_err(&spi->dev,
							"RXS timed out\n");
						goto out;
					}
					c = 0;
				} else if (c == 0 && tx == NULL) {
					omap2_mcspi_set_enable(spi, 0);
				}

				*rx++ = readl_relaxed(rx_reg);
				dev_vdbg(&spi->dev, "read-%d %08x\n",
						word_len, *(rx - 1));
			}
		} while (c >= 4);
	}

	/* for TX_ONLY mode, be sure all words have shifted out */
	if (xfer->rx_buf == NULL) {
		if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_TXS) < 0) {
			dev_err(&spi->dev, "TXS timed out\n");
		} else if (mcspi_wait_for_reg_bit(chstat_reg,
				OMAP2_MCSPI_CHSTAT_EOT) < 0)
			dev_err(&spi->dev, "EOT timed out\n");

		/* disable the channel to purge RX data received during a
		 * TX_ONLY transfer; otherwise this stale data would corrupt
		 * the immediately following RX_ONLY transfer.
		 */
		omap2_mcspi_set_enable(spi, 0);
	}
out:
	omap2_mcspi_set_enable(spi, 1);
	return count - c;
}

static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
{
	u32 div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
			return div;

	return 15;
}

/* called only when no transfer is active to this device */
static int omap2_mcspi_setup_transfer(struct spi_device *spi,
		struct spi_transfer *t)
{
	struct omap2_mcspi_cs *cs = spi->controller_state;
	struct omap2_mcspi *mcspi;
	struct spi_master *spi_cntrl;
	u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
	u8 word_len = spi->bits_per_word;
	u32 speed_hz = spi->max_speed_hz;

	mcspi = spi_master_get_devdata(spi->master);
	spi_cntrl = mcspi->master;

	if (t != NULL && t->bits_per_word)
		word_len = t->bits_per_word;

	cs->word_len = word_len;

	if (t && t->speed_hz)
		speed_hz = t->speed_hz;

	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
		clkd = omap2_mcspi_calc_divisor(speed_hz);
		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
		clkg = 0;
	} else {
		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
		clkd = (div - 1) & 0xf;
		extclk = (div - 1) >> 4;
		clkg = OMAP2_MCSPI_CHCONF_CLKG;
	}

	l = mcspi_cached_chconf0(spi);

	/* standard 4-wire master mode:  SCK, MOSI/out, MISO/in, nCS
	 * REVISIT: this controller could support SPI_3WIRE mode.
	 */
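	/*
	 * Data line routing: with MCSPI_PINDIR_D0_IN_D1_OUT the IS/DPE0/DPE1
	 * bits are set so the channel receives on data line 0 and transmits
	 * on data line 1; the default routing is the reverse (transmit on D0,
	 * receive on D1), matching MCSPI_PINDIR_D0_OUT_D1_IN.
	 */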
	if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
		l &= ~OMAP2_MCSPI_CHCONF_IS;
		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
		l |= OMAP2_MCSPI_CHCONF_DPE0;
	} else {
		l |= OMAP2_MCSPI_CHCONF_IS;
		l |= OMAP2_MCSPI_CHCONF_DPE1;
		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
	}

	/* wordlength */
	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
	l |= (word_len - 1) << 7;

	/* set chipselect polarity; manage with FORCE */
	if (!(spi->mode & SPI_CS_HIGH))
		l |= OMAP2_MCSPI_CHCONF_EPOL;	/* active-low; normal */
	else
		l &= ~OMAP2_MCSPI_CHCONF_EPOL;

	/* set clock divisor */
	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
	l |= clkd << 2;

	/* set clock granularity */
	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
	l |= clkg;
	if (clkg) {
		cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
		cs->chctrl0 |= extclk << 8;
		mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
	}

	/* set SPI mode 0..3 */
	if (spi->mode & SPI_CPOL)
		l |= OMAP2_MCSPI_CHCONF_POL;
	else
		l &= ~OMAP2_MCSPI_CHCONF_POL;
	if (spi->mode & SPI_CPHA)
		l |= OMAP2_MCSPI_CHCONF_PHA;
	else
		l &= ~OMAP2_MCSPI_CHCONF_PHA;

	mcspi_write_chconf0(spi, l);

	cs->mode = spi->mode;

	dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
			speed_hz,
			(spi->mode & SPI_CPHA) ? "trailing" : "leading",
			(spi->mode & SPI_CPOL) ? "inverted" : "normal");

	return 0;
}

/*
 * Note that we currently allow DMA only if we get a channel
 * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
 */
static int omap2_mcspi_request_dma(struct spi_device *spi)
{
	struct spi_master *master = spi->master;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	dma_cap_mask_t mask;
	unsigned sig;

	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;

	init_completion(&mcspi_dma->dma_rx_completion);
	init_completion(&mcspi_dma->dma_tx_completion);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	sig = mcspi_dma->dma_rx_sync_dev;

	mcspi_dma->dma_rx =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &sig, &master->dev,
						 mcspi_dma->dma_rx_ch_name);
	if (!mcspi_dma->dma_rx)
		goto no_dma;

	sig = mcspi_dma->dma_tx_sync_dev;
	mcspi_dma->dma_tx =
		dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
						 &sig, &master->dev,
						 mcspi_dma->dma_tx_ch_name);

	if (!mcspi_dma->dma_tx) {
		dma_release_channel(mcspi_dma->dma_rx);
		mcspi_dma->dma_rx = NULL;
		goto no_dma;
	}

	return 0;

no_dma:
	dev_warn(&spi->dev, "not using DMA for McSPI\n");
	return -EAGAIN;
}

static int omap2_mcspi_setup(struct spi_device *spi)
{
	int ret;
	struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_dma *mcspi_dma;
	struct omap2_mcspi_cs *cs = spi->controller_state;

	mcspi_dma = &mcspi->dma_channels[spi->chip_select];

	if (!cs) {
		cs = kzalloc(sizeof *cs, GFP_KERNEL);
		if (!cs)
			return -ENOMEM;
		cs->base = mcspi->base + spi->chip_select * 0x14;
		cs->phys = mcspi->phys + spi->chip_select * 0x14;
		cs->mode = 0;
		cs->chconf0 = 0;
		cs->chctrl0 = 0;
		spi->controller_state = cs;
		/* Link this to context save list */
		list_add_tail(&cs->node, &ctx->cs);
	}

	if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
		ret = omap2_mcspi_request_dma(spi);
		if (ret < 0 && ret != -EAGAIN)
			return ret;
	}

	ret = pm_runtime_get_sync(mcspi->dev);
	if (ret < 0)
		return ret;

	ret = omap2_mcspi_setup_transfer(spi, NULL);
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);

	return ret;
}

static void omap2_mcspi_cleanup(struct spi_device *spi)
{
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	struct omap2_mcspi_cs *cs;

	mcspi = spi_master_get_devdata(spi->master);

	if (spi->controller_state) {
		/* Unlink controller state from context save list */
		cs = spi->controller_state;
		list_del(&cs->node);

		kfree(cs);
	}

	if (spi->chip_select < spi->master->num_chipselect) {
		mcspi_dma = &mcspi->dma_channels[spi->chip_select];

		if (mcspi_dma->dma_rx) {
			dma_release_channel(mcspi_dma->dma_rx);
			mcspi_dma->dma_rx = NULL;
		}
		if (mcspi_dma->dma_tx) {
			dma_release_channel(mcspi_dma->dma_tx);
			mcspi_dma->dma_tx = NULL;
		}
	}
}

static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
{

	/* We only enable one channel at a time -- the one whose message is
	 * currently being transferred -- although this controller would
	 * gladly arbitrate among multiple channels.  This corresponds to
	 * "single channel" master mode.  As a side effect, we need to manage
	 * the chipselect with the FORCE bit ... CS != channel enable.
	 */

	struct spi_device *spi;
	struct spi_transfer *t = NULL;
	struct spi_master *master;
	struct omap2_mcspi_dma *mcspi_dma;
	int cs_active = 0;
	struct omap2_mcspi_cs *cs;
	struct omap2_mcspi_device_config *cd;
	int par_override = 0;
	int status = 0;
	u32 chconf;

	spi = m->spi;
	master = spi->master;
	mcspi_dma = mcspi->dma_channels + spi->chip_select;
	cs = spi->controller_state;
	cd = spi->controller_data;

	/*
	 * The slave driver could have changed spi->mode in which case
	 * it will be different from cs->mode (the current hardware setup).
	 * If so, set par_override (even though it's not a parity issue) so
	 * omap2_mcspi_setup_transfer will be called to configure the hardware
	 * with the correct mode on the first iteration of the loop below.
	 */
	if (spi->mode != cs->mode)
		par_override = 1;

	omap2_mcspi_set_enable(spi, 0);
	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
			status = -EINVAL;
			break;
		}
		if (par_override ||
		    (t->speed_hz != spi->max_speed_hz) ||
		    (t->bits_per_word != spi->bits_per_word)) {
			par_override = 1;
			status = omap2_mcspi_setup_transfer(spi, t);
			if (status < 0)
				break;
			if (t->speed_hz == spi->max_speed_hz &&
			    t->bits_per_word == spi->bits_per_word)
				par_override = 0;
		}
		if (cd && cd->cs_per_word) {
			chconf = mcspi->ctx.modulctrl;
			chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
			mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
			mcspi->ctx.modulctrl =
				mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
		}

		if (!cs_active) {
			omap2_mcspi_force_cs(spi, 1);
			cs_active = 1;
		}

		chconf = mcspi_cached_chconf0(spi);
		chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
		chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;

		if (t->tx_buf == NULL)
			chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
		else if (t->rx_buf == NULL)
			chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;

		if (cd && cd->turbo_mode && t->tx_buf == NULL) {
			/* Turbo mode is for more than one word */
			if (t->len > ((cs->word_len + 7) >> 3))
				chconf |= OMAP2_MCSPI_CHCONF_TURBO;
		}

		mcspi_write_chconf0(spi, chconf);

		if (t->len) {
			unsigned count;

			if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
			    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
				omap2_mcspi_set_fifo(spi, t, 1);

			omap2_mcspi_set_enable(spi, 1);

			/* RX_ONLY mode needs dummy data in TX reg */
			if (t->tx_buf == NULL)
				writel_relaxed(0, cs->base
						+ OMAP2_MCSPI_TX0);

			if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
			    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
				count = omap2_mcspi_txrx_dma(spi, t);
			else
				count = omap2_mcspi_txrx_pio(spi, t);
			m->actual_length += count;

			if (count != t->len) {
				status = -EIO;
				break;
			}
		}

		if (t->delay_usecs)
			udelay(t->delay_usecs);

		/* ignore the "leave it on after last xfer" hint */
		if (t->cs_change) {
			omap2_mcspi_force_cs(spi, 0);
			cs_active = 0;
		}

		omap2_mcspi_set_enable(spi, 0);

		if (mcspi->fifo_depth > 0)
			omap2_mcspi_set_fifo(spi, t, 0);
	}
	/* Restore defaults if they were overridden */
	if (par_override) {
		par_override = 0;
		status = omap2_mcspi_setup_transfer(spi, NULL);
	}

	if (cs_active)
		omap2_mcspi_force_cs(spi, 0);

	if (cd && cd->cs_per_word) {
		chconf = mcspi->ctx.modulctrl;
		chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
		mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
		mcspi->ctx.modulctrl =
			mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
	}

	omap2_mcspi_set_enable(spi, 0);

	if (mcspi->fifo_depth > 0 && t)
		omap2_mcspi_set_fifo(spi, t, 0);

	m->status = status;
}

static int omap2_mcspi_transfer_one_message(struct spi_master *master,
		struct spi_message *m)
{
	struct spi_device *spi;
	struct omap2_mcspi *mcspi;
	struct omap2_mcspi_dma *mcspi_dma;
	struct spi_transfer *t;

	spi = m->spi;
	mcspi = spi_master_get_devdata(master);
	mcspi_dma = mcspi->dma_channels + spi->chip_select;
	m->actual_length = 0;
	m->status = 0;
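
	/*
	 * Map the transfer buffers for DMA up front.  Transfers shorter than
	 * DMA_MIN_BYTES are left unmapped and will be done in PIO mode by
	 * omap2_mcspi_work(); messages the caller already mapped
	 * (is_dma_mapped) are skipped as well, since their DMA addresses are
	 * supplied by the caller.
	 */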
	list_for_each_entry(t, &m->transfers, transfer_list) {
		const void *tx_buf = t->tx_buf;
		void *rx_buf = t->rx_buf;
		unsigned len = t->len;

		if ((len && !(rx_buf || tx_buf))) {
			dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
					t->speed_hz,
					len,
					tx_buf ? "tx" : "",
					rx_buf ? "rx" : "",
					t->bits_per_word);
			return -EINVAL;
		}

		if (m->is_dma_mapped || len < DMA_MIN_BYTES)
			continue;

		if (mcspi_dma->dma_tx && tx_buf != NULL) {
			t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
					len, DMA_TO_DEVICE);
			if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
						'T', len);
				return -EINVAL;
			}
		}
		if (mcspi_dma->dma_rx && rx_buf != NULL) {
			t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
					DMA_FROM_DEVICE);
			if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
				dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
						'R', len);
				if (tx_buf != NULL)
					dma_unmap_single(mcspi->dev, t->tx_dma,
							len, DMA_TO_DEVICE);
				return -EINVAL;
			}
		}
	}

	omap2_mcspi_work(mcspi, m);
	spi_finalize_current_message(master);
	return 0;
}

static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
{
	struct spi_master *master = mcspi->master;
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	int ret = 0;

	ret = pm_runtime_get_sync(mcspi->dev);
	if (ret < 0)
		return ret;

	mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
			OMAP2_MCSPI_WAKEUPENABLE_WKEN);
	ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;

	omap2_mcspi_set_master_mode(master);
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
	return 0;
}

static int omap_mcspi_runtime_resume(struct device *dev)
{
	struct omap2_mcspi *mcspi;
	struct spi_master *master;

	master = dev_get_drvdata(dev);
	mcspi = spi_master_get_devdata(master);
	omap2_mcspi_restore_ctx(mcspi);

	return 0;
}

static struct omap2_mcspi_platform_config omap2_pdata = {
	.regs_offset = 0,
};

static struct omap2_mcspi_platform_config omap4_pdata = {
	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};

static const struct of_device_id omap_mcspi_of_match[] = {
	{
		.compatible = "ti,omap2-mcspi",
		.data = &omap2_pdata,
	},
	{
		.compatible = "ti,omap4-mcspi",
		.data = &omap4_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);

static int omap2_mcspi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	const struct omap2_mcspi_platform_config *pdata;
	struct omap2_mcspi *mcspi;
	struct resource *r;
	int status = 0, i;
	u32 regs_offset = 0;
	static int bus_num = 1;
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;

	master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
	if (master == NULL) {
		dev_dbg(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	master->setup = omap2_mcspi_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = omap2_mcspi_transfer_one_message;
	master->cleanup = omap2_mcspi_cleanup;
	master->dev.of_node = node;
	master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
	master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;

	platform_set_drvdata(pdev, master);

	mcspi = spi_master_get_devdata(master);
	mcspi->master = master;

	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
	if (match) {
		u32 num_cs = 1; /* default number of chipselect */
		pdata = match->data;

		of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
		master->num_chipselect = num_cs;
		master->bus_num = bus_num++;
		if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
			mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
	} else {
		pdata = dev_get_platdata(&pdev->dev);
		master->num_chipselect = pdata->num_cs;
		if (pdev->id != -1)
			master->bus_num = pdev->id;
		mcspi->pin_dir = pdata->pin_dir;
	}
	regs_offset = pdata->regs_offset;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		status = -ENODEV;
		goto free_master;
	}

	r->start += regs_offset;
	r->end += regs_offset;
	mcspi->phys = r->start;

	mcspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(mcspi->base)) {
		status = PTR_ERR(mcspi->base);
		goto free_master;
	}

	mcspi->dev = &pdev->dev;

	INIT_LIST_HEAD(&mcspi->ctx.cs);

	mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
					   sizeof(struct omap2_mcspi_dma),
					   GFP_KERNEL);
	if (mcspi->dma_channels == NULL) {
		status = -ENOMEM;
		goto free_master;
	}

	for (i = 0; i < master->num_chipselect; i++) {
		char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
		char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
		struct resource *dma_res;

		sprintf(dma_rx_ch_name, "rx%d", i);
		if (!pdev->dev.of_node) {
			dma_res =
				platform_get_resource_byname(pdev,
							     IORESOURCE_DMA,
							     dma_rx_ch_name);
			if (!dma_res) {
				dev_dbg(&pdev->dev,
					"cannot get DMA RX channel\n");
				status = -ENODEV;
				break;
			}

			mcspi->dma_channels[i].dma_rx_sync_dev =
				dma_res->start;
		}
		sprintf(dma_tx_ch_name, "tx%d", i);
		if (!pdev->dev.of_node) {
			dma_res =
				platform_get_resource_byname(pdev,
							     IORESOURCE_DMA,
							     dma_tx_ch_name);
			if (!dma_res) {
				dev_dbg(&pdev->dev,
					"cannot get DMA TX channel\n");
				status = -ENODEV;
				break;
			}

			mcspi->dma_channels[i].dma_tx_sync_dev =
				dma_res->start;
		}
	}

	if (status < 0)
		goto free_master;

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	status = omap2_mcspi_master_setup(mcspi);
	if (status < 0)
		goto disable_pm;

	status = devm_spi_register_master(&pdev->dev, master);
	if (status < 0)
		goto disable_pm;

	return status;

disable_pm:
	pm_runtime_disable(&pdev->dev);
free_master:
	spi_master_put(master);
	return status;
}

static int omap2_mcspi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);

	pm_runtime_put_sync(mcspi->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");

#ifdef CONFIG_SUSPEND
/*
 * When the McSPI controller wakes up from off-mode, CS is in the active
 * state.  If it was inactive when the driver was suspended, force it back
 * to the inactive state at wake-up.
 */
static int omap2_mcspi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
	struct omap2_mcspi_regs *ctx = &mcspi->ctx;
	struct omap2_mcspi_cs *cs;

	pm_runtime_get_sync(mcspi->dev);
	list_for_each_entry(cs, &ctx->cs, node) {
		if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
			/*
			 * Toggle the CS state so that the OMAP takes the
			 * change into account.
			 */
			cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
			cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
			writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
		}
	}
	pm_runtime_mark_last_busy(mcspi->dev);
	pm_runtime_put_autosuspend(mcspi->dev);
	return 0;
}
#else
#define omap2_mcspi_resume	NULL
#endif

static const struct dev_pm_ops omap2_mcspi_pm_ops = {
	.resume = omap2_mcspi_resume,
	.runtime_resume = omap_mcspi_runtime_resume,
};

static struct platform_driver omap2_mcspi_driver = {
	.driver = {
		.name = "omap2_mcspi",
		.owner = THIS_MODULE,
		.pm = &omap2_mcspi_pm_ops,
		.of_match_table = omap_mcspi_of_match,
	},
	.probe = omap2_mcspi_probe,
	.remove = omap2_mcspi_remove,
};

module_platform_driver(omap2_mcspi_driver);
MODULE_LICENSE("GPL");