1 /* 2 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> 3 * JZ4740 SD/MMC controller driver 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License as published by the 7 * Free Software Foundation; either version 2 of the License, or (at your 8 * option) any later version. 9 * 10 * You should have received a copy of the GNU General Public License along 11 * with this program; if not, write to the Free Software Foundation, Inc., 12 * 675 Mass Ave, Cambridge, MA 02139, USA. 13 * 14 */ 15 16 #include <linux/mmc/host.h> 17 #include <linux/mmc/slot-gpio.h> 18 #include <linux/err.h> 19 #include <linux/io.h> 20 #include <linux/irq.h> 21 #include <linux/interrupt.h> 22 #include <linux/module.h> 23 #include <linux/pinctrl/consumer.h> 24 #include <linux/platform_device.h> 25 #include <linux/delay.h> 26 #include <linux/scatterlist.h> 27 #include <linux/clk.h> 28 29 #include <linux/bitops.h> 30 #include <linux/gpio.h> 31 #include <asm/cacheflush.h> 32 #include <linux/dma-mapping.h> 33 #include <linux/dmaengine.h> 34 35 #include <asm/mach-jz4740/dma.h> 36 #include <asm/mach-jz4740/jz4740_mmc.h> 37 38 #define JZ_REG_MMC_STRPCL 0x00 39 #define JZ_REG_MMC_STATUS 0x04 40 #define JZ_REG_MMC_CLKRT 0x08 41 #define JZ_REG_MMC_CMDAT 0x0C 42 #define JZ_REG_MMC_RESTO 0x10 43 #define JZ_REG_MMC_RDTO 0x14 44 #define JZ_REG_MMC_BLKLEN 0x18 45 #define JZ_REG_MMC_NOB 0x1C 46 #define JZ_REG_MMC_SNOB 0x20 47 #define JZ_REG_MMC_IMASK 0x24 48 #define JZ_REG_MMC_IREG 0x28 49 #define JZ_REG_MMC_CMD 0x2C 50 #define JZ_REG_MMC_ARG 0x30 51 #define JZ_REG_MMC_RESP_FIFO 0x34 52 #define JZ_REG_MMC_RXFIFO 0x38 53 #define JZ_REG_MMC_TXFIFO 0x3C 54 55 #define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7) 56 #define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6) 57 #define JZ_MMC_STRPCL_START_READWAIT BIT(5) 58 #define JZ_MMC_STRPCL_STOP_READWAIT BIT(4) 59 #define JZ_MMC_STRPCL_RESET BIT(3) 60 #define JZ_MMC_STRPCL_START_OP BIT(2) 61 #define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0)) 62 #define JZ_MMC_STRPCL_CLOCK_STOP BIT(0) 63 #define JZ_MMC_STRPCL_CLOCK_START BIT(1) 64 65 66 #define JZ_MMC_STATUS_IS_RESETTING BIT(15) 67 #define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14) 68 #define JZ_MMC_STATUS_PRG_DONE BIT(13) 69 #define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12) 70 #define JZ_MMC_STATUS_END_CMD_RES BIT(11) 71 #define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10) 72 #define JZ_MMC_STATUS_IS_READWAIT BIT(9) 73 #define JZ_MMC_STATUS_CLK_EN BIT(8) 74 #define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7) 75 #define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6) 76 #define JZ_MMC_STATUS_CRC_RES_ERR BIT(5) 77 #define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4) 78 #define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3) 79 #define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2) 80 #define JZ_MMC_STATUS_TIMEOUT_RES BIT(1) 81 #define JZ_MMC_STATUS_TIMEOUT_READ BIT(0) 82 83 #define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0)) 84 #define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2)) 85 86 87 #define JZ_MMC_CMDAT_IO_ABORT BIT(11) 88 #define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10) 89 #define JZ_MMC_CMDAT_DMA_EN BIT(8) 90 #define JZ_MMC_CMDAT_INIT BIT(7) 91 #define JZ_MMC_CMDAT_BUSY BIT(6) 92 #define JZ_MMC_CMDAT_STREAM BIT(5) 93 #define JZ_MMC_CMDAT_WRITE BIT(4) 94 #define JZ_MMC_CMDAT_DATA_EN BIT(3) 95 #define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0)) 96 #define JZ_MMC_CMDAT_RSP_R1 1 97 #define JZ_MMC_CMDAT_RSP_R2 2 98 #define JZ_MMC_CMDAT_RSP_R3 3 99 100 #define JZ_MMC_IRQ_SDIO BIT(7) 101 #define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6) 102 #define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5) 103 #define JZ_MMC_IRQ_END_CMD_RES BIT(2) 104 #define JZ_MMC_IRQ_PRG_DONE BIT(1) 105 #define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0) 106 107 108 #define JZ_MMC_CLK_RATE 24000000 109 110 enum jz4740_mmc_state { 111 JZ4740_MMC_STATE_READ_RESPONSE, 112 JZ4740_MMC_STATE_TRANSFER_DATA, 113 JZ4740_MMC_STATE_SEND_STOP, 114 JZ4740_MMC_STATE_DONE, 115 }; 116 117 struct jz4740_mmc_host_next { 118 int sg_len; 119 s32 cookie; 120 }; 121 122 struct jz4740_mmc_host { 123 struct mmc_host *mmc; 124 struct platform_device *pdev; 125 struct jz4740_mmc_platform_data *pdata; 126 struct clk *clk; 127 128 int irq; 129 int card_detect_irq; 130 131 void __iomem *base; 132 struct resource *mem_res; 133 struct mmc_request *req; 134 struct mmc_command *cmd; 135 136 unsigned long waiting; 137 138 uint32_t cmdat; 139 140 uint16_t irq_mask; 141 142 spinlock_t lock; 143 144 struct timer_list timeout_timer; 145 struct sg_mapping_iter miter; 146 enum jz4740_mmc_state state; 147 148 /* DMA support */ 149 struct dma_chan *dma_rx; 150 struct dma_chan *dma_tx; 151 struct jz4740_mmc_host_next next_data; 152 bool use_dma; 153 int sg_len; 154 155 /* The DMA trigger level is 8 words, that is to say, the DMA read 156 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write 157 * trigger is when data words in MSC_TXFIFO is < 8. 158 */ 159 #define JZ4740_MMC_FIFO_HALF_SIZE 8 160 }; 161 162 /*----------------------------------------------------------------------------*/ 163 /* DMA infrastructure */ 164 165 static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host) 166 { 167 if (!host->use_dma) 168 return; 169 170 dma_release_channel(host->dma_tx); 171 dma_release_channel(host->dma_rx); 172 } 173 174 static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host) 175 { 176 dma_cap_mask_t mask; 177 178 dma_cap_zero(mask); 179 dma_cap_set(DMA_SLAVE, mask); 180 181 host->dma_tx = dma_request_channel(mask, NULL, host); 182 if (!host->dma_tx) { 183 dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n"); 184 return -ENODEV; 185 } 186 187 host->dma_rx = dma_request_channel(mask, NULL, host); 188 if (!host->dma_rx) { 189 dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n"); 190 goto free_master_write; 191 } 192 193 /* Initialize DMA pre request cookie */ 194 host->next_data.cookie = 1; 195 196 return 0; 197 198 free_master_write: 199 dma_release_channel(host->dma_tx); 200 return -ENODEV; 201 } 202 203 static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host, 204 struct mmc_data *data) 205 { 206 return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx; 207 } 208 209 static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host, 210 struct mmc_data *data) 211 { 212 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); 213 enum dma_data_direction dir = mmc_get_dma_dir(data); 214 215 dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); 216 } 217 218 /* Prepares DMA data for current/next transfer, returns non-zero on failure */ 219 static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host, 220 struct mmc_data *data, 221 struct jz4740_mmc_host_next *next, 222 struct dma_chan *chan) 223 { 224 struct jz4740_mmc_host_next *next_data = &host->next_data; 225 enum dma_data_direction dir = mmc_get_dma_dir(data); 226 int sg_len; 227 228 if (!next && data->host_cookie && 229 data->host_cookie != host->next_data.cookie) { 230 dev_warn(mmc_dev(host->mmc), 231 "[%s] invalid cookie: data->host_cookie %d host->next_data.cookie %d\n", 232 __func__, 233 data->host_cookie, 234 host->next_data.cookie); 235 data->host_cookie = 0; 236 } 237 238 /* Check if next job is already prepared */ 239 if (next || data->host_cookie != host->next_data.cookie) { 240 sg_len = dma_map_sg(chan->device->dev, 241 data->sg, 242 data->sg_len, 243 dir); 244 245 } else { 246 sg_len = next_data->sg_len; 247 next_data->sg_len = 0; 248 } 249 250 if (sg_len <= 0) { 251 dev_err(mmc_dev(host->mmc), 252 "Failed to map scatterlist for DMA operation\n"); 253 return -EINVAL; 254 } 255 256 if (next) { 257 next->sg_len = sg_len; 258 data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie; 259 } else 260 host->sg_len = sg_len; 261 262 return 0; 263 } 264 265 static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host, 266 struct mmc_data *data) 267 { 268 int ret; 269 struct dma_chan *chan; 270 struct dma_async_tx_descriptor *desc; 271 struct dma_slave_config conf = { 272 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 273 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 274 .src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE, 275 .dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE, 276 }; 277 278 if (data->flags & MMC_DATA_WRITE) { 279 conf.direction = DMA_MEM_TO_DEV; 280 conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO; 281 conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT; 282 chan = host->dma_tx; 283 } else { 284 conf.direction = DMA_DEV_TO_MEM; 285 conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO; 286 conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE; 287 chan = host->dma_rx; 288 } 289 290 ret = jz4740_mmc_prepare_dma_data(host, data, NULL, chan); 291 if (ret) 292 return ret; 293 294 dmaengine_slave_config(chan, &conf); 295 desc = dmaengine_prep_slave_sg(chan, 296 data->sg, 297 host->sg_len, 298 conf.direction, 299 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 300 if (!desc) { 301 dev_err(mmc_dev(host->mmc), 302 "Failed to allocate DMA %s descriptor", 303 conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX"); 304 goto dma_unmap; 305 } 306 307 dmaengine_submit(desc); 308 dma_async_issue_pending(chan); 309 310 return 0; 311 312 dma_unmap: 313 jz4740_mmc_dma_unmap(host, data); 314 return -ENOMEM; 315 } 316 317 static void jz4740_mmc_pre_request(struct mmc_host *mmc, 318 struct mmc_request *mrq) 319 { 320 struct jz4740_mmc_host *host = mmc_priv(mmc); 321 struct mmc_data *data = mrq->data; 322 struct jz4740_mmc_host_next *next_data = &host->next_data; 323 324 BUG_ON(data->host_cookie); 325 326 if (host->use_dma) { 327 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); 328 329 if (jz4740_mmc_prepare_dma_data(host, data, next_data, chan)) 330 data->host_cookie = 0; 331 } 332 } 333 334 static void jz4740_mmc_post_request(struct mmc_host *mmc, 335 struct mmc_request *mrq, 336 int err) 337 { 338 struct jz4740_mmc_host *host = mmc_priv(mmc); 339 struct mmc_data *data = mrq->data; 340 341 if (host->use_dma && data->host_cookie) { 342 jz4740_mmc_dma_unmap(host, data); 343 data->host_cookie = 0; 344 } 345 346 if (err) { 347 struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data); 348 349 dmaengine_terminate_all(chan); 350 } 351 } 352 353 /*----------------------------------------------------------------------------*/ 354 355 static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, 356 unsigned int irq, bool enabled) 357 { 358 unsigned long flags; 359 360 spin_lock_irqsave(&host->lock, flags); 361 if (enabled) 362 host->irq_mask &= ~irq; 363 else 364 host->irq_mask |= irq; 365 spin_unlock_irqrestore(&host->lock, flags); 366 367 writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); 368 } 369 370 static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, 371 bool start_transfer) 372 { 373 uint16_t val = JZ_MMC_STRPCL_CLOCK_START; 374 375 if (start_transfer) 376 val |= JZ_MMC_STRPCL_START_OP; 377 378 writew(val, host->base + JZ_REG_MMC_STRPCL); 379 } 380 381 static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host) 382 { 383 uint32_t status; 384 unsigned int timeout = 1000; 385 386 writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL); 387 do { 388 status = readl(host->base + JZ_REG_MMC_STATUS); 389 } while (status & JZ_MMC_STATUS_CLK_EN && --timeout); 390 } 391 392 static void jz4740_mmc_reset(struct jz4740_mmc_host *host) 393 { 394 uint32_t status; 395 unsigned int timeout = 1000; 396 397 writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL); 398 udelay(10); 399 do { 400 status = readl(host->base + JZ_REG_MMC_STATUS); 401 } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout); 402 } 403 404 static void jz4740_mmc_request_done(struct jz4740_mmc_host *host) 405 { 406 struct mmc_request *req; 407 408 req = host->req; 409 host->req = NULL; 410 411 mmc_request_done(host->mmc, req); 412 } 413 414 static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host, 415 unsigned int irq) 416 { 417 unsigned int timeout = 0x800; 418 uint16_t status; 419 420 do { 421 status = readw(host->base + JZ_REG_MMC_IREG); 422 } while (!(status & irq) && --timeout); 423 424 if (timeout == 0) { 425 set_bit(0, &host->waiting); 426 mod_timer(&host->timeout_timer, jiffies + 5*HZ); 427 jz4740_mmc_set_irq_enabled(host, irq, true); 428 return true; 429 } 430 431 return false; 432 } 433 434 static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host, 435 struct mmc_data *data) 436 { 437 int status; 438 439 status = readl(host->base + JZ_REG_MMC_STATUS); 440 if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) { 441 if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) { 442 host->req->cmd->error = -ETIMEDOUT; 443 data->error = -ETIMEDOUT; 444 } else { 445 host->req->cmd->error = -EIO; 446 data->error = -EIO; 447 } 448 } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) { 449 if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) { 450 host->req->cmd->error = -ETIMEDOUT; 451 data->error = -ETIMEDOUT; 452 } else { 453 host->req->cmd->error = -EIO; 454 data->error = -EIO; 455 } 456 } 457 } 458 459 static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host, 460 struct mmc_data *data) 461 { 462 struct sg_mapping_iter *miter = &host->miter; 463 void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO; 464 uint32_t *buf; 465 bool timeout; 466 size_t i, j; 467 468 while (sg_miter_next(miter)) { 469 buf = miter->addr; 470 i = miter->length / 4; 471 j = i / 8; 472 i = i & 0x7; 473 while (j) { 474 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); 475 if (unlikely(timeout)) 476 goto poll_timeout; 477 478 writel(buf[0], fifo_addr); 479 writel(buf[1], fifo_addr); 480 writel(buf[2], fifo_addr); 481 writel(buf[3], fifo_addr); 482 writel(buf[4], fifo_addr); 483 writel(buf[5], fifo_addr); 484 writel(buf[6], fifo_addr); 485 writel(buf[7], fifo_addr); 486 buf += 8; 487 --j; 488 } 489 if (unlikely(i)) { 490 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); 491 if (unlikely(timeout)) 492 goto poll_timeout; 493 494 while (i) { 495 writel(*buf, fifo_addr); 496 ++buf; 497 --i; 498 } 499 } 500 data->bytes_xfered += miter->length; 501 } 502 sg_miter_stop(miter); 503 504 return false; 505 506 poll_timeout: 507 miter->consumed = (void *)buf - miter->addr; 508 data->bytes_xfered += miter->consumed; 509 sg_miter_stop(miter); 510 511 return true; 512 } 513 514 static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host, 515 struct mmc_data *data) 516 { 517 struct sg_mapping_iter *miter = &host->miter; 518 void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO; 519 uint32_t *buf; 520 uint32_t d; 521 uint16_t status; 522 size_t i, j; 523 unsigned int timeout; 524 525 while (sg_miter_next(miter)) { 526 buf = miter->addr; 527 i = miter->length; 528 j = i / 32; 529 i = i & 0x1f; 530 while (j) { 531 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); 532 if (unlikely(timeout)) 533 goto poll_timeout; 534 535 buf[0] = readl(fifo_addr); 536 buf[1] = readl(fifo_addr); 537 buf[2] = readl(fifo_addr); 538 buf[3] = readl(fifo_addr); 539 buf[4] = readl(fifo_addr); 540 buf[5] = readl(fifo_addr); 541 buf[6] = readl(fifo_addr); 542 buf[7] = readl(fifo_addr); 543 544 buf += 8; 545 --j; 546 } 547 548 if (unlikely(i)) { 549 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); 550 if (unlikely(timeout)) 551 goto poll_timeout; 552 553 while (i >= 4) { 554 *buf++ = readl(fifo_addr); 555 i -= 4; 556 } 557 if (unlikely(i > 0)) { 558 d = readl(fifo_addr); 559 memcpy(buf, &d, i); 560 } 561 } 562 data->bytes_xfered += miter->length; 563 564 /* This can go away once MIPS implements 565 * flush_kernel_dcache_page */ 566 flush_dcache_page(miter->page); 567 } 568 sg_miter_stop(miter); 569 570 /* For whatever reason there is sometime one word more in the fifo then 571 * requested */ 572 timeout = 1000; 573 status = readl(host->base + JZ_REG_MMC_STATUS); 574 while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) { 575 d = readl(fifo_addr); 576 status = readl(host->base + JZ_REG_MMC_STATUS); 577 } 578 579 return false; 580 581 poll_timeout: 582 miter->consumed = (void *)buf - miter->addr; 583 data->bytes_xfered += miter->consumed; 584 sg_miter_stop(miter); 585 586 return true; 587 } 588 589 static void jz4740_mmc_timeout(struct timer_list *t) 590 { 591 struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer); 592 593 if (!test_and_clear_bit(0, &host->waiting)) 594 return; 595 596 jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false); 597 598 host->req->cmd->error = -ETIMEDOUT; 599 jz4740_mmc_request_done(host); 600 } 601 602 static void jz4740_mmc_read_response(struct jz4740_mmc_host *host, 603 struct mmc_command *cmd) 604 { 605 int i; 606 uint16_t tmp; 607 void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO; 608 609 if (cmd->flags & MMC_RSP_136) { 610 tmp = readw(fifo_addr); 611 for (i = 0; i < 4; ++i) { 612 cmd->resp[i] = tmp << 24; 613 tmp = readw(fifo_addr); 614 cmd->resp[i] |= tmp << 8; 615 tmp = readw(fifo_addr); 616 cmd->resp[i] |= tmp >> 8; 617 } 618 } else { 619 cmd->resp[0] = readw(fifo_addr) << 24; 620 cmd->resp[0] |= readw(fifo_addr) << 8; 621 cmd->resp[0] |= readw(fifo_addr) & 0xff; 622 } 623 } 624 625 static void jz4740_mmc_send_command(struct jz4740_mmc_host *host, 626 struct mmc_command *cmd) 627 { 628 uint32_t cmdat = host->cmdat; 629 630 host->cmdat &= ~JZ_MMC_CMDAT_INIT; 631 jz4740_mmc_clock_disable(host); 632 633 host->cmd = cmd; 634 635 if (cmd->flags & MMC_RSP_BUSY) 636 cmdat |= JZ_MMC_CMDAT_BUSY; 637 638 switch (mmc_resp_type(cmd)) { 639 case MMC_RSP_R1B: 640 case MMC_RSP_R1: 641 cmdat |= JZ_MMC_CMDAT_RSP_R1; 642 break; 643 case MMC_RSP_R2: 644 cmdat |= JZ_MMC_CMDAT_RSP_R2; 645 break; 646 case MMC_RSP_R3: 647 cmdat |= JZ_MMC_CMDAT_RSP_R3; 648 break; 649 default: 650 break; 651 } 652 653 if (cmd->data) { 654 cmdat |= JZ_MMC_CMDAT_DATA_EN; 655 if (cmd->data->flags & MMC_DATA_WRITE) 656 cmdat |= JZ_MMC_CMDAT_WRITE; 657 if (host->use_dma) 658 cmdat |= JZ_MMC_CMDAT_DMA_EN; 659 660 writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN); 661 writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB); 662 } 663 664 writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD); 665 writel(cmd->arg, host->base + JZ_REG_MMC_ARG); 666 writel(cmdat, host->base + JZ_REG_MMC_CMDAT); 667 668 jz4740_mmc_clock_enable(host, 1); 669 } 670 671 static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host) 672 { 673 struct mmc_command *cmd = host->req->cmd; 674 struct mmc_data *data = cmd->data; 675 int direction; 676 677 if (data->flags & MMC_DATA_READ) 678 direction = SG_MITER_TO_SG; 679 else 680 direction = SG_MITER_FROM_SG; 681 682 sg_miter_start(&host->miter, data->sg, data->sg_len, direction); 683 } 684 685 686 static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) 687 { 688 struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid; 689 struct mmc_command *cmd = host->req->cmd; 690 struct mmc_request *req = host->req; 691 struct mmc_data *data = cmd->data; 692 bool timeout = false; 693 694 if (cmd->error) 695 host->state = JZ4740_MMC_STATE_DONE; 696 697 switch (host->state) { 698 case JZ4740_MMC_STATE_READ_RESPONSE: 699 if (cmd->flags & MMC_RSP_PRESENT) 700 jz4740_mmc_read_response(host, cmd); 701 702 if (!data) 703 break; 704 705 jz_mmc_prepare_data_transfer(host); 706 707 case JZ4740_MMC_STATE_TRANSFER_DATA: 708 if (host->use_dma) { 709 /* Use DMA if enabled. 710 * Data transfer direction is defined later by 711 * relying on data flags in 712 * jz4740_mmc_prepare_dma_data() and 713 * jz4740_mmc_start_dma_transfer(). 714 */ 715 timeout = jz4740_mmc_start_dma_transfer(host, data); 716 data->bytes_xfered = data->blocks * data->blksz; 717 } else if (data->flags & MMC_DATA_READ) 718 /* Use PIO if DMA is not enabled. 719 * Data transfer direction was defined before 720 * by relying on data flags in 721 * jz_mmc_prepare_data_transfer(). 722 */ 723 timeout = jz4740_mmc_read_data(host, data); 724 else 725 timeout = jz4740_mmc_write_data(host, data); 726 727 if (unlikely(timeout)) { 728 host->state = JZ4740_MMC_STATE_TRANSFER_DATA; 729 break; 730 } 731 732 jz4740_mmc_transfer_check_state(host, data); 733 734 timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE); 735 if (unlikely(timeout)) { 736 host->state = JZ4740_MMC_STATE_SEND_STOP; 737 break; 738 } 739 writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG); 740 741 case JZ4740_MMC_STATE_SEND_STOP: 742 if (!req->stop) 743 break; 744 745 jz4740_mmc_send_command(host, req->stop); 746 747 if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) { 748 timeout = jz4740_mmc_poll_irq(host, 749 JZ_MMC_IRQ_PRG_DONE); 750 if (timeout) { 751 host->state = JZ4740_MMC_STATE_DONE; 752 break; 753 } 754 } 755 case JZ4740_MMC_STATE_DONE: 756 break; 757 } 758 759 if (!timeout) 760 jz4740_mmc_request_done(host); 761 762 return IRQ_HANDLED; 763 } 764 765 static irqreturn_t jz_mmc_irq(int irq, void *devid) 766 { 767 struct jz4740_mmc_host *host = devid; 768 struct mmc_command *cmd = host->cmd; 769 uint16_t irq_reg, status, tmp; 770 771 irq_reg = readw(host->base + JZ_REG_MMC_IREG); 772 773 tmp = irq_reg; 774 irq_reg &= ~host->irq_mask; 775 776 tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ | 777 JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE); 778 779 if (tmp != irq_reg) 780 writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG); 781 782 if (irq_reg & JZ_MMC_IRQ_SDIO) { 783 writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG); 784 mmc_signal_sdio_irq(host->mmc); 785 irq_reg &= ~JZ_MMC_IRQ_SDIO; 786 } 787 788 if (host->req && cmd && irq_reg) { 789 if (test_and_clear_bit(0, &host->waiting)) { 790 del_timer(&host->timeout_timer); 791 792 status = readl(host->base + JZ_REG_MMC_STATUS); 793 794 if (status & JZ_MMC_STATUS_TIMEOUT_RES) { 795 cmd->error = -ETIMEDOUT; 796 } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) { 797 cmd->error = -EIO; 798 } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | 799 JZ_MMC_STATUS_CRC_WRITE_ERROR)) { 800 if (cmd->data) 801 cmd->data->error = -EIO; 802 cmd->error = -EIO; 803 } 804 805 jz4740_mmc_set_irq_enabled(host, irq_reg, false); 806 writew(irq_reg, host->base + JZ_REG_MMC_IREG); 807 808 return IRQ_WAKE_THREAD; 809 } 810 } 811 812 return IRQ_HANDLED; 813 } 814 815 static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate) 816 { 817 int div = 0; 818 int real_rate; 819 820 jz4740_mmc_clock_disable(host); 821 clk_set_rate(host->clk, JZ_MMC_CLK_RATE); 822 823 real_rate = clk_get_rate(host->clk); 824 825 while (real_rate > rate && div < 7) { 826 ++div; 827 real_rate >>= 1; 828 } 829 830 writew(div, host->base + JZ_REG_MMC_CLKRT); 831 return real_rate; 832 } 833 834 static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req) 835 { 836 struct jz4740_mmc_host *host = mmc_priv(mmc); 837 838 host->req = req; 839 840 writew(0xffff, host->base + JZ_REG_MMC_IREG); 841 842 writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG); 843 jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true); 844 845 host->state = JZ4740_MMC_STATE_READ_RESPONSE; 846 set_bit(0, &host->waiting); 847 mod_timer(&host->timeout_timer, jiffies + 5*HZ); 848 jz4740_mmc_send_command(host, req->cmd); 849 } 850 851 static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 852 { 853 struct jz4740_mmc_host *host = mmc_priv(mmc); 854 if (ios->clock) 855 jz4740_mmc_set_clock_rate(host, ios->clock); 856 857 switch (ios->power_mode) { 858 case MMC_POWER_UP: 859 jz4740_mmc_reset(host); 860 if (gpio_is_valid(host->pdata->gpio_power)) 861 gpio_set_value(host->pdata->gpio_power, 862 !host->pdata->power_active_low); 863 host->cmdat |= JZ_MMC_CMDAT_INIT; 864 clk_prepare_enable(host->clk); 865 break; 866 case MMC_POWER_ON: 867 break; 868 default: 869 if (gpio_is_valid(host->pdata->gpio_power)) 870 gpio_set_value(host->pdata->gpio_power, 871 host->pdata->power_active_low); 872 clk_disable_unprepare(host->clk); 873 break; 874 } 875 876 switch (ios->bus_width) { 877 case MMC_BUS_WIDTH_1: 878 host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT; 879 break; 880 case MMC_BUS_WIDTH_4: 881 host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT; 882 break; 883 default: 884 break; 885 } 886 } 887 888 static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) 889 { 890 struct jz4740_mmc_host *host = mmc_priv(mmc); 891 jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable); 892 } 893 894 static const struct mmc_host_ops jz4740_mmc_ops = { 895 .request = jz4740_mmc_request, 896 .pre_req = jz4740_mmc_pre_request, 897 .post_req = jz4740_mmc_post_request, 898 .set_ios = jz4740_mmc_set_ios, 899 .get_ro = mmc_gpio_get_ro, 900 .get_cd = mmc_gpio_get_cd, 901 .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, 902 }; 903 904 static int jz4740_mmc_request_gpio(struct device *dev, int gpio, 905 const char *name, bool output, int value) 906 { 907 int ret; 908 909 if (!gpio_is_valid(gpio)) 910 return 0; 911 912 ret = gpio_request(gpio, name); 913 if (ret) { 914 dev_err(dev, "Failed to request %s gpio: %d\n", name, ret); 915 return ret; 916 } 917 918 if (output) 919 gpio_direction_output(gpio, value); 920 else 921 gpio_direction_input(gpio); 922 923 return 0; 924 } 925 926 static int jz4740_mmc_request_gpios(struct mmc_host *mmc, 927 struct platform_device *pdev) 928 { 929 struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; 930 int ret = 0; 931 932 if (!pdata) 933 return 0; 934 935 if (!pdata->card_detect_active_low) 936 mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; 937 if (!pdata->read_only_active_low) 938 mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 939 940 if (gpio_is_valid(pdata->gpio_card_detect)) { 941 ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0); 942 if (ret) 943 return ret; 944 } 945 946 if (gpio_is_valid(pdata->gpio_read_only)) { 947 ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only); 948 if (ret) 949 return ret; 950 } 951 952 return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, 953 "MMC read only", true, pdata->power_active_low); 954 } 955 956 static void jz4740_mmc_free_gpios(struct platform_device *pdev) 957 { 958 struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; 959 960 if (!pdata) 961 return; 962 963 if (gpio_is_valid(pdata->gpio_power)) 964 gpio_free(pdata->gpio_power); 965 } 966 967 static int jz4740_mmc_probe(struct platform_device* pdev) 968 { 969 int ret; 970 struct mmc_host *mmc; 971 struct jz4740_mmc_host *host; 972 struct jz4740_mmc_platform_data *pdata; 973 974 pdata = pdev->dev.platform_data; 975 976 mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); 977 if (!mmc) { 978 dev_err(&pdev->dev, "Failed to alloc mmc host structure\n"); 979 return -ENOMEM; 980 } 981 982 host = mmc_priv(mmc); 983 host->pdata = pdata; 984 985 host->irq = platform_get_irq(pdev, 0); 986 if (host->irq < 0) { 987 ret = host->irq; 988 dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); 989 goto err_free_host; 990 } 991 992 host->clk = devm_clk_get(&pdev->dev, "mmc"); 993 if (IS_ERR(host->clk)) { 994 ret = PTR_ERR(host->clk); 995 dev_err(&pdev->dev, "Failed to get mmc clock\n"); 996 goto err_free_host; 997 } 998 999 host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1000 host->base = devm_ioremap_resource(&pdev->dev, host->mem_res); 1001 if (IS_ERR(host->base)) { 1002 ret = PTR_ERR(host->base); 1003 dev_err(&pdev->dev, "Failed to ioremap base memory\n"); 1004 goto err_free_host; 1005 } 1006 1007 ret = jz4740_mmc_request_gpios(mmc, pdev); 1008 if (ret) 1009 goto err_release_dma; 1010 1011 mmc->ops = &jz4740_mmc_ops; 1012 mmc->f_min = JZ_MMC_CLK_RATE / 128; 1013 mmc->f_max = JZ_MMC_CLK_RATE; 1014 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1015 mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA; 1016 mmc->caps |= MMC_CAP_SDIO_IRQ; 1017 1018 mmc->max_blk_size = (1 << 10) - 1; 1019 mmc->max_blk_count = (1 << 15) - 1; 1020 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1021 1022 mmc->max_segs = 128; 1023 mmc->max_seg_size = mmc->max_req_size; 1024 1025 host->mmc = mmc; 1026 host->pdev = pdev; 1027 spin_lock_init(&host->lock); 1028 host->irq_mask = 0xffff; 1029 1030 ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, 1031 dev_name(&pdev->dev), host); 1032 if (ret) { 1033 dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); 1034 goto err_free_gpios; 1035 } 1036 1037 jz4740_mmc_reset(host); 1038 jz4740_mmc_clock_disable(host); 1039 timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0); 1040 1041 host->use_dma = true; 1042 if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0) 1043 host->use_dma = false; 1044 1045 platform_set_drvdata(pdev, host); 1046 ret = mmc_add_host(mmc); 1047 1048 if (ret) { 1049 dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret); 1050 goto err_free_irq; 1051 } 1052 dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n"); 1053 1054 dev_info(&pdev->dev, "Using %s, %d-bit mode\n", 1055 host->use_dma ? "DMA" : "PIO", 1056 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1057 1058 return 0; 1059 1060 err_free_irq: 1061 free_irq(host->irq, host); 1062 err_free_gpios: 1063 jz4740_mmc_free_gpios(pdev); 1064 err_release_dma: 1065 if (host->use_dma) 1066 jz4740_mmc_release_dma_channels(host); 1067 err_free_host: 1068 mmc_free_host(mmc); 1069 1070 return ret; 1071 } 1072 1073 static int jz4740_mmc_remove(struct platform_device *pdev) 1074 { 1075 struct jz4740_mmc_host *host = platform_get_drvdata(pdev); 1076 1077 del_timer_sync(&host->timeout_timer); 1078 jz4740_mmc_set_irq_enabled(host, 0xff, false); 1079 jz4740_mmc_reset(host); 1080 1081 mmc_remove_host(host->mmc); 1082 1083 free_irq(host->irq, host); 1084 1085 jz4740_mmc_free_gpios(pdev); 1086 1087 if (host->use_dma) 1088 jz4740_mmc_release_dma_channels(host); 1089 1090 mmc_free_host(host->mmc); 1091 1092 return 0; 1093 } 1094 1095 #ifdef CONFIG_PM_SLEEP 1096 1097 static int jz4740_mmc_suspend(struct device *dev) 1098 { 1099 return pinctrl_pm_select_sleep_state(dev); 1100 } 1101 1102 static int jz4740_mmc_resume(struct device *dev) 1103 { 1104 return pinctrl_pm_select_default_state(dev); 1105 } 1106 1107 static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, 1108 jz4740_mmc_resume); 1109 #define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops) 1110 #else 1111 #define JZ4740_MMC_PM_OPS NULL 1112 #endif 1113 1114 static struct platform_driver jz4740_mmc_driver = { 1115 .probe = jz4740_mmc_probe, 1116 .remove = jz4740_mmc_remove, 1117 .driver = { 1118 .name = "jz4740-mmc", 1119 .pm = JZ4740_MMC_PM_OPS, 1120 }, 1121 }; 1122 1123 module_platform_driver(jz4740_mmc_driver); 1124 1125 MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); 1126 MODULE_LICENSE("GPL"); 1127 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); 1128