/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * Copyright (C) 2013, Imagination Technologies
 *
 * JZ4740 SD/MMC controller driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#include <asm/mach-jz4740/dma.h>
#include <asm/mach-jz4740/jz4740_mmc.h>

/* Register offsets of the MSC controller, relative to the memory resource. */
#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_DMAC		0x44

/* Bits of the STRPCL (start/stop clock and operations) register. */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE	BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER	BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT	BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT	BIT(4)
#define JZ_MMC_STRPCL_RESET		BIT(3)
#define JZ_MMC_STRPCL_START_OP		BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL	(BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP	BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START	BIT(1)


/* Bits of the STATUS register. */
#define JZ_MMC_STATUS_IS_RESETTING	BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE	BIT(14)
#define JZ_MMC_STATUS_PRG_DONE		BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE	BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES	BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL	BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT	BIT(9)
#define JZ_MMC_STATUS_CLK_EN		BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL	BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY	BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR	BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR	BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE	BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR	BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES	BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ	BIT(0)

/* Read errors: CRC_READ_ERROR | TIMEOUT_READ. */
#define JZ_MMC_STATUS_READ_ERROR_MASK	(BIT(4) | BIT(0))
/* Write errors: TIMEOUT_WRITE | CRC_WRITE_ERROR. */
#define JZ_MMC_STATUS_WRITE_ERROR_MASK	(BIT(3) | BIT(2))


/* Bits of the CMDAT (command attribute) register. */
#define JZ_MMC_CMDAT_IO_ABORT		BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT	BIT(10)
#define JZ_MMC_CMDAT_DMA_EN		BIT(8)
#define JZ_MMC_CMDAT_INIT		BIT(7)
#define JZ_MMC_CMDAT_BUSY		BIT(6)
#define JZ_MMC_CMDAT_STREAM		BIT(5)
#define JZ_MMC_CMDAT_WRITE		BIT(4)
#define JZ_MMC_CMDAT_DATA_EN		BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT	(BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1		1
#define JZ_MMC_CMDAT_RSP_R2		2
#define JZ_MMC_CMDAT_RSP_R3		3

/* Bits of the IMASK/IREG (interrupt mask/flag) registers. */
#define JZ_MMC_IRQ_SDIO			BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ	BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ	BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES		BIT(2)
#define JZ_MMC_IRQ_PRG_DONE		BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE	BIT(0)

/* Bits of the DMAC register (JZ4780 and later only, see send_command()). */
#define JZ_MMC_DMAC_DMA_SEL		BIT(1)
#define JZ_MMC_DMAC_DMA_EN		BIT(0)

/* Default controller clock rate when the device tree does not set f_max. */
#define JZ_MMC_CLK_RATE			24000000

/*
 * Supported SoC generations, in increasing order so that `>=` comparisons
 * can be used to gate features introduced by a given generation (wider
 * IMASK/IREG registers, the integrated DMA controller, ...).
 */
enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4780,
};

/* State machine driven by the threaded IRQ handler (jz_mmc_irq_worker). */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};

/*
 * The MMC core allows to prepare a mmc_request while another mmc_request
 * is in-flight. This is used via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called..
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};

struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct jz4740_mmc_platform_data *pdata;
	struct clk *clk;
	struct gpio_desc *power;	/* optional power GPIO, may be NULL */

	enum jz4740_mmc_version version;

	int irq;
	int card_detect_irq;

	void __iomem *base;
	struct resource *mem_res;
	struct mmc_request *req;	/* in-flight request, NULL when idle */
	struct mmc_command *cmd;	/* command currently on the bus */

	/* bit 0 set while waiting for an IRQ; cleared by IRQ or timeout */
	unsigned long waiting;

	/* CMDAT bits carried over between commands (bus width, INIT, ...) */
	uint32_t cmdat;

	/* shadow of IMASK; bits set = interrupt masked. Guarded by lock. */
	uint32_t irq_mask;

	spinlock_t lock;

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;	/* PIO scatterlist iterator */
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read
 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 * trigger is when data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};

/*
 * IMASK is 32 bits wide from the JZ4725B onwards; the original JZ4740 only
 * has a 16-bit register, hence the width-dependent accessor.
 */
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

/* IREG grew to 32 bits on the JZ4780; earlier SoCs use a 16-bit access. */
static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		return writel(val, host->base + JZ_REG_MMC_IREG);
	else
		return writew(val, host->base + JZ_REG_MMC_IREG);
}

static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

/*
 * Request the "tx" and "rx" DMA channels. Returns 0 on success or a negative
 * errno; the caller treats -EPROBE_DEFER as fatal and any other error as
 * "fall back to PIO" (see probe).
 */
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

/* Pick the DMA channel matching the transfer direction of @data. */
static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
						       struct mmc_data *data)
{
	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
}

static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for current or next transfer.
 * A request can be in-flight when this is called.
 *
 * Returns the number of mapped sg entries, or a negative errno. If the
 * request was already mapped in pre_req (COOKIE_PREMAPPED) the existing
 * mapping is reused and @cookie is not applied.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	int sg_count;

	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (sg_count <= 0) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}

/*
 * Configure the slave channel for @data's direction, map the scatterlist
 * (unless premapped) and submit the descriptor. Returns 0 on success or a
 * negative errno; on failure a mapping created here is undone.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_TRANSMIT;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
		conf.slave_id = JZ4740_DMA_TYPE_MMC_RECEIVE;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Only unmap what we mapped here; a premapped sg stays mapped. */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

/*
 * .pre_req hook: map the next request's scatterlist ahead of time so the
 * IRQ worker doesn't have to. The MMC core only calls this for requests
 * that carry data.
 */
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

/* .post_req hook: undo the pre_req mapping; on error also stop the channel. */
static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		/* NOTE(review): assumes data != NULL on the error path —
		 * the core only pairs post_req with a data request. */
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

/*
 * Enable or disable bits in the interrupt mask. irq_mask is a shadow of the
 * IMASK register (set bit = masked); the lock serialises updates from the
 * request path, the hard IRQ handler and the timeout timer.
 */
static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
				       unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enabled)
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;

	jz4740_mmc_write_irq_mask(host, host->irq_mask);
	spin_unlock_irqrestore(&host->lock, flags);
}

/* Start the card clock; optionally also kick off the queued operation. */
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
				    bool start_transfer)
{
	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;

	if (start_transfer)
		val |= JZ_MMC_STRPCL_START_OP;

	writew(val, host->base + JZ_REG_MMC_STRPCL);
}

/* Stop the card clock and busy-wait (bounded) until it is actually gated. */
static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
}

/* Soft-reset the controller and wait (bounded) for the reset to complete. */
static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
	udelay(10);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
}

/* Finish the in-flight request: unmap any IRQ-path DMA mapping, notify core. */
static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req;
	struct mmc_data *data;

	req = host->req;
	data = req->data;
	host->req = NULL;

	/* Premapped requests are unmapped later in post_req instead. */
	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	mmc_request_done(host->mmc, req);
}

/*
 * Busy-poll IREG for @irq. Returns nonzero ("timed out") when the flag did
 * not appear within the polling budget; in that case the interrupt is
 * unmasked and the software timeout timer re-armed so completion continues
 * asynchronously in the IRQ path.
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer, jiffies + 5*HZ);
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

/* Translate STATUS error bits into cmd->error / data->error after a transfer. */
static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}

/*
 * PIO write path: feed the TX FIFO half a FIFO (8 words) at a time, waiting
 * for TXFIFO_WR_REQ before each burst. Returns true when the FIFO-ready poll
 * timed out; the caller then leaves the state machine in TRANSFER_DATA so the
 * transfer resumes from miter->consumed when the interrupt finally fires.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length / 4;	/* length in 32-bit words */
		j = i / 8;		/* full 8-word bursts */
		i = i & 0x7;		/* leftover words */
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	/* Record how far we got so the transfer can be resumed later. */
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/*
 * PIO read path, mirror of jz4740_mmc_write_data(): drain the RX FIFO in
 * 8-word bursts, then word-by-word, then a final partial word via memcpy.
 * Returns true on FIFO-ready poll timeout (transfer resumes later).
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		i = miter->length;	/* length in bytes */
		j = i / 32;		/* full 8-word (32-byte) bursts */
		i = i & 0x1f;		/* leftover bytes */
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				/* Partial trailing word: copy only i bytes. */
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/*
 * Software timeout (5s after command start or poll timeout). If the IRQ
 * handler already claimed the waiting bit, do nothing; otherwise fail the
 * request with -ETIMEDOUT.
 */
static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}

/*
 * Read the command response out of the 16-bit response FIFO. A 136-bit (R2)
 * response is reassembled 24+8 bits at a time into resp[0..3]; short
 * responses occupy the top 24+8 bits of resp[0].
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

/*
 * Program CMD/ARG/CMDAT (and the block geometry for data commands), then
 * restart the clock with START_OP to put the command on the bus. The clock
 * must be stopped while the registers are written.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	/* INIT (80-clock init sequence) is a one-shot flag. */
	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The 4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

/* Start the PIO scatterlist iterator in the direction @data requires. */
static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}


/*
 * Threaded half of the interrupt handler: a resumable state machine.
 * Cases deliberately fall through so a request advances as far as it can
 * in one invocation; when a poll times out, host->state records where to
 * resume when the next interrupt arrives.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		/* fall through */

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			/* Resume here when the FIFO interrupt fires. */
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		/* fall through */

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		/* fall through */

	case JZ4740_MMC_STATE_DONE:
		break;
	}

	/* timeout == true means completion was deferred to a later IRQ. */
	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}

/*
 * Hard IRQ handler: ack stray flags, forward SDIO interrupts, classify
 * command errors from STATUS, and wake the threaded worker when the
 * in-flight command made progress.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	irq_reg &= ~host->irq_mask;	/* keep only unmasked interrupts */

	/* Ack flags that are masked or that nobody is waiting on. */
	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		 JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				    JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}

/*
 * Set the card clock as close to (but not above) @rate as the power-of-two
 * divider allows (div 0..7). Returns the achieved rate in Hz.
 */
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);
	return real_rate;
}

/* .request hook: arm the timeout, enable END_CMD_RES and issue the command. */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	jz4740_mmc_write_irq_reg(host, ~0);	/* ack everything pending */
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer, jiffies + 5*HZ);
	jz4740_mmc_send_command(host, req->cmd);
}

/* .set_ios hook: clock rate, power sequencing and bus width. */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (host->power)
			gpiod_set_value(host->power, 1);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		/* Power off: drop the power GPIO and gate the clock. */
		if (host->power)
			gpiod_set_value(host->power, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	default:
		break;
	}
}

static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}

static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};

/*
 * Legacy (non-DT) probe path: translate platform data polarity flags and
 * request the optional card-detect, write-protect and power GPIOs.
 * Returns 0 when there is no platform data at all.
 */
static int jz4740_mmc_request_gpios(struct jz4740_mmc_host *host,
				    struct mmc_host *mmc,
				    struct platform_device *pdev)
{
	struct jz4740_mmc_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret = 0;

	if (!pdata)
		return 0;

	if (!pdata->card_detect_active_low)
		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
	if (!pdata->read_only_active_low)
		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;

	/*
	 * Get optional card detect and write protect GPIOs,
	 * only back out on probe deferral.
	 */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;

	ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
	if (ret == -EPROBE_DEFER)
		return ret;

	host->power = devm_gpiod_get_optional(&pdev->dev, "power",
					      GPIOD_OUT_HIGH);
	return PTR_ERR_OR_ZERO(host->power);
}

static const struct of_device_id jz4740_mmc_of_match[] = {
	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);

static int jz4740_mmc_probe(struct platform_device *pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;
	struct jz4740_mmc_platform_data *pdata;

	pdata = dev_get_platdata(&pdev->dev);

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);
	host->pdata = pdata;

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		/* DT probe: SoC version comes from the compatible string. */
		host->version = (enum jz4740_mmc_version)match->data;
		ret = mmc_of_parse(mmc);
		if (ret) {
			if (ret != -EPROBE_DEFER)
				dev_err(&pdev->dev,
					"could not parse of data: %d\n", ret);
			goto err_free_host;
		}
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
		mmc->caps |= MMC_CAP_SDIO_IRQ;
		if (!(pdata && pdata->data_1bit))
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		ret = jz4740_mmc_request_gpios(host, mmc, pdev);
		if (ret)
			goto err_free_host;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret);
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;
	mmc->f_min = mmc->f_max / 128;	/* smallest divider is 2^7 */
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	host->irq_mask = ~0;	/* all interrupts masked until needed */

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
				   dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	/* DMA is optional: only deferral is fatal, otherwise use PIO. */
	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}

static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM_SLEEP

/* Suspend/resume only toggle the pinctrl state; the MMC core handles cards. */
static int jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}

static int jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_pm_select_default_state(dev);
}

static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
	jz4740_mmc_resume);
#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
#else
#define JZ4740_MMC_PM_OPS NULL
#endif

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = JZ4740_MMC_PM_OPS,
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");