// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * Copyright (C) 2013, Imagination Technologies
 *
 * JZ4740 SD/MMC controller driver
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

/* MSC controller register offsets (relative to the mapped base). */
#define JZ_REG_MMC_STRPCL	0x00
#define JZ_REG_MMC_STATUS	0x04
#define JZ_REG_MMC_CLKRT	0x08
#define JZ_REG_MMC_CMDAT	0x0C
#define JZ_REG_MMC_RESTO	0x10
#define JZ_REG_MMC_RDTO		0x14
#define JZ_REG_MMC_BLKLEN	0x18
#define JZ_REG_MMC_NOB		0x1C
#define JZ_REG_MMC_SNOB		0x20
#define JZ_REG_MMC_IMASK	0x24
#define JZ_REG_MMC_IREG		0x28
#define JZ_REG_MMC_CMD		0x2C
#define JZ_REG_MMC_ARG		0x30
#define JZ_REG_MMC_RESP_FIFO	0x34
#define JZ_REG_MMC_RXFIFO	0x38
#define JZ_REG_MMC_TXFIFO	0x3C
#define JZ_REG_MMC_DMAC		0x44

/* STRPCL: start/stop clock and operation control bits. */
#define JZ_MMC_STRPCL_EXIT_MULTIPLE	BIT(7)
#define JZ_MMC_STRPCL_EXIT_TRANSFER	BIT(6)
#define JZ_MMC_STRPCL_START_READWAIT	BIT(5)
#define JZ_MMC_STRPCL_STOP_READWAIT	BIT(4)
#define JZ_MMC_STRPCL_RESET		BIT(3)
#define JZ_MMC_STRPCL_START_OP		BIT(2)
#define JZ_MMC_STRPCL_CLOCK_CONTROL	(BIT(1) | BIT(0))
#define JZ_MMC_STRPCL_CLOCK_STOP	BIT(0)
#define JZ_MMC_STRPCL_CLOCK_START	BIT(1)


/* STATUS: controller state and error flags. */
#define JZ_MMC_STATUS_IS_RESETTING	BIT(15)
#define JZ_MMC_STATUS_SDIO_INT_ACTIVE	BIT(14)
#define JZ_MMC_STATUS_PRG_DONE		BIT(13)
#define JZ_MMC_STATUS_DATA_TRAN_DONE	BIT(12)
#define JZ_MMC_STATUS_END_CMD_RES	BIT(11)
#define JZ_MMC_STATUS_DATA_FIFO_AFULL	BIT(10)
#define JZ_MMC_STATUS_IS_READWAIT	BIT(9)
#define JZ_MMC_STATUS_CLK_EN		BIT(8)
#define JZ_MMC_STATUS_DATA_FIFO_FULL	BIT(7)
#define JZ_MMC_STATUS_DATA_FIFO_EMPTY	BIT(6)
#define JZ_MMC_STATUS_CRC_RES_ERR	BIT(5)
#define JZ_MMC_STATUS_CRC_READ_ERROR	BIT(4)
#define JZ_MMC_STATUS_TIMEOUT_WRITE	BIT(3)
#define JZ_MMC_STATUS_CRC_WRITE_ERROR	BIT(2)
#define JZ_MMC_STATUS_TIMEOUT_RES	BIT(1)
#define JZ_MMC_STATUS_TIMEOUT_READ	BIT(0)

/* CRC_READ_ERROR | TIMEOUT_READ */
#define JZ_MMC_STATUS_READ_ERROR_MASK	(BIT(4) | BIT(0))
/* TIMEOUT_WRITE | CRC_WRITE_ERROR */
#define JZ_MMC_STATUS_WRITE_ERROR_MASK	(BIT(3) | BIT(2))


/* CMDAT: per-command attributes latched when the command is issued. */
#define JZ_MMC_CMDAT_IO_ABORT		BIT(11)
#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT	BIT(10)
#define JZ_MMC_CMDAT_BUS_WIDTH_8BIT	(BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_BUS_WIDTH_MASK	(BIT(10) | BIT(9))
#define JZ_MMC_CMDAT_DMA_EN		BIT(8)
#define JZ_MMC_CMDAT_INIT		BIT(7)
#define JZ_MMC_CMDAT_BUSY		BIT(6)
#define JZ_MMC_CMDAT_STREAM		BIT(5)
#define JZ_MMC_CMDAT_WRITE		BIT(4)
#define JZ_MMC_CMDAT_DATA_EN		BIT(3)
#define JZ_MMC_CMDAT_RESPONSE_FORMAT	(BIT(2) | BIT(1) | BIT(0))
#define JZ_MMC_CMDAT_RSP_R1		1
#define JZ_MMC_CMDAT_RSP_R2		2
#define JZ_MMC_CMDAT_RSP_R3		3

/* IMASK/IREG interrupt bits. */
#define JZ_MMC_IRQ_SDIO			BIT(7)
#define JZ_MMC_IRQ_TXFIFO_WR_REQ	BIT(6)
#define JZ_MMC_IRQ_RXFIFO_RD_REQ	BIT(5)
#define JZ_MMC_IRQ_END_CMD_RES		BIT(2)
#define JZ_MMC_IRQ_PRG_DONE		BIT(1)
#define JZ_MMC_IRQ_DATA_TRAN_DONE	BIT(0)

/* DMAC register (JZ4780+): see comment in jz4740_mmc_send_command(). */
#define JZ_MMC_DMAC_DMA_SEL		BIT(1)
#define JZ_MMC_DMAC_DMA_EN		BIT(0)

/* Default controller clock rate (Hz) used when the DT sets no f_max. */
#define JZ_MMC_CLK_RATE			24000000

/*
 * Supported controller generations, in increasing order of capability;
 * the register-width helpers below compare against these values.
 */
enum jz4740_mmc_version {
	JZ_MMC_JZ4740,
	JZ_MMC_JZ4725B,
	JZ_MMC_JZ4760,
	JZ_MMC_JZ4780,
	JZ_MMC_X1000,
};

/* State machine advanced by jz_mmc_irq_worker() for the current request. */
enum jz4740_mmc_state {
	JZ4740_MMC_STATE_READ_RESPONSE,
	JZ4740_MMC_STATE_TRANSFER_DATA,
	JZ4740_MMC_STATE_SEND_STOP,
	JZ4740_MMC_STATE_DONE,
};

/*
 * The MMC core allows to prepare a mmc_request while another mmc_request
 * is in-flight. This is used via the pre_req/post_req hooks.
 * This driver uses the pre_req/post_req hooks to map/unmap the mmc_request.
 * Following what other drivers do (sdhci, dw_mmc) we use the following cookie
 * flags to keep track of the mmc_request mapping state.
 *
 * COOKIE_UNMAPPED: the request is not mapped.
 * COOKIE_PREMAPPED: the request was mapped in pre_req,
 * and should be unmapped in post_req.
 * COOKIE_MAPPED: the request was mapped in the irq handler,
 * and should be unmapped before mmc_request_done is called..
 */
enum jz4780_cookie {
	COOKIE_UNMAPPED = 0,
	COOKIE_PREMAPPED,
	COOKIE_MAPPED,
};

/* Per-controller driver state, allocated inside struct mmc_host. */
struct jz4740_mmc_host {
	struct mmc_host *mmc;
	struct platform_device *pdev;
	struct clk *clk;

	enum jz4740_mmc_version version;

	int irq;
	int card_detect_irq;

	void __iomem *base;		/* mapped MSC registers */
	struct resource *mem_res;	/* kept for FIFO physical addresses (DMA) */
	struct mmc_request *req;	/* in-flight request, NULL when idle */
	struct mmc_command *cmd;	/* command currently on the bus */

	/* bit 0 set while waiting for an IRQ; cleared by IRQ or timeout */
	unsigned long waiting;

	uint32_t cmdat;			/* sticky CMDAT bits (bus width, INIT) */

	uint32_t irq_mask;		/* shadow of IMASK; protected by lock */

	spinlock_t lock;

	struct timer_list timeout_timer;
	struct sg_mapping_iter miter;	/* PIO scatterlist iterator */
	enum jz4740_mmc_state state;

	/* DMA support */
	struct dma_chan *dma_rx;
	struct dma_chan *dma_tx;
	bool use_dma;

/* The DMA trigger level is 8 words, that is to say, the DMA read
 * trigger is when data words in MSC_RXFIFO is >= 8 and the DMA write
 * trigger is when data words in MSC_TXFIFO is < 8.
 */
#define JZ4740_MMC_FIFO_HALF_SIZE 8
};

/*
 * IMASK is a 32-bit register from the JZ4725B onwards; older parts take
 * 16-bit accesses. NOTE(review): width cutoffs here (JZ4725B) and for IREG
 * below (JZ4780) differ intentionally per datasheet, presumably - confirm.
 */
static void jz4740_mmc_write_irq_mask(struct jz4740_mmc_host *host,
				      uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4725B)
		return writel(val, host->base + JZ_REG_MMC_IMASK);
	else
		return writew(val, host->base + JZ_REG_MMC_IMASK);
}

/* Acknowledge/raise bits in IREG, using the width the SoC generation needs. */
static void jz4740_mmc_write_irq_reg(struct jz4740_mmc_host *host,
				     uint32_t val)
{
	if (host->version >= JZ_MMC_JZ4780)
		writel(val, host->base + JZ_REG_MMC_IREG);
	else
		writew(val, host->base + JZ_REG_MMC_IREG);
}

/* Read the pending-interrupt register with the generation-correct width. */
static uint32_t jz4740_mmc_read_irq_reg(struct jz4740_mmc_host *host)
{
	if (host->version >= JZ_MMC_JZ4780)
		return readl(host->base + JZ_REG_MMC_IREG);
	else
		return readw(host->base + JZ_REG_MMC_IREG);
}

/*----------------------------------------------------------------------------*/
/* DMA infrastructure */

/* Drop both slave channels; no-op when the driver runs in PIO mode. */
static void jz4740_mmc_release_dma_channels(struct jz4740_mmc_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

/*
 * Request the "tx" and "rx" slave channels. Returns 0 on success or a
 * negative errno; on failure nothing is left held.
 */
static int jz4740_mmc_acquire_dma_channels(struct jz4740_mmc_host *host)
{
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Failed to get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

/* Select the DMA channel matching the transfer direction of @data. */
static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
						       struct mmc_data *data)
{
	return (data->flags & MMC_DATA_READ) ? host->dma_rx : host->dma_tx;
}

/* Undo dma_map_sg() for @data and mark the request unmapped again. */
static void jz4740_mmc_dma_unmap(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);

	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
	data->host_cookie = COOKIE_UNMAPPED;
}

/* Prepares DMA data for current or next transfer.
 * A request can be in-flight when this is called.
 *
 * Returns the mapped sg count (> 0) or -EINVAL on mapping failure. If the
 * request was already mapped by pre_req (COOKIE_PREMAPPED) the existing
 * mapping is reused and @cookie is not applied.
 */
static int jz4740_mmc_prepare_dma_data(struct jz4740_mmc_host *host,
				       struct mmc_data *data,
				       int cookie)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	enum dma_data_direction dir = mmc_get_dma_dir(data);
	int sg_count;

	if (data->host_cookie == COOKIE_PREMAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(chan->device->dev,
			      data->sg,
			      data->sg_len,
			      dir);

	if (sg_count <= 0) {
		dev_err(mmc_dev(host->mmc),
			"Failed to map scatterlist for DMA operation\n");
		return -EINVAL;
	}

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return data->sg_count;
}

/*
 * Map (if needed), configure and kick off a slave DMA transfer between
 * memory and the MSC data FIFO. Returns 0 on success or a negative errno;
 * on failure any mapping created here (COOKIE_MAPPED) is undone.
 */
static int jz4740_mmc_start_dma_transfer(struct jz4740_mmc_host *host,
					 struct mmc_data *data)
{
	struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
		.dst_maxburst = JZ4740_MMC_FIFO_HALF_SIZE,
	};
	int sg_count;

	if (data->flags & MMC_DATA_WRITE) {
		conf.direction = DMA_MEM_TO_DEV;
		/* The slave address must be the FIFO's physical address. */
		conf.dst_addr = host->mem_res->start + JZ_REG_MMC_TXFIFO;
	} else {
		conf.direction = DMA_DEV_TO_MEM;
		conf.src_addr = host->mem_res->start + JZ_REG_MMC_RXFIFO;
	}

	sg_count = jz4740_mmc_prepare_dma_data(host, data, COOKIE_MAPPED);
	if (sg_count < 0)
		return sg_count;

	dmaengine_slave_config(chan, &conf);
	desc = dmaengine_prep_slave_sg(chan, data->sg, sg_count,
				       conf.direction,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(mmc_dev(host->mmc),
			"Failed to allocate DMA %s descriptor",
			conf.direction == DMA_MEM_TO_DEV ? "TX" : "RX");
		goto dma_unmap;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;

dma_unmap:
	/* Don't unmap a pre_req mapping - post_req owns that one. */
	if (data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	return -ENOMEM;
}

/* mmc_host_ops.pre_req: map the next request's sglist ahead of time. */
static void jz4740_mmc_pre_request(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (!host->use_dma)
		return;

	data->host_cookie = COOKIE_UNMAPPED;
	if (jz4740_mmc_prepare_dma_data(host, data, COOKIE_PREMAPPED) < 0)
		data->host_cookie = COOKIE_UNMAPPED;
}

/*
 * mmc_host_ops.post_req: unmap whatever mapping is still outstanding and,
 * on error, tear down any in-flight DMA on the request's channel.
 */
static void jz4740_mmc_post_request(struct mmc_host *mmc,
				    struct mmc_request *mrq,
				    int err)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data && data->host_cookie != COOKIE_UNMAPPED)
		jz4740_mmc_dma_unmap(host, data);

	if (err) {
		struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);

		dmaengine_terminate_all(chan);
	}
}

/*----------------------------------------------------------------------------*/

/*
 * Enable (unmask) or disable (mask) the given interrupt bits, keeping the
 * irq_mask shadow and the IMASK register in sync under host->lock.
 */
static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
				       unsigned int irq, bool enabled)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (enabled)
		host->irq_mask &= ~irq;
	else
		host->irq_mask |= irq;

	jz4740_mmc_write_irq_mask(host, host->irq_mask);
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Start the card clock; with @start_transfer also set START_OP to begin
 * the previously programmed command/data operation.
 */
static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
				    bool start_transfer)
{
	uint16_t val = JZ_MMC_STRPCL_CLOCK_START;

	if (start_transfer)
		val |= JZ_MMC_STRPCL_START_OP;

	writew(val, host->base + JZ_REG_MMC_STRPCL);
}

/* Stop the card clock and (bounded) busy-wait until CLK_EN drops. */
static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_CLK_EN && --timeout);
}

/* Soft-reset the controller and (bounded) busy-wait until it settles. */
static void jz4740_mmc_reset(struct jz4740_mmc_host *host)
{
	uint32_t status;
	unsigned int timeout = 1000;

	writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL);
	udelay(10);
	do {
		status = readl(host->base + JZ_REG_MMC_STATUS);
	} while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout);
}

/*
 * Finish the current request: release any IRQ-handler-created DMA mapping
 * (pre_req mappings are left for post_req) and hand it back to the core.
 */
static void jz4740_mmc_request_done(struct jz4740_mmc_host *host)
{
	struct mmc_request *req;
	struct mmc_data *data;

	req = host->req;
	data = req->data;
	host->req = NULL;

	if (data && data->host_cookie == COOKIE_MAPPED)
		jz4740_mmc_dma_unmap(host, data);
	mmc_request_done(host->mmc, req);
}

/*
 * Short busy-poll for @irq in IREG. If it does not arrive in time, arm the
 * software timeout, unmask the interrupt and return true so the caller
 * parks the state machine until the IRQ (or timeout) resumes it.
 * Returns false once the interrupt bit is already pending.
 */
static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host,
					unsigned int irq)
{
	unsigned int timeout = 0x800;
	uint32_t status;

	do {
		status = jz4740_mmc_read_irq_reg(host);
	} while (!(status & irq) && --timeout);

	if (timeout == 0) {
		set_bit(0, &host->waiting);
		mod_timer(&host->timeout_timer, jiffies + 5*HZ);
		jz4740_mmc_set_irq_enabled(host, irq, true);
		return true;
	}

	return false;
}

/*
 * Translate read/write error bits in STATUS into cmd/data error codes
 * (-ETIMEDOUT for timeouts, -EIO for CRC errors).
 */
static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,
					    struct mmc_data *data)
{
	int status;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) {
		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) {
			host->req->cmd->error = -ETIMEDOUT;
			data->error = -ETIMEDOUT;
		} else {
			host->req->cmd->error = -EIO;
			data->error = -EIO;
		}
	}
}

/*
 * PIO write path: push the request's sglist into the TX FIFO, eight words
 * (half the FIFO) per TXFIFO_WR_REQ. Returns true if the FIFO-ready poll
 * timed out, in which case miter->consumed/bytes_xfered record progress so
 * the transfer can resume where it stopped.
 */
static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host,
				  struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO;
	uint32_t *buf;
	bool timeout;
	size_t i, j;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* i = whole words in this segment; j = full 8-word bursts */
		i = miter->length / 4;
		j = i / 8;
		i = i & 0x7;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			writel(buf[0], fifo_addr);
			writel(buf[1], fifo_addr);
			writel(buf[2], fifo_addr);
			writel(buf[3], fifo_addr);
			writel(buf[4], fifo_addr);
			writel(buf[5], fifo_addr);
			writel(buf[6], fifo_addr);
			writel(buf[7], fifo_addr);
			buf += 8;
			--j;
		}
		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i) {
				writel(*buf, fifo_addr);
				++buf;
				--i;
			}
		}
		data->bytes_xfered += miter->length;
	}
	sg_miter_stop(miter);

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/*
 * PIO read path: drain the RX FIFO into the request's sglist in 32-byte
 * bursts, then word-by-word, with a final partial word handled via memcpy.
 * Returns true on FIFO-ready poll timeout (progress recorded as in the
 * write path).
 */
static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host,
				 struct mmc_data *data)
{
	struct sg_mapping_iter *miter = &host->miter;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO;
	uint32_t *buf;
	uint32_t d;
	uint32_t status;
	size_t i, j;
	unsigned int timeout;

	while (sg_miter_next(miter)) {
		buf = miter->addr;
		/* i = bytes in this segment; j = full 32-byte bursts */
		i = miter->length;
		j = i / 32;
		i = i & 0x1f;
		while (j) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			buf[0] = readl(fifo_addr);
			buf[1] = readl(fifo_addr);
			buf[2] = readl(fifo_addr);
			buf[3] = readl(fifo_addr);
			buf[4] = readl(fifo_addr);
			buf[5] = readl(fifo_addr);
			buf[6] = readl(fifo_addr);
			buf[7] = readl(fifo_addr);

			buf += 8;
			--j;
		}

		if (unlikely(i)) {
			timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ);
			if (unlikely(timeout))
				goto poll_timeout;

			while (i >= 4) {
				*buf++ = readl(fifo_addr);
				i -= 4;
			}
			if (unlikely(i > 0)) {
				d = readl(fifo_addr);
				memcpy(buf, &d, i);
			}
		}
		data->bytes_xfered += miter->length;

		/* This can go away once MIPS implements
		 * flush_kernel_dcache_page */
		flush_dcache_page(miter->page);
	}
	sg_miter_stop(miter);

	/* For whatever reason there is sometime one word more in the fifo then
	 * requested */
	timeout = 1000;
	status = readl(host->base + JZ_REG_MMC_STATUS);
	while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) {
		d = readl(fifo_addr);
		status = readl(host->base + JZ_REG_MMC_STATUS);
	}

	return false;

poll_timeout:
	miter->consumed = (void *)buf - miter->addr;
	data->bytes_xfered += miter->consumed;
	sg_miter_stop(miter);

	return true;
}

/*
 * Software timeout: fires if the IRQ we were waiting for never arrived.
 * Only acts if the 'waiting' flag is still set (i.e. the IRQ lost the race),
 * then fails the request with -ETIMEDOUT.
 */
static void jz4740_mmc_timeout(struct timer_list *t)
{
	struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer);

	if (!test_and_clear_bit(0, &host->waiting))
		return;

	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false);

	host->req->cmd->error = -ETIMEDOUT;
	jz4740_mmc_request_done(host);
}

/*
 * Read the command response out of the 16-bit response FIFO and repack it
 * into cmd->resp[] in the layout the MMC core expects (136-bit responses
 * reconstruct four overlapping 32-bit words; short responses one word).
 */
static void jz4740_mmc_read_response(struct jz4740_mmc_host *host,
				     struct mmc_command *cmd)
{
	int i;
	uint16_t tmp;
	void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO;

	if (cmd->flags & MMC_RSP_136) {
		tmp = readw(fifo_addr);
		for (i = 0; i < 4; ++i) {
			cmd->resp[i] = tmp << 24;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp << 8;
			tmp = readw(fifo_addr);
			cmd->resp[i] |= tmp >> 8;
		}
	} else {
		cmd->resp[0] = readw(fifo_addr) << 24;
		cmd->resp[0] |= readw(fifo_addr) << 8;
		cmd->resp[0] |= readw(fifo_addr) & 0xff;
	}
}

/*
 * Program and start a command: stop the clock, build CMDAT from the sticky
 * host bits plus the command's response/data attributes, set up block
 * geometry and DMA mode if data is attached, write CMD/ARG/CMDAT, then
 * restart the clock with START_OP. The one-shot INIT bit is consumed here.
 */
static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
				    struct mmc_command *cmd)
{
	uint32_t cmdat = host->cmdat;

	host->cmdat &= ~JZ_MMC_CMDAT_INIT;
	jz4740_mmc_clock_disable(host);

	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= JZ_MMC_CMDAT_BUSY;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
	case MMC_RSP_R1:
		cmdat |= JZ_MMC_CMDAT_RSP_R1;
		break;
	case MMC_RSP_R2:
		cmdat |= JZ_MMC_CMDAT_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmdat |= JZ_MMC_CMDAT_RSP_R3;
		break;
	default:
		break;
	}

	if (cmd->data) {
		cmdat |= JZ_MMC_CMDAT_DATA_EN;
		if (cmd->data->flags & MMC_DATA_WRITE)
			cmdat |= JZ_MMC_CMDAT_WRITE;
		if (host->use_dma) {
			/*
			 * The 4780's MMC controller has integrated DMA ability
			 * in addition to being able to use the external DMA
			 * controller. It moves DMA control bits to a separate
			 * register. The DMA_SEL bit chooses the external
			 * controller over the integrated one. Earlier SoCs
			 * can only use the external controller, and have a
			 * single DMA enable bit in CMDAT.
			 */
			if (host->version >= JZ_MMC_JZ4780) {
				writel(JZ_MMC_DMAC_DMA_EN | JZ_MMC_DMAC_DMA_SEL,
				       host->base + JZ_REG_MMC_DMAC);
			} else {
				cmdat |= JZ_MMC_CMDAT_DMA_EN;
			}
		} else if (host->version >= JZ_MMC_JZ4780) {
			writel(0, host->base + JZ_REG_MMC_DMAC);
		}

		writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN);
		writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB);
	}

	writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD);
	writel(cmd->arg, host->base + JZ_REG_MMC_ARG);
	writel(cmdat, host->base + JZ_REG_MMC_CMDAT);

	jz4740_mmc_clock_enable(host, 1);
}

/* Initialise the PIO sg iterator with the direction matching the data. */
static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host)
{
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_data *data = cmd->data;
	int direction;

	if (data->flags & MMC_DATA_READ)
		direction = SG_MITER_TO_SG;
	else
		direction = SG_MITER_FROM_SG;

	sg_miter_start(&host->miter, data->sg, data->sg_len, direction);
}


/*
 * Threaded IRQ handler: resumable state machine that drives a request
 * through response read, data transfer (DMA or PIO) and stop command.
 * Whenever a poll times out, 'timeout' is set, host->state records where
 * to resume, and the handler returns without completing the request - the
 * hard IRQ (or the software timeout) re-enters it later.
 */
static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)
{
	struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid;
	struct mmc_command *cmd = host->req->cmd;
	struct mmc_request *req = host->req;
	struct mmc_data *data = cmd->data;
	bool timeout = false;

	if (cmd->error)
		host->state = JZ4740_MMC_STATE_DONE;

	switch (host->state) {
	case JZ4740_MMC_STATE_READ_RESPONSE:
		if (cmd->flags & MMC_RSP_PRESENT)
			jz4740_mmc_read_response(host, cmd);

		if (!data)
			break;

		jz_mmc_prepare_data_transfer(host);
		/* fall through */

	case JZ4740_MMC_STATE_TRANSFER_DATA:
		if (host->use_dma) {
			/* Use DMA if enabled.
			 * Data transfer direction is defined later by
			 * relying on data flags in
			 * jz4740_mmc_prepare_dma_data() and
			 * jz4740_mmc_start_dma_transfer().
			 */
			timeout = jz4740_mmc_start_dma_transfer(host, data);
			data->bytes_xfered = data->blocks * data->blksz;
		} else if (data->flags & MMC_DATA_READ)
			/* Use PIO if DMA is not enabled.
			 * Data transfer direction was defined before
			 * by relying on data flags in
			 * jz_mmc_prepare_data_transfer().
			 */
			timeout = jz4740_mmc_read_data(host, data);
		else
			timeout = jz4740_mmc_write_data(host, data);

		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_TRANSFER_DATA;
			break;
		}

		jz4740_mmc_transfer_check_state(host, data);

		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		if (unlikely(timeout)) {
			host->state = JZ4740_MMC_STATE_SEND_STOP;
			break;
		}
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_DATA_TRAN_DONE);
		/* fall through */

	case JZ4740_MMC_STATE_SEND_STOP:
		if (!req->stop)
			break;

		jz4740_mmc_send_command(host, req->stop);

		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) {
			timeout = jz4740_mmc_poll_irq(host,
						      JZ_MMC_IRQ_PRG_DONE);
			if (timeout) {
				host->state = JZ4740_MMC_STATE_DONE;
				break;
			}
		}
		/* fall through */
	case JZ4740_MMC_STATE_DONE:
		break;
	}

	if (!timeout)
		jz4740_mmc_request_done(host);

	return IRQ_HANDLED;
}

/*
 * Hard IRQ handler. Acks "uninteresting" bits immediately, signals SDIO
 * card interrupts, and for command-completion interrupts records any
 * response error, masks+acks the source and wakes the threaded worker.
 */
static irqreturn_t jz_mmc_irq(int irq, void *devid)
{
	struct jz4740_mmc_host *host = devid;
	struct mmc_command *cmd = host->cmd;
	uint32_t irq_reg, status, tmp;

	status = readl(host->base + JZ_REG_MMC_STATUS);
	irq_reg = jz4740_mmc_read_irq_reg(host);

	tmp = irq_reg;
	/* Only consider interrupts that are currently unmasked. */
	irq_reg &= ~host->irq_mask;

	tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ |
		 JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE);

	if (tmp != irq_reg)
		jz4740_mmc_write_irq_reg(host, tmp & ~irq_reg);

	if (irq_reg & JZ_MMC_IRQ_SDIO) {
		jz4740_mmc_write_irq_reg(host, JZ_MMC_IRQ_SDIO);
		mmc_signal_sdio_irq(host->mmc);
		irq_reg &= ~JZ_MMC_IRQ_SDIO;
	}

	if (host->req && cmd && irq_reg) {
		/* Beat the software timeout to claiming this completion. */
		if (test_and_clear_bit(0, &host->waiting)) {
			del_timer(&host->timeout_timer);

			if (status & JZ_MMC_STATUS_TIMEOUT_RES) {
				cmd->error = -ETIMEDOUT;
			} else if (status & JZ_MMC_STATUS_CRC_RES_ERR) {
				cmd->error = -EIO;
			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR |
				   JZ_MMC_STATUS_CRC_WRITE_ERROR)) {
				if (cmd->data)
					cmd->data->error = -EIO;
				cmd->error = -EIO;
			}

			jz4740_mmc_set_irq_enabled(host, irq_reg, false);
			jz4740_mmc_write_irq_reg(host, irq_reg);

			return IRQ_WAKE_THREAD;
		}
	}

	return IRQ_HANDLED;
}

/*
 * Set the card clock as close to (but not above) @rate using the CLKRT
 * power-of-two divider (0..7) off the controller clock. Returns the rate
 * actually achieved.
 */
static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate)
{
	int div = 0;
	int real_rate;

	jz4740_mmc_clock_disable(host);
	clk_set_rate(host->clk, host->mmc->f_max);

	real_rate = clk_get_rate(host->clk);

	while (real_rate > rate && div < 7) {
		++div;
		real_rate >>= 1;
	}

	writew(div, host->base + JZ_REG_MMC_CLKRT);
	return real_rate;
}

/*
 * mmc_host_ops.request: latch the request, clear and arm the completion
 * interrupt, arm the software timeout and issue the command; completion
 * happens in the IRQ path.
 */
static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);

	host->req = req;

	jz4740_mmc_write_irq_reg(host, ~0);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true);

	host->state = JZ4740_MMC_STATE_READ_RESPONSE;
	set_bit(0, &host->waiting);
	mod_timer(&host->timeout_timer, jiffies + 5*HZ);
	jz4740_mmc_send_command(host, req->cmd);
}

/*
 * mmc_host_ops.set_ios: apply clock rate, power state (regulator + clock
 * gate + controller reset) and bus width (via the sticky CMDAT bits).
 */
static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	if (ios->clock)
		jz4740_mmc_set_clock_rate(host, ios->clock);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		jz4740_mmc_reset(host);
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		host->cmdat |= JZ_MMC_CMDAT_INIT;
		clk_prepare_enable(host->clk);
		break;
	case MMC_POWER_ON:
		break;
	default:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		clk_disable_unprepare(host->clk);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		break;
	case MMC_BUS_WIDTH_4:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_MASK;
		host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_8BIT;
		break;
	default:
		break;
	}
}

/* mmc_host_ops.enable_sdio_irq: (un)mask the SDIO card interrupt. */
static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct jz4740_mmc_host *host = mmc_priv(mmc);
	jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, enable);
}

static const struct mmc_host_ops jz4740_mmc_ops = {
	.request	= jz4740_mmc_request,
	.pre_req	= jz4740_mmc_pre_request,
	.post_req	= jz4740_mmc_post_request,
	.set_ios	= jz4740_mmc_set_ios,
	.get_ro		= mmc_gpio_get_ro,
	.get_cd		= mmc_gpio_get_cd,
	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,
};

static const struct of_device_id jz4740_mmc_of_match[] = {
	{ .compatible = "ingenic,jz4740-mmc", .data = (void *) JZ_MMC_JZ4740 },
	{ .compatible = "ingenic,jz4725b-mmc", .data = (void *)JZ_MMC_JZ4725B },
	{ .compatible = "ingenic,jz4760-mmc", .data = (void *) JZ_MMC_JZ4760 },
	{ .compatible = "ingenic,jz4780-mmc", .data = (void *) JZ_MMC_JZ4780 },
	{ .compatible = "ingenic,x1000-mmc", .data = (void *) JZ_MMC_X1000 },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_mmc_of_match);

/*
 * Probe: allocate the mmc_host, resolve the SoC generation from DT, claim
 * IRQ/clock/registers, configure core limits, and try to acquire DMA
 * channels - falling back to PIO if they are unavailable (but deferring
 * on -EPROBE_DEFER).
 */
static int jz4740_mmc_probe(struct platform_device* pdev)
{
	int ret;
	struct mmc_host *mmc;
	struct jz4740_mmc_host *host;
	const struct of_device_id *match;

	mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to alloc mmc host structure\n");
		return -ENOMEM;
	}

	host = mmc_priv(mmc);

	match = of_match_device(jz4740_mmc_of_match, &pdev->dev);
	if (match) {
		host->version = (enum jz4740_mmc_version)match->data;
	} else {
		/* JZ4740 should be the only one using legacy probe */
		host->version = JZ_MMC_JZ4740;
	}

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev,
				"could not parse device properties: %d\n", ret);
		goto err_free_host;
	}

	mmc_regulator_get_supply(mmc);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		ret = host->irq;
		goto err_free_host;
	}

	host->clk = devm_clk_get(&pdev->dev, "mmc");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		dev_err(&pdev->dev, "Failed to get mmc clock\n");
		goto err_free_host;
	}

	host->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->base = devm_ioremap_resource(&pdev->dev, host->mem_res);
	if (IS_ERR(host->base)) {
		ret = PTR_ERR(host->base);
		dev_err(&pdev->dev, "Failed to ioremap base memory\n");
		goto err_free_host;
	}

	mmc->ops = &jz4740_mmc_ops;
	if (!mmc->f_max)
		mmc->f_max = JZ_MMC_CLK_RATE;
	/* Minimum rate = maximum rate through the largest divider (2^7). */
	mmc->f_min = mmc->f_max / 128;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* BLKLEN and NOB are 10- and 15-bit fields respectively. */
	mmc->max_blk_size = (1 << 10) - 1;
	mmc->max_blk_count = (1 << 15) - 1;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	mmc->max_segs = 128;
	mmc->max_seg_size = mmc->max_req_size;

	host->mmc = mmc;
	host->pdev = pdev;
	spin_lock_init(&host->lock);
	host->irq_mask = ~0;	/* everything masked until explicitly enabled */

	jz4740_mmc_reset(host);

	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,
				   dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
		goto err_free_host;
	}

	jz4740_mmc_clock_disable(host);
	timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0);

	ret = jz4740_mmc_acquire_dma_channels(host);
	if (ret == -EPROBE_DEFER)
		goto err_free_irq;
	host->use_dma = !ret;	/* any other failure means: use PIO */

	platform_set_drvdata(pdev, host);
	ret = mmc_add_host(mmc);

	if (ret) {
		dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret);
		goto err_release_dma;
	}
	dev_info(&pdev->dev, "Ingenic SD/MMC card driver registered\n");

	dev_info(&pdev->dev, "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_8_BIT_DATA) ? 8 :
		 ((mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1));

	return 0;

err_release_dma:
	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);
err_free_irq:
	free_irq(host->irq, host);
err_free_host:
	mmc_free_host(mmc);

	return ret;
}

/* Remove: quiesce the hardware, then release everything probe acquired. */
static int jz4740_mmc_remove(struct platform_device *pdev)
{
	struct jz4740_mmc_host *host = platform_get_drvdata(pdev);

	del_timer_sync(&host->timeout_timer);
	jz4740_mmc_set_irq_enabled(host, 0xff, false);
	jz4740_mmc_reset(host);

	mmc_remove_host(host->mmc);

	free_irq(host->irq, host);

	if (host->use_dma)
		jz4740_mmc_release_dma_channels(host);

	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM_SLEEP

/* Suspend/resume only toggle the pin state; the MMC core handles the rest. */
static int jz4740_mmc_suspend(struct device *dev)
{
	return pinctrl_pm_select_sleep_state(dev);
}

static int jz4740_mmc_resume(struct device *dev)
{
	return pinctrl_pm_select_default_state(dev);
}

static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
			 jz4740_mmc_resume);
#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)
#else
#define JZ4740_MMC_PM_OPS NULL
#endif

static struct platform_driver jz4740_mmc_driver = {
	.probe = jz4740_mmc_probe,
	.remove = jz4740_mmc_remove,
	.driver = {
		.name = "jz4740-mmc",
		.of_match_table = of_match_ptr(jz4740_mmc_of_match),
		.pm = JZ4740_MMC_PM_OPS,
	},
};

module_platform_driver(jz4740_mmc_driver);

MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");