1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. 4 * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> 5 */ 6 7 #include <linux/clk.h> 8 #include <linux/delay.h> 9 #include <linux/device.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/dmaengine.h> 12 #include <linux/highmem.h> 13 #include <linux/interrupt.h> 14 #include <linux/io.h> 15 #include <linux/log2.h> 16 #include <linux/mmc/host.h> 17 #include <linux/mmc/mmc.h> 18 #include <linux/mmc/sd.h> 19 #include <linux/mmc/sdio.h> 20 #include <linux/module.h> 21 #include <linux/pagemap.h> 22 #include <linux/pinctrl/consumer.h> 23 #include <linux/platform_device.h> 24 #include <linux/scatterlist.h> 25 #include <linux/string.h> 26 #include <linux/time.h> 27 #include <linux/virtio.h> 28 #include <linux/workqueue.h> 29 30 #define USDHI6_SD_CMD 0x0000 31 #define USDHI6_SD_PORT_SEL 0x0004 32 #define USDHI6_SD_ARG 0x0008 33 #define USDHI6_SD_STOP 0x0010 34 #define USDHI6_SD_SECCNT 0x0014 35 #define USDHI6_SD_RSP10 0x0018 36 #define USDHI6_SD_RSP32 0x0020 37 #define USDHI6_SD_RSP54 0x0028 38 #define USDHI6_SD_RSP76 0x0030 39 #define USDHI6_SD_INFO1 0x0038 40 #define USDHI6_SD_INFO2 0x003c 41 #define USDHI6_SD_INFO1_MASK 0x0040 42 #define USDHI6_SD_INFO2_MASK 0x0044 43 #define USDHI6_SD_CLK_CTRL 0x0048 44 #define USDHI6_SD_SIZE 0x004c 45 #define USDHI6_SD_OPTION 0x0050 46 #define USDHI6_SD_ERR_STS1 0x0058 47 #define USDHI6_SD_ERR_STS2 0x005c 48 #define USDHI6_SD_BUF0 0x0060 49 #define USDHI6_SDIO_MODE 0x0068 50 #define USDHI6_SDIO_INFO1 0x006c 51 #define USDHI6_SDIO_INFO1_MASK 0x0070 52 #define USDHI6_CC_EXT_MODE 0x01b0 53 #define USDHI6_SOFT_RST 0x01c0 54 #define USDHI6_VERSION 0x01c4 55 #define USDHI6_HOST_MODE 0x01c8 56 #define USDHI6_SDIF_MODE 0x01cc 57 58 #define USDHI6_SD_CMD_APP 0x0040 59 #define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000 60 #define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300 61 #define USDHI6_SD_CMD_MODE_RSP_R1 0x0400 /* Also R5, R6, R7 */ 62 #define USDHI6_SD_CMD_MODE_RSP_R1B 0x0500 /* R1b */ 63 #define USDHI6_SD_CMD_MODE_RSP_R2 0x0600 64 #define USDHI6_SD_CMD_MODE_RSP_R3 0x0700 /* Also R4 */ 65 #define USDHI6_SD_CMD_DATA 0x0800 66 #define USDHI6_SD_CMD_READ 0x1000 67 #define USDHI6_SD_CMD_MULTI 0x2000 68 #define USDHI6_SD_CMD_CMD12_AUTO_OFF 0x4000 69 70 #define USDHI6_CC_EXT_MODE_SDRW BIT(1) 71 72 #define USDHI6_SD_INFO1_RSP_END BIT(0) 73 #define USDHI6_SD_INFO1_ACCESS_END BIT(2) 74 #define USDHI6_SD_INFO1_CARD_OUT BIT(3) 75 #define USDHI6_SD_INFO1_CARD_IN BIT(4) 76 #define USDHI6_SD_INFO1_CD BIT(5) 77 #define USDHI6_SD_INFO1_WP BIT(7) 78 #define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8) 79 #define USDHI6_SD_INFO1_D3_CARD_IN BIT(9) 80 81 #define USDHI6_SD_INFO2_CMD_ERR BIT(0) 82 #define USDHI6_SD_INFO2_CRC_ERR BIT(1) 83 #define USDHI6_SD_INFO2_END_ERR BIT(2) 84 #define USDHI6_SD_INFO2_TOUT BIT(3) 85 #define USDHI6_SD_INFO2_IWA_ERR BIT(4) 86 #define USDHI6_SD_INFO2_IRA_ERR BIT(5) 87 #define USDHI6_SD_INFO2_RSP_TOUT BIT(6) 88 #define USDHI6_SD_INFO2_SDDAT0 BIT(7) 89 #define USDHI6_SD_INFO2_BRE BIT(8) 90 #define USDHI6_SD_INFO2_BWE BIT(9) 91 #define USDHI6_SD_INFO2_SCLKDIVEN BIT(13) 92 #define USDHI6_SD_INFO2_CBSY BIT(14) 93 #define USDHI6_SD_INFO2_ILA BIT(15) 94 95 #define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN) 96 #define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT) 97 #define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT) 98 #define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT) 99 100 #define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR | \ 101 USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \ 102 USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \ 103 USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \ 104 USDHI6_SD_INFO2_ILA) 105 106 #define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \ 107 USDHI6_SD_INFO1_CARD) 108 109 #define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \ 110 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA) 111 112 #define USDHI6_SD_CLK_CTRL_SCLKEN BIT(8) 113 114 #define USDHI6_SD_STOP_STP BIT(0) 115 #define USDHI6_SD_STOP_SEC BIT(8) 116 117 #define USDHI6_SDIO_INFO1_IOIRQ BIT(0) 118 #define USDHI6_SDIO_INFO1_EXPUB52 BIT(14) 119 #define USDHI6_SDIO_INFO1_EXWT BIT(15) 120 121 #define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13) 122 123 #define USDHI6_SOFT_RST_RESERVED (BIT(1) | BIT(2)) 124 #define USDHI6_SOFT_RST_RESET BIT(0) 125 126 #define USDHI6_SD_OPTION_TIMEOUT_SHIFT 4 127 #define USDHI6_SD_OPTION_TIMEOUT_MASK (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT) 128 #define USDHI6_SD_OPTION_WIDTH_1 BIT(15) 129 130 #define USDHI6_SD_PORT_SEL_PORTS_SHIFT 8 131 132 #define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff 133 134 #define USDHI6_SDIO_INFO1_IRQ (USDHI6_SDIO_INFO1_IOIRQ | 3 | \ 135 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT) 136 137 #define USDHI6_MIN_DMA 64 138 139 enum usdhi6_wait_for { 140 USDHI6_WAIT_FOR_REQUEST, 141 USDHI6_WAIT_FOR_CMD, 142 USDHI6_WAIT_FOR_MREAD, 143 USDHI6_WAIT_FOR_MWRITE, 144 USDHI6_WAIT_FOR_READ, 145 USDHI6_WAIT_FOR_WRITE, 146 USDHI6_WAIT_FOR_DATA_END, 147 USDHI6_WAIT_FOR_STOP, 148 USDHI6_WAIT_FOR_DMA, 149 }; 150 151 struct usdhi6_page { 152 struct page *page; 153 void *mapped; /* mapped page */ 154 }; 155 156 struct usdhi6_host { 157 struct mmc_host *mmc; 158 struct mmc_request *mrq; 159 void __iomem *base; 160 struct clk *clk; 161 162 /* SG memory handling */ 163 164 /* Common for multiple and single block requests */ 165 struct usdhi6_page pg; /* current page from an SG */ 166 void *blk_page; /* either a mapped page, or the bounce buffer */ 167 size_t offset; /* offset within a page, including sg->offset */ 168 169 /* Blocks, crossing a page boundary */ 170 size_t head_len; 171 struct usdhi6_page head_pg; 172 173 /* A bounce buffer for unaligned blocks or blocks, crossing a page boundary */ 174 struct scatterlist bounce_sg; 175 u8 bounce_buf[512]; 176 177 /* Multiple block requests only */ 178 struct scatterlist *sg; /* current SG segment */ 179 int page_idx; /* page index within an SG segment */ 180 181 enum usdhi6_wait_for wait; 182 u32 status_mask; 183 u32 status2_mask; 184 u32 sdio_mask; 185 u32 io_error; 186 u32 irq_status; 187 unsigned long imclk; 188 unsigned long rate; 189 bool app_cmd; 190 191 /* Timeout handling */ 192 struct delayed_work timeout_work; 193 unsigned long timeout; 194 195 /* DMA support */ 196 struct dma_chan *chan_rx; 197 struct dma_chan *chan_tx; 198 bool dma_active; 199 200 /* Pin control */ 201 struct pinctrl *pinctrl; 202 struct pinctrl_state *pins_uhs; 203 }; 204 205 /* I/O primitives */ 206 207 static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data) 208 { 209 iowrite32(data, host->base + reg); 210 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 211 host->base, reg, data); 212 } 213 214 static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data) 215 { 216 iowrite16(data, host->base + reg); 217 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 218 host->base, reg, data); 219 } 220 221 static u32 usdhi6_read(struct usdhi6_host *host, u32 reg) 222 { 223 u32 data = ioread32(host->base + reg); 224 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 225 host->base, reg, data); 226 return data; 227 } 228 229 static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg) 230 { 231 u16 data = ioread16(host->base + reg); 232 dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, 233 host->base, reg, data); 234 return data; 235 } 236 237 static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2) 238 { 239 host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1; 240 host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2; 241 usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask); 242 usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask); 243 } 244 245 static void usdhi6_wait_for_resp(struct usdhi6_host *host) 246 { 247 usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END | 248 USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD, 249 USDHI6_SD_INFO2_ERR); 250 } 251 252 static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read) 253 { 254 usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END | 255 USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR | 256 (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE)); 257 } 258 259 static void usdhi6_only_cd(struct usdhi6_host *host) 260 { 261 /* Mask all except card hotplug */ 262 usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0); 263 } 264 265 static void usdhi6_mask_all(struct usdhi6_host *host) 266 { 267 usdhi6_irq_enable(host, 0, 0); 268 } 269 270 static int usdhi6_error_code(struct usdhi6_host *host) 271 { 272 u32 err; 273 274 usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP); 275 276 if (host->io_error & 277 (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) { 278 u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54); 279 int opc = host->mrq ? host->mrq->cmd->opcode : -1; 280 281 err = usdhi6_read(host, USDHI6_SD_ERR_STS2); 282 /* Response timeout is often normal, don't spam the log */ 283 if (host->wait == USDHI6_WAIT_FOR_CMD) 284 dev_dbg(mmc_dev(host->mmc), 285 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", 286 err, rsp54, host->wait, opc); 287 else 288 dev_warn(mmc_dev(host->mmc), 289 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", 290 err, rsp54, host->wait, opc); 291 return -ETIMEDOUT; 292 } 293 294 err = usdhi6_read(host, USDHI6_SD_ERR_STS1); 295 if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR) 296 dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n", 297 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1); 298 if (host->io_error & USDHI6_SD_INFO2_ILA) 299 return -EILSEQ; 300 301 return -EIO; 302 } 303 304 /* Scatter-Gather management */ 305 306 /* 307 * In PIO mode we have to map each page separately, using kmap(). That way 308 * adjacent pages are mapped to non-adjacent virtual addresses. That's why we 309 * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks 310 * have been observed with an SDIO WiFi card (b43 driver). 311 */ 312 static void usdhi6_blk_bounce(struct usdhi6_host *host, 313 struct scatterlist *sg) 314 { 315 struct mmc_data *data = host->mrq->data; 316 size_t blk_head = host->head_len; 317 318 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n", 319 __func__, host->mrq->cmd->opcode, data->sg_len, 320 data->blksz, data->blocks, sg->offset); 321 322 host->head_pg.page = host->pg.page; 323 host->head_pg.mapped = host->pg.mapped; 324 host->pg.page = nth_page(host->pg.page, 1); 325 host->pg.mapped = kmap(host->pg.page); 326 327 host->blk_page = host->bounce_buf; 328 host->offset = 0; 329 330 if (data->flags & MMC_DATA_READ) 331 return; 332 333 memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head, 334 blk_head); 335 memcpy(host->bounce_buf + blk_head, host->pg.mapped, 336 data->blksz - blk_head); 337 } 338 339 /* Only called for multiple block IO */ 340 static void usdhi6_sg_prep(struct usdhi6_host *host) 341 { 342 struct mmc_request *mrq = host->mrq; 343 struct mmc_data *data = mrq->data; 344 345 usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); 346 347 host->sg = data->sg; 348 /* TODO: if we always map, this is redundant */ 349 host->offset = host->sg->offset; 350 } 351 352 /* Map the first page in an SG segment: common for multiple and single block IO */ 353 static void *usdhi6_sg_map(struct usdhi6_host *host) 354 { 355 struct mmc_data *data = host->mrq->data; 356 struct scatterlist *sg = data->sg_len > 1 ? host->sg : data->sg; 357 size_t head = PAGE_SIZE - sg->offset; 358 size_t blk_head = head % data->blksz; 359 360 WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); 361 if (WARN(sg_dma_len(sg) % data->blksz, 362 "SG size %u isn't a multiple of block size %u\n", 363 sg_dma_len(sg), data->blksz)) 364 return NULL; 365 366 host->pg.page = sg_page(sg); 367 host->pg.mapped = kmap(host->pg.page); 368 host->offset = sg->offset; 369 370 /* 371 * Block size must be a power of 2 for multi-block transfers, 372 * therefore blk_head is equal for all pages in this SG 373 */ 374 host->head_len = blk_head; 375 376 if (head < data->blksz) 377 /* 378 * The first block in the SG crosses a page boundary. 379 * Max blksz = 512, so blocks can only span 2 pages 380 */ 381 usdhi6_blk_bounce(host, sg); 382 else 383 host->blk_page = host->pg.mapped; 384 385 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n", 386 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, 387 sg->offset, host->mrq->cmd->opcode, host->mrq); 388 389 return host->blk_page + host->offset; 390 } 391 392 /* Unmap the current page: common for multiple and single block IO */ 393 static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force) 394 { 395 struct mmc_data *data = host->mrq->data; 396 struct page *page = host->head_pg.page; 397 398 if (page) { 399 /* Previous block was cross-page boundary */ 400 struct scatterlist *sg = data->sg_len > 1 ? 401 host->sg : data->sg; 402 size_t blk_head = host->head_len; 403 404 if (!data->error && data->flags & MMC_DATA_READ) { 405 memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, 406 host->bounce_buf, blk_head); 407 memcpy(host->pg.mapped, host->bounce_buf + blk_head, 408 data->blksz - blk_head); 409 } 410 411 flush_dcache_page(page); 412 kunmap(page); 413 414 host->head_pg.page = NULL; 415 416 if (!force && sg_dma_len(sg) + sg->offset > 417 (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head) 418 /* More blocks in this SG, don't unmap the next page */ 419 return; 420 } 421 422 page = host->pg.page; 423 if (!page) 424 return; 425 426 flush_dcache_page(page); 427 kunmap(page); 428 429 host->pg.page = NULL; 430 } 431 432 /* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */ 433 static void usdhi6_sg_advance(struct usdhi6_host *host) 434 { 435 struct mmc_data *data = host->mrq->data; 436 size_t done, total; 437 438 /* New offset: set at the end of the previous block */ 439 if (host->head_pg.page) { 440 /* Finished a cross-page block, jump to the new page */ 441 host->page_idx++; 442 host->offset = data->blksz - host->head_len; 443 host->blk_page = host->pg.mapped; 444 usdhi6_sg_unmap(host, false); 445 } else { 446 host->offset += data->blksz; 447 /* The completed block didn't cross a page boundary */ 448 if (host->offset == PAGE_SIZE) { 449 /* If required, we'll map the page below */ 450 host->offset = 0; 451 host->page_idx++; 452 } 453 } 454 455 /* 456 * Now host->blk_page + host->offset point at the end of our last block 457 * and host->page_idx is the index of the page, in which our new block 458 * is located, if any 459 */ 460 461 done = (host->page_idx << PAGE_SHIFT) + host->offset; 462 total = host->sg->offset + sg_dma_len(host->sg); 463 464 dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__, 465 done, total, host->offset); 466 467 if (done < total && host->offset) { 468 /* More blocks in this page */ 469 if (host->offset + data->blksz > PAGE_SIZE) 470 /* We approached at a block, that spans 2 pages */ 471 usdhi6_blk_bounce(host, host->sg); 472 473 return; 474 } 475 476 /* Finished current page or an SG segment */ 477 usdhi6_sg_unmap(host, false); 478 479 if (done == total) { 480 /* 481 * End of an SG segment or the complete SG: jump to the next 482 * segment, we'll map it later in usdhi6_blk_read() or 483 * usdhi6_blk_write() 484 */ 485 struct scatterlist *next = sg_next(host->sg); 486 487 host->page_idx = 0; 488 489 if (!next) 490 host->wait = USDHI6_WAIT_FOR_DATA_END; 491 host->sg = next; 492 493 if (WARN(next && sg_dma_len(next) % data->blksz, 494 "SG size %u isn't a multiple of block size %u\n", 495 sg_dma_len(next), data->blksz)) 496 data->error = -EINVAL; 497 498 return; 499 } 500 501 /* We cannot get here after crossing a page border */ 502 503 /* Next page in the same SG */ 504 host->pg.page = nth_page(sg_page(host->sg), host->page_idx); 505 host->pg.mapped = kmap(host->pg.page); 506 host->blk_page = host->pg.mapped; 507 508 dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", 509 host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, 510 host->mrq->cmd->opcode, host->mrq); 511 } 512 513 /* DMA handling */ 514 515 static void usdhi6_dma_release(struct usdhi6_host *host) 516 { 517 host->dma_active = false; 518 if (host->chan_tx) { 519 struct dma_chan *chan = host->chan_tx; 520 host->chan_tx = NULL; 521 dma_release_channel(chan); 522 } 523 if (host->chan_rx) { 524 struct dma_chan *chan = host->chan_rx; 525 host->chan_rx = NULL; 526 dma_release_channel(chan); 527 } 528 } 529 530 static void usdhi6_dma_stop_unmap(struct usdhi6_host *host) 531 { 532 struct mmc_data *data = host->mrq->data; 533 534 if (!host->dma_active) 535 return; 536 537 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); 538 host->dma_active = false; 539 540 if (data->flags & MMC_DATA_READ) 541 dma_unmap_sg(host->chan_rx->device->dev, data->sg, 542 data->sg_len, DMA_FROM_DEVICE); 543 else 544 dma_unmap_sg(host->chan_tx->device->dev, data->sg, 545 data->sg_len, DMA_TO_DEVICE); 546 } 547 548 static void usdhi6_dma_complete(void *arg) 549 { 550 struct usdhi6_host *host = arg; 551 struct mmc_request *mrq = host->mrq; 552 553 if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n", 554 dev_name(mmc_dev(host->mmc)), mrq)) 555 return; 556 557 dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, 558 mrq->cmd->opcode); 559 560 usdhi6_dma_stop_unmap(host); 561 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); 562 } 563 564 static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan, 565 enum dma_transfer_direction dir) 566 { 567 struct mmc_data *data = host->mrq->data; 568 struct scatterlist *sg = data->sg; 569 struct dma_async_tx_descriptor *desc = NULL; 570 dma_cookie_t cookie = -EINVAL; 571 enum dma_data_direction data_dir; 572 int ret; 573 574 switch (dir) { 575 case DMA_MEM_TO_DEV: 576 data_dir = DMA_TO_DEVICE; 577 break; 578 case DMA_DEV_TO_MEM: 579 data_dir = DMA_FROM_DEVICE; 580 break; 581 default: 582 return -EINVAL; 583 } 584 585 ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); 586 if (ret > 0) { 587 host->dma_active = true; 588 desc = dmaengine_prep_slave_sg(chan, sg, ret, dir, 589 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 590 } 591 592 if (desc) { 593 desc->callback = usdhi6_dma_complete; 594 desc->callback_param = host; 595 cookie = dmaengine_submit(desc); 596 } 597 598 dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", 599 __func__, data->sg_len, ret, cookie, desc); 600 601 if (cookie < 0) { 602 /* DMA failed, fall back to PIO */ 603 if (ret >= 0) 604 ret = cookie; 605 usdhi6_dma_release(host); 606 dev_warn(mmc_dev(host->mmc), 607 "DMA failed: %d, falling back to PIO\n", ret); 608 } 609 610 return cookie; 611 } 612 613 static int usdhi6_dma_start(struct usdhi6_host *host) 614 { 615 if (!host->chan_rx || !host->chan_tx) 616 return -ENODEV; 617 618 if (host->mrq->data->flags & MMC_DATA_READ) 619 return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM); 620 621 return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); 622 } 623 624 static void usdhi6_dma_kill(struct usdhi6_host *host) 625 { 626 struct mmc_data *data = host->mrq->data; 627 628 dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n", 629 __func__, data->sg_len, data->blocks, data->blksz); 630 /* Abort DMA */ 631 if (data->flags & MMC_DATA_READ) 632 dmaengine_terminate_all(host->chan_rx); 633 else 634 dmaengine_terminate_all(host->chan_tx); 635 } 636 637 static void usdhi6_dma_check_error(struct usdhi6_host *host) 638 { 639 struct mmc_data *data = host->mrq->data; 640 641 dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", 642 __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); 643 644 if (host->io_error) { 645 data->error = usdhi6_error_code(host); 646 data->bytes_xfered = 0; 647 usdhi6_dma_kill(host); 648 usdhi6_dma_release(host); 649 dev_warn(mmc_dev(host->mmc), 650 "DMA failed: %d, falling back to PIO\n", data->error); 651 return; 652 } 653 654 /* 655 * The datasheet tells us to check a response from the card, whereas 656 * responses only come after the command phase, not after the data 657 * phase. Let's check anyway. 658 */ 659 if (host->irq_status & USDHI6_SD_INFO1_RSP_END) 660 dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); 661 } 662 663 static void usdhi6_dma_kick(struct usdhi6_host *host) 664 { 665 if (host->mrq->data->flags & MMC_DATA_READ) 666 dma_async_issue_pending(host->chan_rx); 667 else 668 dma_async_issue_pending(host->chan_tx); 669 } 670 671 static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) 672 { 673 struct dma_slave_config cfg = { 674 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 675 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 676 }; 677 int ret; 678 679 host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); 680 dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, 681 host->chan_tx); 682 683 if (IS_ERR(host->chan_tx)) { 684 host->chan_tx = NULL; 685 return; 686 } 687 688 cfg.direction = DMA_MEM_TO_DEV; 689 cfg.dst_addr = start + USDHI6_SD_BUF0; 690 cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ 691 cfg.src_addr = 0; 692 ret = dmaengine_slave_config(host->chan_tx, &cfg); 693 if (ret < 0) 694 goto e_release_tx; 695 696 host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); 697 dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, 698 host->chan_rx); 699 700 if (IS_ERR(host->chan_rx)) { 701 host->chan_rx = NULL; 702 goto e_release_tx; 703 } 704 705 cfg.direction = DMA_DEV_TO_MEM; 706 cfg.src_addr = cfg.dst_addr; 707 cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ 708 cfg.dst_addr = 0; 709 ret = dmaengine_slave_config(host->chan_rx, &cfg); 710 if (ret < 0) 711 goto e_release_rx; 712 713 return; 714 715 e_release_rx: 716 dma_release_channel(host->chan_rx); 717 host->chan_rx = NULL; 718 e_release_tx: 719 dma_release_channel(host->chan_tx); 720 host->chan_tx = NULL; 721 } 722 723 /* API helpers */ 724 725 static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios) 726 { 727 unsigned long rate = ios->clock; 728 u32 val; 729 unsigned int i; 730 731 for (i = 1000; i; i--) { 732 if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) 733 break; 734 usleep_range(10, 100); 735 } 736 737 if (!i) { 738 dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); 739 return; 740 } 741 742 val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK; 743 744 if (rate) { 745 unsigned long new_rate; 746 747 if (host->imclk <= rate) { 748 if (ios->timing != MMC_TIMING_UHS_DDR50) { 749 /* Cannot have 1-to-1 clock in DDR mode */ 750 new_rate = host->imclk; 751 val |= 0xff; 752 } else { 753 new_rate = host->imclk / 2; 754 } 755 } else { 756 unsigned long div = 757 roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); 758 val |= div >> 2; 759 new_rate = host->imclk / div; 760 } 761 762 if (host->rate == new_rate) 763 return; 764 765 host->rate = new_rate; 766 767 dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n", 768 rate, (val & 0xff) << 2, new_rate); 769 } 770 771 /* 772 * if old or new rate is equal to input rate, have to switch the clock 773 * off before changing and on after 774 */ 775 if (host->imclk == rate || host->imclk == host->rate || !rate) 776 usdhi6_write(host, USDHI6_SD_CLK_CTRL, 777 val & ~USDHI6_SD_CLK_CTRL_SCLKEN); 778 779 if (!rate) { 780 host->rate = 0; 781 return; 782 } 783 784 usdhi6_write(host, USDHI6_SD_CLK_CTRL, val); 785 786 if (host->imclk == rate || host->imclk == host->rate || 787 !(val & USDHI6_SD_CLK_CTRL_SCLKEN)) 788 usdhi6_write(host, USDHI6_SD_CLK_CTRL, 789 val | USDHI6_SD_CLK_CTRL_SCLKEN); 790 } 791 792 static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios) 793 { 794 struct mmc_host *mmc = host->mmc; 795 796 if (!IS_ERR(mmc->supply.vmmc)) 797 /* Errors ignored... */ 798 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 799 ios->power_mode ? ios->vdd : 0); 800 } 801 802 static int usdhi6_reset(struct usdhi6_host *host) 803 { 804 int i; 805 806 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED); 807 cpu_relax(); 808 usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET); 809 for (i = 1000; i; i--) 810 if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET) 811 break; 812 813 return i ? 0 : -ETIMEDOUT; 814 } 815 816 static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 817 { 818 struct usdhi6_host *host = mmc_priv(mmc); 819 u32 option, mode; 820 int ret; 821 822 dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n", 823 ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing); 824 825 switch (ios->power_mode) { 826 case MMC_POWER_OFF: 827 usdhi6_set_power(host, ios); 828 usdhi6_only_cd(host); 829 break; 830 case MMC_POWER_UP: 831 /* 832 * We only also touch USDHI6_SD_OPTION from .request(), which 833 * cannot race with MMC_POWER_UP 834 */ 835 ret = usdhi6_reset(host); 836 if (ret < 0) { 837 dev_err(mmc_dev(mmc), "Cannot reset the interface!\n"); 838 } else { 839 usdhi6_set_power(host, ios); 840 usdhi6_only_cd(host); 841 } 842 break; 843 case MMC_POWER_ON: 844 option = usdhi6_read(host, USDHI6_SD_OPTION); 845 /* 846 * The eMMC standard only allows 4 or 8 bits in the DDR mode, 847 * the same probably holds for SD cards. We check here anyway, 848 * since the datasheet explicitly requires 4 bits for DDR. 849 */ 850 if (ios->bus_width == MMC_BUS_WIDTH_1) { 851 if (ios->timing == MMC_TIMING_UHS_DDR50) 852 dev_err(mmc_dev(mmc), 853 "4 bits are required for DDR\n"); 854 option |= USDHI6_SD_OPTION_WIDTH_1; 855 mode = 0; 856 } else { 857 option &= ~USDHI6_SD_OPTION_WIDTH_1; 858 mode = ios->timing == MMC_TIMING_UHS_DDR50; 859 } 860 usdhi6_write(host, USDHI6_SD_OPTION, option); 861 usdhi6_write(host, USDHI6_SDIF_MODE, mode); 862 break; 863 } 864 865 if (host->rate != ios->clock) 866 usdhi6_clk_set(host, ios); 867 } 868 869 /* This is data timeout. Response timeout is fixed to 640 clock cycles */ 870 static void usdhi6_timeout_set(struct usdhi6_host *host) 871 { 872 struct mmc_request *mrq = host->mrq; 873 u32 val; 874 unsigned long ticks; 875 876 if (!mrq->data) 877 ticks = host->rate / 1000 * mrq->cmd->busy_timeout; 878 else 879 ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) + 880 mrq->data->timeout_clks; 881 882 if (!ticks || ticks > 1 << 27) 883 /* Max timeout */ 884 val = 14; 885 else if (ticks < 1 << 13) 886 /* Min timeout */ 887 val = 0; 888 else 889 val = order_base_2(ticks) - 13; 890 891 dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n", 892 mrq->data ? "data" : "cmd", ticks, host->rate); 893 894 /* Timeout Counter mask: 0xf0 */ 895 usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | 896 (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); 897 } 898 899 static void usdhi6_request_done(struct usdhi6_host *host) 900 { 901 struct mmc_request *mrq = host->mrq; 902 struct mmc_data *data = mrq->data; 903 904 if (WARN(host->pg.page || host->head_pg.page, 905 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n", 906 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, 907 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', 908 data ? host->offset : 0, data ? data->blocks : 0, 909 data ? data->blksz : 0, data ? data->sg_len : 0)) 910 usdhi6_sg_unmap(host, true); 911 912 if (mrq->cmd->error || 913 (data && data->error) || 914 (mrq->stop && mrq->stop->error)) 915 dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", 916 __func__, mrq->cmd->opcode, data ? data->blocks : 0, 917 data ? data->blksz : 0, 918 mrq->cmd->error, 919 data ? data->error : 1, 920 mrq->stop ? mrq->stop->error : 1); 921 922 /* Disable DMA */ 923 usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); 924 host->wait = USDHI6_WAIT_FOR_REQUEST; 925 host->mrq = NULL; 926 927 mmc_request_done(host->mmc, mrq); 928 } 929 930 static int usdhi6_cmd_flags(struct usdhi6_host *host) 931 { 932 struct mmc_request *mrq = host->mrq; 933 struct mmc_command *cmd = mrq->cmd; 934 u16 opc = cmd->opcode; 935 936 if (host->app_cmd) { 937 host->app_cmd = false; 938 opc |= USDHI6_SD_CMD_APP; 939 } 940 941 if (mrq->data) { 942 opc |= USDHI6_SD_CMD_DATA; 943 944 if (mrq->data->flags & MMC_DATA_READ) 945 opc |= USDHI6_SD_CMD_READ; 946 947 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 948 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 949 (cmd->opcode == SD_IO_RW_EXTENDED && 950 mrq->data->blocks > 1)) { 951 opc |= USDHI6_SD_CMD_MULTI; 952 if (!mrq->stop) 953 opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF; 954 } 955 956 switch (mmc_resp_type(cmd)) { 957 case MMC_RSP_NONE: 958 opc |= USDHI6_SD_CMD_MODE_RSP_NONE; 959 break; 960 case MMC_RSP_R1: 961 opc |= USDHI6_SD_CMD_MODE_RSP_R1; 962 break; 963 case MMC_RSP_R1B: 964 opc |= USDHI6_SD_CMD_MODE_RSP_R1B; 965 break; 966 case MMC_RSP_R2: 967 opc |= USDHI6_SD_CMD_MODE_RSP_R2; 968 break; 969 case MMC_RSP_R3: 970 opc |= USDHI6_SD_CMD_MODE_RSP_R3; 971 break; 972 default: 973 dev_warn(mmc_dev(host->mmc), 974 "Unknown response type %d\n", 975 mmc_resp_type(cmd)); 976 return -EINVAL; 977 } 978 } 979 980 return opc; 981 } 982 983 static int usdhi6_rq_start(struct usdhi6_host *host) 984 { 985 struct mmc_request *mrq = host->mrq; 986 struct mmc_command *cmd = mrq->cmd; 987 struct mmc_data *data = mrq->data; 988 int opc = usdhi6_cmd_flags(host); 989 int i; 990 991 if (opc < 0) 992 return opc; 993 994 for (i = 1000; i; i--) { 995 if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) 996 break; 997 usleep_range(10, 100); 998 } 999 1000 if (!i) { 1001 dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); 1002 return -EAGAIN; 1003 } 1004 1005 if (data) { 1006 bool use_dma; 1007 int ret = 0; 1008 1009 host->page_idx = 0; 1010 1011 if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { 1012 switch (data->blksz) { 1013 case 512: 1014 break; 1015 case 32: 1016 case 64: 1017 case 128: 1018 case 256: 1019 if (mrq->stop) 1020 ret = -EINVAL; 1021 break; 1022 default: 1023 ret = -EINVAL; 1024 } 1025 } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1026 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && 1027 data->blksz != 512) { 1028 ret = -EINVAL; 1029 } 1030 1031 if (ret < 0) { 1032 dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", 1033 __func__, data->blocks, data->blksz); 1034 return -EINVAL; 1035 } 1036 1037 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1038 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 1039 (cmd->opcode == SD_IO_RW_EXTENDED && 1040 data->blocks > 1)) 1041 usdhi6_sg_prep(host); 1042 1043 usdhi6_write(host, USDHI6_SD_SIZE, data->blksz); 1044 1045 if ((data->blksz >= USDHI6_MIN_DMA || 1046 data->blocks > 1) && 1047 (data->blksz % 4 || 1048 data->sg->offset % 4)) 1049 dev_dbg(mmc_dev(host->mmc), 1050 "Bad SG of %u: %ux%u @ %u\n", data->sg_len, 1051 data->blksz, data->blocks, data->sg->offset); 1052 1053 /* Enable DMA for USDHI6_MIN_DMA bytes or more */ 1054 use_dma = data->blksz >= USDHI6_MIN_DMA && 1055 !(data->blksz % 4) && 1056 usdhi6_dma_start(host) >= DMA_MIN_COOKIE; 1057 1058 if (use_dma) 1059 usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW); 1060 1061 dev_dbg(mmc_dev(host->mmc), 1062 "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n", 1063 __func__, cmd->opcode, data->blocks, data->blksz, 1064 data->sg_len, use_dma ? "DMA" : "PIO", 1065 data->flags & MMC_DATA_READ ? "read" : "write", 1066 data->sg->offset, mrq->stop ? " + stop" : ""); 1067 } else { 1068 dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n", 1069 __func__, cmd->opcode); 1070 } 1071 1072 /* We have to get a command completion interrupt with DMA too */ 1073 usdhi6_wait_for_resp(host); 1074 1075 host->wait = USDHI6_WAIT_FOR_CMD; 1076 schedule_delayed_work(&host->timeout_work, host->timeout); 1077 1078 /* SEC bit is required to enable block counting by the core */ 1079 usdhi6_write(host, USDHI6_SD_STOP, 1080 data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0); 1081 usdhi6_write(host, USDHI6_SD_ARG, cmd->arg); 1082 1083 /* Kick command execution */ 1084 usdhi6_write(host, USDHI6_SD_CMD, opc); 1085 1086 return 0; 1087 } 1088 1089 static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq) 1090 { 1091 struct usdhi6_host *host = mmc_priv(mmc); 1092 int ret; 1093 1094 cancel_delayed_work_sync(&host->timeout_work); 1095 1096 host->mrq = mrq; 1097 host->sg = NULL; 1098 1099 usdhi6_timeout_set(host); 1100 ret = usdhi6_rq_start(host); 1101 if (ret < 0) { 1102 mrq->cmd->error = ret; 1103 usdhi6_request_done(host); 1104 } 1105 } 1106 1107 static int usdhi6_get_cd(struct mmc_host *mmc) 1108 { 1109 struct usdhi6_host *host = mmc_priv(mmc); 1110 /* Read is atomic, no need to lock */ 1111 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD; 1112 1113 /* 1114 * level status.CD CD_ACTIVE_HIGH card present 1115 * 1 0 0 0 1116 * 1 0 1 1 1117 * 0 1 0 1 1118 * 0 1 1 0 1119 */ 1120 return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); 1121 } 1122 1123 static int usdhi6_get_ro(struct mmc_host *mmc) 1124 { 1125 struct usdhi6_host *host = mmc_priv(mmc); 1126 /* No locking as above */ 1127 u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP; 1128 1129 /* 1130 * level status.WP RO_ACTIVE_HIGH card read-only 1131 * 1 0 0 0 1132 * 1 0 1 1 1133 * 0 1 0 1 1134 * 0 1 1 0 1135 */ 1136 return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); 1137 } 1138 1139 static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable) 1140 { 1141 struct usdhi6_host *host = mmc_priv(mmc); 1142 1143 dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? "en" : "dis"); 1144 1145 if (enable) { 1146 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; 1147 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); 1148 usdhi6_write(host, USDHI6_SDIO_MODE, 1); 1149 } else { 1150 usdhi6_write(host, USDHI6_SDIO_MODE, 0); 1151 usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); 1152 host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; 1153 } 1154 } 1155 1156 static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage) 1157 { 1158 if (IS_ERR(host->pins_uhs)) 1159 return 0; 1160 1161 switch (voltage) { 1162 case MMC_SIGNAL_VOLTAGE_180: 1163 case MMC_SIGNAL_VOLTAGE_120: 1164 return pinctrl_select_state(host->pinctrl, 1165 host->pins_uhs); 1166 1167 default: 1168 return pinctrl_select_default_state(mmc_dev(host->mmc)); 1169 } 1170 } 1171 1172 static int usdhi6_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) 1173 { 1174 int ret; 1175 1176 ret = mmc_regulator_set_vqmmc(mmc, ios); 1177 if (ret < 0) 1178 return ret; 1179 1180 ret = usdhi6_set_pinstates(mmc_priv(mmc), ios->signal_voltage); 1181 if (ret) 1182 dev_warn_once(mmc_dev(mmc), 1183 "Failed to set pinstate err=%d\n", ret); 1184 return ret; 1185 } 1186 1187 static const struct mmc_host_ops usdhi6_ops = { 1188 .request = usdhi6_request, 1189 .set_ios = usdhi6_set_ios, 1190 .get_cd = usdhi6_get_cd, 1191 .get_ro = usdhi6_get_ro, 1192 .enable_sdio_irq = usdhi6_enable_sdio_irq, 1193 .start_signal_voltage_switch = usdhi6_sig_volt_switch, 1194 }; 1195 1196 /* State machine handlers */ 1197 1198 static void usdhi6_resp_cmd12(struct usdhi6_host *host) 1199 { 1200 struct mmc_command *cmd = host->mrq->stop; 1201 cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); 1202 } 1203 1204 static void usdhi6_resp_read(struct usdhi6_host *host) 1205 { 1206 struct mmc_command *cmd = host->mrq->cmd; 1207 u32 *rsp = cmd->resp, tmp = 0; 1208 int i; 1209 1210 /* 1211 * RSP10 39-8 1212 * RSP32 71-40 1213 * RSP54 103-72 1214 * RSP76 127-104 1215 * R2-type response: 1216 * resp[0] = r[127..96] 1217 * resp[1] = r[95..64] 1218 * resp[2] = r[63..32] 1219 * resp[3] = r[31..0] 1220 * Other responses: 1221 * resp[0] = r[39..8] 1222 */ 1223 1224 if (mmc_resp_type(cmd) == MMC_RSP_NONE) 1225 return; 1226 1227 if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { 1228 dev_err(mmc_dev(host->mmc), 1229 "CMD%d: response expected but is missing!\n", cmd->opcode); 1230 return; 1231 } 1232 1233 if (mmc_resp_type(cmd) & MMC_RSP_136) 1234 for (i = 0; i < 4; i++) { 1235 if (i) 1236 rsp[3 - i] = tmp >> 24; 1237 tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); 1238 rsp[3 - i] |= tmp << 8; 1239 } 1240 else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1241 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) 1242 /* Read RSP54 to avoid conflict with auto CMD12 */ 1243 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); 1244 else 1245 rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); 1246 1247 dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); 1248 } 1249 1250 static int usdhi6_blk_read(struct usdhi6_host *host) 1251 { 1252 struct mmc_data *data = host->mrq->data; 1253 u32 *p; 1254 int i, rest; 1255 1256 if (host->io_error) { 1257 data->error = usdhi6_error_code(host); 1258 goto error; 1259 } 1260 1261 if (host->pg.page) { 1262 p = host->blk_page + host->offset; 1263 } else { 1264 p = usdhi6_sg_map(host); 1265 if (!p) { 1266 data->error = -ENOMEM; 1267 goto error; 1268 } 1269 } 1270 1271 for (i = 0; i < data->blksz / 4; i++, p++) 1272 *p = usdhi6_read(host, USDHI6_SD_BUF0); 1273 1274 rest = data->blksz % 4; 1275 for (i = 0; i < (rest + 1) / 2; i++) { 1276 u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); 1277 ((u8 *)p)[2 * i] = ((u8 *)&d)[0]; 1278 if (rest > 1 && !i) 1279 ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1]; 1280 } 1281 1282 return 0; 1283 1284 error: 1285 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); 1286 host->wait = USDHI6_WAIT_FOR_REQUEST; 1287 return data->error; 1288 } 1289 1290 static int usdhi6_blk_write(struct usdhi6_host *host) 1291 { 1292 struct mmc_data *data = host->mrq->data; 1293 u32 *p; 1294 int i, rest; 1295 1296 if (host->io_error) { 1297 data->error = usdhi6_error_code(host); 1298 goto error; 1299 } 1300 1301 if (host->pg.page) { 1302 p = host->blk_page + host->offset; 1303 } else { 1304 p = usdhi6_sg_map(host); 1305 if (!p) { 1306 data->error = -ENOMEM; 1307 goto error; 1308 } 1309 } 1310 1311 for (i = 0; i < data->blksz / 4; i++, p++) 1312 usdhi6_write(host, USDHI6_SD_BUF0, *p); 1313 1314 rest = data->blksz % 4; 1315 for (i = 0; i < (rest + 1) / 2; i++) { 1316 u16 d; 1317 ((u8 *)&d)[0] = ((u8 *)p)[2 * i]; 1318 if (rest > 1 && !i) 1319 ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1]; 1320 else 1321 ((u8 *)&d)[1] = 0; 1322 usdhi6_write16(host, USDHI6_SD_BUF0, d); 1323 } 1324 1325 return 0; 1326 1327 error: 1328 dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); 1329 host->wait = USDHI6_WAIT_FOR_REQUEST; 1330 return data->error; 1331 } 1332 1333 static int usdhi6_stop_cmd(struct usdhi6_host *host) 1334 { 1335 struct mmc_request *mrq = host->mrq; 1336 1337 switch (mrq->cmd->opcode) { 1338 case MMC_READ_MULTIPLE_BLOCK: 1339 case MMC_WRITE_MULTIPLE_BLOCK: 1340 if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) { 1341 host->wait = USDHI6_WAIT_FOR_STOP; 1342 return 0; 1343 } 1344 /* fall through - Unsupported STOP command. */ 1345 default: 1346 dev_err(mmc_dev(host->mmc), 1347 "unsupported stop CMD%d for CMD%d\n", 1348 mrq->stop->opcode, mrq->cmd->opcode); 1349 mrq->stop->error = -EOPNOTSUPP; 1350 } 1351 1352 return -EOPNOTSUPP; 1353 } 1354 1355 static bool usdhi6_end_cmd(struct usdhi6_host *host) 1356 { 1357 struct mmc_request *mrq = host->mrq; 1358 struct mmc_command *cmd = mrq->cmd; 1359 1360 if (host->io_error) { 1361 cmd->error = usdhi6_error_code(host); 1362 return false; 1363 } 1364 1365 usdhi6_resp_read(host); 1366 1367 if (!mrq->data) 1368 return false; 1369 1370 if (host->dma_active) { 1371 usdhi6_dma_kick(host); 1372 if (!mrq->stop) 1373 host->wait = USDHI6_WAIT_FOR_DMA; 1374 else if (usdhi6_stop_cmd(host) < 0) 1375 return false; 1376 } else if (mrq->data->flags & MMC_DATA_READ) { 1377 if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || 1378 (cmd->opcode == SD_IO_RW_EXTENDED && 1379 mrq->data->blocks > 1)) 1380 host->wait = USDHI6_WAIT_FOR_MREAD; 1381 else 1382 host->wait = USDHI6_WAIT_FOR_READ; 1383 } else { 1384 if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || 1385 (cmd->opcode == SD_IO_RW_EXTENDED && 1386 mrq->data->blocks > 1)) 1387 host->wait = USDHI6_WAIT_FOR_MWRITE; 1388 else 1389 host->wait = USDHI6_WAIT_FOR_WRITE; 1390 } 1391 1392 return true; 1393 } 1394 1395 static bool usdhi6_read_block(struct usdhi6_host *host) 1396 { 1397 /* ACCESS_END IRQ is already unmasked */ 1398 int ret = usdhi6_blk_read(host); 1399 1400 /* 1401 * Have to force unmapping both pages: the single block could have been 1402 * cross-page, in which case for single-block IO host->page_idx == 0. 1403 * So, if we don't force, the second page won't be unmapped. 1404 */ 1405 usdhi6_sg_unmap(host, true); 1406 1407 if (ret < 0) 1408 return false; 1409 1410 host->wait = USDHI6_WAIT_FOR_DATA_END; 1411 return true; 1412 } 1413 1414 static bool usdhi6_mread_block(struct usdhi6_host *host) 1415 { 1416 int ret = usdhi6_blk_read(host); 1417 1418 if (ret < 0) 1419 return false; 1420 1421 usdhi6_sg_advance(host); 1422 1423 return !host->mrq->data->error && 1424 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); 1425 } 1426 1427 static bool usdhi6_write_block(struct usdhi6_host *host) 1428 { 1429 int ret = usdhi6_blk_write(host); 1430 1431 /* See comment in usdhi6_read_block() */ 1432 usdhi6_sg_unmap(host, true); 1433 1434 if (ret < 0) 1435 return false; 1436 1437 host->wait = USDHI6_WAIT_FOR_DATA_END; 1438 return true; 1439 } 1440 1441 static bool usdhi6_mwrite_block(struct usdhi6_host *host) 1442 { 1443 int ret = usdhi6_blk_write(host); 1444 1445 if (ret < 0) 1446 return false; 1447 1448 usdhi6_sg_advance(host); 1449 1450 return !host->mrq->data->error && 1451 (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); 1452 } 1453 1454 /* Interrupt & timeout handlers */ 1455 1456 static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id) 1457 { 1458 struct usdhi6_host *host = dev_id; 1459 struct mmc_request *mrq; 1460 struct mmc_command *cmd; 1461 struct mmc_data *data; 1462 bool io_wait = false; 1463 1464 cancel_delayed_work_sync(&host->timeout_work); 1465 1466 mrq = host->mrq; 1467 if (!mrq) 1468 return IRQ_HANDLED; 1469 1470 cmd = mrq->cmd; 1471 data = mrq->data; 1472 1473 switch (host->wait) { 1474 case USDHI6_WAIT_FOR_REQUEST: 1475 /* We're too late, the timeout has already kicked in */ 1476 return IRQ_HANDLED; 1477 case USDHI6_WAIT_FOR_CMD: 1478 /* Wait for data? */ 1479 io_wait = usdhi6_end_cmd(host); 1480 break; 1481 case USDHI6_WAIT_FOR_MREAD: 1482 /* Wait for more data? */ 1483 io_wait = usdhi6_mread_block(host); 1484 break; 1485 case USDHI6_WAIT_FOR_READ: 1486 /* Wait for data end? */ 1487 io_wait = usdhi6_read_block(host); 1488 break; 1489 case USDHI6_WAIT_FOR_MWRITE: 1490 /* Wait data to write? */ 1491 io_wait = usdhi6_mwrite_block(host); 1492 break; 1493 case USDHI6_WAIT_FOR_WRITE: 1494 /* Wait for data end? */ 1495 io_wait = usdhi6_write_block(host); 1496 break; 1497 case USDHI6_WAIT_FOR_DMA: 1498 usdhi6_dma_check_error(host); 1499 break; 1500 case USDHI6_WAIT_FOR_STOP: 1501 usdhi6_write(host, USDHI6_SD_STOP, 0); 1502 if (host->io_error) { 1503 int ret = usdhi6_error_code(host); 1504 if (mrq->stop) 1505 mrq->stop->error = ret; 1506 else 1507 mrq->data->error = ret; 1508 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); 1509 break; 1510 } 1511 usdhi6_resp_cmd12(host); 1512 mrq->stop->error = 0; 1513 break; 1514 case USDHI6_WAIT_FOR_DATA_END: 1515 if (host->io_error) { 1516 mrq->data->error = usdhi6_error_code(host); 1517 dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, 1518 mrq->data->error); 1519 } 1520 break; 1521 default: 1522 cmd->error = -EFAULT; 1523 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); 1524 usdhi6_request_done(host); 1525 return IRQ_HANDLED; 1526 } 1527 1528 if (io_wait) { 1529 schedule_delayed_work(&host->timeout_work, host->timeout); 1530 /* Wait for more data or ACCESS_END */ 1531 if (!host->dma_active) 1532 usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); 1533 return IRQ_HANDLED; 1534 } 1535 1536 if (!cmd->error) { 1537 if (data) { 1538 if (!data->error) { 1539 if (host->wait != USDHI6_WAIT_FOR_STOP && 1540 host->mrq->stop && 1541 !host->mrq->stop->error && 1542 !usdhi6_stop_cmd(host)) { 1543 /* Sending STOP */ 1544 usdhi6_wait_for_resp(host); 1545 1546 schedule_delayed_work(&host->timeout_work, 1547 host->timeout); 1548 1549 return IRQ_HANDLED; 1550 } 1551 1552 data->bytes_xfered = data->blocks * data->blksz; 1553 } else { 1554 /* Data error: might need to unmap the last page */ 1555 dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", 1556 __func__, data->error); 1557 usdhi6_sg_unmap(host, true); 1558 } 1559 } else if (cmd->opcode == MMC_APP_CMD) { 1560 host->app_cmd = true; 1561 } 1562 } 1563 1564 usdhi6_request_done(host); 1565 1566 return IRQ_HANDLED; 1567 } 1568 1569 static irqreturn_t usdhi6_sd(int irq, void *dev_id) 1570 { 1571 struct usdhi6_host *host = dev_id; 1572 u16 status, status2, error; 1573 1574 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & 1575 ~USDHI6_SD_INFO1_CARD; 1576 status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; 1577 1578 usdhi6_only_cd(host); 1579 1580 dev_dbg(mmc_dev(host->mmc), 1581 "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2); 1582 1583 if (!status && !status2) 1584 return IRQ_NONE; 1585 1586 error = status2 & USDHI6_SD_INFO2_ERR; 1587 1588 /* Ack / clear interrupts */ 1589 if (USDHI6_SD_INFO1_IRQ & status) 1590 usdhi6_write(host, USDHI6_SD_INFO1, 1591 0xffff & ~(USDHI6_SD_INFO1_IRQ & status)); 1592 1593 if (USDHI6_SD_INFO2_IRQ & status2) { 1594 if (error) 1595 /* In error cases BWE and BRE aren't cleared automatically */ 1596 status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE; 1597 1598 usdhi6_write(host, USDHI6_SD_INFO2, 1599 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2)); 1600 } 1601 1602 host->io_error = error; 1603 host->irq_status = status; 1604 1605 if (error) { 1606 /* Don't pollute the log with unsupported command timeouts */ 1607 if (host->wait != USDHI6_WAIT_FOR_CMD || 1608 error != USDHI6_SD_INFO2_RSP_TOUT) 1609 dev_warn(mmc_dev(host->mmc), 1610 "%s(): INFO2 error bits 0x%08x\n", 1611 __func__, error); 1612 else 1613 dev_dbg(mmc_dev(host->mmc), 1614 "%s(): INFO2 error bits 0x%08x\n", 1615 __func__, error); 1616 } 1617 1618 return IRQ_WAKE_THREAD; 1619 } 1620 1621 static irqreturn_t usdhi6_sdio(int irq, void *dev_id) 1622 { 1623 struct usdhi6_host *host = dev_id; 1624 u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; 1625 1626 dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); 1627 1628 if (!status) 1629 return IRQ_NONE; 1630 1631 usdhi6_write(host, USDHI6_SDIO_INFO1, ~status); 1632 1633 mmc_signal_sdio_irq(host->mmc); 1634 1635 return IRQ_HANDLED; 1636 } 1637 1638 static irqreturn_t usdhi6_cd(int irq, void *dev_id) 1639 { 1640 struct usdhi6_host *host = dev_id; 1641 struct mmc_host *mmc = host->mmc; 1642 u16 status; 1643 1644 /* We're only interested in hotplug events here */ 1645 status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & 1646 USDHI6_SD_INFO1_CARD; 1647 1648 if (!status) 1649 return IRQ_NONE; 1650 1651 /* Ack */ 1652 usdhi6_write(host, USDHI6_SD_INFO1, ~status); 1653 1654 if (!work_pending(&mmc->detect.work) && 1655 (((status & USDHI6_SD_INFO1_CARD_INSERT) && 1656 !mmc->card) || 1657 ((status & USDHI6_SD_INFO1_CARD_EJECT) && 1658 mmc->card))) 1659 mmc_detect_change(mmc, msecs_to_jiffies(100)); 1660 1661 return IRQ_HANDLED; 1662 } 1663 1664 /* 1665 * Actually this should not be needed, if the built-in timeout works reliably in 1666 * the both PIO cases and DMA never fails. But if DMA does fail, a timeout 1667 * handler might be the only way to catch the error. 1668 */ 1669 static void usdhi6_timeout_work(struct work_struct *work) 1670 { 1671 struct delayed_work *d = to_delayed_work(work); 1672 struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); 1673 struct mmc_request *mrq = host->mrq; 1674 struct mmc_data *data = mrq ? mrq->data : NULL; 1675 struct scatterlist *sg; 1676 1677 dev_warn(mmc_dev(host->mmc), 1678 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", 1679 host->dma_active ? "DMA" : "PIO", 1680 host->wait, mrq ? mrq->cmd->opcode : -1, 1681 usdhi6_read(host, USDHI6_SD_INFO1), 1682 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); 1683 1684 if (host->dma_active) { 1685 usdhi6_dma_kill(host); 1686 usdhi6_dma_stop_unmap(host); 1687 } 1688 1689 switch (host->wait) { 1690 default: 1691 dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); 1692 /* fall through - mrq can be NULL, but is impossible. */ 1693 case USDHI6_WAIT_FOR_CMD: 1694 usdhi6_error_code(host); 1695 if (mrq) 1696 mrq->cmd->error = -ETIMEDOUT; 1697 break; 1698 case USDHI6_WAIT_FOR_STOP: 1699 usdhi6_error_code(host); 1700 mrq->stop->error = -ETIMEDOUT; 1701 break; 1702 case USDHI6_WAIT_FOR_DMA: 1703 case USDHI6_WAIT_FOR_MREAD: 1704 case USDHI6_WAIT_FOR_MWRITE: 1705 case USDHI6_WAIT_FOR_READ: 1706 case USDHI6_WAIT_FOR_WRITE: 1707 sg = host->sg ?: data->sg; 1708 dev_dbg(mmc_dev(host->mmc), 1709 "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", 1710 data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx, 1711 host->offset, data->blocks, data->blksz, data->sg_len, 1712 sg_dma_len(sg), sg->offset); 1713 usdhi6_sg_unmap(host, true); 1714 /* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. */ 1715 case USDHI6_WAIT_FOR_DATA_END: 1716 usdhi6_error_code(host); 1717 data->error = -ETIMEDOUT; 1718 } 1719 1720 if (mrq) 1721 usdhi6_request_done(host); 1722 } 1723 1724 /* Probe / release */ 1725 1726 static const struct of_device_id usdhi6_of_match[] = { 1727 {.compatible = "renesas,usdhi6rol0"}, 1728 {} 1729 }; 1730 MODULE_DEVICE_TABLE(of, usdhi6_of_match); 1731 1732 static int usdhi6_probe(struct platform_device *pdev) 1733 { 1734 struct device *dev = &pdev->dev; 1735 struct mmc_host *mmc; 1736 struct usdhi6_host *host; 1737 struct resource *res; 1738 int irq_cd, irq_sd, irq_sdio; 1739 u32 version; 1740 int ret; 1741 1742 if (!dev->of_node) 1743 return -ENODEV; 1744 1745 irq_cd = platform_get_irq_byname(pdev, "card detect"); 1746 irq_sd = platform_get_irq_byname(pdev, "data"); 1747 irq_sdio = platform_get_irq_byname(pdev, "SDIO"); 1748 if (irq_sd < 0 || irq_sdio < 0) 1749 return -ENODEV; 1750 1751 mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); 1752 if (!mmc) 1753 return -ENOMEM; 1754 1755 ret = mmc_regulator_get_supply(mmc); 1756 if (ret) 1757 goto e_free_mmc; 1758 1759 ret = mmc_of_parse(mmc); 1760 if (ret < 0) 1761 goto e_free_mmc; 1762 1763 host = mmc_priv(mmc); 1764 host->mmc = mmc; 1765 host->wait = USDHI6_WAIT_FOR_REQUEST; 1766 host->timeout = msecs_to_jiffies(4000); 1767 1768 host->pinctrl = devm_pinctrl_get(&pdev->dev); 1769 if (IS_ERR(host->pinctrl)) { 1770 ret = PTR_ERR(host->pinctrl); 1771 goto e_free_mmc; 1772 } 1773 1774 host->pins_uhs = pinctrl_lookup_state(host->pinctrl, "state_uhs"); 1775 1776 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1777 host->base = devm_ioremap_resource(dev, res); 1778 if (IS_ERR(host->base)) { 1779 ret = PTR_ERR(host->base); 1780 goto e_free_mmc; 1781 } 1782 1783 host->clk = devm_clk_get(dev, NULL); 1784 if (IS_ERR(host->clk)) { 1785 ret = PTR_ERR(host->clk); 1786 goto e_free_mmc; 1787 } 1788 1789 host->imclk = clk_get_rate(host->clk); 1790 1791 ret = clk_prepare_enable(host->clk); 1792 if (ret < 0) 1793 goto e_free_mmc; 1794 1795 version = usdhi6_read(host, USDHI6_VERSION); 1796 if ((version & 0xfff) != 0xa0d) { 1797 dev_err(dev, "Version not recognized %x\n", version); 1798 goto e_clk_off; 1799 } 1800 1801 dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n", 1802 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT); 1803 1804 usdhi6_mask_all(host); 1805 1806 if (irq_cd >= 0) { 1807 ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0, 1808 dev_name(dev), host); 1809 if (ret < 0) 1810 goto e_clk_off; 1811 } else { 1812 mmc->caps |= MMC_CAP_NEEDS_POLL; 1813 } 1814 1815 ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0, 1816 dev_name(dev), host); 1817 if (ret < 0) 1818 goto e_clk_off; 1819 1820 ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0, 1821 dev_name(dev), host); 1822 if (ret < 0) 1823 goto e_clk_off; 1824 1825 INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); 1826 1827 usdhi6_dma_request(host, res->start); 1828 1829 mmc->ops = &usdhi6_ops; 1830 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | 1831 MMC_CAP_SDIO_IRQ; 1832 /* Set .max_segs to some random number. Feel free to adjust. */ 1833 mmc->max_segs = 32; 1834 mmc->max_blk_size = 512; 1835 mmc->max_req_size = PAGE_SIZE * mmc->max_segs; 1836 mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; 1837 /* 1838 * Setting .max_seg_size to 1 page would simplify our page-mapping code, 1839 * But OTOH, having large segments makes DMA more efficient. We could 1840 * check, whether we managed to get DMA and fall back to 1 page 1841 * segments, but if we do manage to obtain DMA and then it fails at 1842 * run-time and we fall back to PIO, we will continue getting large 1843 * segments. So, we wouldn't be able to get rid of the code anyway. 1844 */ 1845 mmc->max_seg_size = mmc->max_req_size; 1846 if (!mmc->f_max) 1847 mmc->f_max = host->imclk; 1848 mmc->f_min = host->imclk / 512; 1849 1850 platform_set_drvdata(pdev, host); 1851 1852 ret = mmc_add_host(mmc); 1853 if (ret < 0) 1854 goto e_clk_off; 1855 1856 return 0; 1857 1858 e_clk_off: 1859 clk_disable_unprepare(host->clk); 1860 e_free_mmc: 1861 mmc_free_host(mmc); 1862 1863 return ret; 1864 } 1865 1866 static int usdhi6_remove(struct platform_device *pdev) 1867 { 1868 struct usdhi6_host *host = platform_get_drvdata(pdev); 1869 1870 mmc_remove_host(host->mmc); 1871 1872 usdhi6_mask_all(host); 1873 cancel_delayed_work_sync(&host->timeout_work); 1874 usdhi6_dma_release(host); 1875 clk_disable_unprepare(host->clk); 1876 mmc_free_host(host->mmc); 1877 1878 return 0; 1879 } 1880 1881 static struct platform_driver usdhi6_driver = { 1882 .probe = usdhi6_probe, 1883 .remove = usdhi6_remove, 1884 .driver = { 1885 .name = "usdhi6rol0", 1886 .of_match_table = usdhi6_of_match, 1887 }, 1888 }; 1889 1890 module_platform_driver(usdhi6_driver); 1891 1892 MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver"); 1893 MODULE_LICENSE("GPL v2"); 1894 MODULE_ALIAS("platform:usdhi6rol0"); 1895 MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); 1896