1 /* 2 * DMA support use of SYS DMAC with SDHI SD/SDIO controller 3 * 4 * Copyright (C) 2016-17 Renesas Electronics Corporation 5 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang 6 * Copyright (C) 2017 Horms Solutions, Simon Horman 7 * Copyright (C) 2010-2011 Guennadi Liakhovetski 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License version 2 as 11 * published by the Free Software Foundation. 12 */ 13 14 #include <linux/device.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/dmaengine.h> 17 #include <linux/mfd/tmio.h> 18 #include <linux/mmc/host.h> 19 #include <linux/mod_devicetable.h> 20 #include <linux/module.h> 21 #include <linux/of_device.h> 22 #include <linux/pagemap.h> 23 #include <linux/scatterlist.h> 24 #include <linux/sys_soc.h> 25 26 #include "renesas_sdhi.h" 27 #include "tmio_mmc.h" 28 29 #define TMIO_MMC_MIN_DMA_LEN 8 30 31 static const struct renesas_sdhi_of_data of_default_cfg = { 32 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, 33 }; 34 35 static const struct renesas_sdhi_of_data of_rz_compatible = { 36 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_32BIT_DATA_PORT | 37 TMIO_MMC_HAVE_CBSY, 38 .tmio_ocr_mask = MMC_VDD_32_33, 39 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, 40 }; 41 42 static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = { 43 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL, 44 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, 45 .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, 46 }; 47 48 /* Definitions for sampling clocks */ 49 static struct renesas_sdhi_scc rcar_gen2_scc_taps[] = { 50 { 51 .clk_rate = 156000000, 52 .tap = 0x00000703, 53 }, 54 { 55 .clk_rate = 0, 56 .tap = 0x00000300, 57 }, 58 }; 59 60 static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = { 61 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | 62 TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2, 63 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 64 MMC_CAP_CMD23, 65 .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, 66 .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES, 67 .dma_rx_offset = 0x2000, 68 .scc_offset = 0x0300, 69 .taps = rcar_gen2_scc_taps, 70 .taps_num = ARRAY_SIZE(rcar_gen2_scc_taps), 71 }; 72 73 /* Definitions for sampling clocks */ 74 static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { 75 { 76 .clk_rate = 0, 77 .tap = 0x00000300, 78 }, 79 }; 80 81 static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { 82 .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL | 83 TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2, 84 .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ | 85 MMC_CAP_CMD23, 86 .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT, 87 .bus_shift = 2, 88 .scc_offset = 0x1000, 89 .taps = rcar_gen3_scc_taps, 90 .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps), 91 }; 92 93 static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { 94 { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, }, 95 { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, }, 96 { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, }, 97 { .compatible = "renesas,sdhi-r7s72100", .data = &of_rz_compatible, }, 98 { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, 99 { .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, 100 { .compatible = "renesas,sdhi-r8a7743", .data = &of_rcar_gen2_compatible, }, 101 { .compatible = "renesas,sdhi-r8a7745", .data = &of_rcar_gen2_compatible, }, 102 { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, 103 { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, 104 { .compatible = "renesas,sdhi-r8a7792", .data = &of_rcar_gen2_compatible, }, 105 { .compatible = "renesas,sdhi-r8a7793", .data = &of_rcar_gen2_compatible, }, 106 { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, 107 { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, 108 { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, 109 { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, }, 110 { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, }, 111 { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, 112 { .compatible = "renesas,sdhi-shmobile" }, 113 {}, 114 }; 115 MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); 116 117 static void renesas_sdhi_sys_dmac_enable_dma(struct tmio_mmc_host *host, 118 bool enable) 119 { 120 struct renesas_sdhi *priv = host_to_priv(host); 121 122 if (!host->chan_tx || !host->chan_rx) 123 return; 124 125 if (priv->dma_priv.enable) 126 priv->dma_priv.enable(host, enable); 127 } 128 129 static void renesas_sdhi_sys_dmac_abort_dma(struct tmio_mmc_host *host) 130 { 131 renesas_sdhi_sys_dmac_enable_dma(host, false); 132 133 if (host->chan_rx) 134 dmaengine_terminate_all(host->chan_rx); 135 if (host->chan_tx) 136 dmaengine_terminate_all(host->chan_tx); 137 138 renesas_sdhi_sys_dmac_enable_dma(host, true); 139 } 140 141 static void renesas_sdhi_sys_dmac_dataend_dma(struct tmio_mmc_host *host) 142 { 143 struct renesas_sdhi *priv = host_to_priv(host); 144 145 complete(&priv->dma_priv.dma_dataend); 146 } 147 148 static void renesas_sdhi_sys_dmac_dma_callback(void *arg) 149 { 150 struct tmio_mmc_host *host = arg; 151 struct renesas_sdhi *priv = host_to_priv(host); 152 153 spin_lock_irq(&host->lock); 154 155 if (!host->data) 156 goto out; 157 158 if (host->data->flags & MMC_DATA_READ) 159 dma_unmap_sg(host->chan_rx->device->dev, 160 host->sg_ptr, host->sg_len, 161 DMA_FROM_DEVICE); 162 else 163 dma_unmap_sg(host->chan_tx->device->dev, 164 host->sg_ptr, host->sg_len, 165 DMA_TO_DEVICE); 166 167 spin_unlock_irq(&host->lock); 168 169 wait_for_completion(&priv->dma_priv.dma_dataend); 170 171 spin_lock_irq(&host->lock); 172 tmio_mmc_do_data_irq(host); 173 out: 174 spin_unlock_irq(&host->lock); 175 } 176 177 static void renesas_sdhi_sys_dmac_start_dma_rx(struct tmio_mmc_host *host) 178 { 179 struct renesas_sdhi *priv = host_to_priv(host); 180 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 181 struct dma_async_tx_descriptor *desc = NULL; 182 struct dma_chan *chan = host->chan_rx; 183 dma_cookie_t cookie; 184 int ret, i; 185 bool aligned = true, multiple = true; 186 unsigned int align = (1 << host->pdata->alignment_shift) - 1; 187 188 for_each_sg(sg, sg_tmp, host->sg_len, i) { 189 if (sg_tmp->offset & align) 190 aligned = false; 191 if (sg_tmp->length & align) { 192 multiple = false; 193 break; 194 } 195 } 196 197 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || 198 (align & PAGE_MASK))) || !multiple) { 199 ret = -EINVAL; 200 goto pio; 201 } 202 203 if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 204 host->force_pio = true; 205 return; 206 } 207 208 /* The only sg element can be unaligned, use our bounce buffer then */ 209 if (!aligned) { 210 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 211 host->sg_ptr = &host->bounce_sg; 212 sg = host->sg_ptr; 213 } 214 215 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); 216 if (ret > 0) 217 desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM, 218 DMA_CTRL_ACK); 219 220 if (desc) { 221 reinit_completion(&priv->dma_priv.dma_dataend); 222 desc->callback = renesas_sdhi_sys_dmac_dma_callback; 223 desc->callback_param = host; 224 225 cookie = dmaengine_submit(desc); 226 if (cookie < 0) { 227 desc = NULL; 228 ret = cookie; 229 } 230 } 231 pio: 232 if (!desc) { 233 /* DMA failed, fall back to PIO */ 234 renesas_sdhi_sys_dmac_enable_dma(host, false); 235 if (ret >= 0) 236 ret = -EIO; 237 host->chan_rx = NULL; 238 dma_release_channel(chan); 239 /* Free the Tx channel too */ 240 chan = host->chan_tx; 241 if (chan) { 242 host->chan_tx = NULL; 243 dma_release_channel(chan); 244 } 245 dev_warn(&host->pdev->dev, 246 "DMA failed: %d, falling back to PIO\n", ret); 247 } 248 } 249 250 static void renesas_sdhi_sys_dmac_start_dma_tx(struct tmio_mmc_host *host) 251 { 252 struct renesas_sdhi *priv = host_to_priv(host); 253 struct scatterlist *sg = host->sg_ptr, *sg_tmp; 254 struct dma_async_tx_descriptor *desc = NULL; 255 struct dma_chan *chan = host->chan_tx; 256 dma_cookie_t cookie; 257 int ret, i; 258 bool aligned = true, multiple = true; 259 unsigned int align = (1 << host->pdata->alignment_shift) - 1; 260 261 for_each_sg(sg, sg_tmp, host->sg_len, i) { 262 if (sg_tmp->offset & align) 263 aligned = false; 264 if (sg_tmp->length & align) { 265 multiple = false; 266 break; 267 } 268 } 269 270 if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || 271 (align & PAGE_MASK))) || !multiple) { 272 ret = -EINVAL; 273 goto pio; 274 } 275 276 if (sg->length < TMIO_MMC_MIN_DMA_LEN) { 277 host->force_pio = true; 278 return; 279 } 280 281 /* The only sg element can be unaligned, use our bounce buffer then */ 282 if (!aligned) { 283 unsigned long flags; 284 void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); 285 286 sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); 287 memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); 288 tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); 289 host->sg_ptr = &host->bounce_sg; 290 sg = host->sg_ptr; 291 } 292 293 ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); 294 if (ret > 0) 295 desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV, 296 DMA_CTRL_ACK); 297 298 if (desc) { 299 reinit_completion(&priv->dma_priv.dma_dataend); 300 desc->callback = renesas_sdhi_sys_dmac_dma_callback; 301 desc->callback_param = host; 302 303 cookie = dmaengine_submit(desc); 304 if (cookie < 0) { 305 desc = NULL; 306 ret = cookie; 307 } 308 } 309 pio: 310 if (!desc) { 311 /* DMA failed, fall back to PIO */ 312 renesas_sdhi_sys_dmac_enable_dma(host, false); 313 if (ret >= 0) 314 ret = -EIO; 315 host->chan_tx = NULL; 316 dma_release_channel(chan); 317 /* Free the Rx channel too */ 318 chan = host->chan_rx; 319 if (chan) { 320 host->chan_rx = NULL; 321 dma_release_channel(chan); 322 } 323 dev_warn(&host->pdev->dev, 324 "DMA failed: %d, falling back to PIO\n", ret); 325 } 326 } 327 328 static void renesas_sdhi_sys_dmac_start_dma(struct tmio_mmc_host *host, 329 struct mmc_data *data) 330 { 331 if (data->flags & MMC_DATA_READ) { 332 if (host->chan_rx) 333 renesas_sdhi_sys_dmac_start_dma_rx(host); 334 } else { 335 if (host->chan_tx) 336 renesas_sdhi_sys_dmac_start_dma_tx(host); 337 } 338 } 339 340 static void renesas_sdhi_sys_dmac_issue_tasklet_fn(unsigned long priv) 341 { 342 struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; 343 struct dma_chan *chan = NULL; 344 345 spin_lock_irq(&host->lock); 346 347 if (host->data) { 348 if (host->data->flags & MMC_DATA_READ) 349 chan = host->chan_rx; 350 else 351 chan = host->chan_tx; 352 } 353 354 spin_unlock_irq(&host->lock); 355 356 tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); 357 358 if (chan) 359 dma_async_issue_pending(chan); 360 } 361 362 static void renesas_sdhi_sys_dmac_request_dma(struct tmio_mmc_host *host, 363 struct tmio_mmc_data *pdata) 364 { 365 struct renesas_sdhi *priv = host_to_priv(host); 366 367 /* We can only either use DMA for both Tx and Rx or not use it at all */ 368 if (!host->pdev->dev.of_node && 369 (!pdata->chan_priv_tx || !pdata->chan_priv_rx)) 370 return; 371 372 if (!host->chan_tx && !host->chan_rx) { 373 struct resource *res = platform_get_resource(host->pdev, 374 IORESOURCE_MEM, 0); 375 struct dma_slave_config cfg = {}; 376 dma_cap_mask_t mask; 377 int ret; 378 379 if (!res) 380 return; 381 382 dma_cap_zero(mask); 383 dma_cap_set(DMA_SLAVE, mask); 384 385 host->chan_tx = dma_request_slave_channel_compat(mask, 386 priv->dma_priv.filter, pdata->chan_priv_tx, 387 &host->pdev->dev, "tx"); 388 dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, 389 host->chan_tx); 390 391 if (!host->chan_tx) 392 return; 393 394 cfg.direction = DMA_MEM_TO_DEV; 395 cfg.dst_addr = res->start + 396 (CTL_SD_DATA_PORT << host->bus_shift); 397 cfg.dst_addr_width = priv->dma_priv.dma_buswidth; 398 if (!cfg.dst_addr_width) 399 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 400 cfg.src_addr = 0; 401 ret = dmaengine_slave_config(host->chan_tx, &cfg); 402 if (ret < 0) 403 goto ecfgtx; 404 405 host->chan_rx = dma_request_slave_channel_compat(mask, 406 priv->dma_priv.filter, pdata->chan_priv_rx, 407 &host->pdev->dev, "rx"); 408 dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, 409 host->chan_rx); 410 411 if (!host->chan_rx) 412 goto ereqrx; 413 414 cfg.direction = DMA_DEV_TO_MEM; 415 cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset; 416 cfg.src_addr_width = priv->dma_priv.dma_buswidth; 417 if (!cfg.src_addr_width) 418 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 419 cfg.dst_addr = 0; 420 ret = dmaengine_slave_config(host->chan_rx, &cfg); 421 if (ret < 0) 422 goto ecfgrx; 423 424 host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); 425 if (!host->bounce_buf) 426 goto ebouncebuf; 427 428 init_completion(&priv->dma_priv.dma_dataend); 429 tasklet_init(&host->dma_issue, 430 renesas_sdhi_sys_dmac_issue_tasklet_fn, 431 (unsigned long)host); 432 } 433 434 renesas_sdhi_sys_dmac_enable_dma(host, true); 435 436 return; 437 438 ebouncebuf: 439 ecfgrx: 440 dma_release_channel(host->chan_rx); 441 host->chan_rx = NULL; 442 ereqrx: 443 ecfgtx: 444 dma_release_channel(host->chan_tx); 445 host->chan_tx = NULL; 446 } 447 448 static void renesas_sdhi_sys_dmac_release_dma(struct tmio_mmc_host *host) 449 { 450 if (host->chan_tx) { 451 struct dma_chan *chan = host->chan_tx; 452 453 host->chan_tx = NULL; 454 dma_release_channel(chan); 455 } 456 if (host->chan_rx) { 457 struct dma_chan *chan = host->chan_rx; 458 459 host->chan_rx = NULL; 460 dma_release_channel(chan); 461 } 462 if (host->bounce_buf) { 463 free_pages((unsigned long)host->bounce_buf, 0); 464 host->bounce_buf = NULL; 465 } 466 } 467 468 static const struct tmio_mmc_dma_ops renesas_sdhi_sys_dmac_dma_ops = { 469 .start = renesas_sdhi_sys_dmac_start_dma, 470 .enable = renesas_sdhi_sys_dmac_enable_dma, 471 .request = renesas_sdhi_sys_dmac_request_dma, 472 .release = renesas_sdhi_sys_dmac_release_dma, 473 .abort = renesas_sdhi_sys_dmac_abort_dma, 474 .dataend = renesas_sdhi_sys_dmac_dataend_dma, 475 }; 476 477 /* 478 * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC 479 * implementation. Currently empty as all supported ES versions use 480 * the internal DMAC. 481 */ 482 static const struct soc_device_attribute gen3_soc_whitelist[] = { 483 { /* sentinel */ } 484 }; 485 486 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) 487 { 488 if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && 489 !soc_device_match(gen3_soc_whitelist)) 490 return -ENODEV; 491 492 return renesas_sdhi_probe(pdev, &renesas_sdhi_sys_dmac_dma_ops); 493 } 494 495 static const struct dev_pm_ops renesas_sdhi_sys_dmac_dev_pm_ops = { 496 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 497 pm_runtime_force_resume) 498 SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, 499 tmio_mmc_host_runtime_resume, 500 NULL) 501 }; 502 503 static struct platform_driver renesas_sys_dmac_sdhi_driver = { 504 .driver = { 505 .name = "sh_mobile_sdhi", 506 .pm = &renesas_sdhi_sys_dmac_dev_pm_ops, 507 .of_match_table = renesas_sdhi_sys_dmac_of_match, 508 }, 509 .probe = renesas_sdhi_sys_dmac_probe, 510 .remove = renesas_sdhi_remove, 511 }; 512 513 module_platform_driver(renesas_sys_dmac_sdhi_driver); 514 515 MODULE_DESCRIPTION("Renesas SDHI driver"); 516 MODULE_AUTHOR("Magnus Damm"); 517 MODULE_LICENSE("GPL v2"); 518 MODULE_ALIAS("platform:sh_mobile_sdhi"); 519