// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2017-2018 Socionext Inc.
// Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "tmio_mmc.h"

#define UNIPHIER_SD_CLK_CTL_DIV1024        BIT(16)
#define UNIPHIER_SD_CLK_CTL_DIV1           BIT(10)
#define UNIPHIER_SD_CLKCTL_OFFEN           BIT(9)   // auto SDCLK stop
#define UNIPHIER_SD_CC_EXT_MODE            0x1b0
#define UNIPHIER_SD_CC_EXT_MODE_DMA        BIT(1)
#define UNIPHIER_SD_HOST_MODE              0x1c8
#define UNIPHIER_SD_VOLT                   0x1e4
#define UNIPHIER_SD_VOLT_MASK              GENMASK(1, 0)
#define UNIPHIER_SD_VOLT_OFF               0
#define UNIPHIER_SD_VOLT_330               1        // 3.3V signal
#define UNIPHIER_SD_VOLT_180               2        // 1.8V signal
#define UNIPHIER_SD_DMA_MODE               0x410
#define UNIPHIER_SD_DMA_MODE_DIR_MASK      GENMASK(17, 16)
#define UNIPHIER_SD_DMA_MODE_DIR_TO_DEV    0
#define UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV  1
#define UNIPHIER_SD_DMA_MODE_WIDTH_MASK    GENMASK(5, 4)
#define UNIPHIER_SD_DMA_MODE_WIDTH_8       0
#define UNIPHIER_SD_DMA_MODE_WIDTH_16      1
#define UNIPHIER_SD_DMA_MODE_WIDTH_32      2
#define UNIPHIER_SD_DMA_MODE_WIDTH_64      3
#define UNIPHIER_SD_DMA_MODE_ADDR_INC      BIT(0)   // 1: inc, 0: fixed
#define UNIPHIER_SD_DMA_CTL                0x414
#define UNIPHIER_SD_DMA_CTL_START          BIT(0)   // start DMA (auto cleared)
#define UNIPHIER_SD_DMA_RST                0x418
#define UNIPHIER_SD_DMA_RST_CH1            BIT(9)
#define UNIPHIER_SD_DMA_RST_CH0            BIT(8)
#define UNIPHIER_SD_DMA_ADDR_L             0x440
#define UNIPHIER_SD_DMA_ADDR_H             0x444

/*
 * IP is extended to support various features: built-in DMA engine,
 * 1/1024 divisor, etc.
 */
#define UNIPHIER_SD_CAP_EXTENDED_IP        BIT(0)
/* RX channel of the built-in DMA controller is broken (Pro5) */
#define UNIPHIER_SD_CAP_BROKEN_DMA_RX      BIT(1)

struct uniphier_sd_priv {
        struct tmio_mmc_data tmio_data;
        struct pinctrl *pinctrl;
        struct pinctrl_state *pinstate_uhs;
        struct clk *clk;
        struct reset_control *rst;
        struct reset_control *rst_br;
        struct reset_control *rst_hw;
        struct dma_chan *chan;
        enum dma_data_direction dma_dir;
        unsigned long clk_rate;
        unsigned long caps;
};

static void *uniphier_sd_priv(struct tmio_mmc_host *host)
{
        return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
}

static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
{
        sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
}
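
/*
 * Two DMA implementations are provided below.  The original IP has no
 * DMA engine of its own and relies on an external dmaengine channel,
 * while the extended IP (UNIPHIER_SD_CAP_EXTENDED_IP) contains a
 * built-in DMA engine.  uniphier_sd_probe() selects host->dma_ops
 * accordingly, based on the capability flags of the compatible string.
 */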

/* external DMA engine */
static void uniphier_sd_external_dma_issue(unsigned long arg)
{
        struct tmio_mmc_host *host = (void *)arg;
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        uniphier_sd_dma_endisable(host, 1);
        dma_async_issue_pending(priv->chan);
}

static void uniphier_sd_external_dma_callback(void *param,
                                        const struct dmaengine_result *result)
{
        struct tmio_mmc_host *host = param;
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        unsigned long flags;

        dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
                     priv->dma_dir);

        spin_lock_irqsave(&host->lock, flags);

        if (result->result == DMA_TRANS_NOERROR) {
                /*
                 * When the external DMA engine is enabled, strangely enough,
                 * the DATAEND flag can be asserted even if the DMA engine has
                 * not been kicked yet. Enable the TMIO_STAT_DATAEND irq only
                 * after we make sure the DMA engine finishes the transfer,
                 * hence, in this callback.
                 */
                tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
        } else {
                host->data->error = -ETIMEDOUT;
                tmio_mmc_do_data_irq(host);
        }

        spin_unlock_irqrestore(&host->lock, flags);
}

static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
                                           struct mmc_data *data)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        enum dma_transfer_direction dma_tx_dir;
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;
        int sg_len;

        if (!priv->chan)
                goto force_pio;

        if (data->flags & MMC_DATA_READ) {
                priv->dma_dir = DMA_FROM_DEVICE;
                dma_tx_dir = DMA_DEV_TO_MEM;
        } else {
                priv->dma_dir = DMA_TO_DEVICE;
                dma_tx_dir = DMA_MEM_TO_DEV;
        }

        sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
                            priv->dma_dir);
        if (sg_len == 0)
                goto force_pio;

        desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
                                       dma_tx_dir, DMA_CTRL_ACK);
        if (!desc)
                goto unmap_sg;

        desc->callback_result = uniphier_sd_external_dma_callback;
        desc->callback_param = host;

        cookie = dmaengine_submit(desc);
        if (cookie < 0)
                goto unmap_sg;

        host->dma_on = true;

        return;

unmap_sg:
        dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
                     priv->dma_dir);
force_pio:
        uniphier_sd_dma_endisable(host, 0);
}
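
/*
 * Nothing to do in the .enable hook: this driver switches DMA on and
 * off per transfer via uniphier_sd_dma_endisable() in the issue, abort
 * and dataend paths, so the hook below is intentionally left empty.
 */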

static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
                                            bool enable)
{
}

static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
                                             struct tmio_mmc_data *pdata)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        struct dma_chan *chan;

        chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
        if (IS_ERR(chan)) {
                dev_warn(mmc_dev(host->mmc),
                         "failed to request DMA channel. falling back to PIO\n");
                return;  /* just use PIO even for -EPROBE_DEFER */
        }

        /* this driver uses a single channel for both RX and TX */
        priv->chan = chan;
        host->chan_rx = chan;
        host->chan_tx = chan;

        tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
                     (unsigned long)host);
}

static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        if (priv->chan)
                dma_release_channel(priv->chan);
}

static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        uniphier_sd_dma_endisable(host, 0);

        if (priv->chan)
                dmaengine_terminate_sync(priv->chan);
}

static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
{
        uniphier_sd_dma_endisable(host, 0);

        tmio_mmc_do_data_irq(host);
}

static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
        .start = uniphier_sd_external_dma_start,
        .enable = uniphier_sd_external_dma_enable,
        .request = uniphier_sd_external_dma_request,
        .release = uniphier_sd_external_dma_release,
        .abort = uniphier_sd_external_dma_abort,
        .dataend = uniphier_sd_external_dma_dataend,
};

static void uniphier_sd_internal_dma_issue(unsigned long arg)
{
        struct tmio_mmc_host *host = (void *)arg;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
        spin_unlock_irqrestore(&host->lock, flags);

        uniphier_sd_dma_endisable(host, 1);
        writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
}
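
/*
 * The built-in DMA engine handles a single, 8-byte aligned scatterlist
 * segment per request.  The transfer is set up here (direction, 64-bit
 * bus width, incrementing address) and kicked later from the issue
 * tasklet above.
 */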

static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
                                           struct mmc_data *data)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        struct scatterlist *sg = host->sg_ptr;
        dma_addr_t dma_addr;
        unsigned int dma_mode_dir;
        u32 dma_mode;
        int sg_len;

        if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
                goto force_pio;

        if (WARN_ON(host->sg_len != 1))
                goto force_pio;

        if (!IS_ALIGNED(sg->offset, 8))
                goto force_pio;

        if (data->flags & MMC_DATA_READ) {
                priv->dma_dir = DMA_FROM_DEVICE;
                dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
        } else {
                priv->dma_dir = DMA_TO_DEVICE;
                dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
        }

        sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
        if (sg_len == 0)
                goto force_pio;

        dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
        dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
                               UNIPHIER_SD_DMA_MODE_WIDTH_64);
        dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;

        writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);

        dma_addr = sg_dma_address(data->sg);
        writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
        writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);

        host->dma_on = true;

        return;
force_pio:
        uniphier_sd_dma_endisable(host, 0);
}

static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
                                            bool enable)
{
}

static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
                                             struct tmio_mmc_data *pdata)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        /*
         * Due to a hardware bug, Pro5 cannot use DMA for RX.
         * DMA is still usable for TX, but RX must fall back to PIO.
         */
        if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
                host->chan_rx = (void *)0xdeadbeaf;

        host->chan_tx = (void *)0xdeadbeaf;

        tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
                     (unsigned long)host);
}

static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
{
        /* Clearing each channel pointer marks DMA as disabled for that direction */
        host->chan_rx = NULL;
        host->chan_tx = NULL;
}

static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
{
        u32 tmp;

        uniphier_sd_dma_endisable(host, 0);

        tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
        tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
        writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);

        tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
        writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
}

static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        uniphier_sd_dma_endisable(host, 0);
        dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);

        tmio_mmc_do_data_irq(host);
}

static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
        .start = uniphier_sd_internal_dma_start,
        .enable = uniphier_sd_internal_dma_enable,
        .request = uniphier_sd_internal_dma_request,
        .release = uniphier_sd_internal_dma_release,
        .abort = uniphier_sd_internal_dma_abort,
        .dataend = uniphier_sd_internal_dma_dataend,
};
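
/*
 * Clock and reset bring-up: the host clock is set to the fastest rate
 * the clock provider accepts (clk_set_rate() with ULONG_MAX), and the
 * resulting rate is cached in clk_rate as the base for the divisor
 * calculation in uniphier_sd_set_clock().
 */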

static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        struct mmc_host *mmc = host->mmc;
        int ret;

        ret = clk_prepare_enable(priv->clk);
        if (ret)
                return ret;

        ret = clk_set_rate(priv->clk, ULONG_MAX);
        if (ret)
                goto disable_clk;

        priv->clk_rate = clk_get_rate(priv->clk);

        /* Keep max-frequency if the property was set; otherwise use the clock rate. */
        if (!mmc->f_max)
                mmc->f_max = priv->clk_rate;

        /*
         * 1/512 is the finest divisor in the original IP.  Newer versions
         * also support a 1/1024 divisor. (UniPhier-specific extension)
         */
        if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
                mmc->f_min = priv->clk_rate / 1024;
        else
                mmc->f_min = priv->clk_rate / 512;

        ret = reset_control_deassert(priv->rst);
        if (ret)
                goto disable_clk;

        ret = reset_control_deassert(priv->rst_br);
        if (ret)
                goto assert_rst;

        return 0;

assert_rst:
        reset_control_assert(priv->rst);
disable_clk:
        clk_disable_unprepare(priv->clk);

        return ret;
}

static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        reset_control_assert(priv->rst_br);
        reset_control_assert(priv->rst);
        clk_disable_unprepare(priv->clk);
}

static void uniphier_sd_hw_reset(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

        reset_control_assert(priv->rst_hw);
        /* For eMMC, minimum is 1us but give it 9us for good measure */
        udelay(9);
        reset_control_deassert(priv->rst_hw);
        /* For eMMC, minimum is 200us but give it 300us for good measure */
        usleep_range(300, 1000);
}

static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
                                  unsigned int clock)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        unsigned long divisor;
        u32 tmp;

        tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

        /* stop the clock before changing its rate to avoid glitches */
        tmp &= ~CLK_CTL_SCLKEN;
        writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

        if (clock == 0)
                return;

        tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
        tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
        tmp &= ~CLK_CTL_DIV_MASK;

        divisor = priv->clk_rate / clock;

        /*
         * In the original IP, bit[7:0] represents the divisor.
         * bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2
         *
         * The IP does not define a way to achieve 1/1.  For UniPhier variants,
         * bit10 is used for 1/1.  Newer versions of UniPhier variants use
         * bit16 for 1/1024.
         */
        if (divisor <= 1)
                tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
        else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
                tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
        else
                tmp |= roundup_pow_of_two(divisor) >> 2;

        writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

        tmp |= CLK_CTL_SCLKEN;
        writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}

static void uniphier_sd_host_init(struct tmio_mmc_host *host)
{
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        u32 val;

        /*
         * Connected to 32bit AXI.
         * This register holds settings for SoC-specific internal bus
         * connection.  Worse, the register spec was changed, breaking
         * backward compatibility.  Write an appropriate value depending
         * on a flag associated with a compatible string.
         */
        if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
                val = 0x00000101;
        else
                val = 0x00000000;

        writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);

        val = 0;
        /*
         * If supported, the controller can automatically
         * enable/disable the clock line to the card.
         */
        if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
                val |= UNIPHIER_SD_CLKCTL_OFFEN;

        writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}
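
/*
 * UHS signal voltage switching: the VOLT register selects the 3.3V or
 * 1.8V signal level, and the pads are switched to the "uhs" pinctrl
 * state for 1.8V signaling (back to the default state for 3.3V).
 */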

static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
                                                   struct mmc_ios *ios)
{
        struct tmio_mmc_host *host = mmc_priv(mmc);
        struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
        struct pinctrl_state *pinstate = NULL;
        u32 val, tmp;

        switch (ios->signal_voltage) {
        case MMC_SIGNAL_VOLTAGE_330:
                val = UNIPHIER_SD_VOLT_330;
                break;
        case MMC_SIGNAL_VOLTAGE_180:
                val = UNIPHIER_SD_VOLT_180;
                pinstate = priv->pinstate_uhs;
                break;
        default:
                return -ENOTSUPP;
        }

        tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
        tmp &= ~UNIPHIER_SD_VOLT_MASK;
        tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
        writel(tmp, host->ctl + UNIPHIER_SD_VOLT);

        if (pinstate)
                pinctrl_select_state(priv->pinctrl, pinstate);
        else
                pinctrl_select_default_state(mmc_dev(mmc));

        return 0;
}

static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
                                struct uniphier_sd_priv *priv)
{
        priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
        if (IS_ERR(priv->pinctrl))
                return PTR_ERR(priv->pinctrl);

        priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
        if (IS_ERR(priv->pinstate_uhs))
                return PTR_ERR(priv->pinstate_uhs);

        host->ops.start_signal_voltage_switch =
                uniphier_sd_start_signal_voltage_switch;

        return 0;
}
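
/*
 * Probe: acquire the clock and reset controls (the non-extended IP
 * needs an extra "bridge" reset), allocate the tmio host, and select
 * the internal or external DMA implementation based on the capability
 * flags taken from the compatible string.
 */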

static int uniphier_sd_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct uniphier_sd_priv *priv;
        struct tmio_mmc_data *tmio_data;
        struct tmio_mmc_host *host;
        int irq, ret;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->caps = (unsigned long)of_device_get_match_data(dev);

        priv->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(priv->clk)) {
                dev_err(dev, "failed to get clock\n");
                return PTR_ERR(priv->clk);
        }

        priv->rst = devm_reset_control_get_shared(dev, "host");
        if (IS_ERR(priv->rst)) {
                dev_err(dev, "failed to get host reset\n");
                return PTR_ERR(priv->rst);
        }

        /* old version has one more reset */
        if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
                priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
                if (IS_ERR(priv->rst_br)) {
                        dev_err(dev, "failed to get bridge reset\n");
                        return PTR_ERR(priv->rst_br);
                }
        }

        tmio_data = &priv->tmio_data;
        tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;

        host = tmio_mmc_host_alloc(pdev, tmio_data);
        if (IS_ERR(host))
                return PTR_ERR(host);

        if (host->mmc->caps & MMC_CAP_HW_RESET) {
                priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
                if (IS_ERR(priv->rst_hw)) {
                        dev_err(dev, "failed to get hw reset\n");
                        ret = PTR_ERR(priv->rst_hw);
                        goto free_host;
                }
                host->hw_reset = uniphier_sd_hw_reset;
        }

        if (host->mmc->caps & MMC_CAP_UHS) {
                ret = uniphier_sd_uhs_init(host, priv);
                if (ret) {
                        dev_warn(dev,
                                 "failed to setup UHS (error %d). Disabling UHS.",
                                 ret);
                        host->mmc->caps &= ~MMC_CAP_UHS;
                }
        }

        ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
                               dev_name(dev), host);
        if (ret)
                goto free_host;

        if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
                host->dma_ops = &uniphier_sd_internal_dma_ops;
        else
                host->dma_ops = &uniphier_sd_external_dma_ops;

        host->bus_shift = 1;
        host->clk_enable = uniphier_sd_clk_enable;
        host->clk_disable = uniphier_sd_clk_disable;
        host->set_clock = uniphier_sd_set_clock;

        ret = uniphier_sd_clk_enable(host);
        if (ret)
                goto free_host;

        uniphier_sd_host_init(host);

        tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;
        if (host->mmc->caps & MMC_CAP_UHS)
                tmio_data->ocr_mask |= MMC_VDD_165_195;

        tmio_data->max_segs = 1;
        tmio_data->max_blk_count = U16_MAX;

        ret = tmio_mmc_host_probe(host);
        if (ret)
                goto free_host;

        return 0;

free_host:
        tmio_mmc_host_free(host);

        return ret;
}

static int uniphier_sd_remove(struct platform_device *pdev)
{
        struct tmio_mmc_host *host = platform_get_drvdata(pdev);

        tmio_mmc_host_remove(host);
        uniphier_sd_clk_disable(host);

        return 0;
}

static const struct of_device_id uniphier_sd_match[] = {
        {
                .compatible = "socionext,uniphier-sd-v2.91",
        },
        {
                .compatible = "socionext,uniphier-sd-v3.1",
                .data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP |
                                 UNIPHIER_SD_CAP_BROKEN_DMA_RX),
        },
        {
                .compatible = "socionext,uniphier-sd-v3.1.1",
                .data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP,
        },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_sd_match);

static struct platform_driver uniphier_sd_driver = {
        .probe = uniphier_sd_probe,
        .remove = uniphier_sd_remove,
        .driver = {
                .name = "uniphier-sd",
                .of_match_table = uniphier_sd_match,
        },
};
module_platform_driver(uniphier_sd_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver");
MODULE_LICENSE("GPL v2");