// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2017-2018 Socionext Inc.
//   Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "tmio_mmc.h"

#define   UNIPHIER_SD_CLK_CTL_DIV1024		BIT(16)
#define   UNIPHIER_SD_CLK_CTL_DIV1		BIT(10)
#define   UNIPHIER_SD_CLKCTL_OFFEN		BIT(9)	// auto SDCLK stop
#define UNIPHIER_SD_CC_EXT_MODE			0x1b0
#define   UNIPHIER_SD_CC_EXT_MODE_DMA		BIT(1)
#define UNIPHIER_SD_HOST_MODE			0x1c8
#define UNIPHIER_SD_VOLT			0x1e4
#define   UNIPHIER_SD_VOLT_MASK			GENMASK(1, 0)
#define   UNIPHIER_SD_VOLT_OFF			0
#define   UNIPHIER_SD_VOLT_330			1	// 3.3V signal
#define   UNIPHIER_SD_VOLT_180			2	// 1.8V signal
#define UNIPHIER_SD_DMA_MODE			0x410
#define   UNIPHIER_SD_DMA_MODE_DIR_MASK		GENMASK(17, 16)
#define   UNIPHIER_SD_DMA_MODE_DIR_TO_DEV	0
#define   UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV	1
#define   UNIPHIER_SD_DMA_MODE_WIDTH_MASK	GENMASK(5, 4)
#define   UNIPHIER_SD_DMA_MODE_WIDTH_8		0
#define   UNIPHIER_SD_DMA_MODE_WIDTH_16		1
#define   UNIPHIER_SD_DMA_MODE_WIDTH_32		2
#define   UNIPHIER_SD_DMA_MODE_WIDTH_64		3
#define   UNIPHIER_SD_DMA_MODE_ADDR_INC		BIT(0)	// 1: inc, 0: fixed
#define UNIPHIER_SD_DMA_CTL			0x414
#define   UNIPHIER_SD_DMA_CTL_START		BIT(0)	// start DMA (auto cleared)
#define UNIPHIER_SD_DMA_RST			0x418
#define   UNIPHIER_SD_DMA_RST_CH1		BIT(9)
#define   UNIPHIER_SD_DMA_RST_CH0		BIT(8)
#define UNIPHIER_SD_DMA_ADDR_L			0x440
#define UNIPHIER_SD_DMA_ADDR_H			0x444

/*
 * The IP is extended to support various features: built-in DMA engine,
 * 1/1024 divisor, etc.
 */
#define UNIPHIER_SD_CAP_EXTENDED_IP		BIT(0)
/* RX channel of the built-in DMA controller is broken (Pro5) */
#define UNIPHIER_SD_CAP_BROKEN_DMA_RX		BIT(1)

struct uniphier_sd_priv {
	struct tmio_mmc_data tmio_data;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pinstate_uhs;
	struct clk *clk;
	struct reset_control *rst;
	struct reset_control *rst_br;
	struct reset_control *rst_hw;
	struct dma_chan *chan;
	enum dma_data_direction dma_dir;
	unsigned long clk_rate;
	unsigned long caps;
};

static void *uniphier_sd_priv(struct tmio_mmc_host *host)
{
	return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
}

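/*
 * Set or clear the DMA enable bit (DMA_ENABLE_DMASDRW) in the standard TMIO
 * register block. Both the external (dmaengine) and the internal (built-in)
 * DMA paths below use this helper.
 */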
static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
{
	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
}

/* external DMA engine */
static void uniphier_sd_external_dma_issue(struct tasklet_struct *t)
{
	struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 1);
	dma_async_issue_pending(priv->chan);
}

static void uniphier_sd_external_dma_callback(void *param,
					const struct dmaengine_result *result)
{
	struct tmio_mmc_host *host = param;
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long flags;

	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);

	spin_lock_irqsave(&host->lock, flags);

	if (result->result == DMA_TRANS_NOERROR) {
		/*
		 * When the external DMA engine is enabled, strangely enough,
		 * the DATAEND flag can be asserted even if the DMA engine has
		 * not been kicked yet. Enable the TMIO_STAT_DATAEND irq only
		 * after we make sure the DMA engine finishes the transfer,
		 * hence, in this callback.
		 */
		tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	} else {
		host->data->error = -ETIMEDOUT;
		tmio_mmc_do_data_irq(host);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	enum dma_transfer_direction dma_tx_dir;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int sg_len;

	if (!priv->chan)
		goto force_pio;

	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_tx_dir = DMA_DEV_TO_MEM;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_tx_dir = DMA_MEM_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
			    priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
				       dma_tx_dir, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_sg;

	desc->callback_result = uniphier_sd_external_dma_callback;
	desc->callback_param = host;

	cookie = dmaengine_submit(desc);
	if (cookie < 0)
		goto unmap_sg;

	host->dma_on = true;

	return;

unmap_sg:
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}

static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}

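/*
 * Request a dmaengine channel at host setup time. "rx-tx" is the channel
 * name looked up in the device tree; a single channel serves both transfer
 * directions.
 */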
static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct dma_chan *chan;

	chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
	if (IS_ERR(chan)) {
		dev_warn(mmc_dev(host->mmc),
			 "failed to request DMA channel. falling back to PIO\n");
		return;	/* just use PIO even for -EPROBE_DEFER */
	}

	/* this driver uses a single channel for both RX and TX */
	priv->chan = chan;
	host->chan_rx = chan;
	host->chan_tx = chan;

	tasklet_setup(&host->dma_issue, uniphier_sd_external_dma_issue);
}

static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	if (priv->chan)
		dma_release_channel(priv->chan);
}

static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 0);

	if (priv->chan)
		dmaengine_terminate_sync(priv->chan);
}

static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
{
	uniphier_sd_dma_endisable(host, 0);

	tmio_mmc_do_data_irq(host);
}

static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
	.start = uniphier_sd_external_dma_start,
	.enable = uniphier_sd_external_dma_enable,
	.request = uniphier_sd_external_dma_request,
	.release = uniphier_sd_external_dma_release,
	.abort = uniphier_sd_external_dma_abort,
	.dataend = uniphier_sd_external_dma_dataend,
};

static void uniphier_sd_internal_dma_issue(struct tasklet_struct *t)
{
	struct tmio_mmc_host *host = from_tasklet(host, t, dma_issue);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	spin_unlock_irqrestore(&host->lock, flags);

	uniphier_sd_dma_endisable(host, 1);
	writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
}

static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct scatterlist *sg = host->sg_ptr;
	dma_addr_t dma_addr;
	unsigned int dma_mode_dir;
	u32 dma_mode;
	int sg_len;

	if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
		goto force_pio;

	if (WARN_ON(host->sg_len != 1))
		goto force_pio;

	if (!IS_ALIGNED(sg->offset, 8))
		goto force_pio;

	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
	dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
			       UNIPHIER_SD_DMA_MODE_WIDTH_64);
	dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;

	writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);

	dma_addr = sg_dma_address(data->sg);
	writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
	writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);

	host->dma_on = true;

	return;
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}

static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}

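/*
 * The built-in DMA engine needs no dmaengine channel. chan_rx/chan_tx are
 * only used here as non-NULL markers so that the TMIO core treats DMA as
 * available; they are cleared again in the release hook below.
 */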
static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	/*
	 * Due to a hardware bug, Pro5 cannot use DMA for RX.
	 * DMA is still usable for TX, but RX must fall back to PIO.
	 */
	if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
		host->chan_rx = (void *)0xdeadbeaf;

	host->chan_tx = (void *)0xdeadbeaf;

	tasklet_setup(&host->dma_issue, uniphier_sd_internal_dma_issue);
}

static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
{
	/* Clear the channel pointers so that DMA is treated as disabled */
	host->chan_rx = NULL;
	host->chan_tx = NULL;
}

static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
{
	u32 tmp;

	uniphier_sd_dma_endisable(host, 0);

	tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
	tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);

	tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
}

static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 0);
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);

	tmio_mmc_do_data_irq(host);
}

static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
	.start = uniphier_sd_internal_dma_start,
	.enable = uniphier_sd_internal_dma_enable,
	.request = uniphier_sd_internal_dma_request,
	.release = uniphier_sd_internal_dma_release,
	.abort = uniphier_sd_internal_dma_abort,
	.dataend = uniphier_sd_internal_dma_dataend,
};

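/*
 * Clock setup, used both directly at probe time and as the TMIO core's
 * clk_enable hook. The clk_set_rate(..., ULONG_MAX) call below asks the
 * clock framework for the highest rate this clock can provide.
 */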
static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct mmc_host *mmc = host->mmc;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	ret = clk_set_rate(priv->clk, ULONG_MAX);
	if (ret)
		goto disable_clk;

	priv->clk_rate = clk_get_rate(priv->clk);

	/* Use the max-frequency property if set, else the clock rate. */
	if (!mmc->f_max)
		mmc->f_max = priv->clk_rate;

	/*
	 * 1/512 is the finest divisor in the original IP. Newer versions
	 * also support a 1/1024 divisor (UniPhier-specific extension).
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		mmc->f_min = priv->clk_rate / 1024;
	else
		mmc->f_min = priv->clk_rate / 512;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto disable_clk;

	ret = reset_control_deassert(priv->rst_br);
	if (ret)
		goto assert_rst;

	return 0;

assert_rst:
	reset_control_assert(priv->rst);
disable_clk:
	clk_disable_unprepare(priv->clk);

	return ret;
}

static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	reset_control_assert(priv->rst_br);
	reset_control_assert(priv->rst);
	clk_disable_unprepare(priv->clk);
}

static void uniphier_sd_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	reset_control_assert(priv->rst_hw);
	/* For eMMC, minimum is 1us but give it 9us for good measure */
	udelay(9);
	reset_control_deassert(priv->rst_hw);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}

static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
				  unsigned int clock)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long divisor;
	u32 tmp;

	tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	/* stop the clock before changing its rate to avoid glitches */
	tmp &= ~CLK_CTL_SCLKEN;
	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	if (clock == 0)
		return;

	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
	tmp &= ~CLK_CTL_DIV_MASK;

	divisor = priv->clk_rate / clock;

	/*
	 * In the original IP, bit[7:0] represents the divisor:
	 * bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2.
	 *
	 * The IP does not define a way to achieve 1/1. For UniPhier variants,
	 * bit10 is used for 1/1. Newer versions of the UniPhier variants use
	 * bit16 for 1/1024.
	 *
	 * For example, a requested divisor of 500 is rounded up to 512 and
	 * encoded as bit7 (512 >> 2 == BIT(7)).
	 */
	if (divisor <= 1)
		tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
	else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
		tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
	else
		tmp |= roundup_pow_of_two(divisor) >> 2;

	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	tmp |= CLK_CTL_SCLKEN;
	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}

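/*
 * One-time controller setup, run in probe after the clock is enabled and
 * before the TMIO core takes over in tmio_mmc_host_probe().
 */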
static void uniphier_sd_host_init(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	u32 val;

	/*
	 * Connected to a 32-bit AXI bus.
	 * This register holds settings for the SoC-specific internal bus
	 * connection. Worse, the register spec was changed, breaking
	 * backward compatibility. Write an appropriate value depending on
	 * the flag associated with the compatible string.
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		val = 0x00000101;
	else
		val = 0x00000000;

	writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);

	val = 0;
	/*
	 * If supported, the controller can automatically
	 * enable/disable the clock line to the card.
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		val |= UNIPHIER_SD_CLKCTL_OFFEN;

	writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}

static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct pinctrl_state *pinstate = NULL;
	u32 val, tmp;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val = UNIPHIER_SD_VOLT_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		val = UNIPHIER_SD_VOLT_180;
		pinstate = priv->pinstate_uhs;
		break;
	default:
		return -ENOTSUPP;
	}

	tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
	tmp &= ~UNIPHIER_SD_VOLT_MASK;
	tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
	writel(tmp, host->ctl + UNIPHIER_SD_VOLT);

	if (pinstate)
		pinctrl_select_state(priv->pinctrl, pinstate);
	else
		pinctrl_select_default_state(mmc_dev(mmc));

	return 0;
}

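/*
 * UHS signalling needs a dedicated "uhs" pinctrl state for 1.8V I/O. If the
 * lookup fails here, the probe path below drops MMC_CAP_UHS and keeps the
 * host running without UHS support.
 */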
static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
				struct uniphier_sd_priv *priv)
{
	priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
	if (IS_ERR(priv->pinctrl))
		return PTR_ERR(priv->pinctrl);

	priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
	if (IS_ERR(priv->pinstate_uhs))
		return PTR_ERR(priv->pinstate_uhs);

	host->ops.start_signal_voltage_switch =
		uniphier_sd_start_signal_voltage_switch;

	return 0;
}

static int uniphier_sd_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_sd_priv *priv;
	struct tmio_mmc_data *tmio_data;
	struct tmio_mmc_host *host;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->caps = (unsigned long)of_device_get_match_data(dev);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	priv->rst = devm_reset_control_get_shared(dev, "host");
	if (IS_ERR(priv->rst)) {
		dev_err(dev, "failed to get host reset\n");
		return PTR_ERR(priv->rst);
	}

	/* old version has one more reset */
	if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
		priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
		if (IS_ERR(priv->rst_br)) {
			dev_err(dev, "failed to get bridge reset\n");
			return PTR_ERR(priv->rst_br);
		}
	}

	tmio_data = &priv->tmio_data;
	tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;
	tmio_data->flags |= TMIO_MMC_USE_BUSY_TIMEOUT;

	host = tmio_mmc_host_alloc(pdev, tmio_data);
	if (IS_ERR(host))
		return PTR_ERR(host);

	if (host->mmc->caps & MMC_CAP_HW_RESET) {
		priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
		if (IS_ERR(priv->rst_hw)) {
			dev_err(dev, "failed to get hw reset\n");
			ret = PTR_ERR(priv->rst_hw);
			goto free_host;
		}
		host->ops.card_hw_reset = uniphier_sd_hw_reset;
	}

	if (host->mmc->caps & MMC_CAP_UHS) {
		ret = uniphier_sd_uhs_init(host, priv);
		if (ret) {
			dev_warn(dev,
				 "failed to setup UHS (error %d). Disabling UHS.",
				 ret);
			host->mmc->caps &= ~MMC_CAP_UHS;
		}
	}

	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		host->dma_ops = &uniphier_sd_internal_dma_ops;
	else
		host->dma_ops = &uniphier_sd_external_dma_ops;

	host->bus_shift = 1;
	host->clk_enable = uniphier_sd_clk_enable;
	host->clk_disable = uniphier_sd_clk_disable;
	host->set_clock = uniphier_sd_set_clock;

	ret = uniphier_sd_clk_enable(host);
	if (ret)
		goto free_host;

	uniphier_sd_host_init(host);

	tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;
	if (host->mmc->caps & MMC_CAP_UHS)
		tmio_data->ocr_mask |= MMC_VDD_165_195;

	tmio_data->max_segs = 1;
	tmio_data->max_blk_count = U16_MAX;

	ret = tmio_mmc_host_probe(host);
	if (ret)
		goto disable_clk;

	ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
			       dev_name(dev), host);
	if (ret)
		goto remove_host;

	return 0;

remove_host:
	tmio_mmc_host_remove(host);
disable_clk:
	uniphier_sd_clk_disable(host);
free_host:
	tmio_mmc_host_free(host);

	return ret;
}

static int uniphier_sd_remove(struct platform_device *pdev)
{
	struct tmio_mmc_host *host = platform_get_drvdata(pdev);

	tmio_mmc_host_remove(host);
	uniphier_sd_clk_disable(host);
	tmio_mmc_host_free(host);

	return 0;
}

static const struct of_device_id uniphier_sd_match[] = {
	{
		.compatible = "socionext,uniphier-sd-v2.91",
	},
	{
		.compatible = "socionext,uniphier-sd-v3.1",
		.data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP |
				 UNIPHIER_SD_CAP_BROKEN_DMA_RX),
	},
	{
		.compatible = "socionext,uniphier-sd-v3.1.1",
		.data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_sd_match);

static struct platform_driver uniphier_sd_driver = {
	.probe = uniphier_sd_probe,
	.remove = uniphier_sd_remove,
	.driver = {
		.name = "uniphier-sd",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.of_match_table = uniphier_sd_match,
	},
};
module_platform_driver(uniphier_sd_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver");
MODULE_LICENSE("GPL v2");