// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2017-2018 Socionext Inc.
// Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "tmio_mmc.h"

#define UNIPHIER_SD_CLK_CTL_DIV1024		BIT(16)
#define UNIPHIER_SD_CLK_CTL_DIV1		BIT(10)
#define UNIPHIER_SD_CLKCTL_OFFEN		BIT(9)	// auto SDCLK stop
#define UNIPHIER_SD_CC_EXT_MODE			0x1b0
#define UNIPHIER_SD_CC_EXT_MODE_DMA		BIT(1)
#define UNIPHIER_SD_HOST_MODE			0x1c8
#define UNIPHIER_SD_VOLT			0x1e4
#define UNIPHIER_SD_VOLT_MASK			GENMASK(1, 0)
#define UNIPHIER_SD_VOLT_OFF			0
#define UNIPHIER_SD_VOLT_330			1	// 3.3V signal
#define UNIPHIER_SD_VOLT_180			2	// 1.8V signal
#define UNIPHIER_SD_DMA_MODE			0x410
#define UNIPHIER_SD_DMA_MODE_DIR_MASK		GENMASK(17, 16)
#define UNIPHIER_SD_DMA_MODE_DIR_TO_DEV		0
#define UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV	1
#define UNIPHIER_SD_DMA_MODE_WIDTH_MASK		GENMASK(5, 4)
#define UNIPHIER_SD_DMA_MODE_WIDTH_8		0
#define UNIPHIER_SD_DMA_MODE_WIDTH_16		1
#define UNIPHIER_SD_DMA_MODE_WIDTH_32		2
#define UNIPHIER_SD_DMA_MODE_WIDTH_64		3
#define UNIPHIER_SD_DMA_MODE_ADDR_INC		BIT(0)	// 1: inc, 0: fixed
#define UNIPHIER_SD_DMA_CTL			0x414
#define UNIPHIER_SD_DMA_CTL_START		BIT(0)	// start DMA (auto cleared)
#define UNIPHIER_SD_DMA_RST			0x418
#define UNIPHIER_SD_DMA_RST_CH1			BIT(9)
#define UNIPHIER_SD_DMA_RST_CH0			BIT(8)
#define UNIPHIER_SD_DMA_ADDR_L			0x440
#define UNIPHIER_SD_DMA_ADDR_H			0x444

/*
 * IP is extended to support various features: built-in DMA engine,
 * 1/1024 divisor, etc.
 */
#define UNIPHIER_SD_CAP_EXTENDED_IP		BIT(0)
/* RX channel of the built-in DMA controller is broken (Pro5) */
#define UNIPHIER_SD_CAP_BROKEN_DMA_RX		BIT(1)

struct uniphier_sd_priv {
	struct tmio_mmc_data tmio_data;
	struct pinctrl *pinctrl;
	struct pinctrl_state *pinstate_default;
	struct pinctrl_state *pinstate_uhs;
	struct clk *clk;
	struct reset_control *rst;
	struct reset_control *rst_br;
	struct reset_control *rst_hw;
	struct dma_chan *chan;
	enum dma_data_direction dma_dir;
	unsigned long clk_rate;
	unsigned long caps;
};

static void *uniphier_sd_priv(struct tmio_mmc_host *host)
{
	return container_of(host->pdata, struct uniphier_sd_priv, tmio_data);
}

static void uniphier_sd_dma_endisable(struct tmio_mmc_host *host, int enable)
{
	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? DMA_ENABLE_DMASDRW : 0);
}

/* external DMA engine */
static void uniphier_sd_external_dma_issue(unsigned long arg)
{
	struct tmio_mmc_host *host = (void *)arg;
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 1);
	dma_async_issue_pending(priv->chan);
}

static void uniphier_sd_external_dma_callback(void *param,
					const struct dmaengine_result *result)
{
	struct tmio_mmc_host *host = param;
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long flags;

	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);

	spin_lock_irqsave(&host->lock, flags);

	if (result->result == DMA_TRANS_NOERROR) {
		/*
		 * When the external DMA engine is enabled, strangely enough,
		 * the DATAEND flag can be asserted even if the DMA engine has
		 * not been kicked yet.  Enable the TMIO_STAT_DATAEND irq only
		 * after we make sure the DMA engine finishes the transfer,
		 * hence, in this callback.
		 */
		tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	} else {
		host->data->error = -ETIMEDOUT;
		tmio_mmc_do_data_irq(host);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}

static void uniphier_sd_external_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	enum dma_transfer_direction dma_tx_dir;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int sg_len;

	if (!priv->chan)
		goto force_pio;

	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_tx_dir = DMA_DEV_TO_MEM;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_tx_dir = DMA_MEM_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
			    priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	desc = dmaengine_prep_slave_sg(priv->chan, host->sg_ptr, sg_len,
				       dma_tx_dir, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_sg;

	desc->callback_result = uniphier_sd_external_dma_callback;
	desc->callback_param = host;

	cookie = dmaengine_submit(desc);
	if (cookie < 0)
		goto unmap_sg;

	host->dma_on = true;

	return;

unmap_sg:
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, host->sg_len,
		     priv->dma_dir);
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}

static void uniphier_sd_external_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}

static void uniphier_sd_external_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct dma_chan *chan;

	chan = dma_request_chan(mmc_dev(host->mmc), "rx-tx");
	if (IS_ERR(chan)) {
		dev_warn(mmc_dev(host->mmc),
			 "failed to request DMA channel. falling back to PIO\n");
		return;	/* just use PIO even for -EPROBE_DEFER */
	}

	/* this driver uses a single channel for both RX and TX */
	priv->chan = chan;
	host->chan_rx = chan;
	host->chan_tx = chan;

	tasklet_init(&host->dma_issue, uniphier_sd_external_dma_issue,
		     (unsigned long)host);
}

static void uniphier_sd_external_dma_release(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	if (priv->chan)
		dma_release_channel(priv->chan);
}

static void uniphier_sd_external_dma_abort(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 0);

	if (priv->chan)
		dmaengine_terminate_sync(priv->chan);
}

static void uniphier_sd_external_dma_dataend(struct tmio_mmc_host *host)
{
	uniphier_sd_dma_endisable(host, 0);

	tmio_mmc_do_data_irq(host);
}

static const struct tmio_mmc_dma_ops uniphier_sd_external_dma_ops = {
	.start = uniphier_sd_external_dma_start,
	.enable = uniphier_sd_external_dma_enable,
	.request = uniphier_sd_external_dma_request,
	.release = uniphier_sd_external_dma_release,
	.abort = uniphier_sd_external_dma_abort,
	.dataend = uniphier_sd_external_dma_dataend,
};
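
/* built-in DMA engine (available on UNIPHIER_SD_CAP_EXTENDED_IP variants) */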
static void uniphier_sd_internal_dma_issue(unsigned long arg)
{
	struct tmio_mmc_host *host = (void *)arg;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);
	spin_unlock_irqrestore(&host->lock, flags);

	uniphier_sd_dma_endisable(host, 1);
	writel(UNIPHIER_SD_DMA_CTL_START, host->ctl + UNIPHIER_SD_DMA_CTL);
}

static void uniphier_sd_internal_dma_start(struct tmio_mmc_host *host,
					   struct mmc_data *data)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct scatterlist *sg = host->sg_ptr;
	dma_addr_t dma_addr;
	unsigned int dma_mode_dir;
	u32 dma_mode;
	int sg_len;

	if ((data->flags & MMC_DATA_READ) && !host->chan_rx)
		goto force_pio;

	if (WARN_ON(host->sg_len != 1))
		goto force_pio;

	if (!IS_ALIGNED(sg->offset, 8))
		goto force_pio;

	if (data->flags & MMC_DATA_READ) {
		priv->dma_dir = DMA_FROM_DEVICE;
		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_FROM_DEV;
	} else {
		priv->dma_dir = DMA_TO_DEVICE;
		dma_mode_dir = UNIPHIER_SD_DMA_MODE_DIR_TO_DEV;
	}

	sg_len = dma_map_sg(mmc_dev(host->mmc), sg, 1, priv->dma_dir);
	if (sg_len == 0)
		goto force_pio;

	dma_mode = FIELD_PREP(UNIPHIER_SD_DMA_MODE_DIR_MASK, dma_mode_dir);
	dma_mode |= FIELD_PREP(UNIPHIER_SD_DMA_MODE_WIDTH_MASK,
			       UNIPHIER_SD_DMA_MODE_WIDTH_64);
	dma_mode |= UNIPHIER_SD_DMA_MODE_ADDR_INC;

	writel(dma_mode, host->ctl + UNIPHIER_SD_DMA_MODE);

	dma_addr = sg_dma_address(data->sg);
	writel(lower_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_L);
	writel(upper_32_bits(dma_addr), host->ctl + UNIPHIER_SD_DMA_ADDR_H);

	host->dma_on = true;

	return;
force_pio:
	uniphier_sd_dma_endisable(host, 0);
}

static void uniphier_sd_internal_dma_enable(struct tmio_mmc_host *host,
					    bool enable)
{
}

static void uniphier_sd_internal_dma_request(struct tmio_mmc_host *host,
					     struct tmio_mmc_data *pdata)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	/*
	 * Due to a hardware bug, Pro5 cannot use DMA for RX.
	 * We can still use DMA for TX, but PIO for RX.
	 */
	if (!(priv->caps & UNIPHIER_SD_CAP_BROKEN_DMA_RX))
		host->chan_rx = (void *)0xdeadbeaf;

	/* dummy non-NULL value; these pointers are only checked against NULL */
	host->chan_tx = (void *)0xdeadbeaf;

	tasklet_init(&host->dma_issue, uniphier_sd_internal_dma_issue,
		     (unsigned long)host);
}

static void uniphier_sd_internal_dma_release(struct tmio_mmc_host *host)
{
	/* Reset the channel pointers to NULL to mark DMA as disabled. */
	host->chan_rx = NULL;
	host->chan_tx = NULL;
}

static void uniphier_sd_internal_dma_abort(struct tmio_mmc_host *host)
{
	u32 tmp;

	uniphier_sd_dma_endisable(host, 0);

	/* pulse the DMA channel resets: clear the bits, then set them back */
	tmp = readl(host->ctl + UNIPHIER_SD_DMA_RST);
	tmp &= ~(UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0);
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);

	tmp |= UNIPHIER_SD_DMA_RST_CH1 | UNIPHIER_SD_DMA_RST_CH0;
	writel(tmp, host->ctl + UNIPHIER_SD_DMA_RST);
}

static void uniphier_sd_internal_dma_dataend(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	uniphier_sd_dma_endisable(host, 0);
	dma_unmap_sg(mmc_dev(host->mmc), host->sg_ptr, 1, priv->dma_dir);

	tmio_mmc_do_data_irq(host);
}

static const struct tmio_mmc_dma_ops uniphier_sd_internal_dma_ops = {
	.start = uniphier_sd_internal_dma_start,
	.enable = uniphier_sd_internal_dma_enable,
	.request = uniphier_sd_internal_dma_request,
	.release = uniphier_sd_internal_dma_release,
	.abort = uniphier_sd_internal_dma_abort,
	.dataend = uniphier_sd_internal_dma_dataend,
};
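
/*
 * Enable the host clock, run it at the highest rate the clock framework can
 * provide (clk_set_rate() with ULONG_MAX), derive f_max/f_min from the
 * resulting rate, and then release the resets.
 */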
static int uniphier_sd_clk_enable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct mmc_host *mmc = host->mmc;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	ret = clk_set_rate(priv->clk, ULONG_MAX);
	if (ret)
		goto disable_clk;

	priv->clk_rate = clk_get_rate(priv->clk);

	/* If the max-frequency property is set, keep it; otherwise use the clock rate. */
	if (!mmc->f_max)
		mmc->f_max = priv->clk_rate;

	/*
	 * 1/512 is the finest divisor in the original IP.  Newer versions
	 * also support the 1/1024 divisor (UniPhier-specific extension).
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		mmc->f_min = priv->clk_rate / 1024;
	else
		mmc->f_min = priv->clk_rate / 512;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto disable_clk;

	ret = reset_control_deassert(priv->rst_br);
	if (ret)
		goto assert_rst;

	return 0;

assert_rst:
	reset_control_assert(priv->rst);
disable_clk:
	clk_disable_unprepare(priv->clk);

	return ret;
}

static void uniphier_sd_clk_disable(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	reset_control_assert(priv->rst_br);
	reset_control_assert(priv->rst);
	clk_disable_unprepare(priv->clk);
}

static void uniphier_sd_hw_reset(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);

	reset_control_assert(priv->rst_hw);
	/* For eMMC, minimum is 1us but give it 9us for good measure */
	udelay(9);
	reset_control_deassert(priv->rst_hw);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}
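
/*
 * The CTL_* offsets are defined in 16-bit units; the sd_ctrl_*() accessors
 * scale them by host->bus_shift.  Plain readl()/writel() is used below, so
 * the shift (<< 1 on this IP, see probe) has to be applied by hand.
 */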
static void uniphier_sd_set_clock(struct tmio_mmc_host *host,
				  unsigned int clock)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	unsigned long divisor;
	u32 tmp;

	tmp = readl(host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	/* stop the clock before changing its rate to avoid glitches */
	tmp &= ~CLK_CTL_SCLKEN;
	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	if (clock == 0)
		return;

	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1024;
	tmp &= ~UNIPHIER_SD_CLK_CTL_DIV1;
	tmp &= ~CLK_CTL_DIV_MASK;

	divisor = priv->clk_rate / clock;

	/*
	 * In the original IP, bit[7:0] represents the divisor.
	 * bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2
	 *
	 * roundup_pow_of_two(divisor) >> 2 encodes the next power-of-two
	 * divisor, e.g. a divisor of 200 is rounded up to 256 and becomes
	 * BIT(6).
	 *
	 * The IP does not define a way to achieve 1/1.  For UniPhier variants,
	 * bit10 is used for 1/1.  Newer versions of UniPhier variants use
	 * bit16 for 1/1024.
	 */
	if (divisor <= 1)
		tmp |= UNIPHIER_SD_CLK_CTL_DIV1;
	else if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP && divisor > 512)
		tmp |= UNIPHIER_SD_CLK_CTL_DIV1024;
	else
		tmp |= roundup_pow_of_two(divisor) >> 2;

	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));

	tmp |= CLK_CTL_SCLKEN;
	writel(tmp, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}

static void uniphier_sd_host_init(struct tmio_mmc_host *host)
{
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	u32 val;

	/*
	 * Connected to 32bit AXI.
	 * This register holds settings for SoC-specific internal bus
	 * connection.  Worse, the register spec was changed, breaking
	 * backward compatibility.  Write an appropriate value depending
	 * on a flag associated with a compatible string.
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		val = 0x00000101;
	else
		val = 0x00000000;

	writel(val, host->ctl + UNIPHIER_SD_HOST_MODE);

	val = 0;
	/*
	 * If supported, the controller can automatically
	 * enable/disable the clock line to the card.
	 */
	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		val |= UNIPHIER_SD_CLKCTL_OFFEN;

	writel(val, host->ctl + (CTL_SD_CARD_CLK_CTL << 1));
}
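
/*
 * UHS signal voltage switch: program UNIPHIER_SD_VOLT and move the pads
 * between the "default" (3.3V) and "uhs" (1.8V) pinctrl states.
 */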
static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc,
						   struct mmc_ios *ios)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	struct uniphier_sd_priv *priv = uniphier_sd_priv(host);
	struct pinctrl_state *pinstate;
	u32 val, tmp;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		val = UNIPHIER_SD_VOLT_330;
		pinstate = priv->pinstate_default;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		val = UNIPHIER_SD_VOLT_180;
		pinstate = priv->pinstate_uhs;
		break;
	default:
		return -ENOTSUPP;
	}

	tmp = readl(host->ctl + UNIPHIER_SD_VOLT);
	tmp &= ~UNIPHIER_SD_VOLT_MASK;
	tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val);
	writel(tmp, host->ctl + UNIPHIER_SD_VOLT);

	pinctrl_select_state(priv->pinctrl, pinstate);

	return 0;
}

static int uniphier_sd_uhs_init(struct tmio_mmc_host *host,
				struct uniphier_sd_priv *priv)
{
	priv->pinctrl = devm_pinctrl_get(mmc_dev(host->mmc));
	if (IS_ERR(priv->pinctrl))
		return PTR_ERR(priv->pinctrl);

	priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl,
						      PINCTRL_STATE_DEFAULT);
	if (IS_ERR(priv->pinstate_default))
		return PTR_ERR(priv->pinstate_default);

	priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs");
	if (IS_ERR(priv->pinstate_uhs))
		return PTR_ERR(priv->pinstate_uhs);

	host->ops.start_signal_voltage_switch =
		uniphier_sd_start_signal_voltage_switch;

	return 0;
}

static int uniphier_sd_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct uniphier_sd_priv *priv;
	struct tmio_mmc_data *tmio_data;
	struct tmio_mmc_host *host;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "failed to get IRQ number\n");
		return irq;
	}

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->caps = (unsigned long)of_device_get_match_data(dev);

	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	priv->rst = devm_reset_control_get_shared(dev, "host");
	if (IS_ERR(priv->rst)) {
		dev_err(dev, "failed to get host reset\n");
		return PTR_ERR(priv->rst);
	}

	/* old version has one more reset */
	if (!(priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)) {
		priv->rst_br = devm_reset_control_get_shared(dev, "bridge");
		if (IS_ERR(priv->rst_br)) {
			dev_err(dev, "failed to get bridge reset\n");
			return PTR_ERR(priv->rst_br);
		}
	}

	tmio_data = &priv->tmio_data;
	tmio_data->flags |= TMIO_MMC_32BIT_DATA_PORT;

	host = tmio_mmc_host_alloc(pdev, tmio_data);
	if (IS_ERR(host))
		return PTR_ERR(host);

	if (host->mmc->caps & MMC_CAP_HW_RESET) {
		priv->rst_hw = devm_reset_control_get_exclusive(dev, "hw");
		if (IS_ERR(priv->rst_hw)) {
			dev_err(dev, "failed to get hw reset\n");
			ret = PTR_ERR(priv->rst_hw);
			goto free_host;
		}
		host->hw_reset = uniphier_sd_hw_reset;
	}

	if (host->mmc->caps & MMC_CAP_UHS) {
		ret = uniphier_sd_uhs_init(host, priv);
		if (ret) {
			dev_warn(dev,
				 "failed to setup UHS (error %d). Disabling UHS.\n",
				 ret);
			host->mmc->caps &= ~MMC_CAP_UHS;
		}
	}

	ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED,
			       dev_name(dev), host);
	if (ret)
		goto free_host;

	if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP)
		host->dma_ops = &uniphier_sd_internal_dma_ops;
	else
		host->dma_ops = &uniphier_sd_external_dma_ops;

	host->bus_shift = 1;	/* registers are spaced at twice the TMIO default */
	host->clk_enable = uniphier_sd_clk_enable;
	host->clk_disable = uniphier_sd_clk_disable;
	host->set_clock = uniphier_sd_set_clock;

	ret = uniphier_sd_clk_enable(host);
	if (ret)
		goto free_host;

	uniphier_sd_host_init(host);

	tmio_data->ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34;
	if (host->mmc->caps & MMC_CAP_UHS)
		tmio_data->ocr_mask |= MMC_VDD_165_195;

	tmio_data->max_segs = 1;
	tmio_data->max_blk_count = U16_MAX;

	ret = tmio_mmc_host_probe(host);
	if (ret)
		goto free_host;

	return 0;

free_host:
	tmio_mmc_host_free(host);

	return ret;
}

static int uniphier_sd_remove(struct platform_device *pdev)
{
	struct tmio_mmc_host *host = platform_get_drvdata(pdev);

	tmio_mmc_host_remove(host);
	uniphier_sd_clk_disable(host);

	return 0;
}

static const struct of_device_id uniphier_sd_match[] = {
	{
		.compatible = "socionext,uniphier-sd-v2.91",
	},
	{
		.compatible = "socionext,uniphier-sd-v3.1",
		.data = (void *)(UNIPHIER_SD_CAP_EXTENDED_IP |
				 UNIPHIER_SD_CAP_BROKEN_DMA_RX),
	},
	{
		.compatible = "socionext,uniphier-sd-v3.1.1",
		.data = (void *)UNIPHIER_SD_CAP_EXTENDED_IP,
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_sd_match);

static struct platform_driver uniphier_sd_driver = {
	.probe = uniphier_sd_probe,
	.remove = uniphier_sd_remove,
	.driver = {
		.name = "uniphier-sd",
		.of_match_table = uniphier_sd_match,
	},
};
module_platform_driver(uniphier_sd_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier SD/eMMC host controller driver");
MODULE_LICENSE("GPL v2");