// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * Author: Ludovic.barre@st.com for STMicroelectronics.
 */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/of_address.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include "mmci.h"

#define SDMMC_LLI_BUF_LEN	PAGE_SIZE

#define DLYB_CR			0x0
#define DLYB_CR_DEN		BIT(0)
#define DLYB_CR_SEN		BIT(1)

#define DLYB_CFGR		0x4
#define DLYB_CFGR_SEL_MASK	GENMASK(3, 0)
#define DLYB_CFGR_UNIT_MASK	GENMASK(14, 8)
#define DLYB_CFGR_LNG_MASK	GENMASK(27, 16)
#define DLYB_CFGR_LNGF		BIT(31)

#define DLYB_NB_DELAY		11
#define DLYB_CFGR_SEL_MAX	(DLYB_NB_DELAY + 1)
#define DLYB_CFGR_UNIT_MAX	127

#define DLYB_LNG_TIMEOUT_US	1000
#define SDMMC_VSWEND_TIMEOUT_US	10000

struct sdmmc_lli_desc {
	u32 idmalar;
	u32 idmabase;
	u32 idmasize;
};

struct sdmmc_idma {
	dma_addr_t sg_dma;
	void *sg_cpu;
	dma_addr_t bounce_dma_addr;
	void *bounce_buf;
	bool use_bounce_buffer;
};

struct sdmmc_dlyb;

struct sdmmc_tuning_ops {
	int (*dlyb_enable)(struct sdmmc_dlyb *dlyb);
	void (*set_input_ck)(struct sdmmc_dlyb *dlyb);
	int (*tuning_prepare)(struct mmci_host *host);
	int (*set_cfg)(struct sdmmc_dlyb *dlyb, int unit __maybe_unused,
		       int phase, bool sampler __maybe_unused);
};

struct sdmmc_dlyb {
	void __iomem *base;
	u32 unit;
	u32 max;
	struct sdmmc_tuning_ops *ops;
};

static int sdmmc_idma_validate_data(struct mmci_host *host,
				    struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct device *dev = mmc_dev(host->mmc);
	struct scatterlist *sg;
	int i;

	/*
	 * idma has constraints on idmabase & idmasize for each element,
	 * except the last element, which has no constraint on idmasize.
	 */
	idma->use_bounce_buffer = false;
	for_each_sg(data->sg, sg, data->sg_len - 1, i) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
		    !IS_ALIGNED(sg->length,
				host->variant->stm32_idmabsize_align)) {
			dev_dbg(mmc_dev(host->mmc),
				"unaligned scatterlist: ofst:%x length:%d\n",
				sg->offset, sg->length);
			goto use_bounce_buffer;
		}
	}

	if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
		dev_dbg(mmc_dev(host->mmc),
			"unaligned last scatterlist: ofst:%x length:%d\n",
			sg->offset, sg->length);
		goto use_bounce_buffer;
	}

	return 0;

use_bounce_buffer:
	if (!idma->bounce_buf) {
		idma->bounce_buf = dmam_alloc_coherent(dev,
						       host->mmc->max_req_size,
						       &idma->bounce_dma_addr,
						       GFP_KERNEL);
		if (!idma->bounce_buf) {
			dev_err(dev, "Unable to allocate DMA bounce buffer.\n");
			return -ENOMEM;
		}
	}

	idma->use_bounce_buffer = true;

	return 0;
}
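/*
 * Bounce-buffer data path used by the helpers below: when the
 * scatterlist fails the alignment checks above, writes are copied into
 * the bounce buffer before the transfer starts and reads are copied
 * back once it completes; aligned scatterlists are DMA-mapped directly.
 * For example, a segment starting at a 2-byte offset fails
 * IS_ALIGNED(sg->offset, sizeof(u32)) and triggers the fallback.
 */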
static int _sdmmc_idma_prep_data(struct mmci_host *host,
				 struct mmc_data *data)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_WRITE) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_to_buffer(data->sg, data->sg_len,
					  idma->bounce_buf, xfer_bytes);
			dma_wmb();
		}
	} else {
		int n_elem;

		n_elem = dma_map_sg(mmc_dev(host->mmc),
				    data->sg,
				    data->sg_len,
				    mmc_get_dma_dir(data));

		if (!n_elem) {
			dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int sdmmc_idma_prep_data(struct mmci_host *host,
				struct mmc_data *data, bool next)
{
	/* Check if the job is already prepared. */
	if (!next && data->host_cookie == host->next_cookie)
		return 0;

	return _sdmmc_idma_prep_data(host, data);
}

static void sdmmc_idma_unprep_data(struct mmci_host *host,
				   struct mmc_data *data, int err)
{
	struct sdmmc_idma *idma = host->dma_priv;

	if (idma->use_bounce_buffer) {
		if (data->flags & MMC_DATA_READ) {
			unsigned int xfer_bytes = data->blksz * data->blocks;

			sg_copy_from_buffer(data->sg, data->sg_len,
					    idma->bounce_buf, xfer_bytes);
		}
	} else {
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
	}
}

static int sdmmc_idma_setup(struct mmci_host *host)
{
	struct sdmmc_idma *idma;
	struct device *dev = mmc_dev(host->mmc);

	idma = devm_kzalloc(dev, sizeof(*idma), GFP_KERNEL);
	if (!idma)
		return -ENOMEM;

	host->dma_priv = idma;

	if (host->variant->dma_lli) {
		idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
						   &idma->sg_dma, GFP_KERNEL);
		if (!idma->sg_cpu) {
			dev_err(dev, "Failed to alloc IDMA descriptor\n");
			return -ENOMEM;
		}
		host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
			sizeof(struct sdmmc_lli_desc);
		host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;

		host->mmc->max_req_size = SZ_1M;
	} else {
		host->mmc->max_segs = 1;
		host->mmc->max_seg_size = host->mmc->max_req_size;
	}

	return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
}

static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
{
	struct sdmmc_idma *idma = host->dma_priv;
	struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
	struct mmc_data *data = host->data;
	struct scatterlist *sg;
	int i;

	if (!host->variant->dma_lli || data->sg_len == 1 ||
	    idma->use_bounce_buffer) {
		u32 dma_addr;

		if (idma->use_bounce_buffer)
			dma_addr = idma->bounce_dma_addr;
		else
			dma_addr = sg_dma_address(data->sg);

		writel_relaxed(dma_addr,
			       host->base + MMCI_STM32_IDMABASE0R);
		writel_relaxed(MMCI_STM32_IDMAEN,
			       host->base + MMCI_STM32_IDMACTRLR);
		return 0;
	}

	for_each_sg(data->sg, sg, data->sg_len, i) {
		desc[i].idmalar = (i + 1) * sizeof(struct sdmmc_lli_desc);
		desc[i].idmalar |= MMCI_STM32_ULA | MMCI_STM32_ULS
			| MMCI_STM32_ABR;
		desc[i].idmabase = sg_dma_address(sg);
		desc[i].idmasize = sg_dma_len(sg);
	}

	/* Mark the end of the linked list */
	desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;

	dma_wmb();
	writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
	writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
	writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
	writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
	writel_relaxed(MMCI_STM32_IDMAEN | MMCI_STM32_IDMALLIEN,
		       host->base + MMCI_STM32_IDMACTRLR);

	return 0;
}

static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data)
{
	writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);

	if (!data->host_cookie)
		sdmmc_idma_unprep_data(host, data, 0);
}
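/*
 * Worked example for the divider computed below: with mclk = 96 MHz
 * (illustrative only, the real value depends on the clock tree) and a
 * desired rate of 400 kHz, clkdiv = DIV_ROUND_UP(96000000, 2 * 400000)
 * = 120, hence cclk = 96000000 / (2 * 120) = 400 kHz.
 */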
static void mmci_sdmmc_set_clkreg(struct mmci_host *host,
				  unsigned int desired)
{
	unsigned int clk = 0, ddr = 0;

	if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
	    host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
		ddr = MCI_STM32_CLK_DDR;

	/*
	 * cclk = mclk / (2 * clkdiv)
	 * clkdiv 0 => bypass
	 * In DDR mode bypass is not possible.
	 */
	if (desired) {
		if (desired >= host->mclk && !ddr) {
			host->cclk = host->mclk;
		} else {
			clk = DIV_ROUND_UP(host->mclk, 2 * desired);
			if (clk > MCI_STM32_CLK_CLKDIV_MSK)
				clk = MCI_STM32_CLK_CLKDIV_MSK;
			host->cclk = host->mclk / (2 * clk);
		}
	} else {
		/*
		 * During the power-on phase the clock can't be set to 0:
		 * only power-off and power-cycle deactivate the clock.
		 * If the desired clock is 0, set the maximum divider.
		 */
		clk = MCI_STM32_CLK_CLKDIV_MSK;
		host->cclk = host->mclk / (2 * clk);
	}

	/* Set the actual clock for debug */
	if (host->mmc->ios.power_mode == MMC_POWER_ON)
		host->mmc->actual_clock = host->cclk;
	else
		host->mmc->actual_clock = 0;

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_STM32_CLK_WIDEBUS_4;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_STM32_CLK_WIDEBUS_8;

	clk |= MCI_STM32_CLK_HWFCEN;
	clk |= host->clk_reg_add;
	clk |= ddr;

	if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
		clk |= MCI_STM32_CLK_BUSSPEED;

	mmci_write_clkreg(host, clk);
}

static void sdmmc_dlyb_mp15_input_ck(struct sdmmc_dlyb *dlyb)
{
	if (!dlyb || !dlyb->base)
		return;

	/* Output clock = input clock */
	writel_relaxed(0, dlyb->base + DLYB_CR);
}
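/*
 * Power sequencing implemented below: a reset pulse is the only way to
 * power the controller off; the power-cycle state then drives the SDMMC
 * lines low. Powering back on goes power-off (lines driven high) then
 * power-on, restoring the default interrupt mask on the way.
 */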
static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
{
	struct mmc_ios ios = host->mmc->ios;
	struct sdmmc_dlyb *dlyb = host->variant_priv;

	/* Add device-tree (OF) options */
	pwr = host->pwr_reg_add;

	if (dlyb && dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	if (ios.power_mode == MMC_POWER_OFF) {
		/* Only a reset can power-off the sdmmc */
		reset_control_assert(host->rst);
		udelay(2);
		reset_control_deassert(host->rst);

		/*
		 * Set the SDMMC in the power-cycle state.
		 * This ensures that SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK
		 * are driven low, to prevent the card from being supplied
		 * through the signal lines.
		 */
		mmci_write_pwrreg(host, MCI_STM32_PWR_CYC | pwr);
	} else if (ios.power_mode == MMC_POWER_ON) {
		/*
		 * After power-off (reset) the irq mask set up in the probe
		 * function is lost, so the default irq mask must be
		 * restored.
		 */
		writel(MCI_IRQENABLE | host->variant->start_err,
		       host->base + MMCIMASK0);

		/* Preserve the voltage switch bits */
		pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
					MCI_STM32_VSWITCH);

		/*
		 * After a power-cycle state, we must set the SDMMC in
		 * power-off: SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are
		 * driven high. Then we can move the SDMMC to the power-on
		 * state.
		 */
		mmci_write_pwrreg(host, MCI_PWR_OFF | pwr);
		mdelay(1);
		mmci_write_pwrreg(host, MCI_PWR_ON | pwr);
	}
}

static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
{
	u32 datactrl;

	datactrl = mmci_dctrl_blksz(host);

	if (host->hw_revision >= 3) {
		u32 thr = 0;

		if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
		    host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
			thr = ffs(min_t(unsigned int, host->data->blksz,
					host->variant->fifosize));
			thr = min_t(u32, thr, MMCI_STM32_THR_MASK);
		}

		writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
	}

	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
	    host->data->blocks == 1)
		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
	else if (host->data->stop && !host->mrq->sbc)
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
	else
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;

	return datactrl;
}

static bool sdmmc_busy_complete(struct mmci_host *host, u32 status, u32 err_msk)
{
	void __iomem *base = host->base;
	u32 busy_d0, busy_d0end, mask, sdmmc_status;

	mask = readl_relaxed(base + MMCIMASK0);
	sdmmc_status = readl_relaxed(base + MMCISTATUS);
	busy_d0end = sdmmc_status & MCI_STM32_BUSYD0END;
	busy_d0 = sdmmc_status & MCI_STM32_BUSYD0;

	/* Complete if there is an error or busy_d0end is set */
	if ((status & err_msk) || busy_d0end)
		goto complete;

	/*
	 * During the response, busy signaling is reflected in the BUSYD0
	 * flag. If busy_d0 is in progress we must enable the busyd0end
	 * interrupt and wait for its completion. Otherwise this request
	 * has no busy step.
	 */
	if (busy_d0) {
		if (!host->busy_status) {
			writel_relaxed(mask | host->variant->busy_detect_mask,
				       base + MMCIMASK0);
			host->busy_status = status &
				(MCI_CMDSENT | MCI_CMDRESPEND);
		}
		return false;
	}

complete:
	if (host->busy_status) {
		writel_relaxed(mask & ~host->variant->busy_detect_mask,
			       base + MMCIMASK0);
		host->busy_status = 0;
	}

	writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);

	return true;
}

static int sdmmc_dlyb_mp15_enable(struct sdmmc_dlyb *dlyb)
{
	writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}

static int sdmmc_dlyb_mp15_set_cfg(struct sdmmc_dlyb *dlyb,
				   int unit, int phase, bool sampler)
{
	u32 cfgr;

	writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);

	cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
	       FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
	writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);

	if (!sampler)
		writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);

	return 0;
}
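/*
 * Delay-line calibration performed by the MP15 tuning_prepare below:
 * sweep the UNIT field from 0 to DLYB_CFGR_UNIT_MAX with the sampler
 * enabled and poll for LNGF; keep the first unit whose LNG taps have a
 * bit set while the top tap (bit DLYB_NB_DELAY) stays clear. __fls(lng)
 * is then the highest usable tap and bounds the later phase sweep.
 */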
static int sdmmc_dlyb_mp15_prepare(struct mmci_host *host)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 cfgr;
	int i, lng, ret;

	for (i = 0; i <= DLYB_CFGR_UNIT_MAX; i++) {
		dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);

		ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
						 (cfgr & DLYB_CFGR_LNGF),
						 1, DLYB_LNG_TIMEOUT_US);
		if (ret) {
			dev_warn(mmc_dev(host->mmc),
				 "delay line cfg timeout unit:%d cfgr:%d\n",
				 i, cfgr);
			continue;
		}

		lng = FIELD_GET(DLYB_CFGR_LNG_MASK, cfgr);
		if (lng < BIT(DLYB_NB_DELAY) && lng > 0)
			break;
	}

	if (i > DLYB_CFGR_UNIT_MAX)
		return -EINVAL;

	dlyb->unit = i;
	dlyb->max = __fls(lng);

	return 0;
}

static int sdmmc_dlyb_phase_tuning(struct mmci_host *host, u32 opcode)
{
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	int cur_len = 0, max_len = 0, end_of_len = 0;
	int phase, ret;

	for (phase = 0; phase <= dlyb->max; phase++) {
		ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
		if (ret) {
			dev_err(mmc_dev(host->mmc), "tuning config failed\n");
			return ret;
		}

		if (mmc_send_tuning(host->mmc, opcode, NULL)) {
			cur_len = 0;
		} else {
			cur_len++;
			if (cur_len > max_len) {
				max_len = cur_len;
				end_of_len = phase;
			}
		}
	}

	if (!max_len) {
		dev_err(mmc_dev(host->mmc), "no tuning point found\n");
		return -EINVAL;
	}

	if (dlyb->ops->set_input_ck)
		dlyb->ops->set_input_ck(dlyb);

	/* Set the phase to the middle of the longest working window */
	phase = end_of_len - max_len / 2;
	ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
	if (ret) {
		dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
		return ret;
	}

	dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
		dlyb->unit, dlyb->max, phase);

	return 0;
}

static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct sdmmc_dlyb *dlyb = host->variant_priv;
	u32 clk;
	int ret;

	if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
	     host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
	    host->mmc->actual_clock <= 50000000)
		return 0;

	if (!dlyb || !dlyb->base)
		return -EINVAL;

	ret = dlyb->ops->dlyb_enable(dlyb);
	if (ret)
		return ret;

	/*
	 * SDMMC_FBCK is selected when an external delay block is needed
	 * with SDR104 or HS200.
	 */
	clk = host->clk_reg;
	clk &= ~MCI_STM32_CLK_SEL_MSK;
	clk |= MCI_STM32_CLK_SELFBCK;
	mmci_write_clkreg(host, clk);

	ret = dlyb->ops->tuning_prepare(host);
	if (ret)
		return ret;

	return sdmmc_dlyb_phase_tuning(host, opcode);
}

static void sdmmc_pre_sig_volt_vswitch(struct mmci_host *host)
{
	/* Clear the voltage switch completion flag */
	writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);
	/* Enable the voltage switch procedure */
	mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
}

static int sdmmc_post_sig_volt_switch(struct mmci_host *host,
				      struct mmc_ios *ios)
{
	unsigned long flags;
	u32 status;
	int ret = 0;

	spin_lock_irqsave(&host->lock, flags);
	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
	    host->pwr_reg & MCI_STM32_VSWITCHEN) {
		mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
		spin_unlock_irqrestore(&host->lock, flags);

		/* Wait up to 10 ms for voltage switch completion */
		ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
						 status,
						 (status & MCI_STM32_VSWEND),
						 10, SDMMC_VSWEND_TIMEOUT_US);

		writel_relaxed(MCI_STM32_VSWENDC | MCI_STM32_CKSTOPC,
			       host->base + MMCICLEAR);
		spin_lock_irqsave(&host->lock, flags);
		mmci_write_pwrreg(host, host->pwr_reg &
				  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
	}
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}
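/*
 * The tables below wire the mmci core callbacks to the SDMMC routines
 * above; dlyb_tuning_mp15_ops is the STM32MP15 flavour of the delay
 * block operations consumed by sdmmc_execute_tuning().
 */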
static struct mmci_host_ops sdmmc_variant_ops = {
	.validate_data = sdmmc_idma_validate_data,
	.prep_data = sdmmc_idma_prep_data,
	.unprep_data = sdmmc_idma_unprep_data,
	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
	.dma_setup = sdmmc_idma_setup,
	.dma_start = sdmmc_idma_start,
	.dma_finalize = sdmmc_idma_finalize,
	.set_clkreg = mmci_sdmmc_set_clkreg,
	.set_pwrreg = mmci_sdmmc_set_pwrreg,
	.busy_complete = sdmmc_busy_complete,
	.pre_sig_volt_switch = sdmmc_pre_sig_volt_vswitch,
	.post_sig_volt_switch = sdmmc_post_sig_volt_switch,
};

static struct sdmmc_tuning_ops dlyb_tuning_mp15_ops = {
	.dlyb_enable = sdmmc_dlyb_mp15_enable,
	.set_input_ck = sdmmc_dlyb_mp15_input_ck,
	.tuning_prepare = sdmmc_dlyb_mp15_prepare,
	.set_cfg = sdmmc_dlyb_mp15_set_cfg,
};

void sdmmc_variant_init(struct mmci_host *host)
{
	struct device_node *np = host->mmc->parent->of_node;
	void __iomem *base_dlyb;
	struct sdmmc_dlyb *dlyb;

	host->ops = &sdmmc_variant_ops;
	host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);

	base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
	if (IS_ERR(base_dlyb))
		return;

	dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
	if (!dlyb)
		return;

	dlyb->base = base_dlyb;
	dlyb->ops = &dlyb_tuning_mp15_ops;
	host->variant_priv = dlyb;
	host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
}