/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>

#define DRIVER_NAME "meson-gx-mmc"
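/*
 * Register offsets below are relative to the controller base address
 * obtained from the platform resource in meson_mmc_probe().
 */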
#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_MASK GENMASK(5, 0)
#define CLK_DIV_MAX 63
#define CLK_SRC_MASK GENMASK(7, 6)
#define CLK_SRC_XTAL 0 /* external crystal */
#define CLK_SRC_XTAL_RATE 24000000
#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define CLK_SRC_PLL_RATE 1000000000
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
#define CLK_PHASE_0 0
#define CLK_PHASE_90 1
#define CLK_PHASE_180 2
#define CLK_PHASE_270 3
#define CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_MASK GENMASK(7, 4)
#define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define CFG_RC_CC_MASK GENMASK(15, 12)
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_CHK_DS BIT(20)
#define CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_EN_MASK GENMASK(13, 0)
#define IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

struct meson_tuning_params {
	u8 core_phase;
	u8 tx_phase;
	u8 rx_phase;
};
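/*
 * A DMA descriptor mirrors the SD_EMMC_CMD_{CFG,ARG,DAT,RSP} register
 * quartet; the CMD_CFG_* bit definitions below apply both to the
 * register and to the cmd_cfg descriptor word.
 */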
struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
	unsigned long current_clock;

	struct clk_divider cfg_div;
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	struct meson_tuning_params tp;
	bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}
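/*
 * Commands are chained from the IRQ path: CMD23 (SET_BLOCK_COUNT) is
 * followed by the actual data command, and an open-ended multi-block
 * transfer (or any failed one without CMD23) is followed by STOP.
 * Returns the next command to issue, or NULL when the request is done.
 */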
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}

static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;
	bool use_desc_chain_mode = true;

	for_each_sg(data->sg, sg, data->sg_len, i)
		/* check for 8 byte alignment */
		if (sg->offset & 7) {
			WARN_ONCE(1, "unaligned scatterlist buffer\n");
			use_desc_chain_mode = false;
			break;
		}

	if (use_desc_chain_mode)
		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && data->flags & MMC_DATA_READ &&
	       !meson_mmc_desc_chain_mode(data);
}

static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}
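/*
 * Set the MMC clock rate via the divider clock created in
 * meson_mmc_clk_init(). The clock is gated (CFG_STOP_CLOCK) around
 * the rate change and is left stopped when a rate of 0 is requested.
 */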
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	if (clk_rate) {
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	if (clk_rate == host->current_clock)
		return 0;

	/* stop clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	dev_dbg(host->dev, "change clock rate %u -> %lu\n",
		mmc->actual_clock, clk_rate);

	if (!clk_rate) {
		mmc->actual_clock = 0;
		host->current_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			clk_rate, ret);
		return ret;
	}

	mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
	host->current_clock = clk_rate;

	if (clk_rate != mmc->actual_clock)
		dev_dbg(host->dev,
			"divider requested rate %lu != actual rate %u\n",
			clk_rate, mmc->actual_clock);

	/* (re)start clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	return 0;
}

/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_div_parents[1];
	u32 clk_reg, cfg;

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;
	host->mux.reg = host->regs + SD_EMMC_CLOCK;
	host->mux.shift = __bf_shf(CLK_SRC_MASK);
	host->mux.mask = CLK_SRC_MASK;
	host->mux.flags = 0;
	host->mux.table = NULL;
	host->mux.hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_div_parents;
	init.num_parents = ARRAY_SIZE(clk_div_parents);

	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
	host->cfg_div.shift = __bf_shf(CLK_DIV_MASK);
	host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK);
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;

	host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
	if (WARN_ON(IS_ERR(host->cfg_div_clk)))
		return PTR_ERR(host->cfg_div_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	clk_reg |= FIELD_PREP(CLK_SRC_MASK, CLK_SRC_XTAL);
	clk_reg |= FIELD_PREP(CLK_DIV_MASK, CLK_DIV_MAX);
	clk_reg &= ~CLK_ALWAYS_ON;
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* Ensure clock starts in "auto" mode, not "always on" */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_CLK_ALWAYS_ON;
	cfg |= CFG_AUTO_CLK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	ret = clk_prepare_enable(host->cfg_div_clk);
	if (ret)
		return ret;

	/* Get the nearest minimum clock to 400KHz */
	host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);

	ret = meson_mmc_clk_set(host, host->mmc->f_min);
	if (ret)
		clk_disable_unprepare(host->cfg_div_clk);

	return ret;
}
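/*
 * Apply the current core/TX/RX phase settings. The clock is stopped
 * while the phase fields of SD_EMMC_CLOCK are rewritten, then
 * restarted.
 */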
static void meson_mmc_set_tuning_params(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	/* stop clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval |= CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);

	regval = readl(host->regs + SD_EMMC_CLOCK);
	regval &= ~CLK_CORE_PHASE_MASK;
	regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	regval &= ~CLK_TX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	regval &= ~CLK_RX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	writel(regval, host->regs + SD_EMMC_CLOCK);

	/* start clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval &= ~CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width;
	u32 val, orig;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(mmc_dev(mmc),
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	meson_mmc_clk_set(host, ios->clock);

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	orig = val;

	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);

	val &= ~CFG_DDR;
	if (ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_DDR;

	val &= ~CFG_CHK_DS;
	if (ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_CHK_DS;

	if (val != orig) {
		writel(val, host->regs + SD_EMMC_CFG);
		dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
			__func__, orig, val);
	}
}

static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}
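/* CFG_BLK_LEN holds log2(block size); only rewrite it on a change */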
static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}
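/*
 * Issue a command, with data if present. Aligned scatterlists go out
 * via the descriptor chain; anything else is staged through the bounce
 * buffer and programmed as a single descriptor directly into the
 * CMD_CFG/CMD_DAT/CMD_RSP registers, where the final write to CMD_ARG
 * kicks off the transfer.
 */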
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}
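/*
 * Hard IRQ handler: decode and ack the enabled status bits and record
 * any error on the current command. Bounce-buffer reads and command
 * chaining are deferred to the threaded handler.
 */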
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	cmd = host->cmd;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;

	spin_lock(&host->lock);
	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
	} else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%08x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, cmd->mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	spin_unlock(&host->lock);
	return ret;
}

static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}

static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	struct meson_tuning_params tp_old = host->tp;
	int ret = -EINVAL, i, cmd_error;

	dev_info(mmc_dev(mmc), "(re)tuning...\n");

	for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) {
		host->tp.rx_phase = i;
		/* exclude the active parameter set if retuning */
		if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) &&
		    mmc->doing_retune)
			continue;
		meson_mmc_set_tuning_params(mmc);
		ret = mmc_send_tuning(mmc, opcode, &cmd_error);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	writel(cfg, host->regs + SD_EMMC_CFG);
}

static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= meson_mmc_get_cd,
	.pre_req	= meson_mmc_pre_req,
	.post_req	= meson_mmc_post_req,
	.execute_tuning	= meson_mmc_execute_tuning,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret, irq;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	host->tp.core_phase = CLK_PHASE_180;
	host->tp.tx_phase = CLK_PHASE_0;
	host->tp.rx_phase = CLK_PHASE_0;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack, enable all interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
					meson_mmc_irq_thread, IRQF_SHARED,
					NULL, host);
	if (ret)
		goto err_div_clk;

	mmc->caps |= MMC_CAP_CMD23;
	mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
	mmc->max_seg_size = mmc->max_req_size;

	/* data bounce buffer */
	host->bounce_buf_size = mmc->max_req_size;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto err_div_clk;
	}
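	/*
	 * Descriptor buffer: room for max_segs 16-byte hardware
	 * descriptors (matches mmc->max_segs set above).
	 */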
	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
					 &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_bounce_buf;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

err_bounce_buf:
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);
err_div_clk:
	clk_disable_unprepare(host->cfg_div_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->cfg_div_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", },
	{ .compatible = "amlogic,meson-gxbb-mmc", },
	{ .compatible = "amlogic,meson-gxl-mmc", },
	{ .compatible = "amlogic,meson-gxm-mmc", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove		= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");