1 /* 2 * Amlogic SD/eMMC driver for the GX/S905 family SoCs 3 * 4 * Copyright (c) 2016 BayLibre, SAS. 5 * Author: Kevin Hilman <khilman@baylibre.com> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of version 2 of the GNU General Public License as 9 * published by the Free Software Foundation. 10 * 11 * This program is distributed in the hope that it will be useful, but 12 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, see <http://www.gnu.org/licenses/>. 18 * The full GNU General Public License is included in this distribution 19 * in the file called COPYING. 20 */ 21 #include <linux/kernel.h> 22 #include <linux/module.h> 23 #include <linux/init.h> 24 #include <linux/device.h> 25 #include <linux/of_device.h> 26 #include <linux/platform_device.h> 27 #include <linux/ioport.h> 28 #include <linux/spinlock.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/mmc/host.h> 31 #include <linux/mmc/mmc.h> 32 #include <linux/mmc/sdio.h> 33 #include <linux/mmc/slot-gpio.h> 34 #include <linux/io.h> 35 #include <linux/clk.h> 36 #include <linux/clk-provider.h> 37 #include <linux/regulator/consumer.h> 38 #include <linux/interrupt.h> 39 #include <linux/bitfield.h> 40 41 #define DRIVER_NAME "meson-gx-mmc" 42 43 #define SD_EMMC_CLOCK 0x0 44 #define CLK_DIV_MASK GENMASK(5, 0) 45 #define CLK_DIV_MAX 63 46 #define CLK_SRC_MASK GENMASK(7, 6) 47 #define CLK_SRC_XTAL 0 /* external crystal */ 48 #define CLK_SRC_PLL 1 /* FCLK_DIV2 */ 49 #define CLK_CORE_PHASE_MASK GENMASK(9, 8) 50 #define CLK_TX_PHASE_MASK GENMASK(11, 10) 51 #define CLK_RX_PHASE_MASK GENMASK(13, 12) 52 #define CLK_PHASE_0 0 53 #define CLK_PHASE_90 1 54 #define CLK_PHASE_180 2 55 #define CLK_PHASE_270 3 56 #define CLK_ALWAYS_ON BIT(24) 57 58 #define SD_EMMC_DELAY 0x4 59 #define SD_EMMC_ADJUST 0x8 60 #define SD_EMMC_CALOUT 0x10 61 #define SD_EMMC_START 0x40 62 #define START_DESC_INIT BIT(0) 63 #define START_DESC_BUSY BIT(1) 64 #define START_DESC_ADDR_MASK GENMASK(31, 2) 65 66 #define SD_EMMC_CFG 0x44 67 #define CFG_BUS_WIDTH_MASK GENMASK(1, 0) 68 #define CFG_BUS_WIDTH_1 0x0 69 #define CFG_BUS_WIDTH_4 0x1 70 #define CFG_BUS_WIDTH_8 0x2 71 #define CFG_DDR BIT(2) 72 #define CFG_BLK_LEN_MASK GENMASK(7, 4) 73 #define CFG_RESP_TIMEOUT_MASK GENMASK(11, 8) 74 #define CFG_RC_CC_MASK GENMASK(15, 12) 75 #define CFG_STOP_CLOCK BIT(22) 76 #define CFG_CLK_ALWAYS_ON BIT(18) 77 #define CFG_CHK_DS BIT(20) 78 #define CFG_AUTO_CLK BIT(23) 79 80 #define SD_EMMC_STATUS 0x48 81 #define STATUS_BUSY BIT(31) 82 83 #define SD_EMMC_IRQ_EN 0x4c 84 #define IRQ_EN_MASK GENMASK(13, 0) 85 #define IRQ_RXD_ERR_MASK GENMASK(7, 0) 86 #define IRQ_TXD_ERR BIT(8) 87 #define IRQ_DESC_ERR BIT(9) 88 #define IRQ_RESP_ERR BIT(10) 89 #define IRQ_RESP_TIMEOUT BIT(11) 90 #define IRQ_DESC_TIMEOUT BIT(12) 91 #define IRQ_END_OF_CHAIN BIT(13) 92 #define IRQ_RESP_STATUS BIT(14) 93 #define IRQ_SDIO BIT(15) 94 95 #define SD_EMMC_CMD_CFG 0x50 96 #define SD_EMMC_CMD_ARG 0x54 97 #define SD_EMMC_CMD_DAT 0x58 98 #define SD_EMMC_CMD_RSP 0x5c 99 #define SD_EMMC_CMD_RSP1 0x60 100 #define SD_EMMC_CMD_RSP2 0x64 101 #define SD_EMMC_CMD_RSP3 0x68 102 103 #define SD_EMMC_RXD 0x94 104 #define SD_EMMC_TXD 0x94 105 #define SD_EMMC_LAST_REG SD_EMMC_TXD 106 107 #define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */ 108 #define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */ 109 #define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */ 110 #define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */ 111 #define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */ 112 #define SD_EMMC_DESC_BUF_LEN PAGE_SIZE 113 114 #define SD_EMMC_PRE_REQ_DONE BIT(0) 115 #define SD_EMMC_DESC_CHAIN_MODE BIT(1) 116 117 #define MUX_CLK_NUM_PARENTS 2 118 119 struct meson_tuning_params { 120 u8 core_phase; 121 u8 tx_phase; 122 u8 rx_phase; 123 }; 124 125 struct sd_emmc_desc { 126 u32 cmd_cfg; 127 u32 cmd_arg; 128 u32 cmd_data; 129 u32 cmd_resp; 130 }; 131 132 struct meson_host { 133 struct device *dev; 134 struct mmc_host *mmc; 135 struct mmc_command *cmd; 136 137 spinlock_t lock; 138 void __iomem *regs; 139 struct clk *core_clk; 140 struct clk_mux mux; 141 struct clk *mux_clk; 142 unsigned long req_rate; 143 144 struct clk_divider cfg_div; 145 struct clk *cfg_div_clk; 146 147 unsigned int bounce_buf_size; 148 void *bounce_buf; 149 dma_addr_t bounce_dma_addr; 150 struct sd_emmc_desc *descs; 151 dma_addr_t descs_dma_addr; 152 153 struct meson_tuning_params tp; 154 bool vqmmc_enabled; 155 }; 156 157 #define CMD_CFG_LENGTH_MASK GENMASK(8, 0) 158 #define CMD_CFG_BLOCK_MODE BIT(9) 159 #define CMD_CFG_R1B BIT(10) 160 #define CMD_CFG_END_OF_CHAIN BIT(11) 161 #define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12) 162 #define CMD_CFG_NO_RESP BIT(16) 163 #define CMD_CFG_NO_CMD BIT(17) 164 #define CMD_CFG_DATA_IO BIT(18) 165 #define CMD_CFG_DATA_WR BIT(19) 166 #define CMD_CFG_RESP_NOCRC BIT(20) 167 #define CMD_CFG_RESP_128 BIT(21) 168 #define CMD_CFG_RESP_NUM BIT(22) 169 #define CMD_CFG_DATA_NUM BIT(23) 170 #define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24) 171 #define CMD_CFG_ERROR BIT(30) 172 #define CMD_CFG_OWNER BIT(31) 173 174 #define CMD_DATA_MASK GENMASK(31, 2) 175 #define CMD_DATA_BIG_ENDIAN BIT(1) 176 #define CMD_DATA_SRAM BIT(0) 177 #define CMD_RESP_MASK GENMASK(31, 1) 178 #define CMD_RESP_SRAM BIT(0) 179 180 static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data) 181 { 182 unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC; 183 184 if (!timeout) 185 return SD_EMMC_CMD_TIMEOUT_DATA; 186 187 timeout = roundup_pow_of_two(timeout); 188 189 return min(timeout, 32768U); /* max. 2^15 ms */ 190 } 191 192 static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd) 193 { 194 if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) 195 return cmd->mrq->cmd; 196 else if (mmc_op_multi(cmd->opcode) && 197 (!cmd->mrq->sbc || cmd->error || cmd->data->error)) 198 return cmd->mrq->stop; 199 else 200 return NULL; 201 } 202 203 static void meson_mmc_get_transfer_mode(struct mmc_host *mmc, 204 struct mmc_request *mrq) 205 { 206 struct mmc_data *data = mrq->data; 207 struct scatterlist *sg; 208 int i; 209 bool use_desc_chain_mode = true; 210 211 /* 212 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been 213 * reported. For some strange reason this occurs in descriptor 214 * chain mode only. So let's fall back to bounce buffer mode 215 * for command SD_IO_RW_EXTENDED. 216 */ 217 if (mrq->cmd->opcode == SD_IO_RW_EXTENDED) 218 return; 219 220 for_each_sg(data->sg, sg, data->sg_len, i) 221 /* check for 8 byte alignment */ 222 if (sg->offset & 7) { 223 WARN_ONCE(1, "unaligned scatterlist buffer\n"); 224 use_desc_chain_mode = false; 225 break; 226 } 227 228 if (use_desc_chain_mode) 229 data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE; 230 } 231 232 static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data) 233 { 234 return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE; 235 } 236 237 static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data) 238 { 239 return data && data->flags & MMC_DATA_READ && 240 !meson_mmc_desc_chain_mode(data); 241 } 242 243 static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq) 244 { 245 struct mmc_data *data = mrq->data; 246 247 if (!data) 248 return; 249 250 meson_mmc_get_transfer_mode(mmc, mrq); 251 data->host_cookie |= SD_EMMC_PRE_REQ_DONE; 252 253 if (!meson_mmc_desc_chain_mode(data)) 254 return; 255 256 data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, 257 mmc_get_dma_dir(data)); 258 if (!data->sg_count) 259 dev_err(mmc_dev(mmc), "dma_map_sg failed"); 260 } 261 262 static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, 263 int err) 264 { 265 struct mmc_data *data = mrq->data; 266 267 if (data && meson_mmc_desc_chain_mode(data) && data->sg_count) 268 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, 269 mmc_get_dma_dir(data)); 270 } 271 272 static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate) 273 { 274 struct mmc_host *mmc = host->mmc; 275 int ret; 276 u32 cfg; 277 278 /* Same request - bail-out */ 279 if (host->req_rate == clk_rate) 280 return 0; 281 282 /* stop clock */ 283 cfg = readl(host->regs + SD_EMMC_CFG); 284 cfg |= CFG_STOP_CLOCK; 285 writel(cfg, host->regs + SD_EMMC_CFG); 286 host->req_rate = 0; 287 288 if (!clk_rate) { 289 mmc->actual_clock = 0; 290 /* return with clock being stopped */ 291 return 0; 292 } 293 294 ret = clk_set_rate(host->cfg_div_clk, clk_rate); 295 if (ret) { 296 dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n", 297 clk_rate, ret); 298 return ret; 299 } 300 301 host->req_rate = clk_rate; 302 mmc->actual_clock = clk_get_rate(host->cfg_div_clk); 303 304 dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock); 305 if (clk_rate != mmc->actual_clock) 306 dev_dbg(host->dev, "requested rate was %lu\n", clk_rate); 307 308 /* (re)start clock */ 309 cfg = readl(host->regs + SD_EMMC_CFG); 310 cfg &= ~CFG_STOP_CLOCK; 311 writel(cfg, host->regs + SD_EMMC_CFG); 312 313 return 0; 314 } 315 316 /* 317 * The SD/eMMC IP block has an internal mux and divider used for 318 * generating the MMC clock. Use the clock framework to create and 319 * manage these clocks. 320 */ 321 static int meson_mmc_clk_init(struct meson_host *host) 322 { 323 struct clk_init_data init; 324 char clk_name[32]; 325 int i, ret = 0; 326 const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; 327 const char *clk_div_parents[1]; 328 u32 clk_reg; 329 330 /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */ 331 clk_reg = 0; 332 clk_reg |= CLK_ALWAYS_ON; 333 clk_reg |= CLK_DIV_MASK; 334 clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); 335 clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); 336 clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); 337 writel(clk_reg, host->regs + SD_EMMC_CLOCK); 338 339 /* get the mux parents */ 340 for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { 341 struct clk *clk; 342 char name[16]; 343 344 snprintf(name, sizeof(name), "clkin%d", i); 345 clk = devm_clk_get(host->dev, name); 346 if (IS_ERR(clk)) { 347 if (clk != ERR_PTR(-EPROBE_DEFER)) 348 dev_err(host->dev, "Missing clock %s\n", name); 349 return PTR_ERR(clk); 350 } 351 352 mux_parent_names[i] = __clk_get_name(clk); 353 } 354 355 /* create the mux */ 356 snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev)); 357 init.name = clk_name; 358 init.ops = &clk_mux_ops; 359 init.flags = 0; 360 init.parent_names = mux_parent_names; 361 init.num_parents = MUX_CLK_NUM_PARENTS; 362 host->mux.reg = host->regs + SD_EMMC_CLOCK; 363 host->mux.shift = __bf_shf(CLK_SRC_MASK); 364 host->mux.mask = CLK_SRC_MASK >> host->mux.shift; 365 host->mux.flags = 0; 366 host->mux.table = NULL; 367 host->mux.hw.init = &init; 368 369 host->mux_clk = devm_clk_register(host->dev, &host->mux.hw); 370 if (WARN_ON(IS_ERR(host->mux_clk))) 371 return PTR_ERR(host->mux_clk); 372 373 /* create the divider */ 374 snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev)); 375 init.name = clk_name; 376 init.ops = &clk_divider_ops; 377 init.flags = CLK_SET_RATE_PARENT; 378 clk_div_parents[0] = __clk_get_name(host->mux_clk); 379 init.parent_names = clk_div_parents; 380 init.num_parents = ARRAY_SIZE(clk_div_parents); 381 382 host->cfg_div.reg = host->regs + SD_EMMC_CLOCK; 383 host->cfg_div.shift = __bf_shf(CLK_DIV_MASK); 384 host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK); 385 host->cfg_div.hw.init = &init; 386 host->cfg_div.flags = CLK_DIVIDER_ONE_BASED | 387 CLK_DIVIDER_ROUND_CLOSEST; 388 389 host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw); 390 if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk))) 391 return PTR_ERR(host->cfg_div_clk); 392 393 ret = clk_prepare_enable(host->cfg_div_clk); 394 if (ret) 395 return ret; 396 397 /* Get the nearest minimum clock to 400KHz */ 398 host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000); 399 400 ret = meson_mmc_clk_set(host, host->mmc->f_min); 401 if (ret) 402 clk_disable_unprepare(host->cfg_div_clk); 403 404 return ret; 405 } 406 407 static void meson_mmc_set_tuning_params(struct mmc_host *mmc) 408 { 409 struct meson_host *host = mmc_priv(mmc); 410 u32 regval; 411 412 /* stop clock */ 413 regval = readl(host->regs + SD_EMMC_CFG); 414 regval |= CFG_STOP_CLOCK; 415 writel(regval, host->regs + SD_EMMC_CFG); 416 417 regval = readl(host->regs + SD_EMMC_CLOCK); 418 regval &= ~CLK_CORE_PHASE_MASK; 419 regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase); 420 regval &= ~CLK_TX_PHASE_MASK; 421 regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase); 422 regval &= ~CLK_RX_PHASE_MASK; 423 regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase); 424 writel(regval, host->regs + SD_EMMC_CLOCK); 425 426 /* start clock */ 427 regval = readl(host->regs + SD_EMMC_CFG); 428 regval &= ~CFG_STOP_CLOCK; 429 writel(regval, host->regs + SD_EMMC_CFG); 430 } 431 432 static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 433 { 434 struct meson_host *host = mmc_priv(mmc); 435 u32 bus_width, val; 436 int err; 437 438 /* 439 * GPIO regulator, only controls switching between 1v8 and 440 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON. 441 */ 442 switch (ios->power_mode) { 443 case MMC_POWER_OFF: 444 if (!IS_ERR(mmc->supply.vmmc)) 445 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); 446 447 if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { 448 regulator_disable(mmc->supply.vqmmc); 449 host->vqmmc_enabled = false; 450 } 451 452 break; 453 454 case MMC_POWER_UP: 455 if (!IS_ERR(mmc->supply.vmmc)) 456 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); 457 break; 458 459 case MMC_POWER_ON: 460 if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { 461 int ret = regulator_enable(mmc->supply.vqmmc); 462 463 if (ret < 0) 464 dev_err(host->dev, 465 "failed to enable vqmmc regulator\n"); 466 else 467 host->vqmmc_enabled = true; 468 } 469 470 break; 471 } 472 473 /* Bus width */ 474 switch (ios->bus_width) { 475 case MMC_BUS_WIDTH_1: 476 bus_width = CFG_BUS_WIDTH_1; 477 break; 478 case MMC_BUS_WIDTH_4: 479 bus_width = CFG_BUS_WIDTH_4; 480 break; 481 case MMC_BUS_WIDTH_8: 482 bus_width = CFG_BUS_WIDTH_8; 483 break; 484 default: 485 dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n", 486 ios->bus_width); 487 bus_width = CFG_BUS_WIDTH_4; 488 } 489 490 val = readl(host->regs + SD_EMMC_CFG); 491 val &= ~CFG_BUS_WIDTH_MASK; 492 val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width); 493 494 val &= ~CFG_DDR; 495 if (ios->timing == MMC_TIMING_UHS_DDR50 || 496 ios->timing == MMC_TIMING_MMC_DDR52 || 497 ios->timing == MMC_TIMING_MMC_HS400) 498 val |= CFG_DDR; 499 500 val &= ~CFG_CHK_DS; 501 if (ios->timing == MMC_TIMING_MMC_HS400) 502 val |= CFG_CHK_DS; 503 504 err = meson_mmc_clk_set(host, ios->clock); 505 if (err) 506 dev_err(host->dev, "Failed to set clock: %d\n,", err); 507 508 writel(val, host->regs + SD_EMMC_CFG); 509 dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val); 510 } 511 512 static void meson_mmc_request_done(struct mmc_host *mmc, 513 struct mmc_request *mrq) 514 { 515 struct meson_host *host = mmc_priv(mmc); 516 517 host->cmd = NULL; 518 mmc_request_done(host->mmc, mrq); 519 } 520 521 static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz) 522 { 523 struct meson_host *host = mmc_priv(mmc); 524 u32 cfg, blksz_old; 525 526 cfg = readl(host->regs + SD_EMMC_CFG); 527 blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg); 528 529 if (!is_power_of_2(blksz)) 530 dev_err(host->dev, "blksz %u is not a power of 2\n", blksz); 531 532 blksz = ilog2(blksz); 533 534 /* check if block-size matches, if not update */ 535 if (blksz == blksz_old) 536 return; 537 538 dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__, 539 blksz_old, blksz); 540 541 cfg &= ~CFG_BLK_LEN_MASK; 542 cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz); 543 writel(cfg, host->regs + SD_EMMC_CFG); 544 } 545 546 static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg) 547 { 548 if (cmd->flags & MMC_RSP_PRESENT) { 549 if (cmd->flags & MMC_RSP_136) 550 *cmd_cfg |= CMD_CFG_RESP_128; 551 *cmd_cfg |= CMD_CFG_RESP_NUM; 552 553 if (!(cmd->flags & MMC_RSP_CRC)) 554 *cmd_cfg |= CMD_CFG_RESP_NOCRC; 555 556 if (cmd->flags & MMC_RSP_BUSY) 557 *cmd_cfg |= CMD_CFG_R1B; 558 } else { 559 *cmd_cfg |= CMD_CFG_NO_RESP; 560 } 561 } 562 563 static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg) 564 { 565 struct meson_host *host = mmc_priv(mmc); 566 struct sd_emmc_desc *desc = host->descs; 567 struct mmc_data *data = host->cmd->data; 568 struct scatterlist *sg; 569 u32 start; 570 int i; 571 572 if (data->flags & MMC_DATA_WRITE) 573 cmd_cfg |= CMD_CFG_DATA_WR; 574 575 if (data->blocks > 1) { 576 cmd_cfg |= CMD_CFG_BLOCK_MODE; 577 meson_mmc_set_blksz(mmc, data->blksz); 578 } 579 580 for_each_sg(data->sg, sg, data->sg_count, i) { 581 unsigned int len = sg_dma_len(sg); 582 583 if (data->blocks > 1) 584 len /= data->blksz; 585 586 desc[i].cmd_cfg = cmd_cfg; 587 desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len); 588 if (i > 0) 589 desc[i].cmd_cfg |= CMD_CFG_NO_CMD; 590 desc[i].cmd_arg = host->cmd->arg; 591 desc[i].cmd_resp = 0; 592 desc[i].cmd_data = sg_dma_address(sg); 593 } 594 desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN; 595 596 dma_wmb(); /* ensure descriptor is written before kicked */ 597 start = host->descs_dma_addr | START_DESC_BUSY; 598 writel(start, host->regs + SD_EMMC_START); 599 } 600 601 static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd) 602 { 603 struct meson_host *host = mmc_priv(mmc); 604 struct mmc_data *data = cmd->data; 605 u32 cmd_cfg = 0, cmd_data = 0; 606 unsigned int xfer_bytes = 0; 607 608 /* Setup descriptors */ 609 dma_rmb(); 610 611 host->cmd = cmd; 612 613 cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode); 614 cmd_cfg |= CMD_CFG_OWNER; /* owned by CPU */ 615 616 meson_mmc_set_response_bits(cmd, &cmd_cfg); 617 618 /* data? */ 619 if (data) { 620 data->bytes_xfered = 0; 621 cmd_cfg |= CMD_CFG_DATA_IO; 622 cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, 623 ilog2(meson_mmc_get_timeout_msecs(data))); 624 625 if (meson_mmc_desc_chain_mode(data)) { 626 meson_mmc_desc_chain_transfer(mmc, cmd_cfg); 627 return; 628 } 629 630 if (data->blocks > 1) { 631 cmd_cfg |= CMD_CFG_BLOCK_MODE; 632 cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, 633 data->blocks); 634 meson_mmc_set_blksz(mmc, data->blksz); 635 } else { 636 cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz); 637 } 638 639 xfer_bytes = data->blksz * data->blocks; 640 if (data->flags & MMC_DATA_WRITE) { 641 cmd_cfg |= CMD_CFG_DATA_WR; 642 WARN_ON(xfer_bytes > host->bounce_buf_size); 643 sg_copy_to_buffer(data->sg, data->sg_len, 644 host->bounce_buf, xfer_bytes); 645 dma_wmb(); 646 } 647 648 cmd_data = host->bounce_dma_addr & CMD_DATA_MASK; 649 } else { 650 cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK, 651 ilog2(SD_EMMC_CMD_TIMEOUT)); 652 } 653 654 /* Last descriptor */ 655 cmd_cfg |= CMD_CFG_END_OF_CHAIN; 656 writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG); 657 writel(cmd_data, host->regs + SD_EMMC_CMD_DAT); 658 writel(0, host->regs + SD_EMMC_CMD_RSP); 659 wmb(); /* ensure descriptor is written before kicked */ 660 writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG); 661 } 662 663 static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) 664 { 665 struct meson_host *host = mmc_priv(mmc); 666 bool needs_pre_post_req = mrq->data && 667 !(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE); 668 669 if (needs_pre_post_req) { 670 meson_mmc_get_transfer_mode(mmc, mrq); 671 if (!meson_mmc_desc_chain_mode(mrq->data)) 672 needs_pre_post_req = false; 673 } 674 675 if (needs_pre_post_req) 676 meson_mmc_pre_req(mmc, mrq); 677 678 /* Stop execution */ 679 writel(0, host->regs + SD_EMMC_START); 680 681 meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd); 682 683 if (needs_pre_post_req) 684 meson_mmc_post_req(mmc, mrq, 0); 685 } 686 687 static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd) 688 { 689 struct meson_host *host = mmc_priv(mmc); 690 691 if (cmd->flags & MMC_RSP_136) { 692 cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3); 693 cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2); 694 cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1); 695 cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP); 696 } else if (cmd->flags & MMC_RSP_PRESENT) { 697 cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP); 698 } 699 } 700 701 static irqreturn_t meson_mmc_irq(int irq, void *dev_id) 702 { 703 struct meson_host *host = dev_id; 704 struct mmc_command *cmd; 705 struct mmc_data *data; 706 u32 irq_en, status, raw_status; 707 irqreturn_t ret = IRQ_HANDLED; 708 709 if (WARN_ON(!host)) 710 return IRQ_NONE; 711 712 cmd = host->cmd; 713 714 if (WARN_ON(!cmd)) 715 return IRQ_NONE; 716 717 data = cmd->data; 718 719 spin_lock(&host->lock); 720 irq_en = readl(host->regs + SD_EMMC_IRQ_EN); 721 raw_status = readl(host->regs + SD_EMMC_STATUS); 722 status = raw_status & irq_en; 723 724 if (!status) { 725 dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n", 726 raw_status, irq_en); 727 ret = IRQ_NONE; 728 goto out; 729 } 730 731 meson_mmc_read_resp(host->mmc, cmd); 732 733 cmd->error = 0; 734 if (status & IRQ_RXD_ERR_MASK) { 735 dev_dbg(host->dev, "Unhandled IRQ: RXD error\n"); 736 cmd->error = -EILSEQ; 737 } 738 if (status & IRQ_TXD_ERR) { 739 dev_dbg(host->dev, "Unhandled IRQ: TXD error\n"); 740 cmd->error = -EILSEQ; 741 } 742 if (status & IRQ_DESC_ERR) 743 dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n"); 744 if (status & IRQ_RESP_ERR) { 745 dev_dbg(host->dev, "Unhandled IRQ: Response error\n"); 746 cmd->error = -EILSEQ; 747 } 748 if (status & IRQ_RESP_TIMEOUT) { 749 dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n"); 750 cmd->error = -ETIMEDOUT; 751 } 752 if (status & IRQ_DESC_TIMEOUT) { 753 dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n"); 754 cmd->error = -ETIMEDOUT; 755 } 756 if (status & IRQ_SDIO) 757 dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n"); 758 759 if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) { 760 if (data && !cmd->error) 761 data->bytes_xfered = data->blksz * data->blocks; 762 if (meson_mmc_bounce_buf_read(data) || 763 meson_mmc_get_next_command(cmd)) 764 ret = IRQ_WAKE_THREAD; 765 } else { 766 dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n", 767 status, cmd->opcode, cmd->arg, 768 cmd->flags, cmd->mrq->stop ? 1 : 0); 769 if (cmd->data) { 770 struct mmc_data *data = cmd->data; 771 772 dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)", 773 data->blksz, data->blocks, data->flags, 774 data->flags & MMC_DATA_WRITE ? "write" : "", 775 data->flags & MMC_DATA_READ ? "read" : ""); 776 } 777 } 778 779 out: 780 /* ack all (enabled) interrupts */ 781 writel(status, host->regs + SD_EMMC_STATUS); 782 783 if (ret == IRQ_HANDLED) 784 meson_mmc_request_done(host->mmc, cmd->mrq); 785 786 spin_unlock(&host->lock); 787 return ret; 788 } 789 790 static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id) 791 { 792 struct meson_host *host = dev_id; 793 struct mmc_command *next_cmd, *cmd = host->cmd; 794 struct mmc_data *data; 795 unsigned int xfer_bytes; 796 797 if (WARN_ON(!cmd)) 798 return IRQ_NONE; 799 800 data = cmd->data; 801 if (meson_mmc_bounce_buf_read(data)) { 802 xfer_bytes = data->blksz * data->blocks; 803 WARN_ON(xfer_bytes > host->bounce_buf_size); 804 sg_copy_from_buffer(data->sg, data->sg_len, 805 host->bounce_buf, xfer_bytes); 806 } 807 808 next_cmd = meson_mmc_get_next_command(cmd); 809 if (next_cmd) 810 meson_mmc_start_cmd(host->mmc, next_cmd); 811 else 812 meson_mmc_request_done(host->mmc, cmd->mrq); 813 814 return IRQ_HANDLED; 815 } 816 817 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) 818 { 819 struct meson_host *host = mmc_priv(mmc); 820 struct meson_tuning_params tp_old = host->tp; 821 int ret = -EINVAL, i, cmd_error; 822 823 dev_info(mmc_dev(mmc), "(re)tuning...\n"); 824 825 for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) { 826 host->tp.rx_phase = i; 827 /* exclude the active parameter set if retuning */ 828 if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) && 829 mmc->doing_retune) 830 continue; 831 meson_mmc_set_tuning_params(mmc); 832 ret = mmc_send_tuning(mmc, opcode, &cmd_error); 833 if (!ret) 834 break; 835 } 836 837 return ret; 838 } 839 840 /* 841 * NOTE: we only need this until the GPIO/pinctrl driver can handle 842 * interrupts. For now, the MMC core will use this for polling. 843 */ 844 static int meson_mmc_get_cd(struct mmc_host *mmc) 845 { 846 int status = mmc_gpio_get_cd(mmc); 847 848 if (status == -ENOSYS) 849 return 1; /* assume present */ 850 851 return status; 852 } 853 854 static void meson_mmc_cfg_init(struct meson_host *host) 855 { 856 u32 cfg = 0; 857 858 cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK, 859 ilog2(SD_EMMC_CFG_RESP_TIMEOUT)); 860 cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP)); 861 cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE)); 862 863 writel(cfg, host->regs + SD_EMMC_CFG); 864 } 865 866 static const struct mmc_host_ops meson_mmc_ops = { 867 .request = meson_mmc_request, 868 .set_ios = meson_mmc_set_ios, 869 .get_cd = meson_mmc_get_cd, 870 .pre_req = meson_mmc_pre_req, 871 .post_req = meson_mmc_post_req, 872 .execute_tuning = meson_mmc_execute_tuning, 873 }; 874 875 static int meson_mmc_probe(struct platform_device *pdev) 876 { 877 struct resource *res; 878 struct meson_host *host; 879 struct mmc_host *mmc; 880 int ret, irq; 881 882 mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); 883 if (!mmc) 884 return -ENOMEM; 885 host = mmc_priv(mmc); 886 host->mmc = mmc; 887 host->dev = &pdev->dev; 888 dev_set_drvdata(&pdev->dev, host); 889 890 spin_lock_init(&host->lock); 891 892 /* Get regulators and the supported OCR mask */ 893 host->vqmmc_enabled = false; 894 ret = mmc_regulator_get_supply(mmc); 895 if (ret == -EPROBE_DEFER) 896 goto free_host; 897 898 ret = mmc_of_parse(mmc); 899 if (ret) { 900 if (ret != -EPROBE_DEFER) 901 dev_warn(&pdev->dev, "error parsing DT: %d\n", ret); 902 goto free_host; 903 } 904 905 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 906 host->regs = devm_ioremap_resource(&pdev->dev, res); 907 if (IS_ERR(host->regs)) { 908 ret = PTR_ERR(host->regs); 909 goto free_host; 910 } 911 912 irq = platform_get_irq(pdev, 0); 913 if (!irq) { 914 dev_err(&pdev->dev, "failed to get interrupt resource.\n"); 915 ret = -EINVAL; 916 goto free_host; 917 } 918 919 host->core_clk = devm_clk_get(&pdev->dev, "core"); 920 if (IS_ERR(host->core_clk)) { 921 ret = PTR_ERR(host->core_clk); 922 goto free_host; 923 } 924 925 ret = clk_prepare_enable(host->core_clk); 926 if (ret) 927 goto free_host; 928 929 host->tp.core_phase = CLK_PHASE_180; 930 host->tp.tx_phase = CLK_PHASE_0; 931 host->tp.rx_phase = CLK_PHASE_0; 932 933 ret = meson_mmc_clk_init(host); 934 if (ret) 935 goto err_core_clk; 936 937 /* set config to sane default */ 938 meson_mmc_cfg_init(host); 939 940 /* Stop execution */ 941 writel(0, host->regs + SD_EMMC_START); 942 943 /* clear, ack, enable all interrupts */ 944 writel(0, host->regs + SD_EMMC_IRQ_EN); 945 writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS); 946 writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN); 947 948 ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, 949 meson_mmc_irq_thread, IRQF_SHARED, 950 NULL, host); 951 if (ret) 952 goto err_div_clk; 953 954 mmc->caps |= MMC_CAP_CMD23; 955 mmc->max_blk_count = CMD_CFG_LENGTH_MASK; 956 mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size; 957 mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc); 958 mmc->max_seg_size = mmc->max_req_size; 959 960 /* data bounce buffer */ 961 host->bounce_buf_size = mmc->max_req_size; 962 host->bounce_buf = 963 dma_alloc_coherent(host->dev, host->bounce_buf_size, 964 &host->bounce_dma_addr, GFP_KERNEL); 965 if (host->bounce_buf == NULL) { 966 dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); 967 ret = -ENOMEM; 968 goto err_div_clk; 969 } 970 971 host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 972 &host->descs_dma_addr, GFP_KERNEL); 973 if (!host->descs) { 974 dev_err(host->dev, "Allocating descriptor DMA buffer failed\n"); 975 ret = -ENOMEM; 976 goto err_bounce_buf; 977 } 978 979 mmc->ops = &meson_mmc_ops; 980 mmc_add_host(mmc); 981 982 return 0; 983 984 err_bounce_buf: 985 dma_free_coherent(host->dev, host->bounce_buf_size, 986 host->bounce_buf, host->bounce_dma_addr); 987 err_div_clk: 988 clk_disable_unprepare(host->cfg_div_clk); 989 err_core_clk: 990 clk_disable_unprepare(host->core_clk); 991 free_host: 992 mmc_free_host(mmc); 993 return ret; 994 } 995 996 static int meson_mmc_remove(struct platform_device *pdev) 997 { 998 struct meson_host *host = dev_get_drvdata(&pdev->dev); 999 1000 mmc_remove_host(host->mmc); 1001 1002 /* disable interrupts */ 1003 writel(0, host->regs + SD_EMMC_IRQ_EN); 1004 1005 dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, 1006 host->descs, host->descs_dma_addr); 1007 dma_free_coherent(host->dev, host->bounce_buf_size, 1008 host->bounce_buf, host->bounce_dma_addr); 1009 1010 clk_disable_unprepare(host->cfg_div_clk); 1011 clk_disable_unprepare(host->core_clk); 1012 1013 mmc_free_host(host->mmc); 1014 return 0; 1015 } 1016 1017 static const struct of_device_id meson_mmc_of_match[] = { 1018 { .compatible = "amlogic,meson-gx-mmc", }, 1019 { .compatible = "amlogic,meson-gxbb-mmc", }, 1020 { .compatible = "amlogic,meson-gxl-mmc", }, 1021 { .compatible = "amlogic,meson-gxm-mmc", }, 1022 {} 1023 }; 1024 MODULE_DEVICE_TABLE(of, meson_mmc_of_match); 1025 1026 static struct platform_driver meson_mmc_driver = { 1027 .probe = meson_mmc_probe, 1028 .remove = meson_mmc_remove, 1029 .driver = { 1030 .name = DRIVER_NAME, 1031 .of_match_table = of_match_ptr(meson_mmc_of_match), 1032 }, 1033 }; 1034 1035 module_platform_driver(meson_mmc_driver); 1036 1037 MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver"); 1038 MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>"); 1039 MODULE_LICENSE("GPL v2"); 1040