// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
 *
 * Copyright (C) 2006 Texas Instruments.
 * Original author: Purushotam Kumar
 * Copyright (C) 2009 David Brownell
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/interrupt.h>

#include <linux/platform_data/mmc-davinci.h>

/*
 * Register Definitions
 */
#define DAVINCI_MMCCTL		0x00	/* Control Register                  */
#define DAVINCI_MMCCLK		0x04	/* Memory Clock Control Register     */
#define DAVINCI_MMCST0		0x08	/* Status Register 0                 */
#define DAVINCI_MMCST1		0x0C	/* Status Register 1                 */
#define DAVINCI_MMCIM		0x10	/* Interrupt Mask Register           */
#define DAVINCI_MMCTOR		0x14	/* Response Time-Out Register        */
#define DAVINCI_MMCTOD		0x18	/* Data Read Time-Out Register       */
#define DAVINCI_MMCBLEN		0x1C	/* Block Length Register             */
#define DAVINCI_MMCNBLK		0x20	/* Number of Blocks Register         */
#define DAVINCI_MMCNBLC		0x24	/* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR		0x28	/* Data Receive Register             */
#define DAVINCI_MMCDXR		0x2C	/* Data Transmit Register            */
#define DAVINCI_MMCCMD		0x30	/* Command Register                  */
#define DAVINCI_MMCARGHL	0x34	/* Argument Register                 */
#define DAVINCI_MMCRSP01	0x38	/* Response Register 0 and 1         */
#define DAVINCI_MMCRSP23	0x3C	/* Response Register 2 and 3         */
#define DAVINCI_MMCRSP45	0x40	/* Response Register 4 and 5         */
#define DAVINCI_MMCRSP67	0x44	/* Response Register 6 and 7         */
#define DAVINCI_MMCDRSP		0x48	/* Data Response Register            */
#define DAVINCI_MMCETOK		0x4C
#define DAVINCI_MMCCIDX		0x50	/* Command Index Register            */
#define DAVINCI_MMCCKC		0x54
#define DAVINCI_MMCTORC		0x58
#define DAVINCI_MMCTODC		0x5C
#define DAVINCI_MMCBLNC		0x60
#define DAVINCI_SDIOCTL		0x64
#define DAVINCI_SDIOST0		0x68
#define DAVINCI_SDIOIEN		0x6C
#define DAVINCI_SDIOIST		0x70
#define DAVINCI_MMCFIFOCTL	0x74	/* FIFO Control Register             */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST		(1 << 0)
#define MMCCTL_CMDRST		(1 << 1)
#define MMCCTL_WIDTH_8_BIT	(1 << 8)
#define MMCCTL_WIDTH_4_BIT	(1 << 2)
#define MMCCTL_DATEG_DISABLED	(0 << 6)
#define MMCCTL_DATEG_RISING	(1 << 6)
#define MMCCTL_DATEG_FALLING	(2 << 6)
#define MMCCTL_DATEG_BOTH	(3 << 6)
#define MMCCTL_PERMDR_LE	(0 << 9)
#define MMCCTL_PERMDR_BE	(1 << 9)
#define MMCCTL_PERMDX_LE	(0 << 10)
#define MMCCTL_PERMDX_BE	(1 << 10)

/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN		(1 << 8)
#define MMCCLK_CLKRT_MASK	(0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE		BIT(0)	/* data done */
#define MMCST0_BSYDNE		BIT(1)	/* busy done */
#define MMCST0_RSPDNE		BIT(2)	/* command done */
#define MMCST0_TOUTRD		BIT(3)	/* data read timeout */
#define MMCST0_TOUTRS		BIT(4)	/* command response timeout */
#define MMCST0_CRCWR		BIT(5)	/* data write CRC error */
#define MMCST0_CRCRD		BIT(6)	/* data read CRC error */
#define MMCST0_CRCRS		BIT(7)	/* command response CRC error */
#define MMCST0_DXRDY		BIT(9)	/* data transmit ready (fifo empty) */
#define MMCST0_DRRDY		BIT(10)	/* data receive ready (data in fifo) */
#define MMCST0_DATED		BIT(11)	/* DAT3 edge detect */
#define MMCST0_TRNDNE		BIT(12)	/* transfer done */

/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY		(1 << 0)

/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK		(0x3F << 0)
#define MMCCMD_PPLEN		(1 << 7)
#define MMCCMD_BSYEXP		(1 << 8)
#define MMCCMD_RSPFMT_MASK	(3 << 9)
#define MMCCMD_RSPFMT_NONE	(0 << 9)
#define MMCCMD_RSPFMT_R1456	(1 << 9)
#define MMCCMD_RSPFMT_R2	(2 << 9)
#define MMCCMD_RSPFMT_R3	(3 << 9)
#define MMCCMD_DTRW		(1 << 11)
#define MMCCMD_STRMTP		(1 << 12)
#define MMCCMD_WDATX		(1 << 13)
#define MMCCMD_INITCK		(1 << 14)
#define MMCCMD_DCLR		(1 << 15)
#define MMCCMD_DMATRIG		(1 << 16)

/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST	(1 << 0)
#define MMCFIFOCTL_FIFODIR_WR	(1 << 1)
#define MMCFIFOCTL_FIFODIR_RD	(0 << 1)
#define MMCFIFOCTL_FIFOLEV	(1 << 2)	/* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4	(0 << 3)	/* access width of 4 bytes    */
#define MMCFIFOCTL_ACCWD_3	(1 << 3)	/* access width of 3 bytes    */
#define MMCFIFOCTL_ACCWD_2	(2 << 3)	/* access width of 2 bytes    */
#define MMCFIFOCTL_ACCWD_1	(3 << 3)	/* access width of 1 byte     */

/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI		BIT(0)

/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN		BIT(0)

/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT		BIT(0)

/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK	200000

/*
 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
 * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
 * for drivers with max_segs == 1, making the segments bigger (64KB)
 * than the page or two that's otherwise typical.  nr_sg (passed from
 * platform data) == 16 gives at least the same throughput boost, using
 * EDMA transfer linkage instead of spending CPU time copying pages.
 */
#define MAX_CCNT	((1 << 16) - 1)

#define MAX_NR_SG	16

static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
		"Read/Write threshold. Default = 32");

static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
		"Polling transaction size threshold. Default = 128");

static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
		"Maximum polling loop count. Default = 32");

static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
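/*
 * Illustrative usage only (hypothetical values, not recommendations): the
 * parameters above can be overridden when loading the module, e.g.
 * "modprobe davinci_mmc use_dma=0 poll_threshold=256", or on the kernel
 * command line when the driver is built in, e.g. "davinci_mmc.rw_threshold=64".
 */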

struct mmc_davinci_host {
	struct mmc_command *cmd;
	struct mmc_data *data;
	struct mmc_host *mmc;
	struct clk *clk;
	unsigned int mmc_input_clk;
	void __iomem *base;
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;

#define DAVINCI_MMC_DATADIR_NONE	0
#define DAVINCI_MMC_DATADIR_READ	1
#define DAVINCI_MMC_DATADIR_WRITE	2
	unsigned char data_dir;

	/* buffer is used during PIO of one scatterlist segment, and
	 * is updated along with buffer_bytes_left.  bytes_left applies
	 * to all N blocks of the PIO transfer.
	 */
	u8 *buffer;
	u32 buffer_bytes_left;
	u32 bytes_left;

	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	bool use_dma;
	bool do_dma;
	bool sdio_int;
	bool active_request;

	/* For PIO we walk scatterlists one segment at a time. */
	unsigned int sg_len;
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */
	u8 version;
	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;
	/* Number of sg segments */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block freq_transition;
#endif
};

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}

static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
				    unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		host->sg = sg_next(host->data->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;

	/* NOTE: we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width (i.e. ACCWD) is 4 bytes.
	 */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		for (i = 0; i < (n >> 2); i++) {
			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
			p = p + 4;
		}
		if (n & 3) {
			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} else {
		for (i = 0; i < (n >> 2); i++) {
			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
			p = p + 4;
		}
		if (n & 3) {
			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}
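/*
 * Illustrative arithmetic, assuming the default rw_threshold of 32 bytes and
 * the 4-byte FIFO access width (ACCWD) this driver relies on: each call above
 * moves one threshold's worth of data, i.e. 32 bytes as eight 32-bit
 * readl()/writel() accesses, so a single 512-byte block is drained or filled
 * in 512 / 32 = 16 such chunks when running in PIO mode.
 */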

static void mmc_davinci_start_command(struct mmc_davinci_host *host,
				      struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R2:
			s = ", R2 response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		} s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/* There's some spec confusion about when R1B is
		 * allowed, but if the card doesn't issue a BUSY
		 * then it's harmless for us to allow it.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		fallthrough;
	case MMC_RSP_R1:		/* 48 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:		/* 136 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:		/* 48 bits, no CRC */
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Enable EDMA transfer triggers */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
	    host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Setting whether command involves data transfer or not */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/* Setting whether data read or write */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* set Command timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
	im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}

	/*
	 * Before non-DMA WRITE commands the controller needs priming:
	 * the FIFO should be populated with 32 bytes, i.e. one FIFO's worth.
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;
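	/*
	 * Fast path for small PIO transfers: if the whole transfer fits
	 * within poll_threshold bytes, poll the controller by calling the
	 * interrupt handler directly (up to poll_loopcount times) so short
	 * requests can finish without waiting for an interrupt.  Interrupts
	 * are only unmasked below if the request is still in flight.
	 */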
	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}

/*----------------------------------------------------------------------*/

/* DMA infrastructure */

static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	struct dma_chan *sync_dev;

	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
		sync_dev = host->dma_rx;
	else
		sync_dev = host->dma_tx;

	dmaengine_terminate_all(sync_dev);
}
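/*
 * Set up and issue a dmaengine slave transfer for the current request.  The
 * slave burst size is chosen to match the MMC FIFO event size: rw_threshold
 * bytes per burst, expressed in 32-bit words (rw_threshold / 4).
 */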
static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
					struct mmc_data *data)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	int ret = 0;

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_tx;
		dmaengine_slave_config(host->dma_tx, &dma_tx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_tx,
				data->sg,
				host->sg_len,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA TX descriptor");
			ret = -1;
			goto out;
		}
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_rx;
		dmaengine_slave_config(host->dma_rx, &dma_rx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_rx,
				data->sg,
				host->sg_len,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA RX descriptor");
			ret = -1;
			goto out;
		}
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

out:
	return ret;
}

static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
					  struct mmc_data *data)
{
	int i;
	int mask = rw_threshold - 1;
	int ret = 0;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				  mmc_get_dma_dir(data));

	/* no individual DMA segment should need a partial FIFO */
	for (i = 0; i < host->sg_len; i++) {
		if (sg_dma_len(data->sg + i) & mask) {
			dma_unmap_sg(mmc_dev(host->mmc),
				     data->sg, data->sg_len,
				     mmc_get_dma_dir(data));
			return -1;
		}
	}

	host->do_dma = 1;
	ret = mmc_davinci_send_dma_request(host, data);

	return ret;
}

static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

/*----------------------------------------------------------------------*/

static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO */
	if (data->flags & MMC_DATA_WRITE) {
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
	} else {
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
	}

	host->buffer = NULL;
	host->bytes_left = data->blocks * data->blksz;

	/* For now we try to use DMA whenever we won't need partial FIFO
	 * reads or writes, either for the whole transfer (as tested here)
	 * or for any individual scatterlist segment (tested when we call
	 * start_dma_transfer).
	 *
	 * While we *could* change that, unusual block sizes are rarely
	 * used.  The occasional fallback to PIO shouldn't hurt.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* zero this to ensure we take no PIO paths */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU Copy */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}
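/*
 * Top-level request handler: wait (briefly) for the card to deassert BUSY
 * from any previous operation, program the data-related registers via
 * mmc_davinci_prepare_data(), then issue the command itself.
 */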
static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	unsigned long timeout = jiffies + msecs_to_jiffies(900);
	u32 mmcst1 = 0;

	/* Card may still be sending BUSY after a previous operation,
	 * typically some kind of write.  If so, we can't proceed yet.
	 */
	while (time_before(jiffies, timeout)) {
		mmcst1 = readl(host->base + DAVINCI_MMCST1);
		if (!(mmcst1 & MMCST1_BUSY))
			break;
		cpu_relax();
	}
	if (mmcst1 & MMCST1_BUSY) {
		dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
		req->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, req);
		return;
	}

	host->do_dma = 0;
	mmc_davinci_prepare_data(host, req);
	mmc_davinci_start_command(host, req->cmd);
}

static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
					    unsigned int mmc_req_freq)
{
	unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((unsigned int)mmc_pclk
				/ (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (unsigned int)mmc_pclk
		/ (2 * (mmc_push_pull_divisor + 1));

	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;

	/* Convert ns to clock cycles */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000));
	else
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000000));

	return mmc_push_pull_divisor;
}
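/*
 * Worked example (illustrative only, with a hypothetical 150 MHz module
 * clock): for a requested card clock of 25 MHz,
 *	divisor = 150000000 / (2 * 25000000) - 1 = 2
 * which gives an actual bus clock of 150000000 / (2 * (2 + 1)) = 25 MHz.
 * When the division is inexact the resulting clock would exceed the request,
 * so the divisor is bumped by one to stay at or below the requested rate.
 */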
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;

		/* Ignore the init clock value passed in, to keep
		 * interoperability with different cards.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Convert ns to clock cycles */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
	} else {
		u32 temp;

		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		udelay(10);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

		udelay(10);
	}
}

static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send clock cycles, poll completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
			cpu_relax();
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* FIXME on power OFF, reset things ... */
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * SDIO Interrupt Detection work-around as suggested by
		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}

static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}

static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
					  int val)
{
	u32 temp;

	temp = readl(host->base + DAVINCI_MMCCTL);
	if (val)	/* reset */
		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else		/* enable */
		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(temp, host->base + DAVINCI_MMCCTL);
	udelay(10);
}

static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}

static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}
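/*
 * Main MMC interrupt handler.  Handles both the command and data phases of a
 * request; note that it is also invoked directly (with irq == 0) from
 * mmc_davinci_start_command() when polling small PIO transfers.
 */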
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	if (host->cmd == NULL && host->data == NULL) {
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;

	/* Handle the FIFO first when using PIO for data.
	 * bytes_left will decrease to zero as I/O progresses and status will
	 * read zero over iteration because this controller's status
	 * register (MMCST0) reports any status only once and is cleared by
	 * reading.  So this is not an unbounded loop even in the non-DMA
	 * case.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly.  In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked.  This assumption is also taken when the MMCIM
		 * is first set.  Otherwise, writing to MMCIM after reading the
		 * status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
				"DATDNE with no host->data\n");
		}
	}

	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		/* Data CRC error */
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE: this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ? "timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		end_command = host->cmd ? 1 : 0;
	}

	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}

static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (config && config->get_cd)
		return config->get_cd(pdev->id);

	return mmc_gpio_get_cd(mmc);
}

static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (config && config->get_ro)
		return config->get_ro(pdev->id);

	return mmc_gpio_get_ro(mmc);
}
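/*
 * SDIO card interrupts are signalled on DAT1.  When enabling, first check
 * whether DAT1 is already low (an interrupt is already pending) and signal
 * it immediately; otherwise arm the SDIO interrupt enable bit.  See also the
 * erratum workaround in mmc_davinci_xfer_done().
 */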
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}

static const struct mmc_host_ops mmc_davinci_ops = {
	.request	 = mmc_davinci_request,
	.set_ios	 = mmc_davinci_set_ios,
	.get_cd		 = mmc_davinci_get_cd,
	.get_ro		 = mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};

/*----------------------------------------------------------------------*/
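/*
 * The controller's input clock can change across a cpufreq transition, so
 * re-read the clock rate and recompute the card clock divider after each
 * frequency change (CPUFREQ_POSTCHANGE).
 */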
#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
					  unsigned long val, void *data)
{
	struct mmc_davinci_host *host;
	unsigned int mmc_pclk;
	struct mmc_host *mmc;
	unsigned long flags;

	host = container_of(nb, struct mmc_davinci_host, freq_transition);
	mmc = host->mmc;
	mmc_pclk = clk_get_rate(host->clk);

	if (val == CPUFREQ_POSTCHANGE) {
		spin_lock_irqsave(&mmc->lock, flags);
		host->mmc_input_clk = mmc_pclk;
		calculate_clk_divider(mmc, &mmc->ios);
		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}

static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif

static void init_mmcsd_host(struct mmc_davinci_host *host)
{
	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);

	mmc_davinci_reset_ctrl(host, 0);
}

static const struct platform_device_id davinci_mmc_devtype[] = {
	{
		.name	= "dm6441-mmc",
		.driver_data = MMC_CTLR_VERSION_1,
	}, {
		.name	= "da830-mmc",
		.driver_data = MMC_CTLR_VERSION_2,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);

static const struct of_device_id davinci_mmc_dt_ids[] = {
	{
		.compatible = "ti,dm6441-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
	},
	{
		.compatible = "ti,da830-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
	},
	{},
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);
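/*
 * Legacy (non-DT) probe path: translate board-file platform data (bus width,
 * maximum frequency, extra caps, optional card-detect/write-protect GPIOs)
 * into mmc_host capabilities, falling back to polling when no card-detect
 * GPIO is available.
 */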
static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *pdata = pdev->dev.platform_data;
	struct mmc_davinci_host *host;
	int ret;

	if (!pdata)
		return -EINVAL;

	host = mmc_priv(mmc);
	if (!host)
		return -EINVAL;

	if (pdata && pdata->nr_sg)
		host->nr_sg = pdata->nr_sg - 1;

	if (pdata && (pdata->wires == 4 || pdata->wires == 0))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (pdata && (pdata->wires == 8))
		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

	mmc->f_min = 312500;
	mmc->f_max = 25000000;
	if (pdata && pdata->max_freq)
		mmc->f_max = pdata->max_freq;
	if (pdata && pdata->caps)
		mmc->caps |= pdata->caps;

	/* Register a cd gpio, if there is not one, enable polling */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
	if (ret == -EPROBE_DEFER)
		return ret;
	else if (ret)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
	if (ret == -EPROBE_DEFER)
		return ret;

	return 0;
}

static int davinci_mmcsd_probe(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret, irq;
	size_t mem_size;
	const struct platform_device_id *id_entry;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mem_size = resource_size(r);
	mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
				      pdev->name);
	if (!mem)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important */

	host->mem_res = mem;
	host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!host->base) {
		ret = -ENOMEM;
		goto ioremap_fail;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto clk_get_fail;
	}
	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_prepare_enable_fail;

	host->mmc_input_clk = clk_get_rate(host->clk);

	pdev->id_entry = of_device_get_match_data(&pdev->dev);
	if (pdev->id_entry) {
		ret = mmc_of_parse(mmc);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "could not parse of data\n");
			goto parse_fail;
		}
	} else {
		ret = mmc_davinci_parse_pdata(mmc);
		if (ret) {
			dev_err(&pdev->dev,
				"could not parse platform data: %d\n", ret);
			goto parse_fail;
		}
	}

	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	init_mmcsd_host(host);

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq_optional(pdev, 1);

	if (host->use_dma) {
		ret = davinci_acquire_dma_channels(host);
		if (ret == -EPROBE_DEFER)
			goto dma_probe_defer;
		else if (ret)
			host->use_dma = 0;
	}

	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

	id_entry = platform_get_device_id(pdev);
	if (id_entry)
		host->version = id_entry->driver_data;

	mmc->ops = &mmc_davinci_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs = MAX_NR_SG;

	/* EDMA limit per hw segment (one or two MBytes) */
	mmc->max_seg_size = MAX_CCNT * rw_threshold;

	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size = 4095;	/* BLEN is 12 bits */
	mmc->max_blk_count = 65535;	/* NBLK is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto mmc_add_host_fail;

	ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
			       mmc_hostname(mmc), host);
	if (ret)
		goto request_irq_fail;

	if (host->sdio_irq >= 0) {
		ret = devm_request_irq(&pdev->dev, host->sdio_irq,
				       mmc_davinci_sdio_irq, 0,
				       mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

request_irq_fail:
	mmc_remove_host(mmc);
mmc_add_host_fail:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	davinci_release_dma_channels(host);
parse_fail:
dma_probe_defer:
	clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
clk_get_fail:
ioremap_fail:
	mmc_free_host(mmc);

	return ret;
}

static void davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(host->mmc);
	mmc_davinci_cpufreq_deregister(host);
	davinci_release_dma_channels(host);
	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);
}

#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
	struct mmc_davinci_host *host = dev_get_drvdata(dev);

	writel(0, host->base + DAVINCI_MMCIM);
	mmc_davinci_reset_ctrl(host, 1);
	clk_disable(host->clk);

	return 0;
}

static int davinci_mmcsd_resume(struct device *dev)
{
	struct mmc_davinci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(host->clk);
	if (ret)
		return ret;

	mmc_davinci_reset_ctrl(host, 0);

	return 0;
}

static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend	= davinci_mmcsd_suspend,
	.resume		= davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif

static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= davinci_mmcsd_pm_ops,
		.of_match_table = davinci_mmc_dt_ids,
	},
	.probe		= davinci_mmcsd_probe,
	.remove_new	= davinci_mmcsd_remove,
	.id_table	= davinci_mmc_devtype,
};

module_platform_driver(davinci_mmcsd_driver);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");