// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
 *
 * Copyright (C) 2006 Texas Instruments.
 *       Original author: Purushotam Kumar
 * Copyright (C) 2009 David Brownell
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/interrupt.h>

#include <linux/platform_data/mmc-davinci.h>

/*
 * Register Definitions
 */
#define DAVINCI_MMCCTL       0x00 /* Control Register                  */
#define DAVINCI_MMCCLK       0x04 /* Memory Clock Control Register     */
#define DAVINCI_MMCST0       0x08 /* Status Register 0                 */
#define DAVINCI_MMCST1       0x0C /* Status Register 1                 */
#define DAVINCI_MMCIM        0x10 /* Interrupt Mask Register           */
#define DAVINCI_MMCTOR       0x14 /* Response Time-Out Register        */
#define DAVINCI_MMCTOD       0x18 /* Data Read Time-Out Register       */
#define DAVINCI_MMCBLEN      0x1C /* Block Length Register             */
#define DAVINCI_MMCNBLK      0x20 /* Number of Blocks Register         */
#define DAVINCI_MMCNBLC      0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR       0x28 /* Data Receive Register             */
#define DAVINCI_MMCDXR       0x2C /* Data Transmit Register            */
#define DAVINCI_MMCCMD       0x30 /* Command Register                  */
#define DAVINCI_MMCARGHL     0x34 /* Argument Register                 */
#define DAVINCI_MMCRSP01     0x38 /* Response Register 0 and 1         */
#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3         */
#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5         */
#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7         */
#define DAVINCI_MMCDRSP      0x48 /* Data Response Register            */
#define DAVINCI_MMCETOK      0x4C
#define DAVINCI_MMCCIDX      0x50 /* Command Index Register            */
#define DAVINCI_MMCCKC       0x54
#define DAVINCI_MMCTORC      0x58
#define DAVINCI_MMCTODC      0x5C
#define DAVINCI_MMCBLNC      0x60
#define DAVINCI_SDIOCTL      0x64
#define DAVINCI_SDIOST0      0x68
#define DAVINCI_SDIOIEN      0x6C
#define DAVINCI_SDIOIST      0x70
#define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST         (1 << 0)
#define MMCCTL_CMDRST         (1 << 1)
#define MMCCTL_WIDTH_8_BIT    (1 << 8)
#define MMCCTL_WIDTH_4_BIT    (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING   (1 << 6)
#define MMCCTL_DATEG_FALLING  (2 << 6)
#define MMCCTL_DATEG_BOTH     (3 << 6)
#define MMCCTL_PERMDR_LE      (0 << 9)
#define MMCCTL_PERMDR_BE      (1 << 9)
#define MMCCTL_PERMDX_LE      (0 << 10)
#define MMCCTL_PERMDX_BE      (1 << 10)

/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN          (1 << 8)
#define MMCCLK_CLKRT_MASK     (0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE         BIT(0)	/* data done */
#define MMCST0_BSYDNE         BIT(1)	/* busy done */
#define MMCST0_RSPDNE         BIT(2)	/* command done */
#define MMCST0_TOUTRD         BIT(3)	/* data read timeout */
#define MMCST0_TOUTRS         BIT(4)	/* command response timeout */
#define MMCST0_CRCWR          BIT(5)	/* data write CRC error */
#define MMCST0_CRCRD          BIT(6)	/* data read CRC error */
#define MMCST0_CRCRS          BIT(7)	/* command response CRC error */
#define MMCST0_DXRDY          BIT(9)	/* data transmit ready (fifo empty) */
#define MMCST0_DRRDY          BIT(10)	/* data receive ready (data in fifo) */
#define MMCST0_DATED          BIT(11)	/* DAT3 edge detect */
#define MMCST0_TRNDNE         BIT(12)	/* transfer done */

/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY           (1 << 0)

/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK       (0x3F << 0)
#define MMCCMD_PPLEN          (1 << 7)
#define MMCCMD_BSYEXP         (1 << 8)
#define MMCCMD_RSPFMT_MASK    (3 << 9)
#define MMCCMD_RSPFMT_NONE    (0 << 9)
#define MMCCMD_RSPFMT_R1456   (1 << 9)
#define MMCCMD_RSPFMT_R2      (2 << 9)
#define MMCCMD_RSPFMT_R3      (3 << 9)
#define MMCCMD_DTRW           (1 << 11)
#define MMCCMD_STRMTP         (1 << 12)
#define MMCCMD_WDATX          (1 << 13)
#define MMCCMD_INITCK         (1 << 14)
#define MMCCMD_DCLR           (1 << 15)
#define MMCCMD_DMATRIG        (1 << 16)

/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST    (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV    (1 << 2)	/* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4    (0 << 3)	/* access width of 4 bytes */
#define MMCFIFOCTL_ACCWD_3    (1 << 3)	/* access width of 3 bytes */
#define MMCFIFOCTL_ACCWD_2    (2 << 3)	/* access width of 2 bytes */
#define MMCFIFOCTL_ACCWD_1    (3 << 3)	/* access width of 1 byte */

/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI       BIT(0)

/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN       BIT(0)

/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT         BIT(0)

/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK      200000

/*
 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
 * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
 * for drivers with max_segs == 1, making the segments bigger (64KB)
 * than the page or two that's otherwise typical. nr_sg (passed from
 * platform data) == 16 gives at least the same throughput boost, using
 * EDMA transfer linkage instead of spending CPU time copying pages.
 */
#define MAX_CCNT	((1 << 16) - 1)

#define MAX_NR_SG	16

static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
		"Read/Write threshold. Default = 32");

static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
		 "Polling transaction size threshold. Default = 128");

static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
		 "Maximum polling loop count. Default = 32");

static unsigned use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");

struct mmc_davinci_host {
	struct mmc_command *cmd;
	struct mmc_data *data;
	struct mmc_host *mmc;
	struct clk *clk;
	unsigned int mmc_input_clk;
	void __iomem *base;
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;

#define DAVINCI_MMC_DATADIR_NONE	0
#define DAVINCI_MMC_DATADIR_READ	1
#define DAVINCI_MMC_DATADIR_WRITE	2
	unsigned char data_dir;

	/* buffer is used during PIO of one scatterlist segment, and
	 * is updated along with buffer_bytes_left.  bytes_left applies
	 * to all N blocks of the PIO transfer.
	 */
	u8 *buffer;
	u32 buffer_bytes_left;
	u32 bytes_left;

	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	bool use_dma;
	bool do_dma;
	bool sdio_int;
	bool active_request;

	/* For PIO we walk scatterlists one segment at a time. */
	unsigned int		sg_len;
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */
	u8 version;
	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;
	/* Number of sg segments */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block	freq_transition;
#endif
};

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}

static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
				    unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		host->sg = sg_next(host->data->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;

	/* NOTE: we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width (i.e. ACCWD) is 4 bytes.
	 */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		for (i = 0; i < (n >> 2); i++) {
			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
			p = p + 4;
		}
		if (n & 3) {
			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} else {
		for (i = 0; i < (n >> 2); i++) {
			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
			p = p + 4;
		}
		if (n & 3) {
			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}

static void mmc_davinci_start_command(struct mmc_davinci_host *host,
		struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R2:
			s = ", R2 response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		} s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/* There's some spec confusion about when R1B is
		 * allowed, but if the card doesn't issue a BUSY
		 * then it's harmless for us to allow it.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		fallthrough;
	case MMC_RSP_R1:		/* 48 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:		/* 136 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:		/* 48 bits, no CRC */
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Enable EDMA transfer triggers */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
			host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Indicate whether the command involves a data transfer */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/* Indicate data direction (read or write) */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* set Command timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
	im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}

	/*
	 * Before non-DMA WRITE commands the controller needs priming:
	 * the FIFO must be populated with rw_threshold bytes, i.e. the
	 * FIFO size.
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;

	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}

/*----------------------------------------------------------------------*/

/* DMA infrastructure */

static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	struct dma_chan *sync_dev;

	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
		sync_dev = host->dma_rx;
	else
		sync_dev = host->dma_tx;

	dmaengine_terminate_all(sync_dev);
}

static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	int ret = 0;

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_tx;
		dmaengine_slave_config(host->dma_tx, &dma_tx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_tx,
				data->sg,
				host->sg_len,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA TX descriptor");
			ret = -1;
			goto out;
		}
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_rx;
		dmaengine_slave_config(host->dma_rx, &dma_rx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_rx,
				data->sg,
				host->sg_len,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA RX descriptor");
			ret = -1;
			goto out;
		}
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

out:
	return ret;
}

static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	int i;
	int mask = rw_threshold - 1;
	int ret = 0;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				  mmc_get_dma_dir(data));

	/* no individual DMA segment should need a partial FIFO */
	for (i = 0; i < host->sg_len; i++) {
		if (sg_dma_len(data->sg + i) & mask) {
			dma_unmap_sg(mmc_dev(host->mmc),
				     data->sg, data->sg_len,
				     mmc_get_dma_dir(data));
			return -1;
		}
	}

	host->do_dma = 1;
	ret = mmc_davinci_send_dma_request(host, data);

	return ret;
}

static void davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
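	/*
	 * Channels are looked up by name ("tx"/"rx"): from the "dmas"/
	 * "dma-names" device tree properties when booting with DT, or
	 * from a dma_slave_map registered by the platform otherwise.
	 */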
	host->dma_tx = dma_request_chan(mmc_dev(host->mmc), "tx");
	if (IS_ERR(host->dma_tx)) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
		return PTR_ERR(host->dma_tx);
	}

	host->dma_rx = dma_request_chan(mmc_dev(host->mmc), "rx");
	if (IS_ERR(host->dma_rx)) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
		dma_release_channel(host->dma_tx);
		return PTR_ERR(host->dma_rx);
	}

	return 0;
}

/*----------------------------------------------------------------------*/

static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO */
	if (data->flags & MMC_DATA_WRITE) {
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
	} else {
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
	}

	host->buffer = NULL;
	host->bytes_left = data->blocks * data->blksz;

	/* For now we try to use DMA whenever we won't need partial FIFO
	 * reads or writes, either for the whole transfer (as tested here)
	 * or for any individual scatterlist segment (tested when we call
	 * start_dma_transfer).
	 *
	 * While we *could* change that, unusual block sizes are rarely
	 * used.  The occasional fallback to PIO shouldn't hurt.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* zero this to ensure we take no PIO paths */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU copy */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}

static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	unsigned long timeout = jiffies + msecs_to_jiffies(900);
	u32 mmcst1 = 0;

	/* Card may still be sending BUSY after a previous operation,
	 * typically some kind of write.  If so, we can't proceed yet.
	 */
	while (time_before(jiffies, timeout)) {
		mmcst1 = readl(host->base + DAVINCI_MMCST1);
		if (!(mmcst1 & MMCST1_BUSY))
			break;
		cpu_relax();
	}
	if (mmcst1 & MMCST1_BUSY) {
		dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
		req->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, req);
		return;
	}

	host->do_dma = 0;
	mmc_davinci_prepare_data(host, req);
	mmc_davinci_start_command(host, req->cmd);
}

static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
	unsigned int mmc_req_freq)
{
	unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((unsigned int)mmc_pclk
				/ (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (unsigned int)mmc_pclk
		/ (2 * (mmc_push_pull_divisor + 1));

	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
	/* Convert ns to clock cycles */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000));
	else
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000000));

	return mmc_push_pull_divisor;
}

static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;

		/* Ignore the init clock value passed in; use MMCSD_INIT_CLOCK
		 * to improve interoperability with different cards.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Convert ns to clock cycles */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
	} else {
		u32 temp;

		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		udelay(10);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

		udelay(10);
	}
}

static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send clock cycles, poll completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
			cpu_relax();
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* FIXME on power OFF, reset things ... */
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * SDIO Interrupt Detection work-around as suggested by
		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}

static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}

static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
					  int val)
{
	u32 temp;

	temp = readl(host->base + DAVINCI_MMCCTL);
	if (val)	/* reset */
		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else		/* enable */
		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(temp, host->base + DAVINCI_MMCCTL);
	udelay(10);
}

static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}

static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	if (host->cmd == NULL && host->data == NULL) {
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;

	/* Handle the FIFO first when using PIO for data.
	 * bytes_left will decrease to zero as I/O progresses, and status
	 * will read zero on later iterations because this controller's
	 * status register (MMCST0) reports each event only once and is
	 * cleared on read.  So this is not an unbounded loop, even in the
	 * non-DMA case.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly. In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked. This assumption is also taken when the MMCIM
		 * is first set. Otherwise, writing to MMCIM after reading the
		 * status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
				"DATDNE with no host->data\n");
		}
	}

	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		/* Data CRC error */
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE:  this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ? "timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		end_command = host->cmd ? 1 : 0;
	}
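
	/*
	 * Dispatch completions after all status bits have been decoded, so
	 * a single interrupt can finish both the command and the data phase.
	 */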
	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}

static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (config && config->get_cd)
		return config->get_cd(pdev->id);

	return mmc_gpio_get_cd(mmc);
}

static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (config && config->get_ro)
		return config->get_ro(pdev->id);

	return mmc_gpio_get_ro(mmc);
}

static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}

static const struct mmc_host_ops mmc_davinci_ops = {
	.request	= mmc_davinci_request,
	.set_ios	= mmc_davinci_set_ios,
	.get_cd		= mmc_davinci_get_cd,
	.get_ro		= mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};

/*----------------------------------------------------------------------*/

#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
					  unsigned long val, void *data)
{
	struct mmc_davinci_host *host;
	unsigned int mmc_pclk;
	struct mmc_host *mmc;
	unsigned long flags;

	host = container_of(nb, struct mmc_davinci_host, freq_transition);
	mmc = host->mmc;
	mmc_pclk = clk_get_rate(host->clk);

	if (val == CPUFREQ_POSTCHANGE) {
		spin_lock_irqsave(&mmc->lock, flags);
		host->mmc_input_clk = mmc_pclk;
		calculate_clk_divider(mmc, &mmc->ios);
		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}

static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif

static void init_mmcsd_host(struct mmc_davinci_host *host)
{
	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);
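
	/* Release the CMD/DAT state machines from reset now that the clock
	 * and timeout registers are programmed.
	 */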
	mmc_davinci_reset_ctrl(host, 0);
}

static const struct platform_device_id davinci_mmc_devtype[] = {
	{
		.name	= "dm6441-mmc",
		.driver_data = MMC_CTLR_VERSION_1,
	}, {
		.name	= "da830-mmc",
		.driver_data = MMC_CTLR_VERSION_2,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);

static const struct of_device_id davinci_mmc_dt_ids[] = {
	{
		.compatible = "ti,dm6441-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
	},
	{
		.compatible = "ti,da830-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
	},
	{},
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);

static int mmc_davinci_parse_pdata(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *pdata = pdev->dev.platform_data;
	struct mmc_davinci_host *host;
	int ret;

	if (!pdata)
		return -EINVAL;

	host = mmc_priv(mmc);
	if (!host)
		return -EINVAL;

	if (pdata && pdata->nr_sg)
		host->nr_sg = pdata->nr_sg - 1;

	if (pdata && (pdata->wires == 4 || pdata->wires == 0))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (pdata && (pdata->wires == 8))
		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

	mmc->f_min = 312500;
	mmc->f_max = 25000000;
	if (pdata && pdata->max_freq)
		mmc->f_max = pdata->max_freq;
	if (pdata && pdata->caps)
		mmc->caps |= pdata->caps;

	/* Register a CD GPIO; if there is none, enable polling */
	ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
	if (ret == -EPROBE_DEFER)
		return ret;
	else if (ret)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
	if (ret == -EPROBE_DEFER)
		return ret;

	return 0;
}

static int davinci_mmcsd_probe(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret, irq;
	size_t mem_size;
	const struct platform_device_id *id_entry;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	mem_size = resource_size(r);
	mem = devm_request_mem_region(&pdev->dev, r->start, mem_size,
				      pdev->name);
	if (!mem)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important */

	host->mem_res = mem;
	host->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!host->base) {
		ret = -ENOMEM;
		goto ioremap_fail;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto clk_get_fail;
	}
	ret = clk_prepare_enable(host->clk);
	if (ret)
		goto clk_prepare_enable_fail;

	host->mmc_input_clk = clk_get_rate(host->clk);

	pdev->id_entry = of_device_get_match_data(&pdev->dev);
	if (pdev->id_entry) {
		ret = mmc_of_parse(mmc);
		if (ret) {
			dev_err_probe(&pdev->dev, ret,
				      "could not parse of data\n");
			goto parse_fail;
		}
	} else {
		ret = mmc_davinci_parse_pdata(mmc);
		if (ret) {
			dev_err(&pdev->dev,
				"could not parse platform data: %d\n", ret);
			goto parse_fail;
		}
	}
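
	/* Clamp to the driver maximum when platform data gave no usable
	 * segment count (the DT path never sets one).
	 */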
	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	init_mmcsd_host(host);

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq(pdev, 1);

	if (host->use_dma) {
		ret = davinci_acquire_dma_channels(host);
		if (ret == -EPROBE_DEFER)
			goto dma_probe_defer;
		else if (ret)
			host->use_dma = 0;
	}

	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

	id_entry = platform_get_device_id(pdev);
	if (id_entry)
		host->version = id_entry->driver_data;

	mmc->ops = &mmc_davinci_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs		= MAX_NR_SG;

	/* EDMA limit per hw segment (one or two MBytes) */
	mmc->max_seg_size	= MAX_CCNT * rw_threshold;

	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size	= 4095;  /* BLEN is 12 bits */
	mmc->max_blk_count	= 65535; /* NBLK is 16 bits */
	mmc->max_req_size	= mmc->max_blk_size * mmc->max_blk_count;

	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto mmc_add_host_fail;

	ret = devm_request_irq(&pdev->dev, irq, mmc_davinci_irq, 0,
			       mmc_hostname(mmc), host);
	if (ret)
		goto request_irq_fail;

	if (host->sdio_irq >= 0) {
		ret = devm_request_irq(&pdev->dev, host->sdio_irq,
				       mmc_davinci_sdio_irq, 0,
				       mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		 host->use_dma ? "DMA" : "PIO",
		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;
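
	/* Error unwind: labels fall through, so each failure point releases
	 * everything acquired before it.
	 */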
request_irq_fail:
	mmc_remove_host(mmc);
mmc_add_host_fail:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	davinci_release_dma_channels(host);
parse_fail:
dma_probe_defer:
	clk_disable_unprepare(host->clk);
clk_prepare_enable_fail:
clk_get_fail:
ioremap_fail:
	mmc_free_host(mmc);

	return ret;
}

static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	mmc_remove_host(host->mmc);
	mmc_davinci_cpufreq_deregister(host);
	davinci_release_dma_channels(host);
	clk_disable_unprepare(host->clk);
	mmc_free_host(host->mmc);

	return 0;
}

#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
	struct mmc_davinci_host *host = dev_get_drvdata(dev);

	writel(0, host->base + DAVINCI_MMCIM);
	mmc_davinci_reset_ctrl(host, 1);
	clk_disable(host->clk);

	return 0;
}

static int davinci_mmcsd_resume(struct device *dev)
{
	struct mmc_davinci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(host->clk);
	if (ret)
		return ret;

	mmc_davinci_reset_ctrl(host, 0);

	return 0;
}

static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend	= davinci_mmcsd_suspend,
	.resume		= davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif

static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm	= davinci_mmcsd_pm_ops,
		.of_match_table = davinci_mmc_dt_ids,
	},
	.probe		= davinci_mmcsd_probe,
	.remove		= __exit_p(davinci_mmcsd_remove),
	.id_table	= davinci_mmc_devtype,
};

module_platform_driver(davinci_mmcsd_driver);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");