/*
 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver
 *
 * Copyright (C) 2006 Texas Instruments.
 *       Original author: Purushotam Kumar
 * Copyright (C) 2009 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <linux/mmc/host.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <linux/platform_data/edma.h>
#include <linux/platform_data/mmc-davinci.h>

/*
 * Register Definitions
 */
#define DAVINCI_MMCCTL       0x00 /* Control Register                  */
#define DAVINCI_MMCCLK       0x04 /* Memory Clock Control Register     */
#define DAVINCI_MMCST0       0x08 /* Status Register 0                 */
#define DAVINCI_MMCST1       0x0C /* Status Register 1                 */
#define DAVINCI_MMCIM        0x10 /* Interrupt Mask Register           */
#define DAVINCI_MMCTOR       0x14 /* Response Time-Out Register        */
#define DAVINCI_MMCTOD       0x18 /* Data Read Time-Out Register       */
#define DAVINCI_MMCBLEN      0x1C /* Block Length Register             */
#define DAVINCI_MMCNBLK      0x20 /* Number of Blocks Register         */
#define DAVINCI_MMCNBLC      0x24 /* Number of Blocks Counter Register */
#define DAVINCI_MMCDRR       0x28 /* Data Receive Register             */
#define DAVINCI_MMCDXR       0x2C /* Data Transmit Register            */
#define DAVINCI_MMCCMD       0x30 /* Command Register                  */
#define DAVINCI_MMCARGHL     0x34 /* Argument Register                 */
#define DAVINCI_MMCRSP01     0x38 /* Response Register 0 and 1         */
#define DAVINCI_MMCRSP23     0x3C /* Response Register 2 and 3         */
#define DAVINCI_MMCRSP45     0x40 /* Response Register 4 and 5         */
#define DAVINCI_MMCRSP67     0x44 /* Response Register 6 and 7         */
#define DAVINCI_MMCDRSP      0x48 /* Data Response Register            */
#define DAVINCI_MMCETOK      0x4C
#define DAVINCI_MMCCIDX      0x50 /* Command Index Register            */
#define DAVINCI_MMCCKC       0x54
#define DAVINCI_MMCTORC      0x58
#define DAVINCI_MMCTODC      0x5C
#define DAVINCI_MMCBLNC      0x60
#define DAVINCI_SDIOCTL      0x64
#define DAVINCI_SDIOST0      0x68
#define DAVINCI_SDIOIEN      0x6C
#define DAVINCI_SDIOIST      0x70
#define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST         (1 << 0)
#define MMCCTL_CMDRST         (1 << 1)
#define MMCCTL_WIDTH_8_BIT    (1 << 8)
#define MMCCTL_WIDTH_4_BIT    (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING   (1 << 6)
#define MMCCTL_DATEG_FALLING  (2 << 6)
#define MMCCTL_DATEG_BOTH     (3 << 6)
#define MMCCTL_PERMDR_LE      (0 << 9)
#define MMCCTL_PERMDR_BE      (1 << 9)
#define MMCCTL_PERMDX_LE      (0 << 10)
#define MMCCTL_PERMDX_BE      (1 << 10)
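/*
 * How the two width bits combine (see mmc_davinci_set_ios() below):
 *
 *	MMCCTL_WIDTH_8_BIT	MMCCTL_WIDTH_4_BIT	bus width
 *		1			x		8 bit (version 2 only)
 *		0			1		4 bit
 *		0			0		1 bit
 */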
/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN          (1 << 8)
#define MMCCLK_CLKRT_MASK     (0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE         BIT(0)  /* data done */
#define MMCST0_BSYDNE         BIT(1)  /* busy done */
#define MMCST0_RSPDNE         BIT(2)  /* command done */
#define MMCST0_TOUTRD         BIT(3)  /* data read timeout */
#define MMCST0_TOUTRS         BIT(4)  /* command response timeout */
#define MMCST0_CRCWR          BIT(5)  /* data write CRC error */
#define MMCST0_CRCRD          BIT(6)  /* data read CRC error */
#define MMCST0_CRCRS          BIT(7)  /* command response CRC error */
#define MMCST0_DXRDY          BIT(9)  /* data transmit ready (fifo empty) */
#define MMCST0_DRRDY          BIT(10) /* data receive ready (data in fifo) */
#define MMCST0_DATED          BIT(11) /* DAT3 edge detect */
#define MMCST0_TRNDNE         BIT(12) /* transfer done */

/* DAVINCI_MMCST1 definitions */
#define MMCST1_BUSY           (1 << 0)

/* DAVINCI_MMCCMD definitions */
#define MMCCMD_CMD_MASK       (0x3F << 0)
#define MMCCMD_PPLEN          (1 << 7)
#define MMCCMD_BSYEXP         (1 << 8)
#define MMCCMD_RSPFMT_MASK    (3 << 9)
#define MMCCMD_RSPFMT_NONE    (0 << 9)
#define MMCCMD_RSPFMT_R1456   (1 << 9)
#define MMCCMD_RSPFMT_R2      (2 << 9)
#define MMCCMD_RSPFMT_R3      (3 << 9)
#define MMCCMD_DTRW           (1 << 11)
#define MMCCMD_STRMTP         (1 << 12)
#define MMCCMD_WDATX          (1 << 13)
#define MMCCMD_INITCK         (1 << 14)
#define MMCCMD_DCLR           (1 << 15)
#define MMCCMD_DMATRIG        (1 << 16)

/* DAVINCI_MMCFIFOCTL definitions */
#define MMCFIFOCTL_FIFORST    (1 << 0)
#define MMCFIFOCTL_FIFODIR_WR (1 << 1)
#define MMCFIFOCTL_FIFODIR_RD (0 << 1)
#define MMCFIFOCTL_FIFOLEV    (1 << 2) /* 0 = 128 bits, 1 = 256 bits */
#define MMCFIFOCTL_ACCWD_4    (0 << 3) /* access width of 4 bytes    */
#define MMCFIFOCTL_ACCWD_3    (1 << 3) /* access width of 3 bytes    */
#define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
#define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */

/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI       BIT(0)

/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN       BIT(0)

/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT         BIT(0)

/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK      200000

/*
 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units,
 * and we handle up to MAX_NR_SG segments.  MMC_BLOCK_BOUNCE kicks in only
 * for drivers with max_segs == 1, making the segments bigger (64KB)
 * than the page or two that's otherwise typical.  nr_sg (passed from
 * platform data) == 16 gives at least the same throughput boost, using
 * EDMA transfer linkage instead of spending CPU time copying pages.
 */
#define MAX_CCNT	((1 << 16) - 1)

#define MAX_NR_SG	16

static unsigned rw_threshold = 32;
module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
		"Read/Write threshold. Default = 32");

static unsigned poll_threshold = 128;
module_param(poll_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(poll_threshold,
		 "Polling transaction size threshold. Default = 128");

static unsigned poll_loopcount = 32;
module_param(poll_loopcount, uint, S_IRUGO);
MODULE_PARM_DESC(poll_loopcount,
		 "Maximum polling loop count. Default = 32");
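/*
 * Example (hypothetical) module load tuning the PIO/polling knobs:
 *
 *	modprobe davinci_mmc rw_threshold=64 poll_threshold=512
 *
 * Note that rw_threshold also selects the FIFO trigger level in
 * mmc_davinci_prepare_data() and the DMA burst size in
 * mmc_davinci_send_dma_request(), so it should match the hardware's
 * FIFO sizing (32, or 64 on version 2 controllers).
 */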
static unsigned __initdata use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");

struct mmc_davinci_host {
	struct mmc_command *cmd;
	struct mmc_data *data;
	struct mmc_host *mmc;
	struct clk *clk;
	unsigned int mmc_input_clk;
	void __iomem *base;
	struct resource *mem_res;
	int mmc_irq, sdio_irq;
	unsigned char bus_mode;

#define DAVINCI_MMC_DATADIR_NONE	0
#define DAVINCI_MMC_DATADIR_READ	1
#define DAVINCI_MMC_DATADIR_WRITE	2
	unsigned char data_dir;

	/* buffer is used during PIO of one scatterlist segment, and
	 * is updated along with buffer_bytes_left.  bytes_left applies
	 * to all N blocks of the PIO transfer.
	 */
	u8 *buffer;
	u32 buffer_bytes_left;
	u32 bytes_left;

	u32 rxdma, txdma;
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	bool use_dma;
	bool do_dma;
	bool sdio_int;
	bool active_request;

	/* For PIO we walk scatterlists one segment at a time. */
	unsigned int sg_len;
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */
	u8 version;
	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;
	/* Number of sg segments */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block freq_transition;
#endif
};

static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}

static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
					unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		host->sg = sg_next(host->data->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;

	/* NOTE: we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width (i.e. ACCWD) is 4 bytes.
	 */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		for (i = 0; i < (n >> 2); i++) {
			writel(*((u32 *)p), host->base + DAVINCI_MMCDXR);
			p = p + 4;
		}
		if (n & 3) {
			iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3));
			p = p + (n & 3);
		}
	} else {
		for (i = 0; i < (n >> 2); i++) {
			*((u32 *)p) = readl(host->base + DAVINCI_MMCDRR);
			p = p + 4;
		}
		if (n & 3) {
			ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3));
			p = p + (n & 3);
		}
	}
	host->buffer = p;
}
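/*
 * For example, with the default rw_threshold of 32 a full chunk moves
 * as eight 32-bit FIFO accesses; a 30-byte tail at the end of a
 * transfer becomes seven word accesses plus a 2-byte
 * iowrite8_rep()/ioread8_rep().
 */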
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
		struct mmc_command *cmd)
{
	u32 cmd_reg = 0;
	u32 im_val;

	dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n",
		cmd->opcode, cmd->arg,
		({ char *s;
		switch (mmc_resp_type(cmd)) {
		case MMC_RSP_R1:
			s = ", R1/R5/R6/R7 response";
			break;
		case MMC_RSP_R1B:
			s = ", R1b response";
			break;
		case MMC_RSP_R2:
			s = ", R2 response";
			break;
		case MMC_RSP_R3:
			s = ", R3/R4 response";
			break;
		default:
			s = ", (R? response)";
			break;
		}; s; }));
	host->cmd = cmd;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1B:
		/* There's some spec confusion about when R1B is
		 * allowed, but if the card doesn't issue a BUSY
		 * then it's harmless for us to allow it.
		 */
		cmd_reg |= MMCCMD_BSYEXP;
		/* FALLTHROUGH */
	case MMC_RSP_R1:		/* 48 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R1456;
		break;
	case MMC_RSP_R2:		/* 136 bits, CRC */
		cmd_reg |= MMCCMD_RSPFMT_R2;
		break;
	case MMC_RSP_R3:		/* 48 bits, no CRC */
		cmd_reg |= MMCCMD_RSPFMT_R3;
		break;
	default:
		cmd_reg |= MMCCMD_RSPFMT_NONE;
		dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n",
			mmc_resp_type(cmd));
		break;
	}

	/* Set command index */
	cmd_reg |= cmd->opcode;

	/* Enable EDMA transfer triggers */
	if (host->do_dma)
		cmd_reg |= MMCCMD_DMATRIG;

	if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL &&
			host->data_dir == DAVINCI_MMC_DATADIR_READ)
		cmd_reg |= MMCCMD_DMATRIG;

	/* Setting whether command involves data transfer or not */
	if (cmd->data)
		cmd_reg |= MMCCMD_WDATX;

	/* Setting whether stream or block transfer */
	if (cmd->flags & MMC_DATA_STREAM)
		cmd_reg |= MMCCMD_STRMTP;

	/* Setting whether data read or write */
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
		cmd_reg |= MMCCMD_DTRW;

	if (host->bus_mode == MMC_BUSMODE_PUSHPULL)
		cmd_reg |= MMCCMD_PPLEN;

	/* set Command timeout */
	writel(0x1FFF, host->base + DAVINCI_MMCTOR);

	/* Enable interrupt (calculate here, defer until FIFO is stuffed). */
	im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS;
	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCWR;

		if (!host->do_dma)
			im_val |= MMCST0_DXRDY;
	} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
		im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD;

		if (!host->do_dma)
			im_val |= MMCST0_DRRDY;
	}
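	/*
	 * For example, a PIO read ends up with im_val = MMCST0_RSPDNE |
	 * MMCST0_CRCRS | MMCST0_TOUTRS | MMCST0_DATDNE | MMCST0_CRCRD |
	 * MMCST0_TOUTRD | MMCST0_DRRDY, whereas DMA transfers leave the
	 * FIFO-ready bits to the EDMA event rather than the CPU.
	 */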
	/*
	 * Before non-DMA WRITE commands the controller needs priming:
	 * the FIFO should be populated with 32 bytes, i.e. whatever the
	 * FIFO size is.
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;

	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}

/*----------------------------------------------------------------------*/

/* DMA infrastructure */

static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	struct dma_chan *sync_dev;

	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
		sync_dev = host->dma_rx;
	else
		sync_dev = host->dma_tx;

	dmaengine_terminate_all(sync_dev);
}

static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	int ret = 0;

	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
		struct dma_slave_config dma_tx_conf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_tx;
		dmaengine_slave_config(host->dma_tx, &dma_tx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_tx,
				data->sg,
				host->sg_len,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA TX descriptor");
			ret = -1;
			goto out;
		}
	} else {
		struct dma_slave_config dma_rx_conf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = host->mem_res->start + DAVINCI_MMCDRR,
			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.src_maxburst =
				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		chan = host->dma_rx;
		dmaengine_slave_config(host->dma_rx, &dma_rx_conf);

		desc = dmaengine_prep_slave_sg(host->dma_rx,
				data->sg,
				host->sg_len,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc) {
			dev_dbg(mmc_dev(host->mmc),
				"failed to allocate DMA RX descriptor");
			ret = -1;
			goto out;
		}
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

out:
	return ret;
}
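/*
 * With the default rw_threshold of 32, the slave config above yields a
 * maxburst of 32 / 4 = 8 words, i.e. the EDMA services exactly one FIFO
 * trigger level (256 bits) per burst.
 */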
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
		struct mmc_data *data)
{
	int i;
	int mask = rw_threshold - 1;
	int ret = 0;

	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				((data->flags & MMC_DATA_WRITE)
				? DMA_TO_DEVICE
				: DMA_FROM_DEVICE));

	/* no individual DMA segment should need a partial FIFO */
	for (i = 0; i < host->sg_len; i++) {
		if (sg_dma_len(data->sg + i) & mask) {
			dma_unmap_sg(mmc_dev(host->mmc),
					data->sg, data->sg_len,
					(data->flags & MMC_DATA_WRITE)
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);
			return -1;
		}
	}

	host->do_dma = 1;
	ret = mmc_davinci_send_dma_request(host, data);

	return ret;
}

static void __init_or_module
davinci_release_dma_channels(struct mmc_davinci_host *host)
{
	if (!host->use_dma)
		return;

	dma_release_channel(host->dma_tx);
	dma_release_channel(host->dma_rx);
}

static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)
{
	int r;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	host->dma_tx =
		dma_request_slave_channel_compat(mask, edma_filter_fn,
				&host->txdma, mmc_dev(host->mmc), "tx");
	if (!host->dma_tx) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n");
		return -ENODEV;
	}

	host->dma_rx =
		dma_request_slave_channel_compat(mask, edma_filter_fn,
				&host->rxdma, mmc_dev(host->mmc), "rx");
	if (!host->dma_rx) {
		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n");
		r = -ENODEV;
		goto free_master_write;
	}

	return 0;

free_master_write:
	dma_release_channel(host->dma_tx);

	return r;
}

/*----------------------------------------------------------------------*/

static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
{
	int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0;
	int timeout;
	struct mmc_data *data = req->data;

	if (host->version == MMC_CTLR_VERSION_2)
		fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0;

	host->data = data;
	if (data == NULL) {
		host->data_dir = DAVINCI_MMC_DATADIR_NONE;
		writel(0, host->base + DAVINCI_MMCBLEN);
		writel(0, host->base + DAVINCI_MMCNBLK);
		return;
	}

	dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
		(data->flags & MMC_DATA_STREAM) ? "stream" : "block",
		(data->flags & MMC_DATA_WRITE) ? "write" : "read",
		data->blocks, data->blksz);
	dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
		data->timeout_clks, data->timeout_ns);
	timeout = data->timeout_clks +
		(data->timeout_ns / host->ns_in_one_cycle);
	if (timeout > 0xffff)
		timeout = 0xffff;

	writel(timeout, host->base + DAVINCI_MMCTOD);
	writel(data->blocks, host->base + DAVINCI_MMCNBLK);
	writel(data->blksz, host->base + DAVINCI_MMCBLEN);

	/* Configure the FIFO */
	switch (data->flags & MMC_DATA_WRITE) {
	case MMC_DATA_WRITE:
		host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
			host->base + DAVINCI_MMCFIFOCTL);
		break;

	default:
		host->data_dir = DAVINCI_MMC_DATADIR_READ;
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
			host->base + DAVINCI_MMCFIFOCTL);
		writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
			host->base + DAVINCI_MMCFIFOCTL);
		break;
	}

	host->buffer = NULL;
	host->bytes_left = data->blocks * data->blksz;

	/* For now we try to use DMA whenever we won't need partial FIFO
	 * reads or writes, either for the whole transfer (as tested here)
	 * or for any individual scatterlist segment (tested when we call
	 * start_dma_transfer).
	 *
	 * While we *could* change that, unusual block sizes are rarely
	 * used.  The occasional fallback to PIO shouldn't hurt.
	 */
	if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0
			&& mmc_davinci_start_dma_transfer(host, data) == 0) {
		/* zero this to ensure we take no PIO paths */
		host->bytes_left = 0;
	} else {
		/* Revert to CPU Copy */
		host->sg_len = data->sg_len;
		host->sg = host->data->sg;
		mmc_davinci_sg_to_buf(host);
	}
}
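/*
 * For example, standard 512-byte blocks are a multiple of the default
 * 32-byte rw_threshold, so multiblock reads and writes go out over DMA;
 * an odd-sized transfer such as a 6-byte SDIO transaction fails the
 * alignment test above and is done by PIO instead.
 */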
static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	unsigned long timeout = jiffies + msecs_to_jiffies(900);
	u32 mmcst1 = 0;

	/* Card may still be sending BUSY after a previous operation,
	 * typically some kind of write.  If so, we can't proceed yet.
	 */
	while (time_before(jiffies, timeout)) {
		mmcst1 = readl(host->base + DAVINCI_MMCST1);
		if (!(mmcst1 & MMCST1_BUSY))
			break;
		cpu_relax();
	}
	if (mmcst1 & MMCST1_BUSY) {
		dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n");
		req->cmd->error = -ETIMEDOUT;
		mmc_request_done(mmc, req);
		return;
	}

	host->do_dma = 0;
	mmc_davinci_prepare_data(host, req);
	mmc_davinci_start_command(host, req->cmd);
}

static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host,
	unsigned int mmc_req_freq)
{
	unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0;

	mmc_pclk = host->mmc_input_clk;
	if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq))
		mmc_push_pull_divisor = ((unsigned int)mmc_pclk
				/ (2 * mmc_req_freq)) - 1;
	else
		mmc_push_pull_divisor = 0;

	mmc_freq = (unsigned int)mmc_pclk
		/ (2 * (mmc_push_pull_divisor + 1));

	if (mmc_freq > mmc_req_freq)
		mmc_push_pull_divisor = mmc_push_pull_divisor + 1;
	/* Convert ns to clock cycles */
	if (mmc_req_freq <= 400000)
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000));
	else
		host->ns_in_one_cycle = (1000000) / (((mmc_pclk
				/ (2 * (mmc_push_pull_divisor + 1)))/1000000));

	return mmc_push_pull_divisor;
}
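/*
 * Worked example for calculate_freq_for_card() above, assuming a
 * (hypothetical) 150 MHz module clock and a 25 MHz request:
 * divisor = 150000000 / (2 * 25000000) - 1 = 2, and the resulting rate
 * 150000000 / (2 * (2 + 1)) is exactly 25 MHz, so the divisor is not
 * bumped.
 */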
static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
{
	unsigned int open_drain_freq = 0, mmc_pclk = 0;
	unsigned int mmc_push_pull_freq = 0;
	struct mmc_davinci_host *host = mmc_priv(mmc);

	mmc_pclk = host->mmc_input_clk;

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		u32 temp;

		/* Ignore the init clock value passed in, to improve
		 * interoperability with different cards.
		 */
		open_drain_freq = ((unsigned int)mmc_pclk
				/ (2 * MMCSD_INIT_CLOCK)) - 1;

		if (open_drain_freq > 0xFF)
			open_drain_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= open_drain_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		/* Convert ns to clock cycles */
		host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000);
	} else {
		u32 temp;
		mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock);

		if (mmc_push_pull_freq > 0xFF)
			mmc_push_pull_freq = 0xFF;

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN;
		writel(temp, host->base + DAVINCI_MMCCLK);

		udelay(10);

		temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK;
		temp |= mmc_push_pull_freq;
		writel(temp, host->base + DAVINCI_MMCCLK);

		writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

		udelay(10);
	}
}
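/*
 * The push-pull path above gates the clock (CLKEN off), reprograms the
 * divider, then re-enables it, with short delays around the switch;
 * presumably this avoids feeding the card a glitched clock edge while
 * CLKRT changes.
 */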
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	dev_dbg(mmc_dev(host->mmc),
		"clock %dHz busmode %d powermode %d Vdd %04x\n",
		ios->clock, ios->bus_mode, ios->power_mode,
		ios->vdd);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (config && config->set_power)
			config->set_power(pdev->id, false);
		break;
	case MMC_POWER_UP:
		if (config && config->set_power)
			config->set_power(pdev->id, true);
		break;
	}

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
		writel((readl(host->base + DAVINCI_MMCCTL) &
			~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
			host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_4:
		dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel((readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) |
				MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	case MMC_BUS_WIDTH_1:
		dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
		if (host->version == MMC_CTLR_VERSION_2)
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
				host->base + DAVINCI_MMCCTL);
		else
			writel(readl(host->base + DAVINCI_MMCCTL) &
				~MMCCTL_WIDTH_4_BIT,
				host->base + DAVINCI_MMCCTL);
		break;
	}

	calculate_clk_divider(mmc, ios);

	host->bus_mode = ios->bus_mode;
	if (ios->power_mode == MMC_POWER_UP) {
		unsigned long timeout = jiffies + msecs_to_jiffies(50);
		bool lose = true;

		/* Send clock cycles, poll completion */
		writel(0, host->base + DAVINCI_MMCARGHL);
		writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD);
		while (time_before(jiffies, timeout)) {
			u32 tmp = readl(host->base + DAVINCI_MMCST0);

			if (tmp & MMCST0_RSPDNE) {
				lose = false;
				break;
			}
			cpu_relax();
		}
		if (lose)
			dev_warn(mmc_dev(host->mmc), "powerup timeout\n");
	}

	/* FIXME on power OFF, reset things ... */
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
	host->data = NULL;

	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
		/*
		 * SDIO Interrupt Detection work-around as suggested by
		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
		 */
		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
					SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		}
	}

	if (host->do_dma) {
		davinci_abort_dma(host);

		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     (data->flags & MMC_DATA_WRITE)
			     ? DMA_TO_DEVICE
			     : DMA_FROM_DEVICE);
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}

static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}

static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
								int val)
{
	u32 temp;

	temp = readl(host->base + DAVINCI_MMCCTL);
	if (val)	/* reset */
		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else		/* enable */
		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(temp, host->base + DAVINCI_MMCCTL);
	udelay(10);
}

static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}

static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}
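/*
 * Note that besides being the hard-irq handler, mmc_davinci_irq() is
 * also called directly (with irq == 0) from the polling fast path in
 * mmc_davinci_start_command() for transfers no larger than
 * poll_threshold, avoiding interrupt latency on tiny PIO transactions.
 */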
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	if (host->cmd == NULL && host->data == NULL) {
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;

	/* handle FIFO first when using PIO for data.
	 * bytes_left will decrease to zero as I/O progresses, and status
	 * will read as zero over the iterations because this controller's
	 * status register (MMCST0) reports each status only once and is
	 * cleared by the read.  So this is not an unbounded loop, even in
	 * the non-DMA case.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly.  In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked.  This assumption is also taken when the
		 * MMCIM is first set.  Otherwise, writing to MMCIM after
		 * reading the status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
				"DATDNE with no host->data\n");
		}
	}

	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		/* Data CRC error */
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE:  this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ? "timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		end_command = (int) host->cmd;
	}

	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}

static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_cd)
		return -ENOSYS;
	return config->get_cd(pdev->id);
}

static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_ro)
		return -ENOSYS;
	return config->get_ro(pdev->id);
}

static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}

static struct mmc_host_ops mmc_davinci_ops = {
	.request	= mmc_davinci_request,
	.set_ios	= mmc_davinci_set_ios,
	.get_cd		= mmc_davinci_get_cd,
	.get_ro		= mmc_davinci_get_ro,
	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};

/*----------------------------------------------------------------------*/

#ifdef CONFIG_CPU_FREQ
static int mmc_davinci_cpufreq_transition(struct notifier_block *nb,
					  unsigned long val, void *data)
{
	struct mmc_davinci_host *host;
	unsigned int mmc_pclk;
	struct mmc_host *mmc;
	unsigned long flags;

	host = container_of(nb, struct mmc_davinci_host, freq_transition);
	mmc = host->mmc;
	mmc_pclk = clk_get_rate(host->clk);

	if (val == CPUFREQ_POSTCHANGE) {
		spin_lock_irqsave(&mmc->lock, flags);
		host->mmc_input_clk = mmc_pclk;
		calculate_clk_divider(mmc, &mmc->ios);
		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}

static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host)
{
	return 0;
}

static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
{
}
#endif
static void __init init_mmcsd_host(struct mmc_davinci_host *host)
{
	mmc_davinci_reset_ctrl(host, 1);

	writel(0, host->base + DAVINCI_MMCCLK);
	writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK);

	writel(0x1FFF, host->base + DAVINCI_MMCTOR);
	writel(0xFFFF, host->base + DAVINCI_MMCTOD);

	mmc_davinci_reset_ctrl(host, 0);
}

static struct platform_device_id davinci_mmc_devtype[] = {
	{
		.name	= "dm6441-mmc",
		.driver_data = MMC_CTLR_VERSION_1,
	}, {
		.name	= "da830-mmc",
		.driver_data = MMC_CTLR_VERSION_2,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype);

static const struct of_device_id davinci_mmc_dt_ids[] = {
	{
		.compatible = "ti,dm6441-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1],
	},
	{
		.compatible = "ti,da830-mmc",
		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2],
	},
	{},
};
MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids);

static struct davinci_mmc_config
	*mmc_parse_pdata(struct platform_device *pdev)
{
	struct device_node *np;
	struct davinci_mmc_config *pdata = pdev->dev.platform_data;
	const struct of_device_id *match =
		of_match_device(davinci_mmc_dt_ids, &pdev->dev);
	u32 data;

	np = pdev->dev.of_node;
	if (!np)
		return pdata;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n");
		goto nodata;
	}

	if (match)
		pdev->id_entry = match->data;

	if (of_property_read_u32(np, "max-frequency", &pdata->max_freq))
		dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n");

	of_property_read_u32(np, "bus-width", &data);
	switch (data) {
	case 1:
	case 4:
	case 8:
		pdata->wires = data;
		break;
	default:
		pdata->wires = 1;
		dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n");
	}
nodata:
	return pdata;
}
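/*
 * Example (hypothetical) device tree node consumed by mmc_parse_pdata();
 * the unit address, reg, and interrupts values are illustrative only:
 *
 *	mmc0: mmc@1c40000 {
 *		compatible = "ti,da830-mmc";
 *		reg = <0x40000 0x1000>;
 *		interrupts = <16>;
 *		max-frequency = <25000000>;
 *		bus-width = <4>;
 *	};
 */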
static int __init davinci_mmcsd_probe(struct platform_device *pdev)
{
	struct davinci_mmc_config *pdata = NULL;
	struct mmc_davinci_host *host = NULL;
	struct mmc_host *mmc = NULL;
	struct resource *r, *mem = NULL;
	int ret = 0, irq = 0;
	size_t mem_size;
	const struct platform_device_id *id_entry;

	pdata = mmc_parse_pdata(pdev);
	if (pdata == NULL) {
		dev_err(&pdev->dev, "Couldn't get platform data\n");
		return -ENOENT;
	}

	ret = -ENODEV;
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!r || irq == NO_IRQ)
		goto out;

	ret = -EBUSY;
	mem_size = resource_size(r);
	mem = request_mem_region(r->start, mem_size, pdev->name);
	if (!mem)
		goto out;

	ret = -ENOMEM;
	mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
	if (!mmc)
		goto out;

	host = mmc_priv(mmc);
	host->mmc = mmc;	/* Important */

	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!r)
		dev_warn(&pdev->dev, "RX DMA resource not specified\n");
	else
		host->rxdma = r->start;

	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!r)
		dev_warn(&pdev->dev, "TX DMA resource not specified\n");
	else
		host->txdma = r->start;

	host->mem_res = mem;
	host->base = ioremap(mem->start, mem_size);
	if (!host->base)
		goto out;

	ret = -ENXIO;
	host->clk = clk_get(&pdev->dev, "MMCSDCLK");
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		goto out;
	}
	clk_enable(host->clk);
	host->mmc_input_clk = clk_get_rate(host->clk);

	init_mmcsd_host(host);

	if (pdata->nr_sg)
		host->nr_sg = pdata->nr_sg - 1;

	if (host->nr_sg > MAX_NR_SG || !host->nr_sg)
		host->nr_sg = MAX_NR_SG;

	host->use_dma = use_dma;
	host->mmc_irq = irq;
	host->sdio_irq = platform_get_irq(pdev, 1);

	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
		host->use_dma = 0;

	/* REVISIT: someday, support IRQ-driven card detection.  */
	mmc->caps |= MMC_CAP_NEEDS_POLL;
	mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;

	if (pdata && (pdata->wires == 4 || pdata->wires == 0))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (pdata && (pdata->wires == 8))
		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);

	id_entry = platform_get_device_id(pdev);
	if (id_entry)
		host->version = id_entry->driver_data;

	mmc->ops = &mmc_davinci_ops;
	mmc->f_min = 312500;
	mmc->f_max = 25000000;
	if (pdata && pdata->max_freq)
		mmc->f_max = pdata->max_freq;
	if (pdata && pdata->caps)
		mmc->caps |= pdata->caps;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* With no iommu coalescing pages, each phys_seg is a hw_seg.
	 * Each hw_seg uses one EDMA parameter RAM slot, always one
	 * channel and then usually some linked slots.
	 */
	mmc->max_segs = MAX_NR_SG;

	/* EDMA limit per hw segment (one or two MBytes) */
	mmc->max_seg_size = MAX_CCNT * rw_threshold;

	/* MMC/SD controller limits for multiblock requests */
	mmc->max_blk_size = 4095;   /* BLEN is 12 bits */
	mmc->max_blk_count = 65535; /* NBLK is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
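	/*
	 * With the default rw_threshold of 32, max_seg_size works out to
	 * 65535 * 32 bytes, i.e. a shade under 2 MiB per hardware segment,
	 * and max_req_size to 4095 * 65535 bytes.
	 */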
	dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs);
	dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size);
	dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size);
	dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size);

	platform_set_drvdata(pdev, host);

	ret = mmc_davinci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto cpu_freq_fail;
	}

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto out;

	ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host);
	if (ret)
		goto out;

	if (host->sdio_irq >= 0) {
		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
				  mmc_hostname(mmc), host);
		if (!ret)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	rename_region(mem, mmc_hostname(mmc));

	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
		host->use_dma ? "DMA" : "PIO",
		(mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1);

	return 0;

out:
	mmc_davinci_cpufreq_deregister(host);
cpu_freq_fail:
	if (host) {
		davinci_release_dma_channels(host);

		if (host->clk) {
			clk_disable(host->clk);
			clk_put(host->clk);
		}

		if (host->base)
			iounmap(host->base);
	}

	if (mmc)
		mmc_free_host(mmc);

	if (mem)
		release_resource(mem);

	dev_dbg(&pdev->dev, "probe err %d\n", ret);

	return ret;
}

static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
{
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	if (host) {
		mmc_davinci_cpufreq_deregister(host);

		mmc_remove_host(host->mmc);
		free_irq(host->mmc_irq, host);
		if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
			free_irq(host->sdio_irq, host);

		davinci_release_dma_channels(host);

		clk_disable(host->clk);
		clk_put(host->clk);

		iounmap(host->base);

		release_resource(host->mem_res);

		mmc_free_host(host->mmc);
	}

	return 0;
}

#ifdef CONFIG_PM
static int davinci_mmcsd_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	writel(0, host->base + DAVINCI_MMCIM);
	mmc_davinci_reset_ctrl(host, 1);
	clk_disable(host->clk);

	return 0;
}

static int davinci_mmcsd_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_davinci_host *host = platform_get_drvdata(pdev);

	clk_enable(host->clk);
	mmc_davinci_reset_ctrl(host, 0);

	return 0;
}

static const struct dev_pm_ops davinci_mmcsd_pm = {
	.suspend	= davinci_mmcsd_suspend,
	.resume		= davinci_mmcsd_resume,
};

#define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm)
#else
#define davinci_mmcsd_pm_ops NULL
#endif

static struct platform_driver davinci_mmcsd_driver = {
	.driver		= {
		.name	= "davinci_mmc",
		.owner	= THIS_MODULE,
		.pm	= davinci_mmcsd_pm_ops,
		.of_match_table = davinci_mmc_dt_ids,
	},
	.remove		= __exit_p(davinci_mmcsd_remove),
	.id_table	= davinci_mmc_devtype,
};

module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
MODULE_ALIAS("platform:davinci_mmc");