1 /* 2 * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver 3 * 4 * Copyright (C) 2006 Texas Instruments. 5 * Original author: Purushotam Kumar 6 * Copyright (C) 2009 David Brownell 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License as published by 10 * the Free Software Foundation; either version 2 of the License, or 11 * (at your option) any later version. 12 * 13 * This program is distributed in the hope that it will be useful, 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 * GNU General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License 19 * along with this program; if not, write to the Free Software 20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 21 */ 22 23 #include <linux/module.h> 24 #include <linux/ioport.h> 25 #include <linux/platform_device.h> 26 #include <linux/clk.h> 27 #include <linux/err.h> 28 #include <linux/cpufreq.h> 29 #include <linux/mmc/host.h> 30 #include <linux/io.h> 31 #include <linux/irq.h> 32 #include <linux/delay.h> 33 #include <linux/dmaengine.h> 34 #include <linux/dma-mapping.h> 35 #include <linux/edma.h> 36 #include <linux/mmc/mmc.h> 37 #include <linux/of.h> 38 #include <linux/of_device.h> 39 40 #include <linux/platform_data/edma.h> 41 #include <linux/platform_data/mmc-davinci.h> 42 43 /* 44 * Register Definitions 45 */ 46 #define DAVINCI_MMCCTL 0x00 /* Control Register */ 47 #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ 48 #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ 49 #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ 50 #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ 51 #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ 52 #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ 53 #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ 54 #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ 55 #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ 56 #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ 57 #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ 58 #define DAVINCI_MMCCMD 0x30 /* Command Register */ 59 #define DAVINCI_MMCARGHL 0x34 /* Argument Register */ 60 #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ 61 #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ 62 #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ 63 #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ 64 #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ 65 #define DAVINCI_MMCETOK 0x4C 66 #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ 67 #define DAVINCI_MMCCKC 0x54 68 #define DAVINCI_MMCTORC 0x58 69 #define DAVINCI_MMCTODC 0x5C 70 #define DAVINCI_MMCBLNC 0x60 71 #define DAVINCI_SDIOCTL 0x64 72 #define DAVINCI_SDIOST0 0x68 73 #define DAVINCI_SDIOIEN 0x6C 74 #define DAVINCI_SDIOIST 0x70 75 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ 76 77 /* DAVINCI_MMCCTL definitions */ 78 #define MMCCTL_DATRST (1 << 0) 79 #define MMCCTL_CMDRST (1 << 1) 80 #define MMCCTL_WIDTH_8_BIT (1 << 8) 81 #define MMCCTL_WIDTH_4_BIT (1 << 2) 82 #define MMCCTL_DATEG_DISABLED (0 << 6) 83 #define MMCCTL_DATEG_RISING (1 << 6) 84 #define MMCCTL_DATEG_FALLING (2 << 6) 85 #define MMCCTL_DATEG_BOTH (3 << 6) 86 #define MMCCTL_PERMDR_LE (0 << 9) 87 #define MMCCTL_PERMDR_BE (1 << 9) 88 #define MMCCTL_PERMDX_LE (0 << 10) 89 #define MMCCTL_PERMDX_BE (1 << 10) 90 91 /* DAVINCI_MMCCLK definitions */ 92 #define MMCCLK_CLKEN (1 << 8) 93 #define MMCCLK_CLKRT_MASK (0xFF << 0) 94 95 /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ 96 #define MMCST0_DATDNE BIT(0) /* data done */ 97 #define MMCST0_BSYDNE BIT(1) /* busy done */ 98 #define MMCST0_RSPDNE BIT(2) /* command done */ 99 #define MMCST0_TOUTRD BIT(3) /* data read timeout */ 100 #define MMCST0_TOUTRS BIT(4) /* command response timeout */ 101 #define MMCST0_CRCWR BIT(5) /* data write CRC error */ 102 #define MMCST0_CRCRD BIT(6) /* data read CRC error */ 103 #define MMCST0_CRCRS BIT(7) /* command response CRC error */ 104 #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ 105 #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ 106 #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ 107 #define MMCST0_TRNDNE BIT(12) /* transfer done */ 108 109 /* DAVINCI_MMCST1 definitions */ 110 #define MMCST1_BUSY (1 << 0) 111 112 /* DAVINCI_MMCCMD definitions */ 113 #define MMCCMD_CMD_MASK (0x3F << 0) 114 #define MMCCMD_PPLEN (1 << 7) 115 #define MMCCMD_BSYEXP (1 << 8) 116 #define MMCCMD_RSPFMT_MASK (3 << 9) 117 #define MMCCMD_RSPFMT_NONE (0 << 9) 118 #define MMCCMD_RSPFMT_R1456 (1 << 9) 119 #define MMCCMD_RSPFMT_R2 (2 << 9) 120 #define MMCCMD_RSPFMT_R3 (3 << 9) 121 #define MMCCMD_DTRW (1 << 11) 122 #define MMCCMD_STRMTP (1 << 12) 123 #define MMCCMD_WDATX (1 << 13) 124 #define MMCCMD_INITCK (1 << 14) 125 #define MMCCMD_DCLR (1 << 15) 126 #define MMCCMD_DMATRIG (1 << 16) 127 128 /* DAVINCI_MMCFIFOCTL definitions */ 129 #define MMCFIFOCTL_FIFORST (1 << 0) 130 #define MMCFIFOCTL_FIFODIR_WR (1 << 1) 131 #define MMCFIFOCTL_FIFODIR_RD (0 << 1) 132 #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ 133 #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ 134 #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ 135 #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ 136 #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ 137 138 /* DAVINCI_SDIOST0 definitions */ 139 #define SDIOST0_DAT1_HI BIT(0) 140 141 /* DAVINCI_SDIOIEN definitions */ 142 #define SDIOIEN_IOINTEN BIT(0) 143 144 /* DAVINCI_SDIOIST definitions */ 145 #define SDIOIST_IOINT BIT(0) 146 147 /* MMCSD Init clock in Hz in opendrain mode */ 148 #define MMCSD_INIT_CLOCK 200000 149 150 /* 151 * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, 152 * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only 153 * for drivers with max_segs == 1, making the segments bigger (64KB) 154 * than the page or two that's otherwise typical. nr_sg (passed from 155 * platform data) == 16 gives at least the same throughput boost, using 156 * EDMA transfer linkage instead of spending CPU time copying pages. 157 */ 158 #define MAX_CCNT ((1 << 16) - 1) 159 160 #define MAX_NR_SG 16 161 162 static unsigned rw_threshold = 32; 163 module_param(rw_threshold, uint, S_IRUGO); 164 MODULE_PARM_DESC(rw_threshold, 165 "Read/Write threshold. Default = 32"); 166 167 static unsigned poll_threshold = 128; 168 module_param(poll_threshold, uint, S_IRUGO); 169 MODULE_PARM_DESC(poll_threshold, 170 "Polling transaction size threshold. Default = 128"); 171 172 static unsigned poll_loopcount = 32; 173 module_param(poll_loopcount, uint, S_IRUGO); 174 MODULE_PARM_DESC(poll_loopcount, 175 "Maximum polling loop count. Default = 32"); 176 177 static unsigned __initdata use_dma = 1; 178 module_param(use_dma, uint, 0); 179 MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); 180 181 struct mmc_davinci_host { 182 struct mmc_command *cmd; 183 struct mmc_data *data; 184 struct mmc_host *mmc; 185 struct clk *clk; 186 unsigned int mmc_input_clk; 187 void __iomem *base; 188 struct resource *mem_res; 189 int mmc_irq, sdio_irq; 190 unsigned char bus_mode; 191 192 #define DAVINCI_MMC_DATADIR_NONE 0 193 #define DAVINCI_MMC_DATADIR_READ 1 194 #define DAVINCI_MMC_DATADIR_WRITE 2 195 unsigned char data_dir; 196 unsigned char suspended; 197 198 /* buffer is used during PIO of one scatterlist segment, and 199 * is updated along with buffer_bytes_left. bytes_left applies 200 * to all N blocks of the PIO transfer. 201 */ 202 u8 *buffer; 203 u32 buffer_bytes_left; 204 u32 bytes_left; 205 206 u32 rxdma, txdma; 207 struct dma_chan *dma_tx; 208 struct dma_chan *dma_rx; 209 bool use_dma; 210 bool do_dma; 211 bool sdio_int; 212 bool active_request; 213 214 /* For PIO we walk scatterlists one segment at a time. */ 215 unsigned int sg_len; 216 struct scatterlist *sg; 217 218 /* Version of the MMC/SD controller */ 219 u8 version; 220 /* for ns in one cycle calculation */ 221 unsigned ns_in_one_cycle; 222 /* Number of sg segments */ 223 u8 nr_sg; 224 #ifdef CONFIG_CPU_FREQ 225 struct notifier_block freq_transition; 226 #endif 227 }; 228 229 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); 230 231 /* PIO only */ 232 static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) 233 { 234 host->buffer_bytes_left = sg_dma_len(host->sg); 235 host->buffer = sg_virt(host->sg); 236 if (host->buffer_bytes_left > host->bytes_left) 237 host->buffer_bytes_left = host->bytes_left; 238 } 239 240 static void davinci_fifo_data_trans(struct mmc_davinci_host *host, 241 unsigned int n) 242 { 243 u8 *p; 244 unsigned int i; 245 246 if (host->buffer_bytes_left == 0) { 247 host->sg = sg_next(host->data->sg); 248 mmc_davinci_sg_to_buf(host); 249 } 250 251 p = host->buffer; 252 if (n > host->buffer_bytes_left) 253 n = host->buffer_bytes_left; 254 host->buffer_bytes_left -= n; 255 host->bytes_left -= n; 256 257 /* NOTE: we never transfer more than rw_threshold bytes 258 * to/from the fifo here; there's no I/O overlap. 259 * This also assumes that access width( i.e. ACCWD) is 4 bytes 260 */ 261 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 262 for (i = 0; i < (n >> 2); i++) { 263 writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); 264 p = p + 4; 265 } 266 if (n & 3) { 267 iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); 268 p = p + (n & 3); 269 } 270 } else { 271 for (i = 0; i < (n >> 2); i++) { 272 *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); 273 p = p + 4; 274 } 275 if (n & 3) { 276 ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); 277 p = p + (n & 3); 278 } 279 } 280 host->buffer = p; 281 } 282 283 static void mmc_davinci_start_command(struct mmc_davinci_host *host, 284 struct mmc_command *cmd) 285 { 286 u32 cmd_reg = 0; 287 u32 im_val; 288 289 dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", 290 cmd->opcode, cmd->arg, 291 ({ char *s; 292 switch (mmc_resp_type(cmd)) { 293 case MMC_RSP_R1: 294 s = ", R1/R5/R6/R7 response"; 295 break; 296 case MMC_RSP_R1B: 297 s = ", R1b response"; 298 break; 299 case MMC_RSP_R2: 300 s = ", R2 response"; 301 break; 302 case MMC_RSP_R3: 303 s = ", R3/R4 response"; 304 break; 305 default: 306 s = ", (R? response)"; 307 break; 308 }; s; })); 309 host->cmd = cmd; 310 311 switch (mmc_resp_type(cmd)) { 312 case MMC_RSP_R1B: 313 /* There's some spec confusion about when R1B is 314 * allowed, but if the card doesn't issue a BUSY 315 * then it's harmless for us to allow it. 316 */ 317 cmd_reg |= MMCCMD_BSYEXP; 318 /* FALLTHROUGH */ 319 case MMC_RSP_R1: /* 48 bits, CRC */ 320 cmd_reg |= MMCCMD_RSPFMT_R1456; 321 break; 322 case MMC_RSP_R2: /* 136 bits, CRC */ 323 cmd_reg |= MMCCMD_RSPFMT_R2; 324 break; 325 case MMC_RSP_R3: /* 48 bits, no CRC */ 326 cmd_reg |= MMCCMD_RSPFMT_R3; 327 break; 328 default: 329 cmd_reg |= MMCCMD_RSPFMT_NONE; 330 dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", 331 mmc_resp_type(cmd)); 332 break; 333 } 334 335 /* Set command index */ 336 cmd_reg |= cmd->opcode; 337 338 /* Enable EDMA transfer triggers */ 339 if (host->do_dma) 340 cmd_reg |= MMCCMD_DMATRIG; 341 342 if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && 343 host->data_dir == DAVINCI_MMC_DATADIR_READ) 344 cmd_reg |= MMCCMD_DMATRIG; 345 346 /* Setting whether command involves data transfer or not */ 347 if (cmd->data) 348 cmd_reg |= MMCCMD_WDATX; 349 350 /* Setting whether stream or block transfer */ 351 if (cmd->flags & MMC_DATA_STREAM) 352 cmd_reg |= MMCCMD_STRMTP; 353 354 /* Setting whether data read or write */ 355 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) 356 cmd_reg |= MMCCMD_DTRW; 357 358 if (host->bus_mode == MMC_BUSMODE_PUSHPULL) 359 cmd_reg |= MMCCMD_PPLEN; 360 361 /* set Command timeout */ 362 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 363 364 /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ 365 im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; 366 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 367 im_val |= MMCST0_DATDNE | MMCST0_CRCWR; 368 369 if (!host->do_dma) 370 im_val |= MMCST0_DXRDY; 371 } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { 372 im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; 373 374 if (!host->do_dma) 375 im_val |= MMCST0_DRRDY; 376 } 377 378 /* 379 * Before non-DMA WRITE commands the controller needs priming: 380 * FIFO should be populated with 32 bytes i.e. whatever is the FIFO size 381 */ 382 if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) 383 davinci_fifo_data_trans(host, rw_threshold); 384 385 writel(cmd->arg, host->base + DAVINCI_MMCARGHL); 386 writel(cmd_reg, host->base + DAVINCI_MMCCMD); 387 388 host->active_request = true; 389 390 if (!host->do_dma && host->bytes_left <= poll_threshold) { 391 u32 count = poll_loopcount; 392 393 while (host->active_request && count--) { 394 mmc_davinci_irq(0, host); 395 cpu_relax(); 396 } 397 } 398 399 if (host->active_request) 400 writel(im_val, host->base + DAVINCI_MMCIM); 401 } 402 403 /*----------------------------------------------------------------------*/ 404 405 /* DMA infrastructure */ 406 407 static void davinci_abort_dma(struct mmc_davinci_host *host) 408 { 409 struct dma_chan *sync_dev; 410 411 if (host->data_dir == DAVINCI_MMC_DATADIR_READ) 412 sync_dev = host->dma_rx; 413 else 414 sync_dev = host->dma_tx; 415 416 dmaengine_terminate_all(sync_dev); 417 } 418 419 static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, 420 struct mmc_data *data) 421 { 422 struct dma_chan *chan; 423 struct dma_async_tx_descriptor *desc; 424 int ret = 0; 425 426 if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { 427 struct dma_slave_config dma_tx_conf = { 428 .direction = DMA_MEM_TO_DEV, 429 .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, 430 .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 431 .dst_maxburst = 432 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 433 }; 434 chan = host->dma_tx; 435 dmaengine_slave_config(host->dma_tx, &dma_tx_conf); 436 437 desc = dmaengine_prep_slave_sg(host->dma_tx, 438 data->sg, 439 host->sg_len, 440 DMA_MEM_TO_DEV, 441 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 442 if (!desc) { 443 dev_dbg(mmc_dev(host->mmc), 444 "failed to allocate DMA TX descriptor"); 445 ret = -1; 446 goto out; 447 } 448 } else { 449 struct dma_slave_config dma_rx_conf = { 450 .direction = DMA_DEV_TO_MEM, 451 .src_addr = host->mem_res->start + DAVINCI_MMCDRR, 452 .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, 453 .src_maxburst = 454 rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, 455 }; 456 chan = host->dma_rx; 457 dmaengine_slave_config(host->dma_rx, &dma_rx_conf); 458 459 desc = dmaengine_prep_slave_sg(host->dma_rx, 460 data->sg, 461 host->sg_len, 462 DMA_DEV_TO_MEM, 463 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 464 if (!desc) { 465 dev_dbg(mmc_dev(host->mmc), 466 "failed to allocate DMA RX descriptor"); 467 ret = -1; 468 goto out; 469 } 470 } 471 472 dmaengine_submit(desc); 473 dma_async_issue_pending(chan); 474 475 out: 476 return ret; 477 } 478 479 static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, 480 struct mmc_data *data) 481 { 482 int i; 483 int mask = rw_threshold - 1; 484 int ret = 0; 485 486 host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 487 ((data->flags & MMC_DATA_WRITE) 488 ? DMA_TO_DEVICE 489 : DMA_FROM_DEVICE)); 490 491 /* no individual DMA segment should need a partial FIFO */ 492 for (i = 0; i < host->sg_len; i++) { 493 if (sg_dma_len(data->sg + i) & mask) { 494 dma_unmap_sg(mmc_dev(host->mmc), 495 data->sg, data->sg_len, 496 (data->flags & MMC_DATA_WRITE) 497 ? DMA_TO_DEVICE 498 : DMA_FROM_DEVICE); 499 return -1; 500 } 501 } 502 503 host->do_dma = 1; 504 ret = mmc_davinci_send_dma_request(host, data); 505 506 return ret; 507 } 508 509 static void __init_or_module 510 davinci_release_dma_channels(struct mmc_davinci_host *host) 511 { 512 if (!host->use_dma) 513 return; 514 515 dma_release_channel(host->dma_tx); 516 dma_release_channel(host->dma_rx); 517 } 518 519 static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) 520 { 521 int r; 522 dma_cap_mask_t mask; 523 524 dma_cap_zero(mask); 525 dma_cap_set(DMA_SLAVE, mask); 526 527 host->dma_tx = 528 dma_request_slave_channel_compat(mask, edma_filter_fn, 529 &host->txdma, mmc_dev(host->mmc), "tx"); 530 if (!host->dma_tx) { 531 dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); 532 return -ENODEV; 533 } 534 535 host->dma_rx = 536 dma_request_slave_channel_compat(mask, edma_filter_fn, 537 &host->rxdma, mmc_dev(host->mmc), "rx"); 538 if (!host->dma_rx) { 539 dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); 540 r = -ENODEV; 541 goto free_master_write; 542 } 543 544 return 0; 545 546 free_master_write: 547 dma_release_channel(host->dma_tx); 548 549 return r; 550 } 551 552 /*----------------------------------------------------------------------*/ 553 554 static void 555 mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) 556 { 557 int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; 558 int timeout; 559 struct mmc_data *data = req->data; 560 561 if (host->version == MMC_CTLR_VERSION_2) 562 fifo_lev = (rw_threshold == 64) ? MMCFIFOCTL_FIFOLEV : 0; 563 564 host->data = data; 565 if (data == NULL) { 566 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 567 writel(0, host->base + DAVINCI_MMCBLEN); 568 writel(0, host->base + DAVINCI_MMCNBLK); 569 return; 570 } 571 572 dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", 573 (data->flags & MMC_DATA_STREAM) ? "stream" : "block", 574 (data->flags & MMC_DATA_WRITE) ? "write" : "read", 575 data->blocks, data->blksz); 576 dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", 577 data->timeout_clks, data->timeout_ns); 578 timeout = data->timeout_clks + 579 (data->timeout_ns / host->ns_in_one_cycle); 580 if (timeout > 0xffff) 581 timeout = 0xffff; 582 583 writel(timeout, host->base + DAVINCI_MMCTOD); 584 writel(data->blocks, host->base + DAVINCI_MMCNBLK); 585 writel(data->blksz, host->base + DAVINCI_MMCBLEN); 586 587 /* Configure the FIFO */ 588 switch (data->flags & MMC_DATA_WRITE) { 589 case MMC_DATA_WRITE: 590 host->data_dir = DAVINCI_MMC_DATADIR_WRITE; 591 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, 592 host->base + DAVINCI_MMCFIFOCTL); 593 writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, 594 host->base + DAVINCI_MMCFIFOCTL); 595 break; 596 597 default: 598 host->data_dir = DAVINCI_MMC_DATADIR_READ; 599 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, 600 host->base + DAVINCI_MMCFIFOCTL); 601 writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, 602 host->base + DAVINCI_MMCFIFOCTL); 603 break; 604 } 605 606 host->buffer = NULL; 607 host->bytes_left = data->blocks * data->blksz; 608 609 /* For now we try to use DMA whenever we won't need partial FIFO 610 * reads or writes, either for the whole transfer (as tested here) 611 * or for any individual scatterlist segment (tested when we call 612 * start_dma_transfer). 613 * 614 * While we *could* change that, unusual block sizes are rarely 615 * used. The occasional fallback to PIO should't hurt. 616 */ 617 if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 618 && mmc_davinci_start_dma_transfer(host, data) == 0) { 619 /* zero this to ensure we take no PIO paths */ 620 host->bytes_left = 0; 621 } else { 622 /* Revert to CPU Copy */ 623 host->sg_len = data->sg_len; 624 host->sg = host->data->sg; 625 mmc_davinci_sg_to_buf(host); 626 } 627 } 628 629 static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) 630 { 631 struct mmc_davinci_host *host = mmc_priv(mmc); 632 unsigned long timeout = jiffies + msecs_to_jiffies(900); 633 u32 mmcst1 = 0; 634 635 /* Card may still be sending BUSY after a previous operation, 636 * typically some kind of write. If so, we can't proceed yet. 637 */ 638 while (time_before(jiffies, timeout)) { 639 mmcst1 = readl(host->base + DAVINCI_MMCST1); 640 if (!(mmcst1 & MMCST1_BUSY)) 641 break; 642 cpu_relax(); 643 } 644 if (mmcst1 & MMCST1_BUSY) { 645 dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); 646 req->cmd->error = -ETIMEDOUT; 647 mmc_request_done(mmc, req); 648 return; 649 } 650 651 host->do_dma = 0; 652 mmc_davinci_prepare_data(host, req); 653 mmc_davinci_start_command(host, req->cmd); 654 } 655 656 static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, 657 unsigned int mmc_req_freq) 658 { 659 unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; 660 661 mmc_pclk = host->mmc_input_clk; 662 if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) 663 mmc_push_pull_divisor = ((unsigned int)mmc_pclk 664 / (2 * mmc_req_freq)) - 1; 665 else 666 mmc_push_pull_divisor = 0; 667 668 mmc_freq = (unsigned int)mmc_pclk 669 / (2 * (mmc_push_pull_divisor + 1)); 670 671 if (mmc_freq > mmc_req_freq) 672 mmc_push_pull_divisor = mmc_push_pull_divisor + 1; 673 /* Convert ns to clock cycles */ 674 if (mmc_req_freq <= 400000) 675 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 676 / (2 * (mmc_push_pull_divisor + 1)))/1000)); 677 else 678 host->ns_in_one_cycle = (1000000) / (((mmc_pclk 679 / (2 * (mmc_push_pull_divisor + 1)))/1000000)); 680 681 return mmc_push_pull_divisor; 682 } 683 684 static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) 685 { 686 unsigned int open_drain_freq = 0, mmc_pclk = 0; 687 unsigned int mmc_push_pull_freq = 0; 688 struct mmc_davinci_host *host = mmc_priv(mmc); 689 690 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { 691 u32 temp; 692 693 /* Ignoring the init clock value passed for fixing the inter 694 * operability with different cards. 695 */ 696 open_drain_freq = ((unsigned int)mmc_pclk 697 / (2 * MMCSD_INIT_CLOCK)) - 1; 698 699 if (open_drain_freq > 0xFF) 700 open_drain_freq = 0xFF; 701 702 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 703 temp |= open_drain_freq; 704 writel(temp, host->base + DAVINCI_MMCCLK); 705 706 /* Convert ns to clock cycles */ 707 host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); 708 } else { 709 u32 temp; 710 mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); 711 712 if (mmc_push_pull_freq > 0xFF) 713 mmc_push_pull_freq = 0xFF; 714 715 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; 716 writel(temp, host->base + DAVINCI_MMCCLK); 717 718 udelay(10); 719 720 temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; 721 temp |= mmc_push_pull_freq; 722 writel(temp, host->base + DAVINCI_MMCCLK); 723 724 writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 725 726 udelay(10); 727 } 728 } 729 730 static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 731 { 732 struct mmc_davinci_host *host = mmc_priv(mmc); 733 struct platform_device *pdev = to_platform_device(mmc->parent); 734 struct davinci_mmc_config *config = pdev->dev.platform_data; 735 736 dev_dbg(mmc_dev(host->mmc), 737 "clock %dHz busmode %d powermode %d Vdd %04x\n", 738 ios->clock, ios->bus_mode, ios->power_mode, 739 ios->vdd); 740 741 switch (ios->power_mode) { 742 case MMC_POWER_OFF: 743 if (config && config->set_power) 744 config->set_power(pdev->id, false); 745 break; 746 case MMC_POWER_UP: 747 if (config && config->set_power) 748 config->set_power(pdev->id, true); 749 break; 750 } 751 752 switch (ios->bus_width) { 753 case MMC_BUS_WIDTH_8: 754 dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); 755 writel((readl(host->base + DAVINCI_MMCCTL) & 756 ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, 757 host->base + DAVINCI_MMCCTL); 758 break; 759 case MMC_BUS_WIDTH_4: 760 dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); 761 if (host->version == MMC_CTLR_VERSION_2) 762 writel((readl(host->base + DAVINCI_MMCCTL) & 763 ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, 764 host->base + DAVINCI_MMCCTL); 765 else 766 writel(readl(host->base + DAVINCI_MMCCTL) | 767 MMCCTL_WIDTH_4_BIT, 768 host->base + DAVINCI_MMCCTL); 769 break; 770 case MMC_BUS_WIDTH_1: 771 dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); 772 if (host->version == MMC_CTLR_VERSION_2) 773 writel(readl(host->base + DAVINCI_MMCCTL) & 774 ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), 775 host->base + DAVINCI_MMCCTL); 776 else 777 writel(readl(host->base + DAVINCI_MMCCTL) & 778 ~MMCCTL_WIDTH_4_BIT, 779 host->base + DAVINCI_MMCCTL); 780 break; 781 } 782 783 calculate_clk_divider(mmc, ios); 784 785 host->bus_mode = ios->bus_mode; 786 if (ios->power_mode == MMC_POWER_UP) { 787 unsigned long timeout = jiffies + msecs_to_jiffies(50); 788 bool lose = true; 789 790 /* Send clock cycles, poll completion */ 791 writel(0, host->base + DAVINCI_MMCARGHL); 792 writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); 793 while (time_before(jiffies, timeout)) { 794 u32 tmp = readl(host->base + DAVINCI_MMCST0); 795 796 if (tmp & MMCST0_RSPDNE) { 797 lose = false; 798 break; 799 } 800 cpu_relax(); 801 } 802 if (lose) 803 dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); 804 } 805 806 /* FIXME on power OFF, reset things ... */ 807 } 808 809 static void 810 mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) 811 { 812 host->data = NULL; 813 814 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { 815 /* 816 * SDIO Interrupt Detection work-around as suggested by 817 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata 818 * 2.1.6): Signal SDIO interrupt only if it is enabled by core 819 */ 820 if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & 821 SDIOST0_DAT1_HI)) { 822 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 823 mmc_signal_sdio_irq(host->mmc); 824 } 825 } 826 827 if (host->do_dma) { 828 davinci_abort_dma(host); 829 830 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 831 (data->flags & MMC_DATA_WRITE) 832 ? DMA_TO_DEVICE 833 : DMA_FROM_DEVICE); 834 host->do_dma = false; 835 } 836 host->data_dir = DAVINCI_MMC_DATADIR_NONE; 837 838 if (!data->stop || (host->cmd && host->cmd->error)) { 839 mmc_request_done(host->mmc, data->mrq); 840 writel(0, host->base + DAVINCI_MMCIM); 841 host->active_request = false; 842 } else 843 mmc_davinci_start_command(host, data->stop); 844 } 845 846 static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, 847 struct mmc_command *cmd) 848 { 849 host->cmd = NULL; 850 851 if (cmd->flags & MMC_RSP_PRESENT) { 852 if (cmd->flags & MMC_RSP_136) { 853 /* response type 2 */ 854 cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); 855 cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); 856 cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); 857 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 858 } else { 859 /* response types 1, 1b, 3, 4, 5, 6 */ 860 cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); 861 } 862 } 863 864 if (host->data == NULL || cmd->error) { 865 if (cmd->error == -ETIMEDOUT) 866 cmd->mrq->cmd->retries = 0; 867 mmc_request_done(host->mmc, cmd->mrq); 868 writel(0, host->base + DAVINCI_MMCIM); 869 host->active_request = false; 870 } 871 } 872 873 static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, 874 int val) 875 { 876 u32 temp; 877 878 temp = readl(host->base + DAVINCI_MMCCTL); 879 if (val) /* reset */ 880 temp |= MMCCTL_CMDRST | MMCCTL_DATRST; 881 else /* enable */ 882 temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); 883 884 writel(temp, host->base + DAVINCI_MMCCTL); 885 udelay(10); 886 } 887 888 static void 889 davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) 890 { 891 mmc_davinci_reset_ctrl(host, 1); 892 mmc_davinci_reset_ctrl(host, 0); 893 } 894 895 static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) 896 { 897 struct mmc_davinci_host *host = dev_id; 898 unsigned int status; 899 900 status = readl(host->base + DAVINCI_SDIOIST); 901 if (status & SDIOIST_IOINT) { 902 dev_dbg(mmc_dev(host->mmc), 903 "SDIO interrupt status %x\n", status); 904 writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 905 mmc_signal_sdio_irq(host->mmc); 906 } 907 return IRQ_HANDLED; 908 } 909 910 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) 911 { 912 struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; 913 unsigned int status, qstatus; 914 int end_command = 0; 915 int end_transfer = 0; 916 struct mmc_data *data = host->data; 917 918 if (host->cmd == NULL && host->data == NULL) { 919 status = readl(host->base + DAVINCI_MMCST0); 920 dev_dbg(mmc_dev(host->mmc), 921 "Spurious interrupt 0x%04x\n", status); 922 /* Disable the interrupt from mmcsd */ 923 writel(0, host->base + DAVINCI_MMCIM); 924 return IRQ_NONE; 925 } 926 927 status = readl(host->base + DAVINCI_MMCST0); 928 qstatus = status; 929 930 /* handle FIFO first when using PIO for data. 931 * bytes_left will decrease to zero as I/O progress and status will 932 * read zero over iteration because this controller status 933 * register(MMCST0) reports any status only once and it is cleared 934 * by read. So, it is not unbouned loop even in the case of 935 * non-dma. 936 */ 937 if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { 938 unsigned long im_val; 939 940 /* 941 * If interrupts fire during the following loop, they will be 942 * handled by the handler, but the PIC will still buffer these. 943 * As a result, the handler will be called again to serve these 944 * needlessly. In order to avoid these spurious interrupts, 945 * keep interrupts masked during the loop. 946 */ 947 im_val = readl(host->base + DAVINCI_MMCIM); 948 writel(0, host->base + DAVINCI_MMCIM); 949 950 do { 951 davinci_fifo_data_trans(host, rw_threshold); 952 status = readl(host->base + DAVINCI_MMCST0); 953 qstatus |= status; 954 } while (host->bytes_left && 955 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); 956 957 /* 958 * If an interrupt is pending, it is assumed it will fire when 959 * it is unmasked. This assumption is also taken when the MMCIM 960 * is first set. Otherwise, writing to MMCIM after reading the 961 * status is race-prone. 962 */ 963 writel(im_val, host->base + DAVINCI_MMCIM); 964 } 965 966 if (qstatus & MMCST0_DATDNE) { 967 /* All blocks sent/received, and CRC checks passed */ 968 if (data != NULL) { 969 if ((host->do_dma == 0) && (host->bytes_left > 0)) { 970 /* if datasize < rw_threshold 971 * no RX ints are generated 972 */ 973 davinci_fifo_data_trans(host, host->bytes_left); 974 } 975 end_transfer = 1; 976 data->bytes_xfered = data->blocks * data->blksz; 977 } else { 978 dev_err(mmc_dev(host->mmc), 979 "DATDNE with no host->data\n"); 980 } 981 } 982 983 if (qstatus & MMCST0_TOUTRD) { 984 /* Read data timeout */ 985 data->error = -ETIMEDOUT; 986 end_transfer = 1; 987 988 dev_dbg(mmc_dev(host->mmc), 989 "read data timeout, status %x\n", 990 qstatus); 991 992 davinci_abort_data(host, data); 993 } 994 995 if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { 996 /* Data CRC error */ 997 data->error = -EILSEQ; 998 end_transfer = 1; 999 1000 /* NOTE: this controller uses CRCWR to report both CRC 1001 * errors and timeouts (on writes). MMCDRSP values are 1002 * only weakly documented, but 0x9f was clearly a timeout 1003 * case and the two three-bit patterns in various SD specs 1004 * (101, 010) aren't part of it ... 1005 */ 1006 if (qstatus & MMCST0_CRCWR) { 1007 u32 temp = readb(host->base + DAVINCI_MMCDRSP); 1008 1009 if (temp == 0x9f) 1010 data->error = -ETIMEDOUT; 1011 } 1012 dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", 1013 (qstatus & MMCST0_CRCWR) ? "write" : "read", 1014 (data->error == -ETIMEDOUT) ? "timeout" : "CRC"); 1015 1016 davinci_abort_data(host, data); 1017 } 1018 1019 if (qstatus & MMCST0_TOUTRS) { 1020 /* Command timeout */ 1021 if (host->cmd) { 1022 dev_dbg(mmc_dev(host->mmc), 1023 "CMD%d timeout, status %x\n", 1024 host->cmd->opcode, qstatus); 1025 host->cmd->error = -ETIMEDOUT; 1026 if (data) { 1027 end_transfer = 1; 1028 davinci_abort_data(host, data); 1029 } else 1030 end_command = 1; 1031 } 1032 } 1033 1034 if (qstatus & MMCST0_CRCRS) { 1035 /* Command CRC error */ 1036 dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); 1037 if (host->cmd) { 1038 host->cmd->error = -EILSEQ; 1039 end_command = 1; 1040 } 1041 } 1042 1043 if (qstatus & MMCST0_RSPDNE) { 1044 /* End of command phase */ 1045 end_command = (int) host->cmd; 1046 } 1047 1048 if (end_command) 1049 mmc_davinci_cmd_done(host, host->cmd); 1050 if (end_transfer) 1051 mmc_davinci_xfer_done(host, data); 1052 return IRQ_HANDLED; 1053 } 1054 1055 static int mmc_davinci_get_cd(struct mmc_host *mmc) 1056 { 1057 struct platform_device *pdev = to_platform_device(mmc->parent); 1058 struct davinci_mmc_config *config = pdev->dev.platform_data; 1059 1060 if (!config || !config->get_cd) 1061 return -ENOSYS; 1062 return config->get_cd(pdev->id); 1063 } 1064 1065 static int mmc_davinci_get_ro(struct mmc_host *mmc) 1066 { 1067 struct platform_device *pdev = to_platform_device(mmc->parent); 1068 struct davinci_mmc_config *config = pdev->dev.platform_data; 1069 1070 if (!config || !config->get_ro) 1071 return -ENOSYS; 1072 return config->get_ro(pdev->id); 1073 } 1074 1075 static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) 1076 { 1077 struct mmc_davinci_host *host = mmc_priv(mmc); 1078 1079 if (enable) { 1080 if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { 1081 writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); 1082 mmc_signal_sdio_irq(host->mmc); 1083 } else { 1084 host->sdio_int = true; 1085 writel(readl(host->base + DAVINCI_SDIOIEN) | 1086 SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); 1087 } 1088 } else { 1089 host->sdio_int = false; 1090 writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, 1091 host->base + DAVINCI_SDIOIEN); 1092 } 1093 } 1094 1095 static struct mmc_host_ops mmc_davinci_ops = { 1096 .request = mmc_davinci_request, 1097 .set_ios = mmc_davinci_set_ios, 1098 .get_cd = mmc_davinci_get_cd, 1099 .get_ro = mmc_davinci_get_ro, 1100 .enable_sdio_irq = mmc_davinci_enable_sdio_irq, 1101 }; 1102 1103 /*----------------------------------------------------------------------*/ 1104 1105 #ifdef CONFIG_CPU_FREQ 1106 static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, 1107 unsigned long val, void *data) 1108 { 1109 struct mmc_davinci_host *host; 1110 unsigned int mmc_pclk; 1111 struct mmc_host *mmc; 1112 unsigned long flags; 1113 1114 host = container_of(nb, struct mmc_davinci_host, freq_transition); 1115 mmc = host->mmc; 1116 mmc_pclk = clk_get_rate(host->clk); 1117 1118 if (val == CPUFREQ_POSTCHANGE) { 1119 spin_lock_irqsave(&mmc->lock, flags); 1120 host->mmc_input_clk = mmc_pclk; 1121 calculate_clk_divider(mmc, &mmc->ios); 1122 spin_unlock_irqrestore(&mmc->lock, flags); 1123 } 1124 1125 return 0; 1126 } 1127 1128 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1129 { 1130 host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; 1131 1132 return cpufreq_register_notifier(&host->freq_transition, 1133 CPUFREQ_TRANSITION_NOTIFIER); 1134 } 1135 1136 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1137 { 1138 cpufreq_unregister_notifier(&host->freq_transition, 1139 CPUFREQ_TRANSITION_NOTIFIER); 1140 } 1141 #else 1142 static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) 1143 { 1144 return 0; 1145 } 1146 1147 static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) 1148 { 1149 } 1150 #endif 1151 static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1152 { 1153 1154 mmc_davinci_reset_ctrl(host, 1); 1155 1156 writel(0, host->base + DAVINCI_MMCCLK); 1157 writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); 1158 1159 writel(0x1FFF, host->base + DAVINCI_MMCTOR); 1160 writel(0xFFFF, host->base + DAVINCI_MMCTOD); 1161 1162 mmc_davinci_reset_ctrl(host, 0); 1163 } 1164 1165 static struct platform_device_id davinci_mmc_devtype[] = { 1166 { 1167 .name = "dm6441-mmc", 1168 .driver_data = MMC_CTLR_VERSION_1, 1169 }, { 1170 .name = "da830-mmc", 1171 .driver_data = MMC_CTLR_VERSION_2, 1172 }, 1173 {}, 1174 }; 1175 MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); 1176 1177 static const struct of_device_id davinci_mmc_dt_ids[] = { 1178 { 1179 .compatible = "ti,dm6441-mmc", 1180 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], 1181 }, 1182 { 1183 .compatible = "ti,da830-mmc", 1184 .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], 1185 }, 1186 {}, 1187 }; 1188 MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); 1189 1190 static struct davinci_mmc_config 1191 *mmc_parse_pdata(struct platform_device *pdev) 1192 { 1193 struct device_node *np; 1194 struct davinci_mmc_config *pdata = pdev->dev.platform_data; 1195 const struct of_device_id *match = 1196 of_match_device(of_match_ptr(davinci_mmc_dt_ids), &pdev->dev); 1197 u32 data; 1198 1199 np = pdev->dev.of_node; 1200 if (!np) 1201 return pdata; 1202 1203 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1204 if (!pdata) { 1205 dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n"); 1206 goto nodata; 1207 } 1208 1209 if (match) 1210 pdev->id_entry = match->data; 1211 1212 if (of_property_read_u32(np, "max-frequency", &pdata->max_freq)) 1213 dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n"); 1214 1215 of_property_read_u32(np, "bus-width", &data); 1216 switch (data) { 1217 case 1: 1218 case 4: 1219 case 8: 1220 pdata->wires = data; 1221 break; 1222 default: 1223 pdata->wires = 1; 1224 dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n"); 1225 } 1226 nodata: 1227 return pdata; 1228 } 1229 1230 static int __init davinci_mmcsd_probe(struct platform_device *pdev) 1231 { 1232 struct davinci_mmc_config *pdata = NULL; 1233 struct mmc_davinci_host *host = NULL; 1234 struct mmc_host *mmc = NULL; 1235 struct resource *r, *mem = NULL; 1236 int ret = 0, irq = 0; 1237 size_t mem_size; 1238 const struct platform_device_id *id_entry; 1239 1240 pdata = mmc_parse_pdata(pdev); 1241 if (pdata == NULL) { 1242 dev_err(&pdev->dev, "Couldn't get platform data\n"); 1243 return -ENOENT; 1244 } 1245 1246 ret = -ENODEV; 1247 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1248 irq = platform_get_irq(pdev, 0); 1249 if (!r || irq == NO_IRQ) 1250 goto out; 1251 1252 ret = -EBUSY; 1253 mem_size = resource_size(r); 1254 mem = request_mem_region(r->start, mem_size, pdev->name); 1255 if (!mem) 1256 goto out; 1257 1258 ret = -ENOMEM; 1259 mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); 1260 if (!mmc) 1261 goto out; 1262 1263 host = mmc_priv(mmc); 1264 host->mmc = mmc; /* Important */ 1265 1266 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1267 if (!r) 1268 dev_warn(&pdev->dev, "RX DMA resource not specified\n"); 1269 else 1270 host->rxdma = r->start; 1271 1272 r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1273 if (!r) 1274 dev_warn(&pdev->dev, "TX DMA resource not specified\n"); 1275 else 1276 host->txdma = r->start; 1277 1278 host->mem_res = mem; 1279 host->base = ioremap(mem->start, mem_size); 1280 if (!host->base) 1281 goto out; 1282 1283 ret = -ENXIO; 1284 host->clk = clk_get(&pdev->dev, "MMCSDCLK"); 1285 if (IS_ERR(host->clk)) { 1286 ret = PTR_ERR(host->clk); 1287 goto out; 1288 } 1289 clk_enable(host->clk); 1290 host->mmc_input_clk = clk_get_rate(host->clk); 1291 1292 init_mmcsd_host(host); 1293 1294 if (pdata->nr_sg) 1295 host->nr_sg = pdata->nr_sg - 1; 1296 1297 if (host->nr_sg > MAX_NR_SG || !host->nr_sg) 1298 host->nr_sg = MAX_NR_SG; 1299 1300 host->use_dma = use_dma; 1301 host->mmc_irq = irq; 1302 host->sdio_irq = platform_get_irq(pdev, 1); 1303 1304 if (host->use_dma && davinci_acquire_dma_channels(host) != 0) 1305 host->use_dma = 0; 1306 1307 /* REVISIT: someday, support IRQ-driven card detection. */ 1308 mmc->caps |= MMC_CAP_NEEDS_POLL; 1309 mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; 1310 1311 if (pdata && (pdata->wires == 4 || pdata->wires == 0)) 1312 mmc->caps |= MMC_CAP_4_BIT_DATA; 1313 1314 if (pdata && (pdata->wires == 8)) 1315 mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); 1316 1317 id_entry = platform_get_device_id(pdev); 1318 if (id_entry) 1319 host->version = id_entry->driver_data; 1320 1321 mmc->ops = &mmc_davinci_ops; 1322 mmc->f_min = 312500; 1323 mmc->f_max = 25000000; 1324 if (pdata && pdata->max_freq) 1325 mmc->f_max = pdata->max_freq; 1326 if (pdata && pdata->caps) 1327 mmc->caps |= pdata->caps; 1328 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1329 1330 /* With no iommu coalescing pages, each phys_seg is a hw_seg. 1331 * Each hw_seg uses one EDMA parameter RAM slot, always one 1332 * channel and then usually some linked slots. 1333 */ 1334 mmc->max_segs = MAX_NR_SG; 1335 1336 /* EDMA limit per hw segment (one or two MBytes) */ 1337 mmc->max_seg_size = MAX_CCNT * rw_threshold; 1338 1339 /* MMC/SD controller limits for multiblock requests */ 1340 mmc->max_blk_size = 4095; /* BLEN is 12 bits */ 1341 mmc->max_blk_count = 65535; /* NBLK is 16 bits */ 1342 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1343 1344 dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); 1345 dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); 1346 dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); 1347 dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); 1348 1349 platform_set_drvdata(pdev, host); 1350 1351 ret = mmc_davinci_cpufreq_register(host); 1352 if (ret) { 1353 dev_err(&pdev->dev, "failed to register cpufreq\n"); 1354 goto cpu_freq_fail; 1355 } 1356 1357 ret = mmc_add_host(mmc); 1358 if (ret < 0) 1359 goto out; 1360 1361 ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); 1362 if (ret) 1363 goto out; 1364 1365 if (host->sdio_irq >= 0) { 1366 ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0, 1367 mmc_hostname(mmc), host); 1368 if (!ret) 1369 mmc->caps |= MMC_CAP_SDIO_IRQ; 1370 } 1371 1372 rename_region(mem, mmc_hostname(mmc)); 1373 1374 dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", 1375 host->use_dma ? "DMA" : "PIO", 1376 (mmc->caps & MMC_CAP_4_BIT_DATA) ? 4 : 1); 1377 1378 return 0; 1379 1380 out: 1381 mmc_davinci_cpufreq_deregister(host); 1382 cpu_freq_fail: 1383 if (host) { 1384 davinci_release_dma_channels(host); 1385 1386 if (host->clk) { 1387 clk_disable(host->clk); 1388 clk_put(host->clk); 1389 } 1390 1391 if (host->base) 1392 iounmap(host->base); 1393 } 1394 1395 if (mmc) 1396 mmc_free_host(mmc); 1397 1398 if (mem) 1399 release_resource(mem); 1400 1401 dev_dbg(&pdev->dev, "probe err %d\n", ret); 1402 1403 return ret; 1404 } 1405 1406 static int __exit davinci_mmcsd_remove(struct platform_device *pdev) 1407 { 1408 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1409 1410 platform_set_drvdata(pdev, NULL); 1411 if (host) { 1412 mmc_davinci_cpufreq_deregister(host); 1413 1414 mmc_remove_host(host->mmc); 1415 free_irq(host->mmc_irq, host); 1416 if (host->mmc->caps & MMC_CAP_SDIO_IRQ) 1417 free_irq(host->sdio_irq, host); 1418 1419 davinci_release_dma_channels(host); 1420 1421 clk_disable(host->clk); 1422 clk_put(host->clk); 1423 1424 iounmap(host->base); 1425 1426 release_resource(host->mem_res); 1427 1428 mmc_free_host(host->mmc); 1429 } 1430 1431 return 0; 1432 } 1433 1434 #ifdef CONFIG_PM 1435 static int davinci_mmcsd_suspend(struct device *dev) 1436 { 1437 struct platform_device *pdev = to_platform_device(dev); 1438 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1439 int ret; 1440 1441 ret = mmc_suspend_host(host->mmc); 1442 if (!ret) { 1443 writel(0, host->base + DAVINCI_MMCIM); 1444 mmc_davinci_reset_ctrl(host, 1); 1445 clk_disable(host->clk); 1446 host->suspended = 1; 1447 } else { 1448 host->suspended = 0; 1449 } 1450 1451 return ret; 1452 } 1453 1454 static int davinci_mmcsd_resume(struct device *dev) 1455 { 1456 struct platform_device *pdev = to_platform_device(dev); 1457 struct mmc_davinci_host *host = platform_get_drvdata(pdev); 1458 int ret; 1459 1460 if (!host->suspended) 1461 return 0; 1462 1463 clk_enable(host->clk); 1464 1465 mmc_davinci_reset_ctrl(host, 0); 1466 ret = mmc_resume_host(host->mmc); 1467 if (!ret) 1468 host->suspended = 0; 1469 1470 return ret; 1471 } 1472 1473 static const struct dev_pm_ops davinci_mmcsd_pm = { 1474 .suspend = davinci_mmcsd_suspend, 1475 .resume = davinci_mmcsd_resume, 1476 }; 1477 1478 #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) 1479 #else 1480 #define davinci_mmcsd_pm_ops NULL 1481 #endif 1482 1483 static struct platform_driver davinci_mmcsd_driver = { 1484 .driver = { 1485 .name = "davinci_mmc", 1486 .owner = THIS_MODULE, 1487 .pm = davinci_mmcsd_pm_ops, 1488 .of_match_table = of_match_ptr(davinci_mmc_dt_ids), 1489 }, 1490 .remove = __exit_p(davinci_mmcsd_remove), 1491 .id_table = davinci_mmc_devtype, 1492 }; 1493 1494 module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe); 1495 1496 MODULE_AUTHOR("Texas Instruments India"); 1497 MODULE_LICENSE("GPL"); 1498 MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1499 MODULE_ALIAS("platform:davinci_mmc"); 1500 1501