/*
 *  WM8505/WM8650 SD/MMC Host Controller
 *
 *  Copyright (C) 2010 Tony Prisk
 *  Copyright (C) 2008 WonderMedia Technologies, Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>

#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/byteorder.h>


#define DRIVER_NAME "wmt-sdhc"


/* MMC/SD controller registers */
#define SDMMC_CTLR			0x00
#define SDMMC_CMD			0x01
#define SDMMC_RSPTYPE			0x02
#define SDMMC_ARG			0x04
#define SDMMC_BUSMODE			0x08
#define SDMMC_BLKLEN			0x0C
#define SDMMC_BLKCNT			0x0E
#define SDMMC_RSP			0x10
#define SDMMC_CBCR			0x20
#define SDMMC_INTMASK0			0x24
#define SDMMC_INTMASK1			0x25
#define SDMMC_STS0			0x28
#define SDMMC_STS1			0x29
#define SDMMC_STS2			0x2A
#define SDMMC_STS3			0x2B
#define SDMMC_RSPTIMEOUT		0x2C
#define SDMMC_CLK			0x30	/* VT8500 only */
#define SDMMC_EXTCTRL			0x34
#define SDMMC_SBLKLEN			0x38
#define SDMMC_DMATIMEOUT		0x3C


/* SDMMC_CTLR bit fields */
#define CTLR_CMD_START			0x01
#define CTLR_CMD_WRITE			0x04
#define CTLR_FIFO_RESET			0x08

/* SDMMC_BUSMODE bit fields */
#define BM_SPI_MODE			0x01
#define BM_FOURBIT_MODE			0x02
#define BM_EIGHTBIT_MODE		0x04
#define BM_SD_OFF			0x10
#define BM_SPI_CS			0x20
#define BM_SD_POWER			0x40
#define BM_SOFT_RESET			0x80

/* SDMMC_BLKLEN bit fields */
#define BLKL_CRCERR_ABORT		0x0800
#define BLKL_CD_POL_HIGH		0x1000
#define BLKL_GPI_CD			0x2000
#define BLKL_DATA3_CD			0x4000
#define BLKL_INT_ENABLE			0x8000

/* SDMMC_INTMASK0 bit fields */
#define INT0_MBLK_TRAN_DONE_INT_EN	0x10
#define INT0_BLK_TRAN_DONE_INT_EN	0x20
#define INT0_CD_INT_EN			0x40
#define INT0_DI_INT_EN			0x80

/* SDMMC_INTMASK1 bit fields */
#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02
#define INT1_CMD_RES_TOUT_INT_EN	0x04
#define INT1_MBLK_AUTO_STOP_INT_EN	0x08
#define INT1_DATA_TOUT_INT_EN		0x10
#define INT1_RESCRC_ERR_INT_EN		0x20
#define INT1_RCRC_ERR_INT_EN		0x40
#define INT1_WCRC_ERR_INT_EN		0x80

/* SDMMC_STS0 bit fields */
#define STS0_WRITE_PROTECT		0x02
#define STS0_CD_DATA3			0x04
#define STS0_CD_GPI			0x08
#define STS0_MBLK_DONE			0x10
#define STS0_BLK_DONE			0x20
#define STS0_CARD_DETECT		0x40
#define STS0_DEVICE_INS			0x80

/* SDMMC_STS1 bit fields */
#define STS1_SDIO_INT			0x01
#define STS1_CMDRSP_DONE		0x02
#define STS1_RSP_TIMEOUT		0x04
#define STS1_AUTOSTOP_DONE		0x08
#define STS1_DATA_TIMEOUT		0x10
#define STS1_RSP_CRC_ERR		0x20
#define STS1_RCRC_ERR			0x40
#define STS1_WCRC_ERR			0x80

/* SDMMC_STS2 bit fields */
#define STS2_CMD_RES_BUSY		0x10
#define STS2_DATARSP_BUSY		0x20
#define STS2_DIS_FORCECLK		0x80

/* SDMMC_EXTCTRL bit fields */
#define EXT_EIGHTBIT			0x04

/* MMC/SD DMA Controller Registers */
#define SDDMA_GCR			0x100
#define SDDMA_IER			0x104
#define SDDMA_ISR			0x108
#define SDDMA_DESPR			0x10C
#define SDDMA_RBR			0x110
#define SDDMA_DAR			0x114
#define SDDMA_BAR			0x118
#define SDDMA_CPR			0x11C
#define SDDMA_CCR			0x120


/* SDDMA_GCR bit fields */
#define DMA_GCR_DMA_EN			0x00000001
#define DMA_GCR_SOFT_RESET		0x00000100

/* SDDMA_IER bit fields */
#define DMA_IER_INT_EN			0x00000001

/* SDDMA_ISR bit fields */
#define DMA_ISR_INT_STS			0x00000001

/* SDDMA_RBR bit fields */
#define DMA_RBR_FORMAT			0x40000000
#define DMA_RBR_END			0x80000000

/* SDDMA_CCR bit fields */
#define DMA_CCR_RUN			0x00000080
#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000
#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000

/* SDDMA_CCR event status */
#define DMA_CCR_EVT_NO_STATUS		0x00000000
#define DMA_CCR_EVT_UNDERRUN		0x00000001
#define DMA_CCR_EVT_OVERRUN		0x00000002
#define DMA_CCR_EVT_DESP_READ		0x00000003
#define DMA_CCR_EVT_DATA_RW		0x00000004
#define DMA_CCR_EVT_EARLY_END		0x00000005
#define DMA_CCR_EVT_SUCCESS		0x0000000F

#define PDMA_READ			0x00
#define PDMA_WRITE			0x01

#define WMT_SD_POWER_OFF		0
#define WMT_SD_POWER_ON			1
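
/*
 * Layout of a PDMA descriptor as this driver builds it (see
 * wmt_dma_init_descriptor()): bits 30/31 of 'flags' carry the same values
 * as DMA_RBR_FORMAT/DMA_RBR_END and appear to mark the descriptor format
 * and the end of the chain, the low 16 bits hold the requested byte count,
 * 'data_buffer_addr' is the bus address of the data buffer and
 * 'branch_addr' points to the next descriptor. This description is
 * inferred from the code below, not from hardware documentation.
 */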
struct wmt_dma_descriptor {
	u32 flags;
	u32 data_buffer_addr;
	u32 branch_addr;
	u32 reserved1;
};

struct wmt_mci_caps {
	unsigned int f_min;
	unsigned int f_max;
	u32 ocr_avail;
	u32 caps;
	u32 max_seg_size;
	u32 max_segs;
	u32 max_blk_size;
};

struct wmt_mci_priv {
	struct mmc_host *mmc;
	void __iomem *sdmmc_base;

	int irq_regular;
	int irq_dma;

	void *dma_desc_buffer;
	dma_addr_t dma_desc_device_addr;

	struct completion cmdcomp;
	struct completion datacomp;

	struct completion *comp_cmd;
	struct completion *comp_dma;

	struct mmc_request *req;
	struct mmc_command *cmd;

	struct clk *clk_sdmmc;
	struct device *dev;

	u8 power_inverted;
	u8 cd_inverted;
};

static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable)
{
	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);

	if (enable ^ priv->power_inverted)
		reg_tmp &= ~BM_SD_OFF;
	else
		reg_tmp |= BM_SD_OFF;

	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE);
}

static void wmt_mci_read_response(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	int idx1, idx2;
	u8 tmp_resp;
	u32 response;

	priv = mmc_priv(mmc);

	for (idx1 = 0; idx1 < 4; idx1++) {
		response = 0;
		for (idx2 = 0; idx2 < 4; idx2++) {
			if ((idx1 == 3) && (idx2 == 3))
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP);
			else
				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP +
						 (idx1*4) + idx2 + 1);
			response |= (tmp_resp << (idx2 * 8));
		}
		priv->cmd->resp[idx1] = cpu_to_be32(response);
	}
}

static void wmt_mci_start_command(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR);
}
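
/*
 * Note on the cmdtype argument, as used elsewhere in this driver:
 * wmt_mci_request() passes 0 for non-data commands, 1/3 for single/multi
 * block writes and 2/4 for single/multi block reads, and
 * wmt_complete_data_request() uses 7 for the stop command. The rsptype
 * value is the (remapped) mmc_resp_type() of the command; the exact
 * hardware meaning of these encodings is not documented here.
 */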
static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype,
				u32 arg, u8 rsptype)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* write command, arg, resptype registers */
	writeb(command, priv->sdmmc_base + SDMMC_CMD);
	writel(arg, priv->sdmmc_base + SDMMC_ARG);
	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* ensure clock enabled - VT3465 */
	wmt_set_sd_power(priv, WMT_SD_POWER_ON);

	/* clear status bits */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3);

	/* set command type */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb((reg_tmp & 0x0F) | (cmdtype << 4),
	       priv->sdmmc_base + SDMMC_CTLR);

	return 0;
}

static void wmt_mci_disable_dma(struct wmt_mci_priv *priv)
{
	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR);
	writel(0, priv->sdmmc_base + SDDMA_IER);
}

static void wmt_complete_data_request(struct wmt_mci_priv *priv)
{
	struct mmc_request *req;
	req = priv->req;

	req->data->bytes_xfered = req->data->blksz * req->data->blocks;

	/* unmap the DMA pages used for write data */
	if (req->data->flags & MMC_DATA_WRITE)
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_TO_DEVICE);
	else
		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg,
			     req->data->sg_len, DMA_FROM_DEVICE);

	/* Check if the DMA ISR returned a data error */
	if ((req->cmd->error) || (req->data->error))
		mmc_request_done(priv->mmc, req);
	else {
		wmt_mci_read_response(priv->mmc);
		if (!req->data->stop) {
			/* single-block read/write requests end here */
			mmc_request_done(priv->mmc, req);
		} else {
			/*
			 * we change the priv->cmd variable so the response is
			 * stored in the stop struct rather than the original
			 * calling command struct
			 */
			priv->comp_cmd = &priv->cmdcomp;
			init_completion(priv->comp_cmd);
			priv->cmd = req->data->stop;
			wmt_mci_send_command(priv->mmc, req->data->stop->opcode,
					     7, req->data->stop->arg, 9);
			wmt_mci_start_command(priv);
		}
	}
}
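
/*
 * Interrupt handling for data requests is split between two handlers:
 * wmt_mci_regular_isr() completes 'comp_cmd' when the command/response
 * phase finishes and wmt_mci_dma_isr() completes 'comp_dma' when the PDMA
 * transfer finishes. Whichever handler runs last sees the other completion
 * already done and finishes the request through
 * wmt_complete_data_request(); non-data and stop commands are completed
 * entirely from the regular ISR.
 */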
static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;

	int status;

	priv = (struct wmt_mci_priv *)data;

	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F;

	if (status != DMA_CCR_EVT_SUCCESS) {
		dev_err(priv->dev, "DMA Error: Status = %d\n", status);
		priv->req->data->error = -ETIMEDOUT;
		complete(priv->comp_dma);
		return IRQ_HANDLED;
	}

	priv->req->data->error = 0;

	wmt_mci_disable_dma(priv);

	complete(priv->comp_dma);

	if (priv->comp_cmd) {
		if (completion_done(priv->comp_cmd)) {
			/*
			 * if the command (regular) interrupt has already
			 * completed, finish off the request; otherwise we
			 * wait for the command interrupt and finish from
			 * there.
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}
static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400kHz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}

static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}
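
/*
 * Request handling: commands without data are simply issued and finished
 * from wmt_mci_regular_isr(). For data commands the code below builds one
 * 16-byte wmt_dma_descriptor per block (blksz bytes each) in the coherent
 * buffer allocated at probe time, links them through branch_addr, flags
 * the last one with the end bit (0x80000000) and hands the bus address of
 * the first descriptor to the PDMA engine via wmt_dma_config().
 */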
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure.
	 * This is required on multi-block requests to pass the pointer to
	 * the stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg)+offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 busmode, extctrl;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	busmode = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	extctrl = readb(priv->sdmmc_base + SDMMC_EXTCTRL);

	busmode &= ~(BM_EIGHTBIT_MODE | BM_FOURBIT_MODE);
	extctrl &= ~EXT_EIGHTBIT;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		busmode |= BM_EIGHTBIT_MODE;
		extctrl |= EXT_EIGHTBIT;
		break;
	case MMC_BUS_WIDTH_4:
		busmode |= BM_FOURBIT_MODE;
		break;
	case MMC_BUS_WIDTH_1:
		break;
	}

	writeb(busmode, priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(extctrl, priv->sdmmc_base + SDMMC_EXTCTRL);
}
static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static const struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

static const struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};

static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(wmt_mci_dt_ids, &pdev->dev);
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	wmt_caps = of_id->data;

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

	mmc->max_req_size = (16*512*mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	if (of_get_property(np, "sdon-inverted", NULL))
		priv->power_inverted = 1;
	if (of_get_property(np, "cd-inverted", NULL))
		priv->cd_inverted = 1;

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}

	priv->irq_regular = regular_irq;
	priv->irq_dma = dma_irq;

	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register regular IRQ fail\n");
		goto fail3;
	}

	ret = request_irq(dma_irq, wmt_mci_dma_isr, 0, "sdmmc", priv);
	if (ret) {
		dev_err(&pdev->dev, "Register DMA IRQ fail\n");
		goto fail4;
	}

	/* alloc some DMA buffers for descriptors/transfers */
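	/*
	 * One 16-byte wmt_dma_descriptor per block, up to max_blk_count
	 * entries - this is the buffer wmt_mci_request() builds its
	 * descriptor chain in.
	 */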
	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev,
						   mmc->max_blk_count * 16,
						   &priv->dma_desc_device_addr,
						   GFP_KERNEL);
	if (!priv->dma_desc_buffer) {
		dev_err(&pdev->dev, "DMA alloc fail\n");
		ret = -EPERM;
		goto fail5;
	}

	platform_set_drvdata(pdev, mmc);

	priv->clk_sdmmc = of_clk_get(np, 0);
	if (IS_ERR(priv->clk_sdmmc)) {
		dev_err(&pdev->dev, "Error getting clock\n");
		ret = PTR_ERR(priv->clk_sdmmc);
		goto fail5;
	}

	ret = clk_prepare_enable(priv->clk_sdmmc);
	if (ret)
		goto fail6;

	/* configure the controller to a known 'ready' state */
	wmt_reset_hardware(mmc);

	mmc_add_host(mmc);

	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n");

	return 0;
fail6:
	clk_put(priv->clk_sdmmc);
fail5:
	free_irq(dma_irq, priv);
fail4:
	free_irq(regular_irq, priv);
fail3:
	iounmap(priv->sdmmc_base);
fail2:
	mmc_free_host(mmc);
fail1:
	return ret;
}

static int wmt_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct resource *res;
	u32 reg_tmp;

	mmc = platform_get_drvdata(pdev);
	priv = mmc_priv(mmc);

	/* reset SD controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* release the dma buffers */
	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16,
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}

#ifdef CONFIG_PM
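/*
 * Suspend soft-resets the controller and clears BLKL_INT_ENABLE and
 * BLKL_GPI_CD in SDMMC_BLKLEN (0xA000, hence the 0x5FFF mask below);
 * resume re-enables those bits and the device-insertion interrupt,
 * presumably so that card detection works again after wakeup.
 */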
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
	       SDMMC_BUSMODE);

	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	clk_disable(priv->clk_sdmmc);
	return 0;
}

static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);

	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend	= wmt_mci_suspend,
	.resume		= wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);