// SPDX-License-Identifier: GPL-2.0-only
/*
 *  tifm_sd.c - TI FlashMedia driver
 *
 *  Copyright (C) 2006 Alex Dubov <oakad@yahoo.com>
 *
 * Special thanks to Brad Campbell for extensive testing of this driver.
 */


#include <linux/tifm.h>
#include <linux/mmc/host.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/module.h>
#include <asm/io.h>

#define DRIVER_NAME "tifm_sd"
#define DRIVER_VERSION "0.8"

static bool no_dma = 0;
static bool fixed_timeout = 0;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);

/* Constants here are mostly from OMAP5912 datasheet */
#define TIFM_MMCSD_RESET      0x0002
#define TIFM_MMCSD_CLKMASK    0x03ff
#define TIFM_MMCSD_POWER      0x0800
#define TIFM_MMCSD_4BBUS      0x8000
#define TIFM_MMCSD_RXDE       0x8000   /* rx dma enable */
#define TIFM_MMCSD_TXDE       0x0080   /* tx dma enable */
#define TIFM_MMCSD_BUFINT     0x0c00   /* set bits: AE, AF */
#define TIFM_MMCSD_DPE        0x0020   /* data timeout counted in kilocycles */
#define TIFM_MMCSD_INAB       0x0080   /* abort / initialize command */
#define TIFM_MMCSD_READ       0x8000

#define TIFM_MMCSD_ERRMASK    0x01e0   /* set bits: CCRC, CTO, DCRC, DTO */
#define TIFM_MMCSD_EOC        0x0001   /* end of command phase  */
#define TIFM_MMCSD_CD         0x0002   /* card detect           */
#define TIFM_MMCSD_CB         0x0004   /* card enter busy state */
#define TIFM_MMCSD_BRS        0x0008   /* block received/sent   */
#define TIFM_MMCSD_EOFB       0x0010   /* card exit busy state  */
#define TIFM_MMCSD_DTO        0x0020   /* data time-out         */
#define TIFM_MMCSD_DCRC       0x0040   /* data crc error        */
#define TIFM_MMCSD_CTO        0x0080   /* command time-out      */
#define TIFM_MMCSD_CCRC       0x0100   /* command crc error     */
#define TIFM_MMCSD_AF         0x0400   /* fifo almost full      */
#define TIFM_MMCSD_AE         0x0800   /* fifo almost empty     */
#define TIFM_MMCSD_OCRB       0x1000   /* OCR busy              */
#define TIFM_MMCSD_CIRQ       0x2000   /* card irq (cmd40/sdio) */
#define TIFM_MMCSD_CERR       0x4000   /* card status error     */

#define TIFM_MMCSD_ODTO       0x0040   /* open drain / extended timeout */
#define TIFM_MMCSD_CARD_RO    0x0200   /* card is read-only     */

#define TIFM_MMCSD_FIFO_SIZE  0x0020

#define TIFM_MMCSD_RSP_R0     0x0000
#define TIFM_MMCSD_RSP_R1     0x0100
#define TIFM_MMCSD_RSP_R2     0x0200
#define TIFM_MMCSD_RSP_R3     0x0300
#define TIFM_MMCSD_RSP_R4     0x0400
#define TIFM_MMCSD_RSP_R5     0x0500
#define TIFM_MMCSD_RSP_R6     0x0600

#define TIFM_MMCSD_RSP_BUSY   0x0800

#define TIFM_MMCSD_CMD_BC     0x0000
#define TIFM_MMCSD_CMD_BCR    0x1000
#define TIFM_MMCSD_CMD_AC     0x2000
#define TIFM_MMCSD_CMD_ADTC   0x3000

#define TIFM_MMCSD_MAX_BLOCK_SIZE  0x0800UL

#define TIFM_MMCSD_REQ_TIMEOUT_MS  1000

enum {
	CMD_READY   = 0x0001,
	FIFO_READY  = 0x0002,
	BRS_READY   = 0x0004,
	SCMD_ACTIVE = 0x0008,
	SCMD_READY  = 0x0010,
	CARD_BUSY   = 0x0020,
	DATA_CARRY  = 0x0040
};

struct tifm_sd {
	struct tifm_dev       *dev;

	unsigned short        eject:1,
			      open_drain:1,
			      no_dma:1;
	unsigned short        cmd_flags;

	unsigned int          clk_freq;
	unsigned int          clk_div;
	unsigned long         timeout_jiffies;

	struct tasklet_struct finish_tasklet;
	struct timer_list     timer;
	struct mmc_request    *req;

	int                   sg_len;
	int                   sg_pos;
	unsigned int          block_pos;
	struct scatterlist    bounce_buf;
	unsigned char         bounce_buf_data[TIFM_MMCSD_MAX_BLOCK_SIZE];
};

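/*
 * PIO path: the data FIFO is accessed 32 bits at a time, but each access
 * carries only 16 bits of payload.  When a chunk ends on an odd byte, the
 * leftover byte is parked in bounce_buf_data[0] and DATA_CARRY is set so
 * that the next call can complete the half-word.
 */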
/* for some reason, host won't respond correctly to readw/writew */
static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
			      unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg) + off;
	if (host->cmd_flags & DATA_CARRY) {
		buf[pos++] = host->bounce_buf_data[0];
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = readl(sock->addr + SOCK_MMCSD_DATA);
		buf[pos++] = val & 0xff;
		if (pos == cnt) {
			host->bounce_buf_data[0] = (val >> 8) & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		buf[pos++] = (val >> 8) & 0xff;
	}
	kunmap_atomic(buf - off);
}

static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
			       unsigned int off, unsigned int cnt)
{
	struct tifm_dev *sock = host->dev;
	unsigned char *buf;
	unsigned int pos = 0, val;

	buf = kmap_atomic(pg) + off;
	if (host->cmd_flags & DATA_CARRY) {
		val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
		writel(val, sock->addr + SOCK_MMCSD_DATA);
		host->cmd_flags &= ~DATA_CARRY;
	}

	while (pos < cnt) {
		val = buf[pos++];
		if (pos == cnt) {
			host->bounce_buf_data[0] = val & 0xff;
			host->cmd_flags |= DATA_CARRY;
			break;
		}
		val |= (buf[pos++] << 8) & 0xff00;
		writel(val, sock->addr + SOCK_MMCSD_DATA);
	}
	kunmap_atomic(buf - off);
}

static void tifm_sd_transfer_data(struct tifm_sd *host)
{
	struct mmc_data *r_data = host->req->cmd->data;
	struct scatterlist *sg = r_data->sg;
	unsigned int off, cnt, t_size = TIFM_MMCSD_FIFO_SIZE * 2;
	unsigned int p_off, p_cnt;
	struct page *pg;

	if (host->sg_pos == host->sg_len)
		return;
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len) {
				if ((r_data->flags & MMC_DATA_WRITE)
				    && (host->cmd_flags & DATA_CARRY))
					writel(host->bounce_buf_data[0],
					       host->dev->addr
					       + SOCK_MMCSD_DATA);

				return;
			}
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_READ)
			tifm_sd_read_fifo(host, pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_write_fifo(host, pg, p_off, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}

static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
			      struct page *src, unsigned int src_off,
			      unsigned int count)
{
	unsigned char *src_buf = kmap_atomic(src) + src_off;
	unsigned char *dst_buf = kmap_atomic(dst) + dst_off;

	memcpy(dst_buf, src_buf, count);

	kunmap_atomic(dst_buf - dst_off);
	kunmap_atomic(src_buf - src_off);
}

static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct scatterlist *sg = r_data->sg;
	unsigned int t_size = r_data->blksz;
	unsigned int off, cnt;
	unsigned int p_off, p_cnt;
	struct page *pg;

	dev_dbg(&host->dev->dev, "bouncing block\n");
	while (t_size) {
		cnt = sg[host->sg_pos].length - host->block_pos;
		if (!cnt) {
			host->block_pos = 0;
			host->sg_pos++;
			if (host->sg_pos == host->sg_len)
				return;
			cnt = sg[host->sg_pos].length;
		}
		off = sg[host->sg_pos].offset + host->block_pos;

		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
		p_off = offset_in_page(off);
		p_cnt = PAGE_SIZE - p_off;
		p_cnt = min(p_cnt, cnt);
		p_cnt = min(p_cnt, t_size);

		if (r_data->flags & MMC_DATA_WRITE)
			tifm_sd_copy_page(sg_page(&host->bounce_buf),
					  r_data->blksz - t_size,
					  pg, p_off, p_cnt);
		else if (r_data->flags & MMC_DATA_READ)
			tifm_sd_copy_page(pg, p_off, sg_page(&host->bounce_buf),
					  r_data->blksz - t_size, p_cnt);

		t_size -= p_cnt;
		host->block_pos += p_cnt;
	}
}

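/*
 * DMA path: program the next transfer, at most TIFM_DMA_TSIZE blocks from
 * the current scatterlist entry.  A tail shorter than one block is staged
 * through the single-block bounce buffer: for writes it is gathered into
 * the buffer beforehand, for reads it is copied back out on the next call
 * (signalled by DATA_CARRY).  Returns 1 once no more data is left to set up.
 */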
static int tifm_sd_set_dma_data(struct tifm_sd *host, struct mmc_data *r_data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int t_size = TIFM_DMA_TSIZE * r_data->blksz;
	unsigned int dma_len, dma_blk_cnt, dma_off;
	struct scatterlist *sg = NULL;
	unsigned long flags;

	if (host->sg_pos == host->sg_len)
		return 1;

	if (host->cmd_flags & DATA_CARRY) {
		host->cmd_flags &= ~DATA_CARRY;
		local_irq_save(flags);
		tifm_sd_bounce_block(host, r_data);
		local_irq_restore(flags);
		if (host->sg_pos == host->sg_len)
			return 1;
	}

	dma_len = sg_dma_len(&r_data->sg[host->sg_pos]) - host->block_pos;
	if (!dma_len) {
		host->block_pos = 0;
		host->sg_pos++;
		if (host->sg_pos == host->sg_len)
			return 1;
		dma_len = sg_dma_len(&r_data->sg[host->sg_pos]);
	}

	if (dma_len < t_size) {
		dma_blk_cnt = dma_len / r_data->blksz;
		dma_off = host->block_pos;
		host->block_pos += dma_blk_cnt * r_data->blksz;
	} else {
		dma_blk_cnt = TIFM_DMA_TSIZE;
		dma_off = host->block_pos;
		host->block_pos += t_size;
	}

	if (dma_blk_cnt)
		sg = &r_data->sg[host->sg_pos];
	else if (dma_len) {
		if (r_data->flags & MMC_DATA_WRITE) {
			local_irq_save(flags);
			tifm_sd_bounce_block(host, r_data);
			local_irq_restore(flags);
		} else
			host->cmd_flags |= DATA_CARRY;

		sg = &host->bounce_buf;
		dma_off = 0;
		dma_blk_cnt = 1;
	} else
		return 1;

	dev_dbg(&sock->dev, "setting dma for %d blocks\n", dma_blk_cnt);
	writel(sg_dma_address(sg) + dma_off, sock->addr + SOCK_DMA_ADDRESS);
	if (r_data->flags & MMC_DATA_WRITE)
		writel((dma_blk_cnt << 8) | TIFM_DMA_TX | TIFM_DMA_EN,
		       sock->addr + SOCK_DMA_CONTROL);
	else
		writel((dma_blk_cnt << 8) | TIFM_DMA_EN,
		       sock->addr + SOCK_DMA_CONTROL);

	return 0;
}

static unsigned int tifm_sd_op_flags(struct mmc_command *cmd)
{
	unsigned int rc = 0;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		rc |= TIFM_MMCSD_RSP_R0;
		break;
	case MMC_RSP_R1B:
		rc |= TIFM_MMCSD_RSP_BUSY;
		fallthrough;
	case MMC_RSP_R1:
		rc |= TIFM_MMCSD_RSP_R1;
		break;
	case MMC_RSP_R2:
		rc |= TIFM_MMCSD_RSP_R2;
		break;
	case MMC_RSP_R3:
		rc |= TIFM_MMCSD_RSP_R3;
		break;
	default:
		BUG();
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		rc |= TIFM_MMCSD_CMD_BC;
		break;
	case MMC_CMD_BCR:
		rc |= TIFM_MMCSD_CMD_BCR;
		break;
	case MMC_CMD_AC:
		rc |= TIFM_MMCSD_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		rc |= TIFM_MMCSD_CMD_ADTC;
		break;
	default:
		BUG();
	}
	return rc;
}

static void tifm_sd_exec(struct tifm_sd *host, struct mmc_command *cmd)
{
	struct tifm_dev *sock = host->dev;
	unsigned int cmd_mask = tifm_sd_op_flags(cmd);

	if (host->open_drain)
		cmd_mask |= TIFM_MMCSD_ODTO;

	if (cmd->data && (cmd->data->flags & MMC_DATA_READ))
		cmd_mask |= TIFM_MMCSD_READ;

	dev_dbg(&sock->dev, "executing opcode 0x%x, arg: 0x%x, mask: 0x%x\n",
		cmd->opcode, cmd->arg, cmd_mask);

	writel((cmd->arg >> 16) & 0xffff, sock->addr + SOCK_MMCSD_ARG_HIGH);
	writel(cmd->arg & 0xffff, sock->addr + SOCK_MMCSD_ARG_LOW);
	writel(cmd->opcode | cmd_mask, sock->addr + SOCK_MMCSD_COMMAND);
}

static void tifm_sd_fetch_resp(struct mmc_command *cmd, struct tifm_dev *sock)
{
	cmd->resp[0] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x1c) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x18);
	cmd->resp[1] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x14) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x10);
	cmd->resp[2] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x0c) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x08);
	cmd->resp[3] = (readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x04) << 16)
		       | readl(sock->addr + SOCK_MMCSD_RESPONSE + 0x00);
}

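/*
 * Request completion is tracked in host->cmd_flags: CMD_READY for the
 * command response, BRS_READY and FIFO_READY for the data phase,
 * SCMD_ACTIVE/SCMD_READY for an optional stop command and CARD_BUSY for
 * the card busy signal.  Once everything the current request needs has
 * been observed, the finish tasklet is scheduled.
 */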
static void tifm_sd_check_status(struct tifm_sd *host)
{
	struct tifm_dev *sock = host->dev;
	struct mmc_command *cmd = host->req->cmd;

	if (cmd->error)
		goto finish_request;

	if (!(host->cmd_flags & CMD_READY))
		return;

	if (cmd->data) {
		if (cmd->data->error) {
			if ((host->cmd_flags & SCMD_ACTIVE)
			    && !(host->cmd_flags & SCMD_READY))
				return;

			goto finish_request;
		}

		if (!(host->cmd_flags & BRS_READY))
			return;

		if (!(host->no_dma || (host->cmd_flags & FIFO_READY)))
			return;

		if (cmd->data->flags & MMC_DATA_WRITE) {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					host->cmd_flags |= SCMD_ACTIVE;
					writel(TIFM_MMCSD_EOFB
					       | readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY)
					    || (host->cmd_flags & CARD_BUSY))
						return;
					writel((~TIFM_MMCSD_EOFB)
					       & readl(sock->addr
						       + SOCK_MMCSD_INT_ENABLE),
					       sock->addr
					       + SOCK_MMCSD_INT_ENABLE);
				}
			} else {
				if (host->cmd_flags & CARD_BUSY)
					return;
				writel((~TIFM_MMCSD_EOFB)
				       & readl(sock->addr
					       + SOCK_MMCSD_INT_ENABLE),
				       sock->addr + SOCK_MMCSD_INT_ENABLE);
			}
		} else {
			if (host->req->stop) {
				if (!(host->cmd_flags & SCMD_ACTIVE)) {
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					return;
				} else {
					if (!(host->cmd_flags & SCMD_READY))
						return;
				}
			}
		}
	}
finish_request:
	tasklet_schedule(&host->finish_tasklet);
}

/* Called from interrupt handler */
static void tifm_sd_data_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int fifo_status = 0;
	struct mmc_data *r_data = NULL;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	fifo_status = readl(sock->addr + SOCK_DMA_FIFO_STATUS);
	dev_dbg(&sock->dev, "data event: fifo_status %x, flags %x\n",
		fifo_status, host->cmd_flags);

	if (host->req) {
		r_data = host->req->cmd->data;

		if (r_data && (fifo_status & TIFM_FIFO_READY)) {
			if (tifm_sd_set_dma_data(host, r_data)) {
				host->cmd_flags |= FIFO_READY;
				tifm_sd_check_status(host);
			}
		}
	}

	writel(fifo_status, sock->addr + SOCK_DMA_FIFO_STATUS);
	spin_unlock(&sock->lock);
}

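/*
 * MMC/SD status interrupt: latches command and data errors, fetches the
 * response on end-of-command, feeds the PIO transfer when DMA is not in
 * use and tracks the card busy state before calling tifm_sd_check_status().
 */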
/* Called from interrupt handler */
static void tifm_sd_card_event(struct tifm_dev *sock)
{
	struct tifm_sd *host;
	unsigned int host_status = 0;
	int cmd_error = 0;
	struct mmc_command *cmd = NULL;
	unsigned long flags;

	spin_lock(&sock->lock);
	host = mmc_priv((struct mmc_host*)tifm_get_drvdata(sock));
	host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
	dev_dbg(&sock->dev, "host event: host_status %x, flags %x\n",
		host_status, host->cmd_flags);

	if (host->req) {
		cmd = host->req->cmd;

		if (host_status & TIFM_MMCSD_ERRMASK) {
			writel(host_status & TIFM_MMCSD_ERRMASK,
			       sock->addr + SOCK_MMCSD_STATUS);
			if (host_status & TIFM_MMCSD_CTO)
				cmd_error = -ETIMEDOUT;
			else if (host_status & TIFM_MMCSD_CCRC)
				cmd_error = -EILSEQ;

			if (cmd->data) {
				if (host_status & TIFM_MMCSD_DTO)
					cmd->data->error = -ETIMEDOUT;
				else if (host_status & TIFM_MMCSD_DCRC)
					cmd->data->error = -EILSEQ;
			}

			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(TIFM_DMA_RESET, sock->addr + SOCK_DMA_CONTROL);

			if (host->req->stop) {
				if (host->cmd_flags & SCMD_ACTIVE) {
					host->req->stop->error = cmd_error;
					host->cmd_flags |= SCMD_READY;
				} else {
					cmd->error = cmd_error;
					host->cmd_flags |= SCMD_ACTIVE;
					tifm_sd_exec(host, host->req->stop);
					goto done;
				}
			} else
				cmd->error = cmd_error;
		} else {
			if (host_status & (TIFM_MMCSD_EOC | TIFM_MMCSD_CERR)) {
				if (!(host->cmd_flags & CMD_READY)) {
					host->cmd_flags |= CMD_READY;
					tifm_sd_fetch_resp(cmd, sock);
				} else if (host->cmd_flags & SCMD_ACTIVE) {
					host->cmd_flags |= SCMD_READY;
					tifm_sd_fetch_resp(host->req->stop,
							   sock);
				}
			}
			if (host_status & TIFM_MMCSD_BRS)
				host->cmd_flags |= BRS_READY;
		}

		if (host->no_dma && cmd->data) {
			if (host_status & TIFM_MMCSD_AE)
				writel(host_status & TIFM_MMCSD_AE,
				       sock->addr + SOCK_MMCSD_STATUS);

			if (host_status & (TIFM_MMCSD_AE | TIFM_MMCSD_AF
					   | TIFM_MMCSD_BRS)) {
				local_irq_save(flags);
				tifm_sd_transfer_data(host);
				local_irq_restore(flags);
				host_status &= ~TIFM_MMCSD_AE;
			}
		}

		if (host_status & TIFM_MMCSD_EOFB)
			host->cmd_flags &= ~CARD_BUSY;
		else if (host_status & TIFM_MMCSD_CB)
			host->cmd_flags |= CARD_BUSY;

		tifm_sd_check_status(host);
	}
done:
	writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
	spin_unlock(&sock->lock);
}

static void tifm_sd_set_data_timeout(struct tifm_sd *host,
				     struct mmc_data *data)
{
	struct tifm_dev *sock = host->dev;
	unsigned int data_timeout = data->timeout_clks;

	if (fixed_timeout)
		return;

	data_timeout += data->timeout_ns /
			((1000000000UL / host->clk_freq) * host->clk_div);

	if (data_timeout < 0xffff) {
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel((~TIFM_MMCSD_DPE)
		       & readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	} else {
		data_timeout = (data_timeout >> 10) + 1;
		if (data_timeout > 0xffff)
			data_timeout = 0;	/* set to unlimited */
		writel(data_timeout, sock->addr + SOCK_MMCSD_DATA_TO);
		writel(TIFM_MMCSD_DPE
		       | readl(sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG),
		       sock->addr + SOCK_MMCSD_SDIO_MODE_CONFIG);
	}
}

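/*
 * Issue a request from the MMC core.  Block sizes that are not a power of
 * two force the PIO path (the DMA path programs the FIFO page size as
 * ilog2(blksz) - 2).  The controller's block count and block length
 * registers are loaded with "value - 1".
 */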
static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned long flags;
	struct mmc_data *r_data = mrq->cmd->data;

	spin_lock_irqsave(&sock->lock, flags);
	if (host->eject) {
		mrq->cmd->error = -ENOMEDIUM;
		goto err_out;
	}

	if (host->req) {
		pr_err("%s : unfinished request detected\n",
		       dev_name(&sock->dev));
		mrq->cmd->error = -ETIMEDOUT;
		goto err_out;
	}

	host->cmd_flags = 0;
	host->block_pos = 0;
	host->sg_pos = 0;

	if (mrq->data && !is_power_of_2(mrq->data->blksz))
		host->no_dma = 1;
	else
		host->no_dma = no_dma ? 1 : 0;

	if (r_data) {
		tifm_sd_set_data_timeout(host, r_data);

		if ((r_data->flags & MMC_DATA_WRITE) && !mrq->stop)
			writel(TIFM_MMCSD_EOFB
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);

		if (host->no_dma) {
			writel(TIFM_MMCSD_BUFINT
			       | readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
			writel(((TIFM_MMCSD_FIFO_SIZE - 1) << 8)
			       | (TIFM_MMCSD_FIFO_SIZE - 1),
			       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			host->sg_len = r_data->sg_len;
		} else {
			sg_init_one(&host->bounce_buf, host->bounce_buf_data,
				    r_data->blksz);

			if (1 != tifm_map_sg(sock, &host->bounce_buf, 1,
					     r_data->flags & MMC_DATA_WRITE
					     ? DMA_TO_DEVICE
					     : DMA_FROM_DEVICE)) {
				pr_err("%s : scatterlist map failed\n",
				       dev_name(&sock->dev));
				mrq->cmd->error = -ENOMEM;
				goto err_out;
			}
			host->sg_len = tifm_map_sg(sock, r_data->sg,
						   r_data->sg_len,
						   r_data->flags
						   & MMC_DATA_WRITE
						   ? DMA_TO_DEVICE
						   : DMA_FROM_DEVICE);
			if (host->sg_len < 1) {
				pr_err("%s : scatterlist map failed\n",
				       dev_name(&sock->dev));
				tifm_unmap_sg(sock, &host->bounce_buf, 1,
					      r_data->flags & MMC_DATA_WRITE
					      ? DMA_TO_DEVICE
					      : DMA_FROM_DEVICE);
				mrq->cmd->error = -ENOMEM;
				goto err_out;
			}

			writel(TIFM_FIFO_INT_SETALL,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
			writel(ilog2(r_data->blksz) - 2,
			       sock->addr + SOCK_FIFO_PAGE_SIZE);
			writel(TIFM_FIFO_ENABLE,
			       sock->addr + SOCK_FIFO_CONTROL);
			writel(TIFM_FIFO_INTMASK,
			       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);

			if (r_data->flags & MMC_DATA_WRITE)
				writel(TIFM_MMCSD_TXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);
			else
				writel(TIFM_MMCSD_RXDE,
				       sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

			tifm_sd_set_dma_data(host, r_data);
		}

		writel(r_data->blocks - 1,
		       sock->addr + SOCK_MMCSD_NUM_BLOCKS);
		writel(r_data->blksz - 1,
		       sock->addr + SOCK_MMCSD_BLOCK_LEN);
	}

	host->req = mrq;
	mod_timer(&host->timer, jiffies + host->timeout_jiffies);
	writel(TIFM_CTRL_LED | readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);
	tifm_sd_exec(host, mrq->cmd);
	spin_unlock_irqrestore(&sock->lock, flags);
	return;

err_out:
	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_request_done(mmc, mrq);
}

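/*
 * Completion tasklet: unmaps the DMA buffers (or disables buffer
 * interrupts in PIO mode) and reconstructs bytes_xfered from the residual
 * values left in the block count and block length registers.
 */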
static void tifm_sd_end_cmd(struct tasklet_struct *t)
{
	struct tifm_sd *host = from_tasklet(host, t, finish_tasklet);
	struct tifm_dev *sock = host->dev;
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct mmc_request *mrq;
	struct mmc_data *r_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	del_timer(&host->timer);
	mrq = host->req;
	host->req = NULL;

	if (!mrq) {
		pr_err(" %s : no request to complete?\n",
		       dev_name(&sock->dev));
		spin_unlock_irqrestore(&sock->lock, flags);
		return;
	}

	r_data = mrq->cmd->data;
	if (r_data) {
		if (host->no_dma) {
			writel((~TIFM_MMCSD_BUFINT)
			       & readl(sock->addr + SOCK_MMCSD_INT_ENABLE),
			       sock->addr + SOCK_MMCSD_INT_ENABLE);
		} else {
			tifm_unmap_sg(sock, &host->bounce_buf, 1,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
			tifm_unmap_sg(sock, r_data->sg, r_data->sg_len,
				      (r_data->flags & MMC_DATA_WRITE)
				      ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		}

		r_data->bytes_xfered = r_data->blocks
			- readl(sock->addr + SOCK_MMCSD_NUM_BLOCKS) - 1;
		r_data->bytes_xfered *= r_data->blksz;
		r_data->bytes_xfered += r_data->blksz
			- readl(sock->addr + SOCK_MMCSD_BLOCK_LEN) + 1;
	}

	writel((~TIFM_CTRL_LED) & readl(sock->addr + SOCK_CONTROL),
	       sock->addr + SOCK_CONTROL);

	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_request_done(mmc, mrq);
}

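/*
 * Request watchdog: fires when a request has not completed within
 * TIFM_MMCSD_REQ_TIMEOUT_MS and ejects the socket, after which the stuck
 * request is failed with -ENOMEDIUM in tifm_sd_remove().
 */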
static void tifm_sd_abort(struct timer_list *t)
{
	struct tifm_sd *host = from_timer(host, t, timer);

	pr_err("%s : card failed to respond for a long period of time "
	       "(%x, %x)\n",
	       dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags);

	tifm_eject(host->dev);
}

static void tifm_sd_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned int clk_div1, clk_div2;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);

	dev_dbg(&sock->dev, "ios: clock = %u, vdd = %x, bus_mode = %x, "
		"chip_select = %x, power_mode = %x, bus_width = %x\n",
		ios->clock, ios->vdd, ios->bus_mode, ios->chip_select,
		ios->power_mode, ios->bus_width);

	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		writel(TIFM_MMCSD_4BBUS | readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	} else {
		writel((~TIFM_MMCSD_4BBUS)
		       & readl(sock->addr + SOCK_MMCSD_CONFIG),
		       sock->addr + SOCK_MMCSD_CONFIG);
	}

	if (ios->clock) {
		clk_div1 = 20000000 / ios->clock;
		if (!clk_div1)
			clk_div1 = 1;

		clk_div2 = 24000000 / ios->clock;
		if (!clk_div2)
			clk_div2 = 1;

		if ((20000000 / clk_div1) > ios->clock)
			clk_div1++;
		if ((24000000 / clk_div2) > ios->clock)
			clk_div2++;
		if ((20000000 / clk_div1) > (24000000 / clk_div2)) {
			host->clk_freq = 20000000;
			host->clk_div = clk_div1;
			writel((~TIFM_CTRL_FAST_CLK)
			       & readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		} else {
			host->clk_freq = 24000000;
			host->clk_div = clk_div2;
			writel(TIFM_CTRL_FAST_CLK
			       | readl(sock->addr + SOCK_CONTROL),
			       sock->addr + SOCK_CONTROL);
		}
	} else {
		host->clk_div = 0;
	}
	host->clk_div &= TIFM_MMCSD_CLKMASK;
	writel(host->clk_div
	       | ((~TIFM_MMCSD_CLKMASK)
		  & readl(sock->addr + SOCK_MMCSD_CONFIG)),
	       sock->addr + SOCK_MMCSD_CONFIG);

	host->open_drain = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN);

	/* chip_select : maybe later */
	//vdd
	//power is set before probe / after remove

	spin_unlock_irqrestore(&sock->lock, flags);
}

static int tifm_sd_ro(struct mmc_host *mmc)
{
	int rc = 0;
	struct tifm_sd *host = mmc_priv(mmc);
	struct tifm_dev *sock = host->dev;
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	if (TIFM_MMCSD_CARD_RO & readl(sock->addr + SOCK_PRESENT_STATE))
		rc = 1;
	spin_unlock_irqrestore(&sock->lock, flags);
	return rc;
}

static const struct mmc_host_ops tifm_sd_ops = {
	.request = tifm_sd_request,
	.set_ios = tifm_sd_ios,
	.get_ro = tifm_sd_ro
};

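/*
 * Bring the MMC/SD section of the socket into a known state: reset the
 * controller and poll for completion, then issue the INAB (initialize)
 * command and wait for end-of-command before enabling interrupts.
 */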
static int tifm_sd_initialize_host(struct tifm_sd *host)
{
	int rc;
	unsigned int host_status = 0;
	struct tifm_dev *sock = host->dev;

	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	host->clk_div = 61;
	host->clk_freq = 20000000;
	writel(TIFM_MMCSD_RESET, sock->addr + SOCK_MMCSD_SYSTEM_CONTROL);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);

	/* wait up to 0.51 sec for reset */
	for (rc = 32; rc <= 256; rc <<= 1) {
		if (1 & readl(sock->addr + SOCK_MMCSD_SYSTEM_STATUS)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		pr_err("%s : controller failed to reset\n",
		       dev_name(&sock->dev));
		return -ENODEV;
	}

	writel(0, sock->addr + SOCK_MMCSD_NUM_BLOCKS);
	writel(host->clk_div | TIFM_MMCSD_POWER,
	       sock->addr + SOCK_MMCSD_CONFIG);
	writel(TIFM_MMCSD_RXDE, sock->addr + SOCK_MMCSD_BUFFER_CONFIG);

	// command timeout fixed to 64 clocks for now
	writel(64, sock->addr + SOCK_MMCSD_COMMAND_TO);
	writel(TIFM_MMCSD_INAB, sock->addr + SOCK_MMCSD_COMMAND);

	for (rc = 16; rc <= 64; rc <<= 1) {
		host_status = readl(sock->addr + SOCK_MMCSD_STATUS);
		writel(host_status, sock->addr + SOCK_MMCSD_STATUS);
		if (!(host_status & TIFM_MMCSD_ERRMASK)
		    && (host_status & TIFM_MMCSD_EOC)) {
			rc = 0;
			break;
		}
		msleep(rc);
	}

	if (rc) {
		pr_err("%s : card not ready - probe failed on initialization\n",
		       dev_name(&sock->dev));
		return -ENODEV;
	}

	writel(TIFM_MMCSD_CERR | TIFM_MMCSD_BRS | TIFM_MMCSD_EOC
	       | TIFM_MMCSD_ERRMASK,
	       sock->addr + SOCK_MMCSD_INT_ENABLE);

	return 0;
}

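/*
 * Probe: check that the socket still reports an occupied state, set the
 * mmc_host limits (up to 2048 blocks per request, block size capped at
 * min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE)), initialize the controller
 * and register the host with the MMC core.
 */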
static int tifm_sd_probe(struct tifm_dev *sock)
{
	struct mmc_host *mmc;
	struct tifm_sd *host;
	int rc = -EIO;

	if (!(TIFM_SOCK_STATE_OCCUPIED
	      & readl(sock->addr + SOCK_PRESENT_STATE))) {
		pr_warn("%s : card gone, unexpectedly\n",
			dev_name(&sock->dev));
		return rc;
	}

	mmc = mmc_alloc_host(sizeof(struct tifm_sd), &sock->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	tifm_set_drvdata(sock, mmc);
	host->dev = sock;
	host->timeout_jiffies = msecs_to_jiffies(TIFM_MMCSD_REQ_TIMEOUT_MS);
	/*
	 * We use a fixed request timeout of 1s, hence inform the core about it.
	 * A future improvement should instead respect the cmd->busy_timeout.
	 */
	mmc->max_busy_timeout = TIFM_MMCSD_REQ_TIMEOUT_MS;

	tasklet_setup(&host->finish_tasklet, tifm_sd_end_cmd);
	timer_setup(&host->timer, tifm_sd_abort, 0);

	mmc->ops = &tifm_sd_ops;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_4_BIT_DATA;
	mmc->f_min = 20000000 / 60;
	mmc->f_max = 24000000;

	mmc->max_blk_count = 2048;
	mmc->max_segs = mmc->max_blk_count;
	mmc->max_blk_size = min(TIFM_MMCSD_MAX_BLOCK_SIZE, PAGE_SIZE);
	mmc->max_seg_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_req_size = mmc->max_seg_size;

	sock->card_event = tifm_sd_card_event;
	sock->data_event = tifm_sd_data_event;
	rc = tifm_sd_initialize_host(host);

	if (!rc)
		rc = mmc_add_host(mmc);
	if (!rc)
		return 0;

	mmc_free_host(mmc);
	return rc;
}

static void tifm_sd_remove(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&sock->lock, flags);
	host->eject = 1;
	writel(0, sock->addr + SOCK_MMCSD_INT_ENABLE);
	spin_unlock_irqrestore(&sock->lock, flags);

	tasklet_kill(&host->finish_tasklet);

	spin_lock_irqsave(&sock->lock, flags);
	if (host->req) {
		writel(TIFM_FIFO_INT_SETALL,
		       sock->addr + SOCK_DMA_FIFO_INT_ENABLE_CLEAR);
		writel(0, sock->addr + SOCK_DMA_FIFO_INT_ENABLE_SET);
		host->req->cmd->error = -ENOMEDIUM;
		if (host->req->stop)
			host->req->stop->error = -ENOMEDIUM;
		tasklet_schedule(&host->finish_tasklet);
	}
	spin_unlock_irqrestore(&sock->lock, flags);
	mmc_remove_host(mmc);
	dev_dbg(&sock->dev, "after remove\n");

	mmc_free_host(mmc);
}

#ifdef CONFIG_PM

static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
	return 0;
}

static int tifm_sd_resume(struct tifm_dev *sock)
{
	struct mmc_host *mmc = tifm_get_drvdata(sock);
	struct tifm_sd *host = mmc_priv(mmc);
	int rc;

	rc = tifm_sd_initialize_host(host);
	dev_dbg(&sock->dev, "resume initialize %d\n", rc);

	if (rc)
		host->eject = 1;

	return rc;
}

#else

#define tifm_sd_suspend NULL
#define tifm_sd_resume NULL

#endif /* CONFIG_PM */

static struct tifm_device_id tifm_sd_id_tbl[] = {
	{ TIFM_TYPE_SD }, { }
};

static struct tifm_driver tifm_sd_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE
	},
	.id_table = tifm_sd_id_tbl,
	.probe = tifm_sd_probe,
	.remove = tifm_sd_remove,
	.suspend = tifm_sd_suspend,
	.resume = tifm_sd_resume
};

static int __init tifm_sd_init(void)
{
	return tifm_register_driver(&tifm_sd_driver);
}

static void __exit tifm_sd_exit(void)
{
	tifm_unregister_driver(&tifm_sd_driver);
}

MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia SD driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(tifm, tifm_sd_id_tbl);
MODULE_VERSION(DRIVER_VERSION);

module_init(tifm_sd_init);
module_exit(tifm_sd_exit);