/*
 * linux/drivers/mmc/host/omap.c
 *
 * Copyright (C) 2004 Nokia Corporation
 * Written by Tuukka Tikkanen and Juha Yrjola <juha.yrjola@nokia.com>
 * Misc hacks here and there by Tony Lindgren <tony@atomide.com>
 * Other hacks (DMA, SD, etc) by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/i2c/tps65010.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>

#include <plat/board.h>
#include <plat/mmc.h>
#include <mach/gpio.h>
#include <plat/dma.h>
#include <plat/mux.h>
#include <plat/fpga.h>

/*
 * Controller register indices.  These are scaled by
 * (1 << host->reg_shift) in OMAP_MMC_REG() to produce byte offsets,
 * since register spacing differs between OMAP generations.
 */
#define OMAP_MMC_REG_CMD	0x00
#define OMAP_MMC_REG_ARGL	0x01
#define OMAP_MMC_REG_ARGH	0x02
#define OMAP_MMC_REG_CON	0x03
#define OMAP_MMC_REG_STAT	0x04
#define OMAP_MMC_REG_IE		0x05
#define OMAP_MMC_REG_CTO	0x06
#define OMAP_MMC_REG_DTO	0x07
#define OMAP_MMC_REG_DATA	0x08
#define OMAP_MMC_REG_BLEN	0x09
#define OMAP_MMC_REG_NBLK	0x0a
#define OMAP_MMC_REG_BUF	0x0b
#define OMAP_MMC_REG_SDIO	0x0d
#define OMAP_MMC_REG_REV	0x0f
#define OMAP_MMC_REG_RSP0	0x10
#define OMAP_MMC_REG_RSP1	0x11
#define OMAP_MMC_REG_RSP2	0x12
#define OMAP_MMC_REG_RSP3	0x13
#define OMAP_MMC_REG_RSP4	0x14
#define OMAP_MMC_REG_RSP5	0x15
#define OMAP_MMC_REG_RSP6	0x16
#define OMAP_MMC_REG_RSP7	0x17
#define OMAP_MMC_REG_IOSR	0x18
#define OMAP_MMC_REG_SYSC	0x19
#define OMAP_MMC_REG_SYSS	0x1a

/* Bits in the STAT register (the IE register enables the same events) */
#define OMAP_MMC_STAT_CARD_ERR		(1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ		(1 << 13)
#define OMAP_MMC_STAT_OCR_BUSY		(1 << 12)
#define OMAP_MMC_STAT_A_EMPTY		(1 << 11)
#define OMAP_MMC_STAT_A_FULL		(1 << 10)
#define OMAP_MMC_STAT_CMD_CRC		(1 <<  8)
#define OMAP_MMC_STAT_CMD_TOUT		(1 <<  7)
#define OMAP_MMC_STAT_DATA_CRC		(1 <<  6)
#define OMAP_MMC_STAT_DATA_TOUT		(1 <<  5)
#define OMAP_MMC_STAT_END_BUSY		(1 <<  4)
#define OMAP_MMC_STAT_END_OF_DATA	(1 <<  3)
#define OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)
#define OMAP_MMC_STAT_END_OF_CMD	(1 <<  0)

/* Registers are 16 bits wide and accessed via __raw_readw/__raw_writew */
#define OMAP_MMC_REG(host, reg)		(OMAP_MMC_REG_##reg << (host)->reg_shift)
#define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))
#define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg))

/*
 * Command types
 */
#define OMAP_MMC_CMDTYPE_BC	0
#define OMAP_MMC_CMDTYPE_BCR	1
#define OMAP_MMC_CMDTYPE_AC	2
#define OMAP_MMC_CMDTYPE_ADTC	3


#define DRIVER_NAME "mmci-omap"

/* Specifies how often in millisecs to poll for card status changes
 * when the cover switch is open */
#define OMAP_MMC_COVER_POLL_DELAY	500

struct mmc_omap_host;

/* Per-slot state; one controller (mmc_omap_host) can serve several slots. */
struct mmc_omap_slot {
	int			id;
	unsigned int		vdd;
	u16			saved_con;	/* CON register value to restore on slot select */
	u16			bus_mode;
	unsigned int		fclk_freq;	/* current divided fclk rate for this slot */
	unsigned		powered:1;

	struct tasklet_struct	cover_tasklet;
	struct timer_list	cover_timer;
	unsigned		cover_open;

	struct mmc_request      *mrq;		/* request queued while another slot owns the host */
	struct mmc_omap_host    *host;
	struct mmc_host		*mmc;
	struct omap_mmc_slot_data *pdata;
};

/* Per-controller state shared by all slots. */
struct mmc_omap_host {
	int			initialized;
	int			suspended;
	struct mmc_request *	mrq;
	struct mmc_command *	cmd;
	struct mmc_data *	data;
	struct mmc_host *	mmc;		/* non-NULL while a slot owns the controller */
	struct device *		dev;
	unsigned char		id; /* 16xx chips have 2 MMC blocks */
	struct clk *		iclk;
	struct clk *		fclk;
	struct resource		*mem_res;
	void __iomem		*virt_base;
	unsigned int		phys_base;
	int			irq;
	unsigned char		bus_mode;
	unsigned char		hw_bus_mode;
	unsigned int		reg_shift;	/* register stride: offsets are index << reg_shift */

	struct work_struct	cmd_abort_work;
	unsigned		abort:1;
	struct timer_list	cmd_abort_timer;

	struct work_struct	slot_release_work;
	struct mmc_omap_slot	*next_slot;	/* slot with a pending request, handed to release work */
	struct work_struct	send_stop_work;
	struct mmc_data		*stop_data;

	/* PIO bookkeeping */
	unsigned int		sg_len;
	int			sg_idx;
	u16 *			buffer;
	u32			buffer_bytes_left;
	u32			total_bytes_left;

	unsigned		use_dma:1;
	unsigned		brs_received:1, dma_done:1;	/* both guarded by dma_lock */
	unsigned		dma_is_read:1;
	unsigned		dma_in_use:1;
	int			dma_ch;		/* -1 when no DMA channel is held */
	spinlock_t		dma_lock;
	struct timer_list	dma_timer;	/* lazily releases the DMA channel */
	unsigned		dma_len;

	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];
	struct mmc_omap_slot    *current_slot;
	spinlock_t		slot_lock;
	wait_queue_head_t	slot_wq;
	int			nr_slots;

	struct timer_list	clk_timer;
	spinlock_t		clk_lock;	/* for changing enabled state */
	unsigned int		fclk_enabled:1;

	struct omap_mmc_platform_data *pdata;
};

/* Delay for 8 fclk cycles so the controller sees a clean clock stop. */
static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
{
	unsigned long tick_ns;

	if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) {
		/* round the per-cycle time up */
		tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq;
		ndelay(8 * tick_ns);
	}
}

/* Enable/disable the functional clock; refcount-free, guarded by clk_lock. */
static void mmc_omap_fclk_enable(struct mmc_omap_host *host, unsigned int enable)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	if (host->fclk_enabled != enable) {
		host->fclk_enabled = enable;
		if (enable)
			clk_enable(host->fclk);
		else
			clk_disable(host->fclk);
	}
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

/*
 * Make @slot the active slot.  With @claimed == 0 the caller only wants
 * the mux switched (e.g. set_ios); with @claimed != 0 the caller takes
 * exclusive ownership of the controller, sleeping until it is free.
 */
static void mmc_omap_select_slot(struct mmc_omap_slot *slot, int claimed)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	if (claimed)
		goto no_claim;
	spin_lock_irqsave(&host->slot_lock, flags);
	/* Sleep until no other slot owns the controller, then take it. */
	while (host->mmc != NULL) {
		spin_unlock_irqrestore(&host->slot_lock, flags);
		wait_event(host->slot_wq, host->mmc == NULL);
		spin_lock_irqsave(&host->slot_lock, flags);
	}
	host->mmc = slot->mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
no_claim:
	del_timer(&host->clk_timer);
	if (host->current_slot != slot || !claimed)
		mmc_omap_fclk_offdelay(host->current_slot);

	if (host->current_slot != slot) {
		OMAP_MMC_WRITE(host, CON, slot->saved_con & 0xFC00);
		if (host->pdata->switch_slot != NULL)
			host->pdata->switch_slot(mmc_dev(slot->mmc), slot->id);
		host->current_slot = slot;
	}

	if (claimed) {
		mmc_omap_fclk_enable(host, 1);

		/* Doing the dummy read here seems to work around some bug
		 * at least in OMAP24xx silicon where the command would not
		 * start after writing the CMD register. Sigh. */
		OMAP_MMC_READ(host, CON);

		OMAP_MMC_WRITE(host, CON, slot->saved_con);
	} else
		mmc_omap_fclk_enable(host, 0);
}

static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req);

/*
 * Deferred work: switch to the slot that queued a request while the
 * controller was busy (host->next_slot) and start that request.
 */
static void mmc_omap_slot_release_work(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  slot_release_work);
	struct mmc_omap_slot *next_slot = host->next_slot;
	struct mmc_request *rq;

	host->next_slot = NULL;
	mmc_omap_select_slot(next_slot, 1);

	rq = next_slot->mrq;
	next_slot->mrq = NULL;
	mmc_omap_start_request(host, rq);
}

/*
 * Drop ownership of the controller.  If another slot has a request
 * queued, hand ownership over and schedule the release work; otherwise
 * free the controller and wake any waiters in mmc_omap_select_slot().
 */
static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
{
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;
	int i;

	BUG_ON(slot == NULL || host->mmc == NULL);

	if (clk_enabled)
		/* Keeps clock running for at least 8 cycles on valid freq */
		mod_timer(&host->clk_timer, jiffies + HZ/10);
	else {
		del_timer(&host->clk_timer);
		mmc_omap_fclk_offdelay(slot);
		mmc_omap_fclk_enable(host, 0);
	}

	spin_lock_irqsave(&host->slot_lock, flags);
	/* Check for any pending requests */
	for (i = 0; i < host->nr_slots; i++) {
		struct mmc_omap_slot *new_slot;

		if (host->slots[i] == NULL || host->slots[i]->mrq == NULL)
			continue;

		BUG_ON(host->next_slot != NULL);
		new_slot = host->slots[i];
		/* The current slot should not have a request in queue */
		BUG_ON(new_slot == host->current_slot);

		/* Hand ownership straight to the waiting slot. */
		host->next_slot = new_slot;
		host->mmc = new_slot->mmc;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		schedule_work(&host->slot_release_work);
		return;
	}

	host->mmc = NULL;
	wake_up(&host->slot_wq);
	spin_unlock_irqrestore(&host->slot_lock, flags);
}

/* Returns non-zero when the slot's cover switch reports "open". */
static inline
int mmc_omap_cover_is_open(struct mmc_omap_slot *slot)
{
	if (slot->pdata->get_cover_state)
		return slot->pdata->get_cover_state(mmc_dev(slot->mmc),
						    slot->id);
	return 0;
}

/* sysfs "cover_switch" attribute: prints "open" or "closed". */
static ssize_t
mmc_omap_show_cover_switch(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct mmc_omap_slot *slot = mmc_priv(mmc);

	return sprintf(buf, "%s\n", mmc_omap_cover_is_open(slot) ?
		       "open" :
		       "closed");
}

static DEVICE_ATTR(cover_switch, S_IRUGO, mmc_omap_show_cover_switch, NULL);

/* sysfs "slot_name" attribute: prints the board-supplied slot name. */
static ssize_t
mmc_omap_show_slot_name(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct mmc_omap_slot *slot = mmc_priv(mmc);

	return sprintf(buf, "%s\n", slot->pdata->name);
}

static DEVICE_ATTR(slot_name, S_IRUGO, mmc_omap_show_slot_name, NULL);

/*
 * Program and launch a command: build the CMD register value from the
 * core's response/command type, arm the stuck-command abort timer, set
 * the argument registers and write CMD last to start the transaction.
 */
static void
mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	u32 cmdreg;
	u32 resptype;
	u32 cmdtype;

	host->cmd = cmd;

	resptype = 0;
	cmdtype = 0;

	/* Our hardware needs to know exact type */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		break;
	case MMC_RSP_R1:
	case MMC_RSP_R1B:
		/* resp 1, 1b, 6, 7 */
		resptype = 1;
		break;
	case MMC_RSP_R2:
		resptype = 2;
		break;
	case MMC_RSP_R3:
		resptype = 3;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid response type: %04x\n", mmc_resp_type(cmd));
		break;
	}

	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) {
		cmdtype = OMAP_MMC_CMDTYPE_ADTC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BC) {
		cmdtype = OMAP_MMC_CMDTYPE_BC;
	} else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) {
		cmdtype = OMAP_MMC_CMDTYPE_BCR;
	} else {
		cmdtype = OMAP_MMC_CMDTYPE_AC;
	}

	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12);

	/* bit 6: open-drain bus mode */
	if (host->current_slot->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdreg |= 1 << 6;

	/* bit 11: expect busy signalling after the response */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= 1 << 11;

	/* bit 15: data transfer is a read */
	if (host->data && !(host->data->flags & MMC_DATA_WRITE))
		cmdreg |= 1 << 15;

	/* Watchdog in case the command never completes */
	mod_timer(&host->cmd_abort_timer, jiffies + HZ/2);

	OMAP_MMC_WRITE(host, CTO, 200);
	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);
	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16);
	OMAP_MMC_WRITE(host,
		       IE,
		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    |
		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  |
		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT |
		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  |
		       OMAP_MMC_STAT_END_OF_DATA);
	OMAP_MMC_WRITE(host, CMD, cmdreg);
}

/*
 * Tear down the DMA mapping for a finished (or failed) data transfer.
 * The channel itself is kept and freed lazily by dma_timer so that
 * back-to-back transfers in the same direction can reuse it.
 */
static void
mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,
		     int abort)
{
	enum dma_data_direction dma_data_dir;

	BUG_ON(host->dma_ch < 0);
	if (data->error)
		omap_stop_dma(host->dma_ch);
	/* Release DMA channel lazily */
	mod_timer(&host->dma_timer, jiffies + HZ);
	if (data->flags & MMC_DATA_WRITE)
		dma_data_dir = DMA_TO_DEVICE;
	else
		dma_data_dir = DMA_FROM_DEVICE;
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
		     dma_data_dir);
}

/*
 * Deferred work: issue the STOP command for a data transfer, after
 * waiting 8 fclk cycles as the ndelay below enforces.
 */
static void mmc_omap_send_stop_work(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  send_stop_work);
	struct mmc_omap_slot *slot = host->current_slot;
	struct mmc_data *data = host->stop_data;
	unsigned long tick_ns;

	tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq;
	ndelay(8*tick_ns);

	mmc_omap_start_command(host, data->stop);
}

/*
 * A data transfer fully completed: release DMA resources, and either
 * finish the request or kick off the STOP command via workqueue.
 */
static void
mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, data->error);

	host->data = NULL;
	host->sg_len = 0;

	/* NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing
	 * dozens of requests until the card finishes writing data.
	 * It'd be cheaper to just wait till an EOFB interrupt arrives...
	 */

	if (!data->stop) {
		struct mmc_host *mmc;

		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, data->mrq);
		return;
	}

	host->stop_data = data;
	schedule_work(&host->send_stop_work);
}

/*
 * Issue the hardware's transfer-abort command (CMD register value
 * (3 << 12) | (1 << 7)), busy-polling STAT for END_OF_CMD and retrying
 * up to @maxloops times.
 */
static void
mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)
{
	struct mmc_omap_slot *slot = host->current_slot;
	unsigned int restarts, passes, timeout;
	u16 stat = 0;

	/* Sending abort takes 80 clocks. Have some extra and round up */
	timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq;
	restarts = 0;
	while (restarts < maxloops) {
		OMAP_MMC_WRITE(host, STAT, 0xFFFF);
		OMAP_MMC_WRITE(host, CMD, (3 << 12) | (1 << 7));

		passes = 0;
		while (passes < timeout) {
			stat = OMAP_MMC_READ(host, STAT);
			if (stat & OMAP_MMC_STAT_END_OF_CMD)
				goto out;
			udelay(1);
			passes++;
		}

		restarts++;
	}
out:
	OMAP_MMC_WRITE(host, STAT, stat);
}

/* Abort an in-flight data transfer and clean up its DMA state. */
static void
mmc_omap_abort_xfer(struct mmc_omap_host *host, struct mmc_data *data)
{
	if (host->dma_in_use)
		mmc_omap_release_dma(host, data, 1);

	host->data = NULL;
	host->sg_len = 0;

	mmc_omap_send_abort(host, 10000);
}

/*
 * Block-received (BRS) side of data completion.  With DMA in use the
 * transfer is only done once BOTH the controller's end-of-data and the
 * DMA callback have fired; dma_lock arbitrates which side finishes.
 */
static void
mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	if (!host->dma_in_use) {
		mmc_omap_xfer_done(host, data);
		return;
	}
	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->dma_done)
		done = 1;
	else
		host->brs_received = 1;
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}

/* Lazy release of the DMA channel (armed by mmc_omap_release_dma). */
static void
mmc_omap_dma_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	BUG_ON(host->dma_ch < 0);
	omap_free_dma(host->dma_ch);
	host->dma_ch =
-1;
}

/*
 * DMA side of data completion; counterpart of mmc_omap_end_of_data()
 * (whichever of BRS / DMA-done arrives second finishes the transfer).
 */
static void
mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)
{
	unsigned long flags;
	int done;

	done = 0;
	spin_lock_irqsave(&host->dma_lock, flags);
	if (host->brs_received)
		done = 1;
	else
		host->dma_done = 1;
	spin_unlock_irqrestore(&host->dma_lock, flags);
	if (done)
		mmc_omap_xfer_done(host, data);
}

/*
 * A command finished: read back the response registers and, if there is
 * no data phase (or the command failed), complete the whole request.
 */
static void
mmc_omap_cmd_done(struct mmc_omap_host *host, struct mmc_command *cmd)
{
	host->cmd = NULL;

	del_timer(&host->cmd_abort_timer);

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2 */
			cmd->resp[3] =
				OMAP_MMC_READ(host, RSP0) |
				(OMAP_MMC_READ(host, RSP1) << 16);
			cmd->resp[2] =
				OMAP_MMC_READ(host, RSP2) |
				(OMAP_MMC_READ(host, RSP3) << 16);
			cmd->resp[1] =
				OMAP_MMC_READ(host, RSP4) |
				(OMAP_MMC_READ(host, RSP5) << 16);
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] =
				OMAP_MMC_READ(host, RSP6) |
				(OMAP_MMC_READ(host, RSP7) << 16);
		}
	}

	if (host->data == NULL || cmd->error) {
		struct mmc_host *mmc;

		if (host->data != NULL)
			mmc_omap_abort_xfer(host, host->data);
		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	}
}

/*
 * Abort stuck command. Can occur when card is removed while it is being
 * read.
 */
static void mmc_omap_abort_command(struct work_struct *work)
{
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
						  cmd_abort_work);
	BUG_ON(!host->cmd);

	dev_dbg(mmc_dev(host->mmc), "Aborting stuck command CMD%d\n",
		host->cmd->opcode);

	if (host->cmd->error == 0)
		host->cmd->error = -ETIMEDOUT;

	if (host->data == NULL) {
		struct mmc_command *cmd;
		struct mmc_host *mmc;

		cmd = host->cmd;
		host->cmd = NULL;
		mmc_omap_send_abort(host, 10000);

		host->mrq = NULL;
		mmc = host->mmc;
		mmc_omap_release_slot(host->current_slot, 1);
		mmc_request_done(mmc, cmd->mrq);
	} else
		mmc_omap_cmd_done(host, host->cmd);

	/* Re-arm interrupt handling disabled by mmc_omap_cmd_timer() */
	host->abort = 0;
	enable_irq(host->irq);
}

/*
 * cmd_abort_timer expired: mask interrupts and defer the actual abort
 * to process context (mmc_omap_abort_command).
 */
static void
mmc_omap_cmd_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	unsigned long flags;

	spin_lock_irqsave(&host->slot_lock, flags);
	if (host->cmd != NULL && !host->abort) {
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq(host->irq);
		host->abort = 1;
		schedule_work(&host->cmd_abort_work);
	}
	spin_unlock_irqrestore(&host->slot_lock, flags);
}

/* PIO only */
static void
mmc_omap_sg_to_buf(struct mmc_omap_host *host)
{
	struct scatterlist *sg;

	/* Point host->buffer at the current scatterlist segment. */
	sg = host->data->sg + host->sg_idx;
	host->buffer_bytes_left = sg->length;
	host->buffer = sg_virt(sg);
	if (host->buffer_bytes_left > host->total_bytes_left)
		host->buffer_bytes_left = host->total_bytes_left;
}

/* clk_timer expired: the post-request clock grace period is over. */
static void
mmc_omap_clk_timer(unsigned long data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;

	mmc_omap_fclk_enable(host, 0);
}

/* PIO only */
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
	int n;

	if (host->buffer_bytes_left == 0) {
		host->sg_idx++;
		BUG_ON(host->sg_idx == host->sg_len);
		mmc_omap_sg_to_buf(host);
	}
	/* FIFO is drained/filled 64 bytes (32 words) at a time */
	n = 64;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->total_bytes_left -= n;
	host->data->bytes_xfered += n;

	if (write) {
		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
	} else {
		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
	}
}

/* Debug helper: print the mnemonic of each set STAT bit. */
static inline void mmc_omap_report_irq(u16 status)
{
	static const char *mmc_omap_status_bits[] = {
		"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",
		"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"
	};
	int i, c = 0;

	for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++)
		if (status & (1 << i)) {
			if (c)
				printk(" ");
			printk("%s", mmc_omap_status_bits[i]);
			c++;
		}
}

/*
 * Main interrupt handler: drains STAT events, moves PIO data, records
 * command/data errors and dispatches completion handling.
 */
static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
{
	struct mmc_omap_host * host = (struct mmc_omap_host *)dev_id;
	u16 status;
	int end_command;
	int end_transfer;
	int transfer_error, cmd_error;

	if (host->cmd == NULL && host->data == NULL) {
		/* Nothing in flight: ack and mask whatever fired. */
		status = OMAP_MMC_READ(host, STAT);
		dev_info(mmc_dev(host->slots[0]->mmc),
			 "Spurious IRQ 0x%04x\n", status);
		if (status != 0) {
			OMAP_MMC_WRITE(host, STAT, status);
			OMAP_MMC_WRITE(host, IE, 0);
		}
		return IRQ_HANDLED;
	}

	end_command = 0;
	end_transfer = 0;
	transfer_error = 0;
	cmd_error = 0;

	/* Keep reading (and acking) STAT until no events remain. */
	while ((status = OMAP_MMC_READ(host, STAT)) != 0) {
		int cmd;

		OMAP_MMC_WRITE(host, STAT, status);
		if (host->cmd != NULL)
			cmd = host->cmd->opcode;
		else
			cmd = -1;
#ifdef CONFIG_MMC_DEBUG
		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",
			status, cmd);
		mmc_omap_report_irq(status);
		printk("\n");
#endif
		if (host->total_bytes_left) {
			if ((status & OMAP_MMC_STAT_A_FULL) ||
			    (status & OMAP_MMC_STAT_END_OF_DATA))
				mmc_omap_xfer_data(host, 0);
			if (status & OMAP_MMC_STAT_A_EMPTY)
				mmc_omap_xfer_data(host, 1);
		}

		if (status & OMAP_MMC_STAT_END_OF_DATA)
			end_transfer = 1;

		if (status & OMAP_MMC_STAT_DATA_TOUT) {
			dev_dbg(mmc_dev(host->mmc), "data timeout (CMD%d)\n",
				cmd);
			if (host->data) {
				host->data->error = -ETIMEDOUT;
				transfer_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_DATA_CRC) {
			if (host->data) {
				host->data->error = -EILSEQ;
				dev_dbg(mmc_dev(host->mmc),
					"data CRC error, bytes left %d\n",
					host->total_bytes_left);
				transfer_error = 1;
			} else {
				dev_dbg(mmc_dev(host->mmc), "data CRC error\n");
			}
		}

		if (status & OMAP_MMC_STAT_CMD_TOUT) {
			/* Timeouts are routine with some commands */
			if (host->cmd) {
				struct mmc_omap_slot *slot =
					host->current_slot;
				/* Only log when a card should be present */
				if (slot == NULL ||
				    !mmc_omap_cover_is_open(slot))
					dev_err(mmc_dev(host->mmc),
						"command timeout (CMD%d)\n",
						cmd);
				host->cmd->error = -ETIMEDOUT;
				end_command = 1;
				cmd_error = 1;
			}
		}

		if (status & OMAP_MMC_STAT_CMD_CRC) {
			if (host->cmd) {
				dev_err(mmc_dev(host->mmc),
					"command CRC error (CMD%d, arg 0x%08x)\n",
					cmd, host->cmd->arg);
				host->cmd->error = -EILSEQ;
				end_command = 1;
				cmd_error = 1;
			} else
				dev_err(mmc_dev(host->mmc),
					"command CRC error without cmd?\n");
		}

		if (status & OMAP_MMC_STAT_CARD_ERR) {
			dev_dbg(mmc_dev(host->mmc),
				"ignoring card status error (CMD%d)\n",
				cmd);
			end_command = 1;
		}

		/*
		 * NOTE: On 1610 the END_OF_CMD may come too early when
		 * starting a write
		 */
		if ((status & OMAP_MMC_STAT_END_OF_CMD) &&
		    (!(status & OMAP_MMC_STAT_A_EMPTY))) {
			end_command = 1;
		}
	}

	if (cmd_error && host->data) {
		/* Command failed with data pending: defer abort to work */
		del_timer(&host->cmd_abort_timer);
		host->abort = 1;
		OMAP_MMC_WRITE(host, IE, 0);
		disable_irq_nosync(host->irq);
		schedule_work(&host->cmd_abort_work);
		return
IRQ_HANDLED;
	}

	if (end_command)
		mmc_omap_cmd_done(host, host->cmd);
	if (host->data != NULL) {
		if (transfer_error)
			mmc_omap_xfer_done(host, host->data);
		else if (end_transfer)
			mmc_omap_end_of_data(host, host->data);
	}

	return IRQ_HANDLED;
}

/*
 * Board code calls this when the slot cover switch changes; updates the
 * cached cover state, notifies sysfs and schedules the cover tasklet.
 */
void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed)
{
	int cover_open;
	struct mmc_omap_host *host = dev_get_drvdata(dev);
	/* NOTE(review): host->slots[num] is read before the bounds
	 * BUG_ON below — consider validating num first. */
	struct mmc_omap_slot *slot = host->slots[num];

	BUG_ON(num >= host->nr_slots);

	/* Other subsystems can call in here before we're initialised. */
	if (host->nr_slots == 0 || !host->slots[num])
		return;

	cover_open = mmc_omap_cover_is_open(slot);
	if (cover_open != slot->cover_open) {
		slot->cover_open = cover_open;
		sysfs_notify(&slot->mmc->class_dev.kobj, NULL, "cover_switch");
	}

	tasklet_hi_schedule(&slot->cover_tasklet);
}

/* Periodic re-poll while the cover stays open (see cover_handler). */
static void mmc_omap_cover_timer(unsigned long arg)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg;
	tasklet_schedule(&slot->cover_tasklet);
}

/* Tasklet: rescan the card and keep polling while the cover is open. */
static void mmc_omap_cover_handler(unsigned long param)
{
	struct mmc_omap_slot *slot = (struct mmc_omap_slot *)param;
	int cover_open = mmc_omap_cover_is_open(slot);

	mmc_detect_change(slot->mmc, 0);
	if (!cover_open)
		return;

	/*
	 * If no card is inserted, we postpone polling until
	 * the cover has been closed.
	 */
	if (slot->mmc->card == NULL || !mmc_card_present(slot->mmc->card))
		return;

	mod_timer(&slot->cover_timer,
		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));
}

/* Prepare to transfer the next segment of a scatterlist */
static void
mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data)
{
	int dma_ch = host->dma_ch;
	unsigned long data_addr;
	u16 buf, frame;
	u32 count;
	struct scatterlist *sg = &data->sg[host->sg_idx];
	int src_port = 0;
	int dst_port = 0;
	int sync_dev = 0;

	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA);
	frame = data->blksz;
	count = sg_dma_len(sg);

	/* Single-block transfers never move more than one block */
	if ((data->blocks == 1) && (count > data->blksz))
		count = frame;

	host->dma_len = count;

	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx.
	 * Use 16 or 32 word frames when the blocksize is at least that large.
	 * Blocksize is usually 512 bytes; but not for some SD reads.
	 */
	if (cpu_is_omap15xx() && frame > 32)
		frame = 32;
	else if (frame > 64)
		frame = 64;
	count /= frame;
	frame >>= 1;	/* bytes -> 16-bit words */

	if (!(data->flags & MMC_DATA_WRITE)) {
		/* BUF register: RX FIFO threshold in the high byte */
		buf = 0x800f | ((frame - 1) << 8);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_TIPB;
			dst_port = OMAP_DMA_PORT_EMIFF;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_RX;

		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_CONSTANT,
					data_addr, 0, 0);
		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_POST_INC,
					 sg_dma_address(sg), 0, 0);
		omap_set_dma_dest_data_pack(dma_ch, 1);
		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	} else {
		/* BUF register: TX FIFO threshold in the low byte */
		buf = 0x0f80 | ((frame - 1) << 0);

		if (cpu_class_is_omap1()) {
			src_port = OMAP_DMA_PORT_EMIFF;
			dst_port = OMAP_DMA_PORT_TIPB;
		}
		if (cpu_is_omap24xx())
			sync_dev = OMAP24XX_DMA_MMC1_TX;

		omap_set_dma_dest_params(dma_ch, dst_port,
					 OMAP_DMA_AMODE_CONSTANT,
					 data_addr, 0, 0);
		omap_set_dma_src_params(dma_ch, src_port,
					OMAP_DMA_AMODE_POST_INC,
					sg_dma_address(sg), 0, 0);
		omap_set_dma_src_data_pack(dma_ch, 1);
		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4);
	}

	/* Max limit for DMA frame count is 0xffff */
	BUG_ON(count > 0xffff);

	OMAP_MMC_WRITE(host, BUF, buf);
	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16,
				     frame, count, OMAP_DMA_SYNC_FRAME,
				     sync_dev, 0);
}

/* A scatterlist segment completed */
static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data)
{
	struct mmc_omap_host *host = (struct mmc_omap_host *) data;
	struct mmc_data *mmcdat = host->data;

	if (unlikely(host->dma_ch < 0)) {
		dev_err(mmc_dev(host->mmc),
			"DMA callback while DMA not enabled\n");
		return;
	}
	/* FIXME: We really should do something to _handle_ the errors */
	if (ch_status & OMAP1_DMA_TOUT_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA timeout\n");
		return;
	}
	if (ch_status & OMAP_DMA_DROP_IRQ) {
		dev_err(mmc_dev(host->mmc), "DMA sync error\n");
		return;
	}
	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
		return;
	}
	mmcdat->bytes_xfered += host->dma_len;
	host->sg_idx++;
	if (host->sg_idx < host->sg_len) {
		/* More segments to go: chain the next one */
		mmc_omap_prepare_dma(host, host->data);
		omap_start_dma(host->dma_ch);
	} else
		mmc_omap_dma_done(host, host->data);
}

/*
 * Acquire (or reuse) a DMA channel for @data's direction.  A channel
 * held from a previous transfer in the same direction is reused; one
 * for the opposite direction is freed and replaced.
 */
static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data)
{
	const char *dma_dev_name;
	int sync_dev, dma_ch, is_read, r;

	is_read = !(data->flags & MMC_DATA_WRITE);
	del_timer_sync(&host->dma_timer);
	if (host->dma_ch >= 0) {
		if (is_read == host->dma_is_read)
			return 0;
		omap_free_dma(host->dma_ch);
		host->dma_ch = -1;
	}

	if (is_read) {
		if (host->id == 0) {
			sync_dev = OMAP_DMA_MMC_RX;
			dma_dev_name = "MMC1 read";
		} else {
			sync_dev = OMAP_DMA_MMC2_RX;
			dma_dev_name = "MMC2 read";
		}
	} else {
		if (host->id == 0) {
			sync_dev = OMAP_DMA_MMC_TX;
			dma_dev_name = "MMC1 write";
		} else {
			sync_dev = OMAP_DMA_MMC2_TX;
			dma_dev_name = "MMC2 write";
		}
	}
	r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb,
			     host, &dma_ch);
	if (r != 0) {
		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r);
		return r;
	}
	host->dma_ch = dma_ch;
	host->dma_is_read = is_read;

	return 0;
}

/* Configure the command timeout for a request with no data phase. */
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	u16 reg;

	/* Clear the data-timeout multiplier bit (bit 5 of SDIO) */
	reg = OMAP_MMC_READ(host, SDIO);
	reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	/* Set maximum timeout */
	OMAP_MMC_WRITE(host, CTO, 0xff);
}

static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
{
	unsigned int timeout, cycle_ns;
	u16 reg;

	/* Convert the core's ns + clks budget into fclk cycles */
	cycle_ns = 1000000000 / host->current_slot->fclk_freq;
	timeout = req->data->timeout_ns / cycle_ns;
	timeout += req->data->timeout_clks;

	/* Check if we need to use timeout multiplier register */
	reg = OMAP_MMC_READ(host, SDIO);
	if (timeout > 0xffff) {
		reg |= (1 << 5);	/* bit 5: DTO counts in units of 1024 cycles */
		timeout /= 1024;
	} else
		reg &= ~(1 << 5);
	OMAP_MMC_WRITE(host, SDIO, reg);
	OMAP_MMC_WRITE(host, DTO, timeout);
}

/*
 * Program block count/length and timeouts for a request, then set up
 * either DMA (whole blocks only) or PIO for the data phase.
 */
static void
mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)
{
	struct mmc_data *data = req->data;
	int i, use_dma, block_size;
	unsigned sg_len;

	host->data = data;
	if (data == NULL) {
		OMAP_MMC_WRITE(host, BLEN, 0);
		OMAP_MMC_WRITE(host, NBLK, 0);
		OMAP_MMC_WRITE(host, BUF, 0);
		host->dma_in_use = 0;
		set_cmd_timeout(host, req);
		return;
	}

	block_size = data->blksz;

	OMAP_MMC_WRITE(host, NBLK, data->blocks - 1);
	OMAP_MMC_WRITE(host, BLEN, block_size - 1);
	set_data_timeout(host, req);

	/* cope with calling layer confusion; it issues "single
	 * block" writes using multi-block scatterlists.
	 */
	sg_len = (data->blocks == 1) ? 1 : data->sg_len;

	/* Only do DMA for entire blocks */
	use_dma = host->use_dma;
	if (use_dma) {
		for (i = 0; i < sg_len; i++) {
			if ((data->sg[i].length % block_size) != 0) {
				use_dma = 0;
				break;
			}
		}
	}

	host->sg_idx = 0;
	if (use_dma) {
		if (mmc_omap_get_dma_channel(host, data) == 0) {
			enum dma_data_direction dma_data_dir;

			if (data->flags & MMC_DATA_WRITE)
				dma_data_dir = DMA_TO_DEVICE;
			else
				dma_data_dir = DMA_FROM_DEVICE;

			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
						  sg_len, dma_data_dir);
			/* zero: the PIO path in the IRQ handler stays idle */
			host->total_bytes_left = 0;
			mmc_omap_prepare_dma(host, req->data);
			host->brs_received = 0;
			host->dma_done = 0;
			host->dma_in_use = 1;
		} else
			use_dma = 0;
	}

	/* Revert to PIO? */
	if (!use_dma) {
		OMAP_MMC_WRITE(host, BUF, 0x1f1f);
		host->total_bytes_left = data->blocks * block_size;
		host->sg_len = sg_len;
		mmc_omap_sg_to_buf(host);
		host->dma_in_use = 0;
	}
}

/* Kick off a request on an already-claimed controller. */
static void mmc_omap_start_request(struct mmc_omap_host *host,
				   struct mmc_request *req)
{
	BUG_ON(host->mrq != NULL);

	host->mrq = req;

	/* only touch fifo AFTER the controller readies it */
	mmc_omap_prepare_data(host, req);
	mmc_omap_start_command(host, req->cmd);
	if (host->dma_in_use)
		omap_start_dma(host->dma_ch);
}

/*
 * mmc_host_ops .request: start immediately if the controller is free,
 * otherwise queue on the slot; mmc_omap_release_slot() will pick the
 * queued request up when the controller becomes available.
 */
static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	unsigned long flags;

	spin_lock_irqsave(&host->slot_lock, flags);
	if (host->mmc != NULL) {
		BUG_ON(slot->mrq != NULL);
		slot->mrq = req;
		spin_unlock_irqrestore(&host->slot_lock, flags);
		return;
	} else
		host->mmc = mmc;
	spin_unlock_irqrestore(&host->slot_lock, flags);
	mmc_omap_select_slot(slot, 1);
	mmc_omap_start_request(host, req);
}

/*
 * Apply slot power via the board callback; on OMAP24xx additionally
 * toggle the power-up bit (bit 11) in the CON register.
 */
static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
			       int vdd)
{
	struct mmc_omap_host *host;

	host = slot->host;

	if (slot->pdata->set_power != NULL)
		slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
				       vdd);

	if (cpu_is_omap24xx()) {
		u16 w;

		if (power_on) {
			w = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, w | (1 << 11));
		} else {
			w = OMAP_MMC_READ(host, CON);
			OMAP_MMC_WRITE(host, CON, w & ~(1 << 11));
		}
	}
}

/*
 * Compute the CON clock divisor for the requested ios->clock, clamped
 * to [1, 250] and rounded so the result never exceeds the request.
 * Also records the resulting rate in slot->fclk_freq and sets the
 * 4-bit-bus flag (bit 15) in the returned value.
 */
static int mmc_omap_calc_divisor(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	int func_clk_rate = clk_get_rate(host->fclk);
	int dsor;

	if (ios->clock == 0)
		return 0;

	dsor = func_clk_rate / ios->clock;
	if (dsor < 1)
		dsor = 1;

	/* Round up so we never run the card faster than requested */
	if (func_clk_rate / dsor > ios->clock)
		dsor++;

	if (dsor > 250)
		dsor = 250;

	slot->fclk_freq = func_clk_rate / dsor;

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		dsor |= 1 << 15;

	return dsor;
}

/* mmc_host_ops .set_ios (continues beyond this chunk of the file) */
static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmc_omap_slot *slot = mmc_priv(mmc);
	struct mmc_omap_host *host = slot->host;
	int i, dsor;
	int clk_enabled;

	mmc_omap_select_slot(slot, 0);

	dsor = mmc_omap_calc_divisor(mmc, ios);

	if (ios->vdd != slot->vdd)
		slot->vdd = ios->vdd;

	clk_enabled = 0;
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		mmc_omap_set_power(slot, 0, ios->vdd);
		break;
	case MMC_POWER_UP:
		/* Cannot touch dsor yet, just power up MMC */
		mmc_omap_set_power(slot, 1, ios->vdd);
		goto exit;
	case MMC_POWER_ON:
		mmc_omap_fclk_enable(host, 1);
		clk_enabled = 1;
		dsor |= 1 << 11;
1260 break; 1261 } 1262 1263 if (slot->bus_mode != ios->bus_mode) { 1264 if (slot->pdata->set_bus_mode != NULL) 1265 slot->pdata->set_bus_mode(mmc_dev(mmc), slot->id, 1266 ios->bus_mode); 1267 slot->bus_mode = ios->bus_mode; 1268 } 1269 1270 /* On insanely high arm_per frequencies something sometimes 1271 * goes somehow out of sync, and the POW bit is not being set, 1272 * which results in the while loop below getting stuck. 1273 * Writing to the CON register twice seems to do the trick. */ 1274 for (i = 0; i < 2; i++) 1275 OMAP_MMC_WRITE(host, CON, dsor); 1276 slot->saved_con = dsor; 1277 if (ios->power_mode == MMC_POWER_ON) { 1278 /* worst case at 400kHz, 80 cycles makes 200 microsecs */ 1279 int usecs = 250; 1280 1281 /* Send clock cycles, poll completion */ 1282 OMAP_MMC_WRITE(host, IE, 0); 1283 OMAP_MMC_WRITE(host, STAT, 0xffff); 1284 OMAP_MMC_WRITE(host, CMD, 1 << 7); 1285 while (usecs > 0 && (OMAP_MMC_READ(host, STAT) & 1) == 0) { 1286 udelay(1); 1287 usecs--; 1288 } 1289 OMAP_MMC_WRITE(host, STAT, 1); 1290 } 1291 1292 exit: 1293 mmc_omap_release_slot(slot, clk_enabled); 1294 } 1295 1296 static const struct mmc_host_ops mmc_omap_ops = { 1297 .request = mmc_omap_request, 1298 .set_ios = mmc_omap_set_ios, 1299 }; 1300 1301 static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) 1302 { 1303 struct mmc_omap_slot *slot = NULL; 1304 struct mmc_host *mmc; 1305 int r; 1306 1307 mmc = mmc_alloc_host(sizeof(struct mmc_omap_slot), host->dev); 1308 if (mmc == NULL) 1309 return -ENOMEM; 1310 1311 slot = mmc_priv(mmc); 1312 slot->host = host; 1313 slot->mmc = mmc; 1314 slot->id = id; 1315 slot->pdata = &host->pdata->slots[id]; 1316 1317 host->slots[id] = slot; 1318 1319 mmc->caps = 0; 1320 if (host->pdata->slots[id].wires >= 4) 1321 mmc->caps |= MMC_CAP_4_BIT_DATA; 1322 1323 mmc->ops = &mmc_omap_ops; 1324 mmc->f_min = 400000; 1325 1326 if (cpu_class_is_omap2()) 1327 mmc->f_max = 48000000; 1328 else 1329 mmc->f_max = 24000000; 1330 if 
(host->pdata->max_freq) 1331 mmc->f_max = min(host->pdata->max_freq, mmc->f_max); 1332 mmc->ocr_avail = slot->pdata->ocr_mask; 1333 1334 /* Use scatterlist DMA to reduce per-transfer costs. 1335 * NOTE max_seg_size assumption that small blocks aren't 1336 * normally used (except e.g. for reading SD registers). 1337 */ 1338 mmc->max_segs = 32; 1339 mmc->max_blk_size = 2048; /* BLEN is 11 bits (+1) */ 1340 mmc->max_blk_count = 2048; /* NBLK is 11 bits (+1) */ 1341 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; 1342 mmc->max_seg_size = mmc->max_req_size; 1343 1344 r = mmc_add_host(mmc); 1345 if (r < 0) 1346 goto err_remove_host; 1347 1348 if (slot->pdata->name != NULL) { 1349 r = device_create_file(&mmc->class_dev, 1350 &dev_attr_slot_name); 1351 if (r < 0) 1352 goto err_remove_host; 1353 } 1354 1355 if (slot->pdata->get_cover_state != NULL) { 1356 r = device_create_file(&mmc->class_dev, 1357 &dev_attr_cover_switch); 1358 if (r < 0) 1359 goto err_remove_slot_name; 1360 1361 setup_timer(&slot->cover_timer, mmc_omap_cover_timer, 1362 (unsigned long)slot); 1363 tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, 1364 (unsigned long)slot); 1365 tasklet_schedule(&slot->cover_tasklet); 1366 } 1367 1368 return 0; 1369 1370 err_remove_slot_name: 1371 if (slot->pdata->name != NULL) 1372 device_remove_file(&mmc->class_dev, &dev_attr_slot_name); 1373 err_remove_host: 1374 mmc_remove_host(mmc); 1375 mmc_free_host(mmc); 1376 return r; 1377 } 1378 1379 static void mmc_omap_remove_slot(struct mmc_omap_slot *slot) 1380 { 1381 struct mmc_host *mmc = slot->mmc; 1382 1383 if (slot->pdata->name != NULL) 1384 device_remove_file(&mmc->class_dev, &dev_attr_slot_name); 1385 if (slot->pdata->get_cover_state != NULL) 1386 device_remove_file(&mmc->class_dev, &dev_attr_cover_switch); 1387 1388 tasklet_kill(&slot->cover_tasklet); 1389 del_timer_sync(&slot->cover_timer); 1390 flush_scheduled_work(); 1391 1392 mmc_remove_host(mmc); 1393 mmc_free_host(mmc); 1394 } 1395 1396 
static int __init mmc_omap_probe(struct platform_device *pdev) 1397 { 1398 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 1399 struct mmc_omap_host *host = NULL; 1400 struct resource *res; 1401 int i, ret = 0; 1402 int irq; 1403 1404 if (pdata == NULL) { 1405 dev_err(&pdev->dev, "platform data missing\n"); 1406 return -ENXIO; 1407 } 1408 if (pdata->nr_slots == 0) { 1409 dev_err(&pdev->dev, "no slots\n"); 1410 return -ENXIO; 1411 } 1412 1413 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1414 irq = platform_get_irq(pdev, 0); 1415 if (res == NULL || irq < 0) 1416 return -ENXIO; 1417 1418 res = request_mem_region(res->start, res->end - res->start + 1, 1419 pdev->name); 1420 if (res == NULL) 1421 return -EBUSY; 1422 1423 host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL); 1424 if (host == NULL) { 1425 ret = -ENOMEM; 1426 goto err_free_mem_region; 1427 } 1428 1429 INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work); 1430 INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); 1431 1432 INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command); 1433 setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer, 1434 (unsigned long) host); 1435 1436 spin_lock_init(&host->clk_lock); 1437 setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); 1438 1439 spin_lock_init(&host->dma_lock); 1440 setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); 1441 spin_lock_init(&host->slot_lock); 1442 init_waitqueue_head(&host->slot_wq); 1443 1444 host->pdata = pdata; 1445 host->dev = &pdev->dev; 1446 platform_set_drvdata(pdev, host); 1447 1448 host->id = pdev->id; 1449 host->mem_res = res; 1450 host->irq = irq; 1451 1452 host->use_dma = 1; 1453 host->dev->dma_mask = &pdata->dma_mask; 1454 host->dma_ch = -1; 1455 1456 host->irq = irq; 1457 host->phys_base = host->mem_res->start; 1458 host->virt_base = ioremap(res->start, res->end - res->start + 1); 1459 if (!host->virt_base) 1460 goto err_ioremap; 1461 1462 
host->iclk = clk_get(&pdev->dev, "ick"); 1463 if (IS_ERR(host->iclk)) { 1464 ret = PTR_ERR(host->iclk); 1465 goto err_free_mmc_host; 1466 } 1467 clk_enable(host->iclk); 1468 1469 host->fclk = clk_get(&pdev->dev, "fck"); 1470 if (IS_ERR(host->fclk)) { 1471 ret = PTR_ERR(host->fclk); 1472 goto err_free_iclk; 1473 } 1474 1475 ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); 1476 if (ret) 1477 goto err_free_fclk; 1478 1479 if (pdata->init != NULL) { 1480 ret = pdata->init(&pdev->dev); 1481 if (ret < 0) 1482 goto err_free_irq; 1483 } 1484 1485 host->nr_slots = pdata->nr_slots; 1486 for (i = 0; i < pdata->nr_slots; i++) { 1487 ret = mmc_omap_new_slot(host, i); 1488 if (ret < 0) { 1489 while (--i >= 0) 1490 mmc_omap_remove_slot(host->slots[i]); 1491 1492 goto err_plat_cleanup; 1493 } 1494 } 1495 1496 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); 1497 1498 return 0; 1499 1500 err_plat_cleanup: 1501 if (pdata->cleanup) 1502 pdata->cleanup(&pdev->dev); 1503 err_free_irq: 1504 free_irq(host->irq, host); 1505 err_free_fclk: 1506 clk_put(host->fclk); 1507 err_free_iclk: 1508 clk_disable(host->iclk); 1509 clk_put(host->iclk); 1510 err_free_mmc_host: 1511 iounmap(host->virt_base); 1512 err_ioremap: 1513 kfree(host); 1514 err_free_mem_region: 1515 release_mem_region(res->start, res->end - res->start + 1); 1516 return ret; 1517 } 1518 1519 static int mmc_omap_remove(struct platform_device *pdev) 1520 { 1521 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1522 int i; 1523 1524 platform_set_drvdata(pdev, NULL); 1525 1526 BUG_ON(host == NULL); 1527 1528 for (i = 0; i < host->nr_slots; i++) 1529 mmc_omap_remove_slot(host->slots[i]); 1530 1531 if (host->pdata->cleanup) 1532 host->pdata->cleanup(&pdev->dev); 1533 1534 mmc_omap_fclk_enable(host, 0); 1535 free_irq(host->irq, host); 1536 clk_put(host->fclk); 1537 clk_disable(host->iclk); 1538 clk_put(host->iclk); 1539 1540 iounmap(host->virt_base); 1541 release_mem_region(pdev->resource[0].start, 1542 
pdev->resource[0].end - pdev->resource[0].start + 1); 1543 1544 kfree(host); 1545 1546 return 0; 1547 } 1548 1549 #ifdef CONFIG_PM 1550 static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) 1551 { 1552 int i, ret = 0; 1553 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1554 1555 if (host == NULL || host->suspended) 1556 return 0; 1557 1558 for (i = 0; i < host->nr_slots; i++) { 1559 struct mmc_omap_slot *slot; 1560 1561 slot = host->slots[i]; 1562 ret = mmc_suspend_host(slot->mmc); 1563 if (ret < 0) { 1564 while (--i >= 0) { 1565 slot = host->slots[i]; 1566 mmc_resume_host(slot->mmc); 1567 } 1568 return ret; 1569 } 1570 } 1571 host->suspended = 1; 1572 return 0; 1573 } 1574 1575 static int mmc_omap_resume(struct platform_device *pdev) 1576 { 1577 int i, ret = 0; 1578 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1579 1580 if (host == NULL || !host->suspended) 1581 return 0; 1582 1583 for (i = 0; i < host->nr_slots; i++) { 1584 struct mmc_omap_slot *slot; 1585 slot = host->slots[i]; 1586 ret = mmc_resume_host(slot->mmc); 1587 if (ret < 0) 1588 return ret; 1589 1590 host->suspended = 0; 1591 } 1592 return 0; 1593 } 1594 #else 1595 #define mmc_omap_suspend NULL 1596 #define mmc_omap_resume NULL 1597 #endif 1598 1599 static struct platform_driver mmc_omap_driver = { 1600 .remove = mmc_omap_remove, 1601 .suspend = mmc_omap_suspend, 1602 .resume = mmc_omap_resume, 1603 .driver = { 1604 .name = DRIVER_NAME, 1605 .owner = THIS_MODULE, 1606 }, 1607 }; 1608 1609 static int __init mmc_omap_init(void) 1610 { 1611 return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe); 1612 } 1613 1614 static void __exit mmc_omap_exit(void) 1615 { 1616 platform_driver_unregister(&mmc_omap_driver); 1617 } 1618 1619 module_init(mmc_omap_init); 1620 module_exit(mmc_omap_exit); 1621 1622 MODULE_DESCRIPTION("OMAP Multimedia Card driver"); 1623 MODULE_LICENSE("GPL"); 1624 MODULE_ALIAS("platform:" DRIVER_NAME); 1625 MODULE_AUTHOR("Juha Yrj�l�"); 1626