/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)	/* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the currently selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);
		ida_simple_remove(&mmc_blk_ida, devidx);
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_SANITIZE_START, 1,
					MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
					     __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
						__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
						__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
					"%s: Card Status=0x%08X, error %d\n",
					__func__, status, err);
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	/* Always switch back to main area after RPMB access */
	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_pause(card->host);

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
				mmc_retune_unpause(card->host);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
			mmc_retune_unpause(card->host);
	}

	main_md->part_curr = md->part_type;
	return 0;
}

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return err;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return -EIO;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}

static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}

static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, bool *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = true;
		}

		/* We may rely on the host hw to handle busy detection. */
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
			hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, bool *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = true;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = true;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
			(brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = true;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = true;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
		req->rq_disk->disk_name, brq->stop.error,
		brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->blkdata;
	/*
	 * If this is a RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 0);
		}
		if (!err)
			err = mmc_erase(card, from, nr, arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (!err)
		mmc_blk_reset_success(md, type);
fail:
	blk_end_request(req, err, blk_rq_bytes(req));
}

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
					     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    areq);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	bool ecc_err = false;
	bool gen_err = false;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = true;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
					&gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
				req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->blkdata;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
			 (rq_data_dir(req) == WRITE) &&
			 (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags = MMC_DATA_READ;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
					MMC_CMD_AC;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags = MMC_DATA_WRITE;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
					MMC_CMD_AC;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->areq.mrq = &brq->mrq;
	mqrq->areq.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}

static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			       struct mmc_blk_request *brq, struct request *req,
			       bool old_req_pending)
{
	bool req_pending;

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;
		int err;

		err = mmc_sd_num_wr_blocks(card, &blocks);
		if (err)
			req_pending = old_req_pending;
		else
			req_pending = blk_end_request(req, 0, blocks << 9);
	} else {
		req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return req_pending;
}

static void mmc_blk_rw_cmd_abort(struct mmc_card *card, struct request *req)
{
	if (mmc_card_removed(card))
		req->rq_flags |= RQF_QUIET;
	while (blk_end_request(req, -EIO, blk_rq_cur_bytes(req)));
}

/**
 * mmc_blk_rw_try_restart() - tries to restart the current async request
 * @mq: the queue with the card and host to restart
 * @req: a new request that wants to be started after the current one
 */
static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req)
{
	if (!req)
		return;

	/*
	 * If the card was removed, just cancel everything and return.
	 */
	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		blk_end_request_all(req, -EIO);
		return;
	}
	/* Else proceed and try to restart the current async request */
	mmc_blk_rw_rq_prep(mq->mqrq_cur, mq->card, 0, mq);
	mmc_start_areq(mq->card->host, &mq->mqrq_cur->areq, NULL);
}

static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq;
	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *old_req;
	struct mmc_async_req *new_areq;
	struct mmc_async_req *old_areq;
	bool req_pending = true;

	if (!new_req && !mq->mqrq_prev->req)
		return;

	do {
		if (new_req) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if (mmc_large_sector(card) &&
			    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					new_req->rq_disk->disk_name);
				mmc_blk_rw_cmd_abort(card, new_req);
				return;
			}

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			new_areq = &mq->mqrq_cur->areq;
		} else
			new_areq = NULL;

		old_areq = mmc_start_areq(card->host, new_areq, &status);
		if (!old_areq) {
			/*
			 * We have just put the first request into the pipeline
			 * and there is nothing more to do until it is
			 * complete.
			 */
			if (status == MMC_BLK_NEW_REQUEST)
				mq->new_request = true;
			return;
		}

		/*
		 * An asynchronous request has been completed and we proceed
		 * to handle the result of it.
		 */
		mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
		brq = &mq_rq->brq;
		old_req = mq_rq->req;
		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			req_pending = blk_end_request(old_req, 0,
						      brq->data.bytes_xfered);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && req_pending) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(old_req),
				       brq->data.bytes_xfered);
				mmc_blk_rw_cmd_abort(card, old_req);
				return;
			}
			break;
		case MMC_BLK_CMD_ERR:
			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
			if (mmc_blk_reset(md, card->host, type)) {
				if (req_pending)
					mmc_blk_rw_cmd_abort(card, old_req);
				mmc_blk_rw_try_restart(mq, new_req);
				return;
			}
			if (!req_pending) {
				mmc_blk_rw_try_restart(mq, new_req);
				return;
			}
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			mmc_blk_rw_cmd_abort(card, old_req);
			mmc_blk_rw_try_restart(mq, new_req);
			return;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV) {
				mmc_blk_rw_cmd_abort(card, old_req);
				mmc_blk_rw_try_restart(mq, new_req);
				return;
			}
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					old_req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			req_pending = blk_end_request(old_req, -EIO,
						      brq->data.blksz);
			if (!req_pending) {
				mmc_blk_rw_try_restart(mq, new_req);
				return;
			}
			break;
		case MMC_BLK_NOMEDIUM:
			mmc_blk_rw_cmd_abort(card, old_req);
			mmc_blk_rw_try_restart(mq, new_req);
			return;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       old_req->rq_disk->disk_name, status);
			mmc_blk_rw_cmd_abort(card, old_req);
			mmc_blk_rw_try_restart(mq, new_req);
			return;
		}

		if (req_pending) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card,
					   disable_multi, mq);
			mmc_start_areq(card->host,
				       &mq_rq->areq, NULL);
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (req_pending);
}

void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	bool req_is_special = mmc_req_is_special(req);

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		goto out;
	}

	mq->new_request = false;
	if (req && req_op(req) == REQ_OP_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		mmc_blk_issue_discard_rq(mq, req);
	} else if (req && req_op(req) == REQ_OP_SECURE_ERASE) {
		/* complete ongoing async transfer before issuing secure erase */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		mmc_blk_issue_secdiscard_rq(mq, req);
	} else if (req && req_op(req) == REQ_OP_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		mmc_blk_issue_flush(mq, req);
	} else {
		mmc_blk_issue_rw_rq(mq, req);
		card->host->context_info.is_waiting_last_req = false;
	}

out:
	if ((!req && !mq->new_request) || req_is_special)
		/*
		 * Release host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * In case of a special request, there is no reentry to
		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_put_card(card);
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
	if (devidx < 0)
		return ERR_PTR(devidx);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.blkdata = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->parent = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if ((mmc_card_mmc(card) &&
		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_write_cache(md->queue.queue, true, true);
	}

	return md;

err_putdisk:
	put_disk(md->disk);
err_kfree:
	kfree(md);
out:
	ida_simple_remove(&mmc_blk_ida, devidx);
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
	       part_md->disk->disk_name, mmc_card_id(card),
	       mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		/*
		 * Flush remaining requests and free queues. It
		 * is freeing the queue that stops new requests
		 * from being accepted.
		 */
		card = md->queue.card;
		mmc_cleanup_queue(&md->queue);
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
					card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
					&md->power_ro_lock);

			del_gendisk(md->disk);
		}
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	device_add_disk(md->parent, md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, mmc_blk_fixups);

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	dev_set_drvdata(&card->dev, md);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}

static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(&card->dev, NULL);
}

static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");