/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 *          28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
#include <linux/idr.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <linux/uaccess.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"
#include "quirks.h"
#include "sd_ops.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
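
/*
 * The SWITCH (CMD6) argument packs the EXT_CSD access mode, byte index,
 * value and command set into one 32-bit word; bits [23:16] carry the
 * byte index, which is what MMC_EXTRACT_INDEX_FROM_ARG() recovers (used
 * below to detect EXT_CSD_SANITIZE_START requests).
 */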

#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * limited by the MAX_DEVICES below.
 */
static int max_devices;

#define MAX_DEVICES 256

static DEFINE_IDA(mmc_blk_ida);

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct device	*parent;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);
		ida_simple_remove(&mmc_blk_ida, devidx);
		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	struct mmc_queue *mq;
	struct request *req;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	mq = &md->queue;
	card = md->queue.card;

	/* Dispatch locking to the block layer */
	req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, __GFP_RECLAIM);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ret = req_to_mmc_queue_req(req)->drv_op_result;

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}

static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
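
/*
 * The geometry above is synthetic: flash cards have no heads or
 * cylinders, so a fixed 4-head, 16-sector layout is reported and the
 * cylinder count is derived from the capacity (64 sectors per
 * "cylinder") purely to satisfy HDIO_GETGEO callers.
 */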

struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};

static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes) {
		idata->buf = NULL;
		return idata;
	}

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
			   idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}

static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}
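
/*
 * For reference, a minimal userspace sketch of the interface these two
 * helpers marshal (illustrative only; assumes an fd on the whole
 * /dev/mmcblkX node and CAP_SYS_RAWIO, with error handling elided):
 *
 *	struct mmc_ioc_cmd ic = {};
 *	__u8 ext_csd[512];
 *
 *	ic.opcode = 8;		// MMC_SEND_EXT_CSD, an ADTC read
 *	ic.flags  = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *	ic.blksz  = 512;
 *	ic.blocks = 1;
 *	mmc_ioc_cmd_set_data(ic, ext_csd);	// fills ic.data_ptr
 *
 *	ioctl(fd, MMC_IOC_CMD, &ic);
 */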

static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = __mmc_send_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
		    (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}

static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}

static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct mmc_request mrq = {};
	struct scatterlist sg;
	int err;
	bool is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}
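
	/*
	 * For RPMB, every data command must be preceded by SET_BLOCK_COUNT
	 * (CMD23); bit 31 of the ioctl's write_flag requests a reliable
	 * write, which mmc_set_blockcount() forwards in the CMD23 argument.
	 */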
	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
			idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

	return err;
}

static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_ioc_data *idatas[1];
	struct mmc_blk_data *md;
	struct mmc_queue *mq;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;
	struct request *req;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	/*
	 * Dispatch the ioctl() into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	idatas[0] = idata;
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->idata = idatas;
	req_to_mmc_queue_req(req)->ioc_count = 1;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
	blk_put_request(req);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	struct mmc_queue *mq;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;
	struct request *req;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (!num_of_cmds)
		return 0;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}


	/*
	 * Dispatch the ioctl()s into the block request queue.
	 */
	mq = &md->queue;
	req = blk_get_request(mq->queue,
		idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
		__GFP_RECLAIM);
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL;
	req_to_mmc_queue_req(req)->idata = idata;
	req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
	blk_execute_rq(mq->queue, NULL, req, 0);
	ioc_err = req_to_mmc_queue_req(req)->drv_op_result;

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

	blk_put_request(req);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}

static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif

static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};

static int mmc_blk_part_switch_pre(struct mmc_card *card,
				   unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		if (card->ext_csd.cmdq_en) {
			ret = mmc_cmdq_disable(card);
			if (ret)
				return ret;
		}
		mmc_retune_pause(card->host);
	}

	return ret;
}

static int mmc_blk_part_switch_post(struct mmc_card *card,
				    unsigned int part_type)
{
	int ret = 0;

	if (part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) {
		mmc_retune_unpause(card->host);
		if (card->reenable_cmdq && !card->ext_csd.cmdq_en)
			ret = mmc_cmdq_enable(card);
	}

	return ret;
}

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret = 0;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		ret = mmc_blk_part_switch_pre(card, md->part_type);
		if (ret)
			return ret;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret) {
			mmc_blk_part_switch_post(card, md->part_type);
			return ret;
		}

		card->ext_csd.part_config = part_config;

		ret = mmc_blk_part_switch_post(card, main_md->part_curr);
	}

	main_md->part_curr = md->part_type;
	return ret;
}
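
/*
 * The switch above rewrites only the partition-access bits of the
 * EXT_CSD PART_CONFIG byte via CMD6; part_curr in the main mmc_blk_data
 * is updated last, so a failed switch leaves the driver's notion of the
 * selected partition consistent with the card's.
 */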

static int mmc_sd_num_wr_blocks(struct mmc_card *card, u32 *written_blocks)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return err;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return -EIO;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return -ENOMEM;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		return -EIO;

	*written_blocks = result;

	return 0;
}
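
/*
 * ACMD22 (SEND_NUM_WR_BLOCKS) returns the count of well-written blocks
 * as a single big-endian 32-bit word read as a data block, hence the
 * 4-byte transfer and the ntohl() conversion above.
 */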

static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
		bool hw_busy_detect, struct request *req, bool *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = __mmc_send_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
				req->rq_disk->disk_name, __func__, status);
			*gen_err = true;
		}

		/* We may rely on the host hw to handle busy detection. */
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
			hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, bool *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = true;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid) {
			pr_err("%s: status not valid, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
			pr_err("%s: command error, retrying timeout\n",
				req->rq_disk->disk_name);
			return ERR_RETRY;
		}

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, bool *ecc_err, bool *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = __mmc_send_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = true;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
			(brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = true;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = true;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
	       req->rq_disk->disk_name, brq->stop.error,
	       brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	if (stop_status) {
		brq->stop.resp[0] = stop_status;
		brq->stop.error = 0;
	}
	return ERR_CONTINUE;
}

static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}

static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
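
/*
 * mmc_blk_reset() and mmc_blk_reset_success() pair up per operation
 * type: a failed type (read, write, discard, secdiscard) is latched in
 * md->reset_done, so the host is reset at most once for that type until
 * an operation of the same type completes successfully again.
 */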

int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->blkdata;
	/*
	 * If this is a RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}

/*
 * The non-block commands are queued by the block layer, processed along
 * with all other requests, and then issued in this function.
 */
static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
{
	struct mmc_queue_req *mq_rq;
	struct mmc_card *card = mq->card;
	struct mmc_blk_data *md = mq->blkdata;
	int ret;
	int i;

	mq_rq = req_to_mmc_queue_req(req);

	switch (mq_rq->drv_op) {
	case MMC_DRV_OP_IOCTL:
		for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) {
			ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]);
			if (ret)
				break;
		}
		/* Always switch back to main area after RPMB access */
		if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
			mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
		break;
	case MMC_DRV_OP_BOOT_WP:
		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				 card->ext_csd.boot_ro_lock |
				 EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				 card->ext_csd.part_time);
		if (ret)
			pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
			       md->disk->disk_name, ret);
		else
			card->ext_csd.boot_ro_lock |=
				EXT_CSD_BOOT_WP_B_PWR_WP_EN;
		break;
	default:
		pr_err("%s: unknown driver specific operation\n",
		       md->disk->disk_name);
		ret = -EINVAL;
		break;
	}
	mq_rq->drv_op_result = ret;
	blk_end_request_all(req, ret);
}

static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!mmc_can_erase(card)) {
		status = BLK_STS_NOTSUPP;
		goto fail;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
	do {
		err = 0;
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 arg == MMC_TRIM_ARG ?
					 INAND_CMD38_ARG_TRIM :
					 INAND_CMD38_ARG_ERASE,
					 0);
		}
		if (!err)
			err = mmc_erase(card, from, nr, arg);
	} while (err == -EIO && !mmc_blk_reset(md, card->host, type));
	if (err)
		status = BLK_STS_IOERR;
	else
		mmc_blk_reset_success(md, type);
fail:
	blk_end_request(req, status, blk_rq_bytes(req));
}
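
/*
 * Of the erase variants chosen above, DISCARD and TRIM operate on
 * individual write blocks, while a plain ERASE is applied at erase-group
 * granularity; the driver therefore prefers the finer-grained arguments
 * whenever the card advertises them.
 */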

static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;
	blk_status_t status = BLK_STS_OK;

	if (!(mmc_can_secure_erase_trim(card))) {
		status = BLK_STS_NOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err) {
		status = BLK_STS_IOERR;
		goto out;
	}

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err) {
			status = BLK_STS_IOERR;
			goto out;
		}
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, status, blk_rq_bytes(req));
}

static void mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	blk_end_request_all(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
}

/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(blk_rq_pos(req), card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
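
/*
 * As a worked example of the legacy clamping: with rel_sectors == 8, an
 * aligned 32-sector write is cut down to 8 sectors per reliable write,
 * while a misaligned or shorter-than-8-sector write collapses to a
 * single sector per transfer.
 */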

#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CARD_ECC_FAILED |	/* Card ECC failed */			\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */

static bool mmc_blk_has_cmd_err(struct mmc_command *cmd)
{
	if (!cmd->error && cmd->resp[0] & CMD_ERRORS)
		cmd->error = -EIO;

	return cmd->error;
}

static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card,
					     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    areq);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mmc_queue_req_to_req(mq_mrq);
	int need_retune = card->host->need_retune;
	bool ecc_err = false;
	bool gen_err = false;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || mmc_blk_has_cmd_err(&brq->stop) ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = true;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
					&gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	/* Some errors (ECC) are flagged on the next command, so check stop, too */
	if (brq->data.error || brq->stop.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error ?: brq->stop.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
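
/*
 * The mmc_blk_status returned above drives the retry/abort state machine
 * in mmc_blk_issue_rw_rq(); MMC_BLK_PARTIAL in particular lets a request
 * be completed for the transferred bytes and reissued for the remainder.
 */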

static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
			      int disable_multi, bool *do_rel_wr,
			      bool *do_data_tag)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	*do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		     rq_data_dir(req) == WRITE &&
		     (md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));

	brq->mrq.data = &brq->data;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;

	if (rq_data_dir(req) == READ) {
		brq->data.flags = MMC_DATA_READ;
		brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		brq->data.flags = MMC_DATA_WRITE;
		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	}

	brq->data.blksz = 512;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (*do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	*do_data_tag = card->ext_csd.data_tag_unit_size &&
		       (req->cmd_flags & REQ_META) &&
		       (rq_data_dir(req) == WRITE) &&
		       ((brq->data.blocks * brq->data.blksz) >=
			card->ext_csd.data_tag_unit_size);

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->areq.mrq = &brq->mrq;

	mmc_queue_bounce_pre(mqrq);
}

static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct mmc_blk_data *md = mq->blkdata;
	bool do_rel_wr, do_data_tag;

	mmc_blk_data_prep(mq, mqrq, disable_multi, &do_rel_wr, &do_data_tag);

	brq->mrq.cmd = &brq->cmd;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd;

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open-ended ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}

	mqrq->areq.err_check = mmc_blk_err_check;
}
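
/*
 * In the SET_BLOCK_COUNT argument built above, the low bits carry the
 * block count, while bit 31 requests a reliable write and bit 29 marks
 * the transfer as tagged (data tag) per the eMMC CMD23 definition.
 */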

static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			       struct mmc_blk_request *brq, struct request *req,
			       bool old_req_pending)
{
	bool req_pending;

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;
		int err;

		err = mmc_sd_num_wr_blocks(card, &blocks);
		if (err)
			req_pending = old_req_pending;
		else
			req_pending = blk_end_request(req, 0, blocks << 9);
	} else {
		req_pending = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return req_pending;
}

static void mmc_blk_rw_cmd_abort(struct mmc_queue *mq, struct mmc_card *card,
				 struct request *req,
				 struct mmc_queue_req *mqrq)
{
	if (mmc_card_removed(card))
		req->rq_flags |= RQF_QUIET;
	while (blk_end_request(req, BLK_STS_IOERR, blk_rq_cur_bytes(req)));
	mq->qcnt--;
}

/**
 * mmc_blk_rw_try_restart() - tries to restart the current async request
 * @mq: the queue with the card and host to restart
 * @req: a new request that wants to be started after the current one
 */
static void mmc_blk_rw_try_restart(struct mmc_queue *mq, struct request *req,
				   struct mmc_queue_req *mqrq)
{
	if (!req)
		return;

	/*
	 * If the card was removed, just cancel everything and return.
	 */
	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		blk_end_request_all(req, BLK_STS_IOERR);
		mq->qcnt--; /* FIXME: just set to 0? */
		return;
	}
	/* Else proceed and try to restart the current async request */
	mmc_blk_rw_rq_prep(mqrq, mq->card, 0, mq);
	mmc_start_areq(mq->card->host, &mqrq->areq, NULL);
}
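
/*
 * mmc_blk_issue_rw_rq() below implements a two-deep pipeline:
 * mmc_start_areq() hands the new request to the host and returns the
 * previously issued one (if any), whose outcome is then post-processed
 * while the next transfer is already in flight.
 */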

static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq;
	int disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mqrq_cur = NULL;
	struct mmc_queue_req *mq_rq;
	struct request *old_req;
	struct mmc_async_req *new_areq;
	struct mmc_async_req *old_areq;
	bool req_pending = true;

	if (new_req) {
		mqrq_cur = req_to_mmc_queue_req(new_req);
		mq->qcnt++;
	}

	if (!mq->qcnt)
		return;

	do {
		if (new_req) {
			/*
			 * When 4KB native sector is enabled, only reads
			 * and writes in multiples of 8 blocks are allowed.
			 */
			if (mmc_large_sector(card) &&
			    !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
					new_req->rq_disk->disk_name);
				mmc_blk_rw_cmd_abort(mq, card, new_req, mqrq_cur);
				return;
			}

			mmc_blk_rw_rq_prep(mqrq_cur, card, 0, mq);
			new_areq = &mqrq_cur->areq;
		} else
			new_areq = NULL;

		old_areq = mmc_start_areq(card->host, new_areq, &status);
		if (!old_areq) {
			/*
			 * We have just put the first request into the pipeline
			 * and there is nothing more to do until it is
			 * complete.
			 */
			return;
		}

		/*
		 * An asynchronous request has been completed and we proceed
		 * to handle the result of it.
		 */
		mq_rq = container_of(old_areq, struct mmc_queue_req, areq);
		brq = &mq_rq->brq;
		old_req = mmc_queue_req_to_req(mq_rq);
		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			req_pending = blk_end_request(old_req, BLK_STS_OK,
						      brq->data.bytes_xfered);
			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && req_pending) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(old_req),
				       brq->data.bytes_xfered);
				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				return;
			}
			break;
		case MMC_BLK_CMD_ERR:
			req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending);
			if (mmc_blk_reset(md, card->host, type)) {
				if (req_pending)
					mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				else
					mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			if (!req_pending) {
				mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV) {
				mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					old_req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			req_pending = blk_end_request(old_req, BLK_STS_IOERR,
						      brq->data.blksz);
			if (!req_pending) {
				mq->qcnt--;
				mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
				return;
			}
			break;
		case MMC_BLK_NOMEDIUM:
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       old_req->rq_disk->disk_name, status);
			mmc_blk_rw_cmd_abort(mq, card, old_req, mq_rq);
			mmc_blk_rw_try_restart(mq, new_req, mqrq_cur);
			return;
		}

		if (req_pending) {
			/*
			 * In case of an incomplete request
			 * prepare it again and resend.
			 */
			mmc_blk_rw_rq_prep(mq_rq, card,
					   disable_multi, mq);
			mmc_start_areq(card->host,
				       &mq_rq->areq, NULL);
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (req_pending);

	mq->qcnt--;
}

void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->blkdata;
	struct mmc_card *card = md->queue.card;

	if (req && !mq->qcnt)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, BLK_STS_IOERR);
		}
		goto out;
	}

	if (req) {
		switch (req_op(req)) {
		case REQ_OP_DRV_IN:
		case REQ_OP_DRV_OUT:
			/*
			 * Complete ongoing async transfer before issuing
			 * ioctl()s
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_drv_op(mq, req);
			break;
		case REQ_OP_DISCARD:
			/*
			 * Complete ongoing async transfer before issuing
			 * discard.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_discard_rq(mq, req);
			break;
		case REQ_OP_SECURE_ERASE:
			/*
			 * Complete ongoing async transfer before issuing
			 * secure erase.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_secdiscard_rq(mq, req);
			break;
		case REQ_OP_FLUSH:
			/*
			 * Complete ongoing async transfer before issuing
			 * flush.
			 */
			if (mq->qcnt)
				mmc_blk_issue_rw_rq(mq, NULL);
			mmc_blk_issue_flush(mq, req);
			break;
		default:
			/* Normal request, just issue it */
			mmc_blk_issue_rw_rq(mq, req);
			card->host->context_info.is_waiting_last_req = false;
			break;
		}
	} else {
		/* No request, flushing the pipeline with NULL */
		mmc_blk_issue_rw_rq(mq, NULL);
		card->host->context_info.is_waiting_last_req = false;
	}

out:
	if (!mq->qcnt)
		mmc_put_card(card);
}

static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
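
/*
 * Note the host claiming scheme above: mmc_get_card() is taken when the
 * first request enters an idle queue, and mmc_put_card() releases it
 * only once qcnt drops back to zero, so a burst of requests holds the
 * host for its whole duration.
 */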
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
	if (devidx < 0)
		return ERR_PTR(devidx);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.blkdata = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->parent = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", card->host->index, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if ((mmc_card_mmc(card) &&
		     card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_write_cache(md->queue.queue, true, true);
	}

	return md;

err_putdisk:
	put_disk(md->disk);
err_kfree:
	kfree(md);
out:
	ida_simple_remove(&mmc_blk_ida, devidx);
	return ERR_PTR(ret);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
				 MMC_BLK_DATA_AREA_MAIN);
}

static int mmc_blk_alloc_part(struct mmc_card *card,
			      struct mmc_blk_data *md,
			      unsigned int part_type,
			      sector_t size,
			      bool default_ro,
			      const char *subname,
			      int area_type)
{
	char cap_str[10];
	struct mmc_blk_data *part_md;

	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
				    subname, area_type);
	if (IS_ERR(part_md))
		return PTR_ERR(part_md);
	part_md->part_type = part_type;
	list_add(&part_md->part, &md->part);

	string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s partition %u %s\n",
		part_md->disk->disk_name, mmc_card_id(card),
		mmc_card_name(card), part_md->part_type, cap_str);
	return 0;
}

/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
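/*
 * Tear down one mmc_blk_data: mark the queue dying and clean it up so
 * no new requests are accepted, remove the sysfs attributes and the
 * gendisk if it was registered, then drop the usage reference taken
 * at allocation time.
 */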
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
	struct mmc_card *card;

	if (md) {
		/*
		 * Flush remaining requests and free queues. It
		 * is freeing the queue that stops new requests
		 * from being accepted.
		 */
		card = md->queue.card;
		blk_set_queue_dying(md->queue.queue);
		mmc_cleanup_queue(&md->queue);
		if (md->disk->flags & GENHD_FL_UP) {
			device_remove_file(disk_to_dev(md->disk), &md->force_ro);
			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
			    card->ext_csd.boot_ro_lockable)
				device_remove_file(disk_to_dev(md->disk),
						   &md->power_ro_lock);

			del_gendisk(md->disk);
		}
		mmc_blk_put(md);
	}
}

static void mmc_blk_remove_parts(struct mmc_card *card,
				 struct mmc_blk_data *md)
{
	struct list_head *pos, *q;
	struct mmc_blk_data *part_md;

	list_for_each_safe(pos, q, &md->part) {
		part_md = list_entry(pos, struct mmc_blk_data, part);
		list_del(pos);
		mmc_blk_remove_req(part_md);
	}
}

static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	device_add_disk(md->parent, md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	    card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
					 &md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}

static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, mmc_blk_fixups);

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	dev_set_drvdata(&card->dev, md);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
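/*
 * On driver removal the partition disks are torn down first; the card
 * is then switched back to the main area (under a runtime PM reference
 * and host claim) before the main disk itself is removed and the
 * runtime PM state set up in probe is undone.
 */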
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(&card->dev, NULL);
}

static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}

#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}

static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);

static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};

static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n", perdev_minors);

	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
out:
	return res;
}

static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
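/*
 * Usage note (illustrative, not part of the driver): perdev_minors is
 * the only module parameter, and since MODULE_PARAM_PREFIX is set to
 * "mmcblk." it can be overridden at boot time, e.g. with
 *
 *	mmcblk.perdev_minors=16
 *
 * on the kernel command line. The parameter is exported read-only
 * (mode 0444), so it cannot be changed after the driver is loaded.
 */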