/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}

static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	fmode_t mode;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
pr_debug("IBLOCK: Created bio_set()\n"); 115 /* 116 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 117 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 118 */ 119 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 120 ib_dev->ibd_udev_path); 121 122 mode = FMODE_READ|FMODE_EXCL; 123 if (!ib_dev->ibd_readonly) 124 mode |= FMODE_WRITE; 125 126 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 127 if (IS_ERR(bd)) { 128 ret = PTR_ERR(bd); 129 goto failed; 130 } 131 /* 132 * Setup the local scope queue_limits from struct request_queue->limits 133 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 134 */ 135 q = bdev_get_queue(bd); 136 limits = &dev_limits.limits; 137 limits->logical_block_size = bdev_logical_block_size(bd); 138 limits->max_hw_sectors = UINT_MAX; 139 limits->max_sectors = UINT_MAX; 140 dev_limits.hw_queue_depth = q->nr_requests; 141 dev_limits.queue_depth = q->nr_requests; 142 143 ib_dev->ibd_bd = bd; 144 145 dev = transport_add_device_to_core_hba(hba, 146 &iblock_template, se_dev, dev_flags, ib_dev, 147 &dev_limits, "IBLOCK", IBLOCK_VERSION); 148 if (!dev) 149 goto failed; 150 151 /* 152 * Check if the underlying struct block_device request_queue supports 153 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 154 * in ATA and we need to set TPE=1 155 */ 156 if (blk_queue_discard(q)) { 157 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 158 q->limits.max_discard_sectors; 159 /* 160 * Currently hardcoded to 1 in Linux/SCSI code.. 161 */ 162 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 163 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 164 q->limits.discard_granularity >> 9; 165 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 166 q->limits.discard_alignment; 167 168 pr_debug("IBLOCK: BLOCK Discard support available," 169 " disabled by default\n"); 170 } 171 172 if (blk_queue_nonrot(q)) 173 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 174 175 return dev; 176 177 failed: 178 if (ib_dev->ibd_bio_set) { 179 bioset_free(ib_dev->ibd_bio_set); 180 ib_dev->ibd_bio_set = NULL; 181 } 182 ib_dev->ibd_bd = NULL; 183 return ERR_PTR(ret); 184 } 185 186 static void iblock_free_device(void *p) 187 { 188 struct iblock_dev *ib_dev = p; 189 190 if (ib_dev->ibd_bd != NULL) 191 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 192 if (ib_dev->ibd_bio_set != NULL) 193 bioset_free(ib_dev->ibd_bio_set); 194 kfree(ib_dev); 195 } 196 197 static unsigned long long iblock_emulate_read_cap_with_block_size( 198 struct se_device *dev, 199 struct block_device *bd, 200 struct request_queue *q) 201 { 202 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), 203 bdev_logical_block_size(bd)) - 1); 204 u32 block_size = bdev_logical_block_size(bd); 205 206 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 207 return blocks_long; 208 209 switch (block_size) { 210 case 4096: 211 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 212 case 2048: 213 blocks_long <<= 1; 214 break; 215 case 1024: 216 blocks_long <<= 2; 217 break; 218 case 512: 219 blocks_long <<= 3; 220 default: 221 break; 222 } 223 break; 224 case 2048: 225 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 226 case 4096: 227 blocks_long >>= 1; 228 break; 229 case 1024: 230 blocks_long <<= 1; 231 break; 232 case 512: 233 blocks_long <<= 2; 234 break; 235 default: 236 break; 237 } 238 break; 239 case 1024: 240 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 241 case 
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err) {
			cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		} else {
			target_complete_cmd(cmd, SAM_STAT_GOOD);
		}
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

/*
 * Translate an UNMAP parameter list into discard requests on the backing
 * struct block_device.
 */
static int iblock_execute_unmap(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_dev *ibd = dev->dev_ptr;
	unsigned char *buf, *ptr = NULL;
	sector_t lba;
	int size = cmd->data_length;
	u32 range;
	int ret = 0;
	int dl, bd_dl;

	buf = transport_kmap_data_sg(cmd);

	dl = get_unaligned_be16(&buf[0]);
	bd_dl = get_unaligned_be16(&buf[2]);

	size = min(size - 8, bd_dl);
	if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
		ret = -EINVAL;
		goto err;
	}

	/* First UNMAP block descriptor starts at 8 byte offset */
	ptr = &buf[8];
	pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

	while (size >= 16) {
		lba = get_unaligned_be64(&ptr[0]);
		range = get_unaligned_be32(&ptr[8]);
		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
				(unsigned long long)lba, range);

		if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
			cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
			ret = -EINVAL;
			goto err;
		}

		if (lba + range > dev->transport->get_blocks(dev) + 1) {
			cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
			ret = -EINVAL;
			goto err;
		}

		ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_err("blkdev_issue_discard() failed: %d\n",
					ret);
			goto err;
		}

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static int iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
	int ret;

	ret = blkdev_issue_discard(ibd->ibd_bd,
				   cmd->t_task_lba,
				   spc_get_write_same_sectors(cmd),
				   GFP_KERNEL, 0);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, " UDEV PATH: %s",
				ibd->ibd_udev_path);
	bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
521 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 522 } else { 523 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 524 } 525 526 return bl; 527 } 528 529 static void iblock_complete_cmd(struct se_cmd *cmd) 530 { 531 struct iblock_req *ibr = cmd->priv; 532 u8 status; 533 534 if (!atomic_dec_and_test(&ibr->pending)) 535 return; 536 537 if (atomic_read(&ibr->ib_bio_err_cnt)) 538 status = SAM_STAT_CHECK_CONDITION; 539 else 540 status = SAM_STAT_GOOD; 541 542 target_complete_cmd(cmd, status); 543 kfree(ibr); 544 } 545 546 static void iblock_bio_destructor(struct bio *bio) 547 { 548 struct se_cmd *cmd = bio->bi_private; 549 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 550 551 bio_free(bio, ib_dev->ibd_bio_set); 552 } 553 554 static struct bio * 555 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) 556 { 557 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 558 struct bio *bio; 559 560 /* 561 * Only allocate as many vector entries as the bio code allows us to, 562 * we'll loop later on until we have handled the whole request. 563 */ 564 if (sg_num > BIO_MAX_PAGES) 565 sg_num = BIO_MAX_PAGES; 566 567 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 568 if (!bio) { 569 pr_err("Unable to allocate memory for bio\n"); 570 return NULL; 571 } 572 573 bio->bi_bdev = ib_dev->ibd_bd; 574 bio->bi_private = cmd; 575 bio->bi_destructor = iblock_bio_destructor; 576 bio->bi_end_io = &iblock_bio_done; 577 bio->bi_sector = lba; 578 return bio; 579 } 580 581 static void iblock_submit_bios(struct bio_list *list, int rw) 582 { 583 struct blk_plug plug; 584 struct bio *bio; 585 586 blk_start_plug(&plug); 587 while ((bio = bio_list_pop(list))) 588 submit_bio(rw, bio); 589 blk_finish_plug(&plug); 590 } 591 592 static int iblock_execute_rw(struct se_cmd *cmd) 593 { 594 struct scatterlist *sgl = cmd->t_data_sg; 595 u32 sgl_nents = cmd->t_data_nents; 596 enum dma_data_direction data_direction = cmd->data_direction; 597 struct se_device *dev = cmd->se_dev; 598 struct iblock_req *ibr; 599 struct bio *bio; 600 struct bio_list list; 601 struct scatterlist *sg; 602 u32 sg_num = sgl_nents; 603 sector_t block_lba; 604 unsigned bio_cnt; 605 int rw; 606 int i; 607 608 if (data_direction == DMA_TO_DEVICE) { 609 /* 610 * Force data to disk if we pretend to not have a volatile 611 * write cache, or the initiator set the Force Unit Access bit. 612 */ 613 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 614 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 615 (cmd->se_cmd_flags & SCF_FUA))) 616 rw = WRITE_FUA; 617 else 618 rw = WRITE; 619 } else { 620 rw = READ; 621 } 622 623 /* 624 * Convert the blocksize advertised to the initiator to the 512 byte 625 * units unconditionally used by the Linux block layer. 
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
	return -ENOMEM;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct spc_ops iblock_spc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static int iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_spc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);