/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
        return container_of(dev, struct iblock_dev, dev);
}


static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
        pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
                " Generic Target Core Stack %s\n", hba->hba_id,
                IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
        return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
        struct iblock_dev *ib_dev = NULL;

        ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
        if (!ib_dev) {
                pr_err("Unable to allocate struct iblock_dev\n");
                return NULL;
        }

        pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

        return &ib_dev->dev;
}
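
/*
 * Note: iblock_alloc_device() only allocates the backend object; the block
 * device itself is not claimed until userspace has supplied udev_path= via
 * configfs and the core calls iblock_configure_device() below.  A typical
 * call sequence from userspace (paths and names are illustrative and depend
 * on the configfs mount point and the chosen HBA/device names):
 *
 *   mkdir -p /sys/kernel/config/target/core/iblock_0/my_dev
 *   echo "udev_path=/dev/sdb,readonly=0" > \
 *       /sys/kernel/config/target/core/iblock_0/my_dev/control
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/my_dev/enable
 *
 * which maps to iblock_alloc_device(), iblock_set_configfs_dev_params(),
 * and iblock_configure_device(), in that order.
 */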

static int iblock_configure_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
        fmode_t mode;
        int ret = -ENOMEM;

        if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
                pr_err("Missing udev_path= parameter for IBLOCK\n");
                return -EINVAL;
        }

        ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset\n");
                goto out;
        }

        pr_debug("IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);

        mode = FMODE_READ|FMODE_EXCL;
        if (!ib_dev->ibd_readonly)
                mode |= FMODE_WRITE;

        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
                goto out_free_bioset;
        }
        ib_dev->ibd_bd = bd;

        q = bdev_get_queue(bd);

        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
        dev->dev_attrib.hw_max_sectors = UINT_MAX;
        dev->dev_attrib.hw_queue_depth = q->nr_requests;

        /*
         * If the underlying struct block_device request_queue supports
         * QUEUE_FLAG_DISCARD (UNMAP/WRITE SAME in SCSI, TRIM in ATA),
         * report the discard limits so that TPE=1 can be advertised.
         */
        if (blk_queue_discard(q)) {
                dev->dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;

                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
                dev->dev_attrib.max_unmap_block_desc_count = 1;
                dev->dev_attrib.unmap_granularity =
                                q->limits.discard_granularity >> 9;
                dev->dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;

                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }

        if (blk_queue_nonrot(q))
                dev->dev_attrib.is_nonrot = 1;
        return 0;

out_free_bioset:
        bioset_free(ib_dev->ibd_bio_set);
        ib_dev->ibd_bio_set = NULL;
out:
        return ret;
}

static void iblock_free_device(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
        if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
        kfree(ib_dev);
}
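
/*
 * The helper below rescales the backing device's capacity into the block
 * size exported to the initiator.  Worked example (values illustrative):
 * a 1 GiB backing device with 4096-byte logical blocks holds 262144 blocks,
 * so blocks_long = 262143 (the last LBA).  Exported with a 512-byte
 * block_size this becomes 262143 << 3 = 2097144.  Note that the exact last
 * 512-byte LBA would be 2097151; shifting an already-decremented count
 * under-reports by 7 sectors, which is conservative but imprecise.
 */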

static unsigned long long iblock_emulate_read_cap_with_block_size(
        struct se_device *dev,
        struct block_device *bd,
        struct request_queue *q)
{
        unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);

        if (block_size == dev->dev_attrib.block_size)
                return blocks_long;

        switch (block_size) {
        case 4096:
                switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
                case 1024:
                        blocks_long <<= 2;
                        break;
                case 512:
                        blocks_long <<= 3;
                        break;
                default:
                        break;
                }
                break;
        case 2048:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
                case 1024:
                        blocks_long <<= 1;
                        break;
                case 512:
                        blocks_long <<= 2;
                        break;
                default:
                        break;
                }
                break;
        case 1024:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
                case 2048:
                        blocks_long >>= 1;
                        break;
                case 512:
                        blocks_long <<= 1;
                        break;
                default:
                        break;
                }
                break;
        case 512:
                switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
                case 2048:
                        blocks_long >>= 2;
                        break;
                case 1024:
                        blocks_long >>= 1;
                        break;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        return blocks_long;
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
        struct se_cmd *cmd = bio->bi_private;

        if (err)
                pr_err("IBLOCK: cache flush failed: %d\n", err);

        if (cmd) {
                if (err) {
                        cmd->scsi_sense_reason =
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
                } else {
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
                }
        }

        bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle LBA ranges and must
 * always flush the whole cache.
 */
static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;

        /*
         * If the Immediate bit is set, queue up the GOOD response
         * for this SYNCHRONIZE_CACHE op.
         */
        if (immed)
                target_complete_cmd(cmd, SAM_STAT_GOOD);

        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio->bi_bdev = ib_dev->ibd_bd;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(WRITE_FLUSH, bio);
        return 0;
}
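
/*
 * The UNMAP parameter list parsed below follows the SBC layout (all fields
 * big-endian):
 *
 *   bytes 0-1: UNMAP data length (dl)
 *   bytes 2-3: UNMAP block descriptor data length (bd_dl)
 *   bytes 4-7: reserved
 *   bytes 8+ : block descriptors, 16 bytes each:
 *              8-byte LBA, 4-byte number of blocks, 4 bytes reserved
 *
 * e.g. a single-descriptor payload unmapping 8 blocks at LBA 0x1000 would
 * carry dl = 22 and bd_dl = 16, followed by one descriptor.
 */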

static int iblock_execute_unmap(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        unsigned char *buf, *ptr = NULL;
        sector_t lba;
        int size;
        u32 range;
        int ret = 0;
        int dl, bd_dl;

        if (cmd->data_length < 8) {
                pr_warn("UNMAP parameter list length %u too small\n",
                        cmd->data_length);
                cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
                return -EINVAL;
        }

        buf = transport_kmap_data_sg(cmd);

        dl = get_unaligned_be16(&buf[0]);
        bd_dl = get_unaligned_be16(&buf[2]);

        size = cmd->data_length - 8;
        if (bd_dl > size)
                pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n",
                        cmd->data_length, bd_dl);
        else
                size = bd_dl;

        if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
                cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
                ret = -EINVAL;
                goto err;
        }

        /* First UNMAP block descriptor starts at 8 byte offset */
        ptr = &buf[8];
        pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
                " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);

        while (size >= 16) {
                lba = get_unaligned_be64(&ptr[0]);
                range = get_unaligned_be32(&ptr[8]);
                pr_debug("UNMAP: Using lba: %llu and range: %u\n",
                         (unsigned long long)lba, range);

                if (range > dev->dev_attrib.max_unmap_lba_count) {
                        cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
                        ret = -EINVAL;
                        goto err;
                }

                if (lba + range > dev->transport->get_blocks(dev) + 1) {
                        cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
                        ret = -EINVAL;
                        goto err;
                }

                ret = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
                                           GFP_KERNEL, 0);
                if (ret < 0) {
                        pr_err("blkdev_issue_discard() failed: %d\n",
                               ret);
                        goto err;
                }

                ptr += 16;
                size -= 16;
        }

err:
        transport_kunmap_data_sg(cmd);
        if (!ret)
                target_complete_cmd(cmd, GOOD);
        return ret;
}

static int iblock_execute_write_same(struct se_cmd *cmd)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int ret;

        ret = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
                                   spc_get_write_same_sectors(cmd), GFP_KERNEL,
                                   0);
        if (ret < 0) {
                pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
                return ret;
        }

        target_complete_cmd(cmd, GOOD);
        return 0;
}

enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
        {Opt_udev_path, "udev_path=%s"},
        {Opt_readonly, "readonly=%d"},
        {Opt_force, "force=%d"},
        {Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
                const char *page, ssize_t count)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
        unsigned long tmp_readonly;

        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
                return -ENOMEM;

        orig = opts;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;

                token = match_token(ptr, tokens, args);
                switch (token) {
                case Opt_udev_path:
                        if (ib_dev->ibd_bd) {
                                pr_err("Unable to set udev_path= while"
                                        " ib_dev->ibd_bd exists\n");
                                ret = -EEXIST;
                                goto out;
                        }
                        if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
                                SE_UDEV_PATH_LEN) == 0) {
                                ret = -EINVAL;
                                break;
                        }
                        pr_debug("IBLOCK: Referencing UDEV path: %s\n",
                                        ib_dev->ibd_udev_path);
                        ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
                        break;
                case Opt_readonly:
                        arg_p = match_strdup(&args[0]);
                        if (!arg_p) {
                                ret = -ENOMEM;
                                break;
                        }
                        ret = strict_strtoul(arg_p, 0, &tmp_readonly);
                        kfree(arg_p);
                        if (ret < 0) {
                                pr_err("strict_strtoul() failed for"
                                                " readonly=\n");
                                goto out;
                        }
                        ib_dev->ibd_readonly = tmp_readonly;
                        pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
                        break;
                case Opt_force:
                        break;
                default:
                        break;
                }
        }

out:
        kfree(orig);
        return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;

        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
        if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
                                ib_dev->ibd_udev_path);
        bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
                        "" : (bd->bd_holder == ib_dev) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
        }

        return bl;
}
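
/*
 * Per-command completion is refcounted: ibr->pending holds one reference
 * per in-flight bio plus one for the submission path itself, and each
 * drop goes through iblock_complete_cmd() below.  Illustrative trace of a
 * WRITE that ends up spanning two bios:
 *
 *   atomic_set(&ibr->pending, 2);    first bio + submission-path reference
 *   atomic_inc(&ibr->pending);       second bio allocated        -> 3
 *   iblock_bio_done()                bio 1 completes             -> 2
 *   iblock_complete_cmd()            submission path's own drop  -> 1
 *   iblock_bio_done()                bio 2 completes             -> 0, done
 *
 * The extra submission-path reference prevents the command from completing
 * while bios are still being set up and queued.
 */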

static void iblock_complete_cmd(struct se_cmd *cmd)
{
        struct iblock_req *ibr = cmd->priv;
        u8 status;

        if (!atomic_dec_and_test(&ibr->pending))
                return;

        if (atomic_read(&ibr->ib_bio_err_cnt))
                status = SAM_STAT_CHECK_CONDITION;
        else
                status = SAM_STAT_GOOD;

        target_complete_cmd(cmd, status);
        kfree(ibr);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        struct bio *bio;

        /*
         * Only allocate as many vector entries as the bio code allows us to,
         * we'll loop later on until we have handled the whole request.
         */
        if (sg_num > BIO_MAX_PAGES)
                sg_num = BIO_MAX_PAGES;

        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
                return NULL;
        }

        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
        bio->bi_sector = lba;
        return bio;
}

static void iblock_submit_bios(struct bio_list *list, int rw)
{
        struct blk_plug plug;
        struct bio *bio;

        blk_start_plug(&plug);
        while ((bio = bio_list_pop(list)))
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
}
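
/*
 * The block layer addresses in 512-byte sectors regardless of the block
 * size advertised to the initiator, so iblock_execute_rw() below rescales
 * cmd->t_task_lba first.  Illustrative example: with a 4096-byte exported
 * block_size, SCSI LBA 100 becomes block-layer sector 100 << 3 = 800.
 */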

static int iblock_execute_rw(struct se_cmd *cmd)
{
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *dev = cmd->se_dev;
        struct iblock_req *ibr;
        struct bio *bio;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        sector_t block_lba;
        unsigned bio_cnt;
        int rw;
        int i;

        if (data_direction == DMA_TO_DEVICE) {
                /*
                 * Force data to disk if we pretend not to have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
                if (dev->dev_attrib.emulate_write_cache == 0 ||
                    (dev->dev_attrib.emulate_fua_write > 0 &&
                     (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
        } else {
                rw = READ;
        }

        /*
         * Convert the blocksize advertised to the initiator to the 512 byte
         * units unconditionally used by the Linux block layer.
         */
        if (dev->dev_attrib.block_size == 4096)
                block_lba = (cmd->t_task_lba << 3);
        else if (dev->dev_attrib.block_size == 2048)
                block_lba = (cmd->t_task_lba << 2);
        else if (dev->dev_attrib.block_size == 1024)
                block_lba = (cmd->t_task_lba << 1);
        else if (dev->dev_attrib.block_size == 512)
                block_lba = cmd->t_task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->dev_attrib.block_size);
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return -ENOSYS;
        }

        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
        if (!ibr)
                goto fail;
        cmd->priv = ibr;

        if (!sgl_nents) {
                atomic_set(&ibr->pending, 1);
                iblock_complete_cmd(cmd);
                return 0;
        }

        bio = iblock_get_bio(cmd, block_lba, sgl_nents);
        if (!bio)
                goto fail_free_ibr;

        bio_list_init(&list);
        bio_list_add(&list, bio);

        atomic_set(&ibr->pending, 2);
        bio_cnt = 1;

        for_each_sg(sgl, sg, sgl_nents, i) {
                /*
                 * XXX: if the length the device accepts is shorter than the
                 * length of the S/G list entry this will cause an endless
                 * loop.  Better hope no driver uses huge pages.
                 */
                while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
                                != sg->length) {
                        if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
                                iblock_submit_bios(&list, rw);
                                bio_cnt = 0;
                        }

                        bio = iblock_get_bio(cmd, block_lba, sg_num);
                        if (!bio)
                                goto fail_put_bios;

                        atomic_inc(&ibr->pending);
                        bio_list_add(&list, bio);
                        bio_cnt++;
                }

                /* Always in 512 byte units for Linux/Block */
                block_lba += sg->length >> IBLOCK_LBA_SHIFT;
                sg_num--;
        }

        iblock_submit_bios(&list, rw);
        iblock_complete_cmd(cmd);
        return 0;

fail_put_bios:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
fail_free_ibr:
        kfree(ibr);
        cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
        return -ENOMEM;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);

        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
        struct se_cmd *cmd = bio->bi_private;
        struct iblock_req *ibr = cmd->priv;

        /*
         * Set -EIO if !BIO_UPTODATE and the passed-in err is still zero.
         */
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
                err = -EIO;

        if (err != 0) {
                pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
                        " err: %d\n", bio, err);
                /*
                 * Bump the ib_bio_err_cnt and release bio.
                 */
                atomic_inc(&ibr->ib_bio_err_cnt);
                smp_mb__after_atomic_inc();
        }

        bio_put(bio);

        iblock_complete_cmd(cmd);
}

static struct sbc_ops iblock_sbc_ops = {
        .execute_rw		= iblock_execute_rw,
        .execute_sync_cache	= iblock_execute_sync_cache,
        .execute_write_same	= iblock_execute_write_same,
        .execute_unmap		= iblock_execute_unmap,
};

static int iblock_parse_cdb(struct se_cmd *cmd)
{
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static struct se_subsystem_api iblock_template = {
        .name			= "iblock",
        .inquiry_prod		= "IBLOCK",
        .inquiry_rev		= IBLOCK_VERSION,
        .owner			= THIS_MODULE,
        .transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba		= iblock_attach_hba,
        .detach_hba		= iblock_detach_hba,
        .alloc_device		= iblock_alloc_device,
        .configure_device	= iblock_configure_device,
        .free_device		= iblock_free_device,
        .parse_cdb		= iblock_parse_cdb,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
        .get_device_rev		= sbc_get_device_rev,
        .get_device_type	= sbc_get_device_type,
        .get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
        return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
        transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);