/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}

static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("IBLOCK: Created bio_set()\n");
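	/*
	 * Note: the device node named by udev_path is claimed exclusively
	 * below (FMODE_EXCL with ib_dev as the holder), so no other exclusive
	 * opener can claim it while the IBLOCK export exists;
	 * iblock_free_device() releases the claim with a matching blkdev_put().
	 */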
	/*
	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
	 * has already been set before "echo 1 > $HBA/$DEV/enable" can run.
	 */
	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto failed;
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = bdev_get_queue(bd);
	limits = &dev_limits.limits;
	limits->logical_block_size = bdev_logical_block_size(bd);
	limits->max_hw_sectors = UINT_MAX;
	limits->max_sectors = UINT_MAX;
	dev_limits.hw_queue_depth = q->nr_requests;
	dev_limits.queue_depth = q->nr_requests;

	ib_dev->ibd_bd = bd;

	dev = transport_add_device_to_core_hba(hba,
			&iblock_template, se_dev, dev_flags, ib_dev,
			&dev_limits, "IBLOCK", IBLOCK_VERSION);
	if (!dev)
		goto failed;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE SAME in SCSI and TRIM
	 * in ATA, in which case TPE=1 needs to be set.
	 */
	if (blk_queue_discard(q)) {
		dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}

	if (blk_queue_nonrot(q))
		dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;

	return dev;

failed:
	if (ib_dev->ibd_bio_set) {
		bioset_free(ib_dev->ibd_bio_set);
		ib_dev->ibd_bio_set = NULL;
	}
	ib_dev->ibd_bd = NULL;
	return ERR_PTR(ret);
}

static void iblock_free_device(void *p)
{
	struct iblock_dev *ib_dev = p;

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);
	kfree(ib_dev);
}

static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
	return container_of(task, struct iblock_req, ib_task);
}

static struct se_task *
iblock_alloc_task(unsigned char *cdb)
{
	struct iblock_req *ib_req;

	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ib_req) {
		pr_err("Unable to allocate memory for struct iblock_req\n");
		return NULL;
	}

	atomic_set(&ib_req->pending, 1);
	return &ib_req->ib_task;
}
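/*
 * Convert the backing device's last-LBA value into units of the exported
 * SCSI block size.  Worked example (derived from the shifts below): a backing
 * device with 4096-byte logical blocks exported with a 512-byte SCSI block
 * size holds 8 exported blocks per backing block, hence the << 3; the inverse
 * direction shifts right instead.
 */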
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->se_sub_dev->se_dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd)
		transport_complete_sync_cache(cmd, err == 0);
	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static void iblock_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
}

static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	int barrier = 0;

	return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}

static void iblock_free_task(struct se_task *task)
{
	kfree(IBLOCK_REQ(task));
}
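/*
 * configfs device parameters.  udev_path= is the only required option and is
 * normally written to the device's control attribute before the device is
 * enabled; the paths below are illustrative only:
 *
 *	echo "udev_path=/dev/sdc" > $HBA/$DEV/control
 *	echo 1 > $HBA/$DEV/enable
 */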
enum {
	Opt_udev_path, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
					"%s", arg_p);
			kfree(arg_p);
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
		bl += sprintf(b + bl, "  UDEV PATH: %s\n",
				ibd->ibd_udev_path);
	} else
		bl += sprintf(b + bl, "\n");

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static void iblock_bio_destructor(struct bio *bio)
{
	struct se_task *task = bio->bi_private;
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;

	bio_free(bio, ib_dev->ibd_bio_set);
}
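/*
 * ib_req->pending is initialized to 1 in iblock_alloc_task() and bumped once
 * for every bio allocated here.  iblock_do_task() drops the initial reference
 * after submitting all bios and iblock_bio_done() drops one per completion,
 * so whichever of the two sees the count reach zero completes the task.
 */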
static struct bio *
iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;
	struct iblock_req *ib_req = IBLOCK_REQ(task);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to;
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = task;
	bio->bi_destructor = iblock_bio_destructor;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_sector = lba;
	atomic_inc(&ib_req->pending);

	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
	pr_debug("Set ib_req->pending: %d\n", atomic_read(&ib_req->pending));
	return bio;
}

static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}
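/*
 * Build and submit bios for a task.  The task LBA is first converted from the
 * exported SCSI block size into the block layer's 512-byte sectors (for
 * example, purely as an illustration, LBA 100 with a 4096-byte exported block
 * size becomes sector 800).  Writes go out as WRITE_FUA when write-cache
 * emulation is disabled or the initiator set the FUA bit, forcing the data
 * to stable storage.
 */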
static int iblock_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr = IBLOCK_REQ(task);
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 i, sg_num = task->task_sg_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw;

	if (task->task_data_direction == DMA_TO_DEVICE) {
		/*
		 * Force data to disk if we pretend to not have a volatile
		 * write cache, or the initiator set the Force Unit Access bit.
		 */
		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
		     (cmd->se_cmd_flags & SCF_FUA)))
			rw = WRITE_FUA;
		else
			rw = WRITE;
	} else {
		rw = READ;
	}

	/*
	 * Do starting conversion up from non 512-byte blocksize with
	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (task->task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (task->task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (task->task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = task->task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	bio = iblock_get_bio(task, block_lba, sg_num);
	if (!bio) {
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOMEM;
	}

	bio_list_init(&list);
	bio_list_add(&list, bio);
	bio_cnt = 1;

	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(task, block_lba, sg_num);
			if (!bio)
				goto fail;
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);

	if (atomic_dec_and_test(&ibr->pending)) {
		transport_complete_task(task,
				!atomic_read(&ibr->ib_bio_err_cnt));
	}
	return 0;

fail:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	return -ENOMEM;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_task *task = bio->bi_private;
	struct iblock_req *ibr = IBLOCK_REQ(task);

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
		 task, bio, task->task_lba,
		 (unsigned long long)bio->bi_sector, err);

	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));
}
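/*
 * Subsystem API template handed to the generic target core via
 * transport_subsystem_register() in iblock_module_init().  The
 * write_cache_emulated and fua_write_emulated flags advertise that this
 * backend supports write-cache and FUA emulation, which the WRITE_FUA
 * handling in iblock_do_task() above honors.
 */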
static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.write_cache_emulated	= 1,
	.fua_write_emulated	= 1,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.alloc_task		= iblock_alloc_task,
	.do_task		= iblock_do_task,
	.do_discard		= iblock_do_discard,
	.do_sync_cache		= iblock_emulate_sync_cache,
	.free_task		= iblock_free_task,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);