/*******************************************************************************
 * Filename: target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static struct se_subsystem_api iblock_template;

static void iblock_bio_done(struct bio *, int);

/*	iblock_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return ib_dev;
}

static struct se_device *iblock_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct iblock_dev *ib_dev = p;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct block_device *bd = NULL;
	struct request_queue *q;
	struct queue_limits *limits;
	u32 dev_flags = 0;
	fmode_t mode;
	int ret = -EINVAL;

	if (!ib_dev) {
		pr_err("Unable to locate struct iblock_dev parameter\n");
		return ERR_PTR(ret);
	}
	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset()\n");
		return ERR_PTR(-ENOMEM);
	}
pr_debug("IBLOCK: Created bio_set()\n"); 115 /* 116 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 117 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 118 */ 119 pr_debug( "IBLOCK: Claiming struct block_device: %s\n", 120 ib_dev->ibd_udev_path); 121 122 mode = FMODE_READ|FMODE_EXCL; 123 if (!ib_dev->ibd_readonly) 124 mode |= FMODE_WRITE; 125 126 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev); 127 if (IS_ERR(bd)) { 128 ret = PTR_ERR(bd); 129 goto failed; 130 } 131 /* 132 * Setup the local scope queue_limits from struct request_queue->limits 133 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 134 */ 135 q = bdev_get_queue(bd); 136 limits = &dev_limits.limits; 137 limits->logical_block_size = bdev_logical_block_size(bd); 138 limits->max_hw_sectors = UINT_MAX; 139 limits->max_sectors = UINT_MAX; 140 dev_limits.hw_queue_depth = q->nr_requests; 141 dev_limits.queue_depth = q->nr_requests; 142 143 ib_dev->ibd_bd = bd; 144 145 dev = transport_add_device_to_core_hba(hba, 146 &iblock_template, se_dev, dev_flags, ib_dev, 147 &dev_limits, "IBLOCK", IBLOCK_VERSION); 148 if (!dev) 149 goto failed; 150 151 /* 152 * Check if the underlying struct block_device request_queue supports 153 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 154 * in ATA and we need to set TPE=1 155 */ 156 if (blk_queue_discard(q)) { 157 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 158 q->limits.max_discard_sectors; 159 /* 160 * Currently hardcoded to 1 in Linux/SCSI code.. 161 */ 162 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 163 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 164 q->limits.discard_granularity >> 9; 165 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 166 q->limits.discard_alignment; 167 168 pr_debug("IBLOCK: BLOCK Discard support available," 169 " disabled by default\n"); 170 } 171 172 if (blk_queue_nonrot(q)) 173 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 174 175 return dev; 176 177 failed: 178 if (ib_dev->ibd_bio_set) { 179 bioset_free(ib_dev->ibd_bio_set); 180 ib_dev->ibd_bio_set = NULL; 181 } 182 ib_dev->ibd_bd = NULL; 183 return ERR_PTR(ret); 184 } 185 186 static void iblock_free_device(void *p) 187 { 188 struct iblock_dev *ib_dev = p; 189 190 if (ib_dev->ibd_bd != NULL) 191 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 192 if (ib_dev->ibd_bio_set != NULL) 193 bioset_free(ib_dev->ibd_bio_set); 194 kfree(ib_dev); 195 } 196 197 static unsigned long long iblock_emulate_read_cap_with_block_size( 198 struct se_device *dev, 199 struct block_device *bd, 200 struct request_queue *q) 201 { 202 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), 203 bdev_logical_block_size(bd)) - 1); 204 u32 block_size = bdev_logical_block_size(bd); 205 206 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 207 return blocks_long; 208 209 switch (block_size) { 210 case 4096: 211 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 212 case 2048: 213 blocks_long <<= 1; 214 break; 215 case 1024: 216 blocks_long <<= 2; 217 break; 218 case 512: 219 blocks_long <<= 3; 220 default: 221 break; 222 } 223 break; 224 case 2048: 225 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 226 case 4096: 227 blocks_long >>= 1; 228 break; 229 case 1024: 230 blocks_long <<= 1; 231 break; 232 case 512: 233 blocks_long <<= 2; 234 break; 235 default: 236 break; 237 } 238 break; 239 case 1024: 240 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 241 case 
4096: 242 blocks_long >>= 2; 243 break; 244 case 2048: 245 blocks_long >>= 1; 246 break; 247 case 512: 248 blocks_long <<= 1; 249 break; 250 default: 251 break; 252 } 253 break; 254 case 512: 255 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 256 case 4096: 257 blocks_long >>= 3; 258 break; 259 case 2048: 260 blocks_long >>= 2; 261 break; 262 case 1024: 263 blocks_long >>= 1; 264 break; 265 default: 266 break; 267 } 268 break; 269 default: 270 break; 271 } 272 273 return blocks_long; 274 } 275 276 static void iblock_end_io_flush(struct bio *bio, int err) 277 { 278 struct se_cmd *cmd = bio->bi_private; 279 280 if (err) 281 pr_err("IBLOCK: cache flush failed: %d\n", err); 282 283 if (cmd) { 284 if (err) { 285 cmd->scsi_sense_reason = 286 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 287 target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); 288 } else { 289 target_complete_cmd(cmd, SAM_STAT_GOOD); 290 } 291 } 292 293 bio_put(bio); 294 } 295 296 /* 297 * Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must 298 * always flush the whole cache. 299 */ 300 static int iblock_execute_sync_cache(struct se_cmd *cmd) 301 { 302 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 303 int immed = (cmd->t_task_cdb[1] & 0x2); 304 struct bio *bio; 305 306 /* 307 * If the Immediate bit is set, queue up the GOOD response 308 * for this SYNCHRONIZE_CACHE op. 309 */ 310 if (immed) 311 target_complete_cmd(cmd, SAM_STAT_GOOD); 312 313 bio = bio_alloc(GFP_KERNEL, 0); 314 bio->bi_end_io = iblock_end_io_flush; 315 bio->bi_bdev = ib_dev->ibd_bd; 316 if (!immed) 317 bio->bi_private = cmd; 318 submit_bio(WRITE_FLUSH, bio); 319 return 0; 320 } 321 322 static int iblock_execute_unmap(struct se_cmd *cmd) 323 { 324 struct se_device *dev = cmd->se_dev; 325 struct iblock_dev *ibd = dev->dev_ptr; 326 unsigned char *buf, *ptr = NULL; 327 sector_t lba; 328 int size; 329 u32 range; 330 int ret = 0; 331 int dl, bd_dl; 332 333 if (cmd->data_length < 8) { 334 pr_warn("UNMAP parameter list length %u too small\n", 335 cmd->data_length); 336 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 337 return -EINVAL; 338 } 339 340 buf = transport_kmap_data_sg(cmd); 341 342 dl = get_unaligned_be16(&buf[0]); 343 bd_dl = get_unaligned_be16(&buf[2]); 344 345 size = cmd->data_length - 8; 346 if (bd_dl > size) 347 pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", 348 cmd->data_length, bd_dl); 349 else 350 size = bd_dl; 351 352 if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { 353 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 354 ret = -EINVAL; 355 goto err; 356 } 357 358 /* First UNMAP block descriptor starts at 8 byte offset */ 359 ptr = &buf[8]; 360 pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u" 361 " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr); 362 363 while (size >= 16) { 364 lba = get_unaligned_be64(&ptr[0]); 365 range = get_unaligned_be32(&ptr[8]); 366 pr_debug("UNMAP: Using lba: %llu and range: %u\n", 367 (unsigned long long)lba, range); 368 369 if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) { 370 cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; 371 ret = -EINVAL; 372 goto err; 373 } 374 375 if (lba + range > dev->transport->get_blocks(dev) + 1) { 376 cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE; 377 ret = -EINVAL; 378 goto err; 379 } 380 381 ret = blkdev_issue_discard(ibd->ibd_bd, lba, range, 382 GFP_KERNEL, 0); 383 if (ret < 0) { 384 pr_err("blkdev_issue_discard() failed: %d\n", 385 ret); 386 goto err; 387 } 388 389 

		ptr += 16;
		size -= 16;
	}

err:
	transport_kunmap_data_sg(cmd);
	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static int iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
	int ret;

	ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
				   spc_get_write_same_sectors(cmd), GFP_KERNEL,
				   0);
	if (ret < 0) {
		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
		return ret;
	}

	target_complete_cmd(cmd, GOOD);
	return 0;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
					       struct se_subsystem_dev *se_dev,
					       const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = strict_strtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("strict_strtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;

	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t iblock_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
	struct block_device *bd = ibd->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, " UDEV PATH: %s",
				ibd->ibd_udev_path);
	bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ibd) ?
531 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 532 } else { 533 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 534 } 535 536 return bl; 537 } 538 539 static void iblock_complete_cmd(struct se_cmd *cmd) 540 { 541 struct iblock_req *ibr = cmd->priv; 542 u8 status; 543 544 if (!atomic_dec_and_test(&ibr->pending)) 545 return; 546 547 if (atomic_read(&ibr->ib_bio_err_cnt)) 548 status = SAM_STAT_CHECK_CONDITION; 549 else 550 status = SAM_STAT_GOOD; 551 552 target_complete_cmd(cmd, status); 553 kfree(ibr); 554 } 555 556 static struct bio * 557 iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num) 558 { 559 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 560 struct bio *bio; 561 562 /* 563 * Only allocate as many vector entries as the bio code allows us to, 564 * we'll loop later on until we have handled the whole request. 565 */ 566 if (sg_num > BIO_MAX_PAGES) 567 sg_num = BIO_MAX_PAGES; 568 569 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 570 if (!bio) { 571 pr_err("Unable to allocate memory for bio\n"); 572 return NULL; 573 } 574 575 bio->bi_bdev = ib_dev->ibd_bd; 576 bio->bi_private = cmd; 577 bio->bi_end_io = &iblock_bio_done; 578 bio->bi_sector = lba; 579 return bio; 580 } 581 582 static void iblock_submit_bios(struct bio_list *list, int rw) 583 { 584 struct blk_plug plug; 585 struct bio *bio; 586 587 blk_start_plug(&plug); 588 while ((bio = bio_list_pop(list))) 589 submit_bio(rw, bio); 590 blk_finish_plug(&plug); 591 } 592 593 static int iblock_execute_rw(struct se_cmd *cmd) 594 { 595 struct scatterlist *sgl = cmd->t_data_sg; 596 u32 sgl_nents = cmd->t_data_nents; 597 enum dma_data_direction data_direction = cmd->data_direction; 598 struct se_device *dev = cmd->se_dev; 599 struct iblock_req *ibr; 600 struct bio *bio; 601 struct bio_list list; 602 struct scatterlist *sg; 603 u32 sg_num = sgl_nents; 604 sector_t block_lba; 605 unsigned bio_cnt; 606 int rw; 607 int i; 608 609 if (data_direction == DMA_TO_DEVICE) { 610 /* 611 * Force data to disk if we pretend to not have a volatile 612 * write cache, or the initiator set the Force Unit Access bit. 613 */ 614 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 615 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 616 (cmd->se_cmd_flags & SCF_FUA))) 617 rw = WRITE_FUA; 618 else 619 rw = WRITE; 620 } else { 621 rw = READ; 622 } 623 624 /* 625 * Convert the blocksize advertised to the initiator to the 512 byte 626 * units unconditionally used by the Linux block layer. 
	 */
	if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
		cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -ENOSYS;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 * length of the S/G list entry this will cause an
		 * endless loop. Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
fail:
	return -ENOMEM;
}

static u32 iblock_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 iblock_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ibd = dev->dev_ptr;
	struct block_device *bd = ibd->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed err is still 0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic_inc();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct spc_ops iblock_spc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static int iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_spc_ops);
}

static struct se_subsystem_api iblock_template = {
	.name			= "iblock",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.allocate_virtdevice	= iblock_allocate_virtdevice,
	.create_virtdevice	= iblock_create_virtdevice,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.check_configfs_dev_params = iblock_check_configfs_dev_params,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_rev		= iblock_get_device_rev,
	.get_device_type	= iblock_get_device_type,
	.get_blocks		= iblock_get_blocks,
};

static int __init iblock_module_init(void)
{
	return transport_subsystem_register(&iblock_template);
}

static void iblock_module_exit(void)
{
	transport_subsystem_release(&iblock_template);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);
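
/*
 * Illustrative userspace usage (not part of the driver): the configfs
 * parameters parsed by iblock_set_configfs_dev_params() above (udev_path=,
 * readonly=) are normally written by tools such as targetcli. A minimal
 * by-hand sketch, assuming configfs is mounted at /sys/kernel/config and
 * using a hypothetical HBA name (iblock_0), device name (my_disk) and
 * backing device (/dev/sdb):
 *
 *   mkdir -p /sys/kernel/config/target/core/iblock_0/my_disk
 *   echo "udev_path=/dev/sdb" > /sys/kernel/config/target/core/iblock_0/my_disk/control
 *   echo 1 > /sys/kernel/config/target/core/iblock_0/my_disk/enable
 *
 * The enable step is what reaches iblock_create_virtdevice(), which claims
 * the backing struct block_device via blkdev_get_by_path().
 */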