/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;
		dev->dev_attrib.unmap_zeroes_data =
				q->limits.discard_zeroes_data;

		pr_debug("IBLOCK: BLOCK Discard support available,"
			 " disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_error) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_error);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_error)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_error);

	if (cmd) {
		if (bio->bi_error)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
401 */ 402 if (immed) 403 target_complete_cmd(cmd, SAM_STAT_GOOD); 404 405 bio = bio_alloc(GFP_KERNEL, 0); 406 bio->bi_end_io = iblock_end_io_flush; 407 bio->bi_bdev = ib_dev->ibd_bd; 408 if (!immed) 409 bio->bi_private = cmd; 410 submit_bio(WRITE_FLUSH, bio); 411 return 0; 412 } 413 414 static sense_reason_t 415 iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) 416 { 417 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 418 int ret; 419 420 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); 421 if (ret < 0) { 422 pr_err("blkdev_issue_discard() failed: %d\n", ret); 423 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 424 } 425 426 return 0; 427 } 428 429 static sense_reason_t 430 iblock_execute_write_same(struct se_cmd *cmd) 431 { 432 struct iblock_req *ibr; 433 struct scatterlist *sg; 434 struct bio *bio; 435 struct bio_list list; 436 sector_t block_lba = cmd->t_task_lba; 437 sector_t sectors = sbc_get_write_same_sectors(cmd); 438 439 if (cmd->prot_op) { 440 pr_err("WRITE_SAME: Protection information with IBLOCK" 441 " backends not supported\n"); 442 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 443 } 444 sg = &cmd->t_data_sg[0]; 445 446 if (cmd->t_data_nents > 1 || 447 sg->length != cmd->se_dev->dev_attrib.block_size) { 448 pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u" 449 " block_size: %u\n", cmd->t_data_nents, sg->length, 450 cmd->se_dev->dev_attrib.block_size); 451 return TCM_INVALID_CDB_FIELD; 452 } 453 454 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 455 if (!ibr) 456 goto fail; 457 cmd->priv = ibr; 458 459 bio = iblock_get_bio(cmd, block_lba, 1); 460 if (!bio) 461 goto fail_free_ibr; 462 463 bio_list_init(&list); 464 bio_list_add(&list, bio); 465 466 atomic_set(&ibr->pending, 1); 467 468 while (sectors) { 469 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 470 != sg->length) { 471 472 bio = iblock_get_bio(cmd, block_lba, 1); 473 if (!bio) 474 goto fail_put_bios; 475 476 atomic_inc(&ibr->pending); 477 bio_list_add(&list, bio); 478 } 479 480 /* Always in 512 byte units for Linux/Block */ 481 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 482 sectors -= 1; 483 } 484 485 iblock_submit_bios(&list, WRITE); 486 return 0; 487 488 fail_put_bios: 489 while ((bio = bio_list_pop(&list))) 490 bio_put(bio); 491 fail_free_ibr: 492 kfree(ibr); 493 fail: 494 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 495 } 496 497 enum { 498 Opt_udev_path, Opt_readonly, Opt_force, Opt_err 499 }; 500 501 static match_table_t tokens = { 502 {Opt_udev_path, "udev_path=%s"}, 503 {Opt_readonly, "readonly=%d"}, 504 {Opt_force, "force=%d"}, 505 {Opt_err, NULL} 506 }; 507 508 static ssize_t iblock_set_configfs_dev_params(struct se_device *dev, 509 const char *page, ssize_t count) 510 { 511 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 512 char *orig, *ptr, *arg_p, *opts; 513 substring_t args[MAX_OPT_ARGS]; 514 int ret = 0, token; 515 unsigned long tmp_readonly; 516 517 opts = kstrdup(page, GFP_KERNEL); 518 if (!opts) 519 return -ENOMEM; 520 521 orig = opts; 522 523 while ((ptr = strsep(&opts, ",\n")) != NULL) { 524 if (!*ptr) 525 continue; 526 527 token = match_token(ptr, tokens, args); 528 switch (token) { 529 case Opt_udev_path: 530 if (ib_dev->ibd_bd) { 531 pr_err("Unable to set udev_path= while" 532 " ib_dev->ibd_bd exists\n"); 533 ret = -EEXIST; 534 goto out; 535 } 536 if (match_strlcpy(ib_dev->ibd_udev_path, &args[0], 537 SE_UDEV_PATH_LEN) == 0) { 538 ret = -EINVAL; 539 break; 540 } 541 pr_debug("IBLOCK: Referencing UDEV path: %s\n", 542 
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, " UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed: %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset: %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
667 */ 668 if (q->flush_flags & REQ_FUA) { 669 if (cmd->se_cmd_flags & SCF_FUA) 670 rw = WRITE_FUA; 671 else if (!(q->flush_flags & REQ_FLUSH)) 672 rw = WRITE_FUA; 673 else 674 rw = WRITE; 675 } else { 676 rw = WRITE; 677 } 678 } else { 679 rw = READ; 680 } 681 682 /* 683 * Convert the blocksize advertised to the initiator to the 512 byte 684 * units unconditionally used by the Linux block layer. 685 */ 686 if (dev->dev_attrib.block_size == 4096) 687 block_lba = (cmd->t_task_lba << 3); 688 else if (dev->dev_attrib.block_size == 2048) 689 block_lba = (cmd->t_task_lba << 2); 690 else if (dev->dev_attrib.block_size == 1024) 691 block_lba = (cmd->t_task_lba << 1); 692 else if (dev->dev_attrib.block_size == 512) 693 block_lba = cmd->t_task_lba; 694 else { 695 pr_err("Unsupported SCSI -> BLOCK LBA conversion:" 696 " %u\n", dev->dev_attrib.block_size); 697 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 698 } 699 700 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 701 if (!ibr) 702 goto fail; 703 cmd->priv = ibr; 704 705 if (!sgl_nents) { 706 atomic_set(&ibr->pending, 1); 707 iblock_complete_cmd(cmd); 708 return 0; 709 } 710 711 bio = iblock_get_bio(cmd, block_lba, sgl_nents); 712 if (!bio) 713 goto fail_free_ibr; 714 715 bio_start = bio; 716 bio_list_init(&list); 717 bio_list_add(&list, bio); 718 719 atomic_set(&ibr->pending, 2); 720 bio_cnt = 1; 721 722 for_each_sg(sgl, sg, sgl_nents, i) { 723 /* 724 * XXX: if the length the device accepts is shorter than the 725 * length of the S/G list entry this will cause and 726 * endless loop. Better hope no driver uses huge pages. 727 */ 728 while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) 729 != sg->length) { 730 if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) { 731 iblock_submit_bios(&list, rw); 732 bio_cnt = 0; 733 } 734 735 bio = iblock_get_bio(cmd, block_lba, sg_num); 736 if (!bio) 737 goto fail_put_bios; 738 739 atomic_inc(&ibr->pending); 740 bio_list_add(&list, bio); 741 bio_cnt++; 742 } 743 744 /* Always in 512 byte units for Linux/Block */ 745 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 746 sg_num--; 747 } 748 749 if (cmd->prot_type && dev->dev_attrib.pi_prot_type) { 750 int rc = iblock_alloc_bip(cmd, bio_start); 751 if (rc) 752 goto fail_put_bios; 753 } 754 755 iblock_submit_bios(&list, rw); 756 iblock_complete_cmd(cmd); 757 return 0; 758 759 fail_put_bios: 760 while ((bio = bio_list_pop(&list))) 761 bio_put(bio); 762 fail_free_ibr: 763 kfree(ibr); 764 fail: 765 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 766 } 767 768 static sector_t iblock_get_blocks(struct se_device *dev) 769 { 770 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 771 struct block_device *bd = ib_dev->ibd_bd; 772 struct request_queue *q = bdev_get_queue(bd); 773 774 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 775 } 776 777 static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) 778 { 779 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 780 struct block_device *bd = ib_dev->ibd_bd; 781 int ret; 782 783 ret = bdev_alignment_offset(bd); 784 if (ret == -1) 785 return 0; 786 787 /* convert offset-bytes to offset-lbas */ 788 return ret / bdev_logical_block_size(bd); 789 } 790 791 static unsigned int iblock_get_lbppbe(struct se_device *dev) 792 { 793 struct iblock_dev *ib_dev = IBLOCK_DEV(dev); 794 struct block_device *bd = ib_dev->ibd_bd; 795 int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd); 796 797 return ilog2(logs_per_phys); 798 } 799 800 static unsigned int iblock_get_io_min(struct se_device *dev) 801 { 802 
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);