// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	ib_dev->ibd_plug = kcalloc(nr_cpu_ids, sizeof(*ib_dev->ibd_plug),
				   GFP_KERNEL);
	if (!ib_dev->ibd_plug)
		goto free_dev;

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;

free_dev:
	kfree(ib_dev);
	return NULL;
}

static bool iblock_configure_unmap(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	return target_configure_unmap_from_queue(&dev->dev_attrib,
						 ib_dev->ibd_bd);
}

static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	unsigned int max_write_zeroes_sectors;
	int ret;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	if (ret) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
		 ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;
	else
		dev->dev_flags |= DF_READ_ONLY;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

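	/* Derive the backend's hardware limits from the underlying request queue. */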
	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd);
	if (max_write_zeroes_sectors)
		dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors;
	else
		dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (bdev_nonrot(bd))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = &ib_dev->ibd_bio_set;

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->profile->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->profile->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->profile->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->profile->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 &bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_exit(&ib_dev->ibd_bio_set);
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev->ibd_plug);
	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

static void iblock_destroy_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	bioset_exit(&ib_dev->ibd_bio_set);
}

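/*
 * Start this CPU's block plug so that bios submitted for the se_device are
 * batched; returns NULL if the plug is already in use on this CPU.
 */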
static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(se_dev);
	struct iblock_dev_plug *ib_dev_plug;

	/*
	 * Each se_device has a per cpu work this can be run from. We
	 * shouldn't have multiple threads on the same cpu calling this
	 * at the same time.
	 */
	ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()];
	if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags))
		return NULL;

	blk_start_plug(&ib_dev_plug->blk_plug);
	return &ib_dev_plug->se_plug;
}

static void iblock_unplug_device(struct se_dev_plug *se_plug)
{
	struct iblock_dev_plug *ib_dev_plug = container_of(se_plug,
					struct iblock_dev_plug, se_plug);

	blk_finish_plug(&ib_dev_plug->blk_plug);
	clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags);
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd);
	unsigned long long blocks_long =
		div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1;

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
			break;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!refcount_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	if (bio->bi_status) {
		pr_err("bio error: %p, err: %d\n", bio, bio->bi_status);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

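/* Allocate a bio from the backend's bio_set and point it at the given LBA. */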
static struct bio *iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num,
				  blk_opf_t opf)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,
			       GFP_NOIO, &ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

static void iblock_submit_bios(struct bio_list *list)
{
	struct blk_plug plug;
	struct bio *bio;
	/*
	 * The block layer handles nested plugs, so just plug/unplug to handle
	 * fabric drivers that didn't support batching and multi bio cmds.
	 */
	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio)
{
	struct se_cmd *cmd = bio->bi_private;

	if (bio->bi_status)
		pr_err("IBLOCK: cache flush failed: %d\n", bio->bi_status);

	if (cmd) {
		if (bio->bi_status)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(ib_dev->ibd_bd, 0, REQ_OP_WRITE | REQ_PREFLUSH,
			GFP_KERNEL);
	bio->bi_end_io = iblock_end_io_flush;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(bio);
	return 0;
}

static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct se_device *dev = cmd->se_dev;
	int ret;

	ret = blkdev_issue_discard(bdev,
				   target_to_linux_sector(dev, lba),
				   target_to_linux_sector(dev, nolb),
				   GFP_KERNEL);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

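/*
 * WRITE SAME fast path: if the single-block payload is all zeros, convert the
 * request into one blkdev_issue_zeroout() call instead of writing per block.
 */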
static sense_reason_t
iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct scatterlist *sg = &cmd->t_data_sg[0];
	unsigned char *buf, *not_zero;
	int ret;

	buf = kmap(sg_page(sg)) + sg->offset;
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	/*
	 * Fall back to the iblock_execute_write_same() slow-path if the
	 * incoming WRITE_SAME payload does not contain zeros.
	 */
	not_zero = memchr_inv(buf, 0x00, cmd->data_length);
	kunmap(sg_page(sg));

	if (not_zero)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = blkdev_issue_zeroout(bdev,
				   target_to_linux_sector(dev, cmd->t_task_lba),
				   target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd)),
				   GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
	if (ret)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	sector_t sectors = target_to_linux_sector(dev,
					sbc_get_write_same_sectors(cmd));

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (!cmd->t_data_nents)
		return TCM_INVALID_CDB_FIELD;

	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	if (bdev_write_zeroes_sectors(bdev)) {
		if (!iblock_execute_zero_out(bdev, cmd))
			return 0;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sectors -= sg->length >> SECTOR_SHIFT;
	}

	iblock_submit_bios(&list);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

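/*
 * Parse the configfs control parameters: udev_path= (required before the
 * device can be configured), readonly=<0|1>, and force= (parsed but ignored).
 */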
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %pg", bd);
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, " UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, " ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev),
			"CLAIMED: IBLOCK");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
		 struct sg_mapping_iter *miter)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	int rc;
	size_t resid, len;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, bio_max_segs(cmd->t_prot_nents));
	if (IS_ERR(bip)) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return PTR_ERR(bip);
	}

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
	/* virtual start sector must be in integrity interval units */
	bip_set_seed(bip, bio->bi_iter.bi_sector >>
				  (bi->interval_exp - SECTOR_SHIFT));

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	resid = bip->bip_iter.bi_size;
	while (resid > 0 && sg_miter_next(miter)) {

		len = min_t(size_t, miter->length, resid);
		rc = bio_integrity_add_page(bio, miter->page, len,
					    offset_in_page(miter->addr));
		if (rc != len) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			sg_miter_stop(miter);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
			  miter->page, len, offset_in_page(miter->addr));

		resid -= len;
		if (len < miter->length)
			miter->consumed -= miter->length - len;
	}
	sg_miter_stop(miter);

	return 0;
}

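/*
 * READ/WRITE data path: map the command's scatter-gather list into one or
 * more bios, attach protection information when enabled, and submit them.
 */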
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
	struct iblock_req *ibr;
	struct bio *bio;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	blk_opf_t opf;
	unsigned bio_cnt;
	int i, rc;
	struct sg_mapping_iter prot_miter;
	unsigned int miter_dir;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		/*
		 * Force writethrough using REQ_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		opf = REQ_OP_WRITE;
		miter_dir = SG_MITER_TO_SG;
		if (bdev_fua(ib_dev->ibd_bd)) {
			if (cmd->se_cmd_flags & SCF_FUA)
				opf |= REQ_FUA;
			else if (!bdev_write_cache(ib_dev->ibd_bd))
				opf |= REQ_FUA;
		}
	} else {
		opf = REQ_OP_READ;
		miter_dir = SG_MITER_FROM_SG;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		refcount_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	refcount_set(&ibr->pending, 2);
	bio_cnt = 1;

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
		sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
			       miter_dir);

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
				rc = iblock_alloc_bip(cmd, bio, &prot_miter);
				if (rc)
					goto fail_put_bios;
			}

			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num, opf);
			if (!bio)
				goto fail_put_bios;

			refcount_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> SECTOR_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		rc = iblock_alloc_bip(cmd, bio, &prot_miter);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	unsigned int logs_per_phys =
		bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

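/* Block-command callbacks handed to sbc_parse_cdb() below. */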
static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

static bool iblock_get_write_cache(struct se_device *dev)
{
	return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.destroy_device		= iblock_destroy_device,
	.free_device		= iblock_free_device,
	.configure_unmap	= iblock_configure_unmap,
	.plug_device		= iblock_plug_device,
	.unplug_device		= iblock_unplug_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);