1 /******************************************************************************* 2 * Filename: target_core_iblock.c 3 * 4 * This file contains the Storage Engine <-> Linux BlockIO transport 5 * specific functions. 6 * 7 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc. 8 * Copyright (c) 2005, 2006, 2007 SBE, Inc. 9 * Copyright (c) 2007-2010 Rising Tide Systems 10 * Copyright (c) 2008-2010 Linux-iSCSI.org 11 * 12 * Nicholas A. Bellinger <nab@kernel.org> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2 of the License, or 17 * (at your option) any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 27 * 28 ******************************************************************************/ 29 30 #include <linux/version.h> 31 #include <linux/string.h> 32 #include <linux/parser.h> 33 #include <linux/timer.h> 34 #include <linux/fs.h> 35 #include <linux/blkdev.h> 36 #include <linux/slab.h> 37 #include <linux/spinlock.h> 38 #include <linux/bio.h> 39 #include <linux/genhd.h> 40 #include <linux/file.h> 41 #include <scsi/scsi.h> 42 #include <scsi/scsi_host.h> 43 44 #include <target/target_core_base.h> 45 #include <target/target_core_device.h> 46 #include <target/target_core_transport.h> 47 48 #include "target_core_iblock.h" 49 50 #if 0 51 #define DEBUG_IBLOCK(x...) printk(x) 52 #else 53 #define DEBUG_IBLOCK(x...) 54 #endif 55 56 static struct se_subsystem_api iblock_template; 57 58 static void iblock_bio_done(struct bio *, int); 59 60 /* iblock_attach_hba(): (Part of se_subsystem_api_t template) 61 * 62 * 63 */ 64 static int iblock_attach_hba(struct se_hba *hba, u32 host_id) 65 { 66 struct iblock_hba *ib_host; 67 68 ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL); 69 if (!(ib_host)) { 70 printk(KERN_ERR "Unable to allocate memory for" 71 " struct iblock_hba\n"); 72 return -ENOMEM; 73 } 74 75 ib_host->iblock_host_id = host_id; 76 77 hba->hba_ptr = ib_host; 78 79 printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on" 80 " Generic Target Core Stack %s\n", hba->hba_id, 81 IBLOCK_VERSION, TARGET_CORE_MOD_VERSION); 82 83 printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n", 84 hba->hba_id, ib_host->iblock_host_id); 85 86 return 0; 87 } 88 89 static void iblock_detach_hba(struct se_hba *hba) 90 { 91 struct iblock_hba *ib_host = hba->hba_ptr; 92 93 printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic" 94 " Target Core\n", hba->hba_id, ib_host->iblock_host_id); 95 96 kfree(ib_host); 97 hba->hba_ptr = NULL; 98 } 99 100 static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name) 101 { 102 struct iblock_dev *ib_dev = NULL; 103 struct iblock_hba *ib_host = hba->hba_ptr; 104 105 ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL); 106 if (!(ib_dev)) { 107 printk(KERN_ERR "Unable to allocate struct iblock_dev\n"); 108 return NULL; 109 } 110 ib_dev->ibd_host = ib_host; 111 112 printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name); 113 114 return ib_dev; 115 } 116 117 static struct se_device *iblock_create_virtdevice( 118 struct se_hba *hba, 119 struct se_subsystem_dev *se_dev, 120 void *p) 121 { 122 struct iblock_dev *ib_dev = p; 123 struct se_device *dev; 124 struct se_dev_limits dev_limits; 125 struct block_device *bd = NULL; 126 struct request_queue *q; 127 struct queue_limits *limits; 128 u32 dev_flags = 0; 129 int ret = -EINVAL; 130 131 if (!(ib_dev)) { 132 printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n"); 133 return ERR_PTR(ret); 134 } 135 memset(&dev_limits, 0, sizeof(struct se_dev_limits)); 136 /* 137 * These settings need to be made tunable.. 138 */ 139 ib_dev->ibd_bio_set = bioset_create(32, 64); 140 if (!(ib_dev->ibd_bio_set)) { 141 printk(KERN_ERR "IBLOCK: Unable to create bioset()\n"); 142 return ERR_PTR(-ENOMEM); 143 } 144 printk(KERN_INFO "IBLOCK: Created bio_set()\n"); 145 /* 146 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path 147 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run. 148 */ 149 printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n", 150 ib_dev->ibd_udev_path); 151 152 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 153 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 154 if (IS_ERR(bd)) { 155 ret = PTR_ERR(bd); 156 goto failed; 157 } 158 /* 159 * Setup the local scope queue_limits from struct request_queue->limits 160 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits. 161 */ 162 q = bdev_get_queue(bd); 163 limits = &dev_limits.limits; 164 limits->logical_block_size = bdev_logical_block_size(bd); 165 limits->max_hw_sectors = queue_max_hw_sectors(q); 166 limits->max_sectors = queue_max_sectors(q); 167 dev_limits.hw_queue_depth = q->nr_requests; 168 dev_limits.queue_depth = q->nr_requests; 169 170 ib_dev->ibd_bd = bd; 171 172 dev = transport_add_device_to_core_hba(hba, 173 &iblock_template, se_dev, dev_flags, ib_dev, 174 &dev_limits, "IBLOCK", IBLOCK_VERSION); 175 if (!(dev)) 176 goto failed; 177 178 /* 179 * Check if the underlying struct block_device request_queue supports 180 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 181 * in ATA and we need to set TPE=1 182 */ 183 if (blk_queue_discard(q)) { 184 dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = 185 q->limits.max_discard_sectors; 186 /* 187 * Currently hardcoded to 1 in Linux/SCSI code.. 188 */ 189 dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1; 190 dev->se_sub_dev->se_dev_attrib.unmap_granularity = 191 q->limits.discard_granularity; 192 dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = 193 q->limits.discard_alignment; 194 195 printk(KERN_INFO "IBLOCK: BLOCK Discard support available," 196 " disabled by default\n"); 197 } 198 199 if (blk_queue_nonrot(q)) 200 dev->se_sub_dev->se_dev_attrib.is_nonrot = 1; 201 202 return dev; 203 204 failed: 205 if (ib_dev->ibd_bio_set) { 206 bioset_free(ib_dev->ibd_bio_set); 207 ib_dev->ibd_bio_set = NULL; 208 } 209 ib_dev->ibd_bd = NULL; 210 return ERR_PTR(ret); 211 } 212 213 static void iblock_free_device(void *p) 214 { 215 struct iblock_dev *ib_dev = p; 216 217 if (ib_dev->ibd_bd != NULL) 218 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 219 if (ib_dev->ibd_bio_set != NULL) 220 bioset_free(ib_dev->ibd_bio_set); 221 kfree(ib_dev); 222 } 223 224 static inline struct iblock_req *IBLOCK_REQ(struct se_task *task) 225 { 226 return container_of(task, struct iblock_req, ib_task); 227 } 228 229 static struct se_task * 230 iblock_alloc_task(struct se_cmd *cmd) 231 { 232 struct iblock_req *ib_req; 233 234 ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 235 if (!(ib_req)) { 236 printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n"); 237 return NULL; 238 } 239 240 ib_req->ib_dev = cmd->se_dev->dev_ptr; 241 atomic_set(&ib_req->ib_bio_cnt, 0); 242 return &ib_req->ib_task; 243 } 244 245 static unsigned long long iblock_emulate_read_cap_with_block_size( 246 struct se_device *dev, 247 struct block_device *bd, 248 struct request_queue *q) 249 { 250 unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode), 251 bdev_logical_block_size(bd)) - 1); 252 u32 block_size = bdev_logical_block_size(bd); 253 254 if (block_size == dev->se_sub_dev->se_dev_attrib.block_size) 255 return blocks_long; 256 257 switch (block_size) { 258 case 4096: 259 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 260 case 2048: 261 blocks_long <<= 1; 262 break; 263 case 1024: 264 blocks_long <<= 2; 265 break; 266 case 512: 267 blocks_long <<= 3; 268 default: 269 break; 270 } 271 break; 272 case 2048: 273 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 274 case 4096: 275 blocks_long >>= 1; 276 break; 277 case 1024: 278 blocks_long <<= 1; 279 break; 280 case 512: 281 blocks_long <<= 2; 282 break; 283 default: 284 break; 285 } 286 break; 287 case 1024: 288 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 289 case 4096: 290 blocks_long >>= 2; 291 break; 292 case 2048: 293 blocks_long >>= 1; 294 break; 295 case 512: 296 blocks_long <<= 1; 297 break; 298 default: 299 break; 300 } 301 break; 302 case 512: 303 switch (dev->se_sub_dev->se_dev_attrib.block_size) { 304 case 4096: 305 blocks_long >>= 3; 306 break; 307 case 2048: 308 blocks_long >>= 2; 309 break; 310 case 1024: 311 blocks_long >>= 1; 312 break; 313 default: 314 break; 315 } 316 break; 317 default: 318 break; 319 } 320 321 return blocks_long; 322 } 323 324 /* 325 * Emulate SYCHRONIZE_CACHE_* 326 */ 327 static void iblock_emulate_sync_cache(struct se_task *task) 328 { 329 struct se_cmd *cmd = task->task_se_cmd; 330 struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr; 331 int immed = (cmd->t_task_cdb[1] & 0x2); 332 sector_t error_sector; 333 int ret; 334 335 /* 336 * If the Immediate bit is set, queue up the GOOD response 337 * for this SYNCHRONIZE_CACHE op 338 */ 339 if (immed) 340 transport_complete_sync_cache(cmd, 1); 341 342 /* 343 * blkdev_issue_flush() does not support a specifying a range, so 344 * we have to flush the entire cache. 345 */ 346 ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); 347 if (ret != 0) { 348 printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d " 349 " error_sector: %llu\n", ret, 350 (unsigned long long)error_sector); 351 } 352 353 if (!immed) 354 transport_complete_sync_cache(cmd, ret == 0); 355 } 356 357 /* 358 * Tell TCM Core that we are capable of WriteCache emulation for 359 * an underlying struct se_device. 360 */ 361 static int iblock_emulated_write_cache(struct se_device *dev) 362 { 363 return 1; 364 } 365 366 static int iblock_emulated_dpo(struct se_device *dev) 367 { 368 return 0; 369 } 370 371 /* 372 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs 373 * for TYPE_DISK. 374 */ 375 static int iblock_emulated_fua_write(struct se_device *dev) 376 { 377 return 1; 378 } 379 380 static int iblock_emulated_fua_read(struct se_device *dev) 381 { 382 return 0; 383 } 384 385 static int iblock_do_task(struct se_task *task) 386 { 387 struct se_device *dev = task->task_se_cmd->se_dev; 388 struct iblock_req *req = IBLOCK_REQ(task); 389 struct bio *bio = req->ib_bio, *nbio = NULL; 390 struct blk_plug plug; 391 int rw; 392 393 if (task->task_data_direction == DMA_TO_DEVICE) { 394 /* 395 * Force data to disk if we pretend to not have a volatile 396 * write cache, or the initiator set the Force Unit Access bit. 397 */ 398 if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || 399 (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 400 task->task_se_cmd->t_tasks_fua)) 401 rw = WRITE_FUA; 402 else 403 rw = WRITE; 404 } else { 405 rw = READ; 406 } 407 408 blk_start_plug(&plug); 409 while (bio) { 410 nbio = bio->bi_next; 411 bio->bi_next = NULL; 412 DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p" 413 " bio->bi_sector: %llu\n", task, bio, bio->bi_sector); 414 415 submit_bio(rw, bio); 416 bio = nbio; 417 } 418 blk_finish_plug(&plug); 419 420 return PYX_TRANSPORT_SENT_TO_TRANSPORT; 421 } 422 423 static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) 424 { 425 struct iblock_dev *ibd = dev->dev_ptr; 426 struct block_device *bd = ibd->ibd_bd; 427 int barrier = 0; 428 429 return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier); 430 } 431 432 static void iblock_free_task(struct se_task *task) 433 { 434 struct iblock_req *req = IBLOCK_REQ(task); 435 struct bio *bio, *hbio = req->ib_bio; 436 /* 437 * We only release the bio(s) here if iblock_bio_done() has not called 438 * bio_put() -> iblock_bio_destructor(). 439 */ 440 while (hbio != NULL) { 441 bio = hbio; 442 hbio = hbio->bi_next; 443 bio->bi_next = NULL; 444 bio_put(bio); 445 } 446 447 kfree(req); 448 } 449 450 enum { 451 Opt_udev_path, Opt_force, Opt_err 452 }; 453 454 static match_table_t tokens = { 455 {Opt_udev_path, "udev_path=%s"}, 456 {Opt_force, "force=%d"}, 457 {Opt_err, NULL} 458 }; 459 460 static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba, 461 struct se_subsystem_dev *se_dev, 462 const char *page, ssize_t count) 463 { 464 struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr; 465 char *orig, *ptr, *arg_p, *opts; 466 substring_t args[MAX_OPT_ARGS]; 467 int ret = 0, token; 468 469 opts = kstrdup(page, GFP_KERNEL); 470 if (!opts) 471 return -ENOMEM; 472 473 orig = opts; 474 475 while ((ptr = strsep(&opts, ",")) != NULL) { 476 if (!*ptr) 477 continue; 478 479 token = match_token(ptr, tokens, args); 480 switch (token) { 481 case Opt_udev_path: 482 if (ib_dev->ibd_bd) { 483 printk(KERN_ERR "Unable to set udev_path= while" 484 " ib_dev->ibd_bd exists\n"); 485 ret = -EEXIST; 486 goto out; 487 } 488 arg_p = match_strdup(&args[0]); 489 if (!arg_p) { 490 ret = -ENOMEM; 491 break; 492 } 493 snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN, 494 "%s", arg_p); 495 kfree(arg_p); 496 printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n", 497 ib_dev->ibd_udev_path); 498 ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH; 499 break; 500 case Opt_force: 501 break; 502 default: 503 break; 504 } 505 } 506 507 out: 508 kfree(orig); 509 return (!ret) ? count : ret; 510 } 511 512 static ssize_t iblock_check_configfs_dev_params( 513 struct se_hba *hba, 514 struct se_subsystem_dev *se_dev) 515 { 516 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 517 518 if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) { 519 printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n"); 520 return -EINVAL; 521 } 522 523 return 0; 524 } 525 526 static ssize_t iblock_show_configfs_dev_params( 527 struct se_hba *hba, 528 struct se_subsystem_dev *se_dev, 529 char *b) 530 { 531 struct iblock_dev *ibd = se_dev->se_dev_su_ptr; 532 struct block_device *bd = ibd->ibd_bd; 533 char buf[BDEVNAME_SIZE]; 534 ssize_t bl = 0; 535 536 if (bd) 537 bl += sprintf(b + bl, "iBlock device: %s", 538 bdevname(bd, buf)); 539 if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) { 540 bl += sprintf(b + bl, " UDEV PATH: %s\n", 541 ibd->ibd_udev_path); 542 } else 543 bl += sprintf(b + bl, "\n"); 544 545 bl += sprintf(b + bl, " "); 546 if (bd) { 547 bl += sprintf(b + bl, "Major: %d Minor: %d %s\n", 548 MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ? 549 "" : (bd->bd_holder == (struct iblock_dev *)ibd) ? 550 "CLAIMED: IBLOCK" : "CLAIMED: OS"); 551 } else { 552 bl += sprintf(b + bl, "Major: 0 Minor: 0\n"); 553 } 554 555 return bl; 556 } 557 558 static void iblock_bio_destructor(struct bio *bio) 559 { 560 struct se_task *task = bio->bi_private; 561 struct iblock_dev *ib_dev = task->se_dev->dev_ptr; 562 563 bio_free(bio, ib_dev->ibd_bio_set); 564 } 565 566 static struct bio *iblock_get_bio( 567 struct se_task *task, 568 struct iblock_req *ib_req, 569 struct iblock_dev *ib_dev, 570 int *ret, 571 sector_t lba, 572 u32 sg_num) 573 { 574 struct bio *bio; 575 576 bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set); 577 if (!(bio)) { 578 printk(KERN_ERR "Unable to allocate memory for bio\n"); 579 *ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; 580 return NULL; 581 } 582 583 DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:" 584 " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set); 585 DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size); 586 587 bio->bi_bdev = ib_dev->ibd_bd; 588 bio->bi_private = task; 589 bio->bi_destructor = iblock_bio_destructor; 590 bio->bi_end_io = &iblock_bio_done; 591 bio->bi_sector = lba; 592 atomic_inc(&ib_req->ib_bio_cnt); 593 594 DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector); 595 DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n", 596 atomic_read(&ib_req->ib_bio_cnt)); 597 return bio; 598 } 599 600 static int iblock_map_task_SG(struct se_task *task) 601 { 602 struct se_cmd *cmd = task->task_se_cmd; 603 struct se_device *dev = cmd->se_dev; 604 struct iblock_dev *ib_dev = task->se_dev->dev_ptr; 605 struct iblock_req *ib_req = IBLOCK_REQ(task); 606 struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; 607 struct scatterlist *sg; 608 int ret = 0; 609 u32 i, sg_num = task->task_sg_num; 610 sector_t block_lba; 611 /* 612 * Do starting conversion up from non 512-byte blocksize with 613 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. 614 */ 615 if (dev->se_sub_dev->se_dev_attrib.block_size == 4096) 616 block_lba = (task->task_lba << 3); 617 else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048) 618 block_lba = (task->task_lba << 2); 619 else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024) 620 block_lba = (task->task_lba << 1); 621 else if (dev->se_sub_dev->se_dev_attrib.block_size == 512) 622 block_lba = task->task_lba; 623 else { 624 printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:" 625 " %u\n", dev->se_sub_dev->se_dev_attrib.block_size); 626 return PYX_TRANSPORT_LU_COMM_FAILURE; 627 } 628 629 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); 630 if (!(bio)) 631 return ret; 632 633 ib_req->ib_bio = bio; 634 hbio = tbio = bio; 635 /* 636 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist 637 * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory. 638 */ 639 for_each_sg(task->task_sg, sg, task->task_sg_num, i) { 640 DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:" 641 " %p len: %u offset: %u\n", task, bio, sg_page(sg), 642 sg->length, sg->offset); 643 again: 644 ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); 645 if (ret != sg->length) { 646 647 DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n", 648 bio->bi_sector); 649 DEBUG_IBLOCK("** task->task_size: %u\n", 650 task->task_size); 651 DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n", 652 bio->bi_max_vecs); 653 DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n", 654 bio->bi_vcnt); 655 656 bio = iblock_get_bio(task, ib_req, ib_dev, &ret, 657 block_lba, sg_num); 658 if (!(bio)) 659 goto fail; 660 661 tbio = tbio->bi_next = bio; 662 DEBUG_IBLOCK("-----------------> Added +1 bio: %p to" 663 " list, Going to again\n", bio); 664 goto again; 665 } 666 /* Always in 512 byte units for Linux/Block */ 667 block_lba += sg->length >> IBLOCK_LBA_SHIFT; 668 sg_num--; 669 DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented" 670 " sg_num to %u\n", task, sg_num); 671 DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba" 672 " to %llu\n", task, block_lba); 673 DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:" 674 " %u\n", task, bio->bi_vcnt); 675 } 676 677 return 0; 678 fail: 679 while (hbio) { 680 bio = hbio; 681 hbio = hbio->bi_next; 682 bio->bi_next = NULL; 683 bio_put(bio); 684 } 685 return ret; 686 } 687 688 static unsigned char *iblock_get_cdb(struct se_task *task) 689 { 690 return IBLOCK_REQ(task)->ib_scsi_cdb; 691 } 692 693 static u32 iblock_get_device_rev(struct se_device *dev) 694 { 695 return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ 696 } 697 698 static u32 iblock_get_device_type(struct se_device *dev) 699 { 700 return TYPE_DISK; 701 } 702 703 static sector_t iblock_get_blocks(struct se_device *dev) 704 { 705 struct iblock_dev *ibd = dev->dev_ptr; 706 struct block_device *bd = ibd->ibd_bd; 707 struct request_queue *q = bdev_get_queue(bd); 708 709 return iblock_emulate_read_cap_with_block_size(dev, bd, q); 710 } 711 712 static void iblock_bio_done(struct bio *bio, int err) 713 { 714 struct se_task *task = bio->bi_private; 715 struct iblock_req *ibr = IBLOCK_REQ(task); 716 /* 717 * Set -EIO if !BIO_UPTODATE and the passed is still err=0 718 */ 719 if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err)) 720 err = -EIO; 721 722 if (err != 0) { 723 printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p," 724 " err: %d\n", bio, err); 725 /* 726 * Bump the ib_bio_err_cnt and release bio. 727 */ 728 atomic_inc(&ibr->ib_bio_err_cnt); 729 smp_mb__after_atomic_inc(); 730 bio_put(bio); 731 /* 732 * Wait to complete the task until the last bio as completed. 733 */ 734 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 735 return; 736 737 ibr->ib_bio = NULL; 738 transport_complete_task(task, 0); 739 return; 740 } 741 DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", 742 task, bio, task->task_lba, bio->bi_sector, err); 743 /* 744 * bio_put() will call iblock_bio_destructor() to release the bio back 745 * to ibr->ib_bio_set. 746 */ 747 bio_put(bio); 748 /* 749 * Wait to complete the task until the last bio as completed. 750 */ 751 if (!(atomic_dec_and_test(&ibr->ib_bio_cnt))) 752 return; 753 /* 754 * Return GOOD status for task if zero ib_bio_err_cnt exists. 755 */ 756 ibr->ib_bio = NULL; 757 transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); 758 } 759 760 static struct se_subsystem_api iblock_template = { 761 .name = "iblock", 762 .owner = THIS_MODULE, 763 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV, 764 .map_task_SG = iblock_map_task_SG, 765 .attach_hba = iblock_attach_hba, 766 .detach_hba = iblock_detach_hba, 767 .allocate_virtdevice = iblock_allocate_virtdevice, 768 .create_virtdevice = iblock_create_virtdevice, 769 .free_device = iblock_free_device, 770 .dpo_emulated = iblock_emulated_dpo, 771 .fua_write_emulated = iblock_emulated_fua_write, 772 .fua_read_emulated = iblock_emulated_fua_read, 773 .write_cache_emulated = iblock_emulated_write_cache, 774 .alloc_task = iblock_alloc_task, 775 .do_task = iblock_do_task, 776 .do_discard = iblock_do_discard, 777 .do_sync_cache = iblock_emulate_sync_cache, 778 .free_task = iblock_free_task, 779 .check_configfs_dev_params = iblock_check_configfs_dev_params, 780 .set_configfs_dev_params = iblock_set_configfs_dev_params, 781 .show_configfs_dev_params = iblock_show_configfs_dev_params, 782 .get_cdb = iblock_get_cdb, 783 .get_device_rev = iblock_get_device_rev, 784 .get_device_type = iblock_get_device_type, 785 .get_blocks = iblock_get_blocks, 786 }; 787 788 static int __init iblock_module_init(void) 789 { 790 return transport_subsystem_register(&iblock_template); 791 } 792 793 static void iblock_module_exit(void) 794 { 795 transport_subsystem_release(&iblock_template); 796 } 797 798 MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin"); 799 MODULE_AUTHOR("nab@Linux-iSCSI.org"); 800 MODULE_LICENSE("GPL"); 801 802 module_init(iblock_module_init); 803 module_exit(iblock_module_exit); 804