/*******************************************************************************
 * Filename:  target_core_pscsi.c
 *
 * This file contains the generic target mode <-> Linux SCSI subsystem plugin.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_pscsi.h"

#define ISPRINT(a)  ((a >= ' ') && (a <= '~'))

static struct se_subsystem_api pscsi_template;

static void pscsi_req_done(struct request *, int);

/*	pscsi_get_sh():
 *
 *
 */
static struct Scsi_Host *pscsi_get_sh(u32 host_no)
{
	struct Scsi_Host *sh = NULL;

	sh = scsi_host_lookup(host_no);
	if (IS_ERR(sh)) {
		printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
				" %u\n", host_no);
		return NULL;
	}

	return sh;
}

/*	pscsi_attach_hba():
 *
 *	pscsi_get_sh() uses scsi_host_lookup() to locate struct Scsi_Host
 *	from the passed SCSI Host ID.
 */
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
	int hba_depth;
	struct pscsi_hba_virt *phv;

	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
	if (!(phv)) {
		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
		return -1;
	}
	phv->phv_host_id = host_id;
	phv->phv_mode = PHV_VIRUTAL_HOST_ID;
	hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
	atomic_set(&hba->left_queue_depth, hba_depth);
	atomic_set(&hba->max_queue_depth, hba_depth);

	hba->hba_ptr = (void *)phv;

	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
		" Target Core with TCQ Depth: %d\n", hba->hba_id,
		atomic_read(&hba->max_queue_depth));

	return 0;
}

static void pscsi_detach_hba(struct se_hba *hba)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct Scsi_Host *scsi_host = phv->phv_lld_host;

	if (scsi_host) {
		scsi_host_put(scsi_host);

		printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
			" Generic Target Core\n", hba->hba_id,
			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
			"Unknown");
	} else
		printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
			" from Generic Target Core\n", hba->hba_id);

	kfree(phv);
	hba->hba_ptr = NULL;
}

static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
	/*
	 * Release the struct Scsi_Host
	 */
	if (!(mode_flag)) {
		if (!(sh))
			return 0;

		phv->phv_lld_host = NULL;
		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
		atomic_set(&hba->left_queue_depth, hba_depth);
		atomic_set(&hba->max_queue_depth, hba_depth);

		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
			" %s\n", hba->hba_id, (sh->hostt->name) ?
			(sh->hostt->name) : "Unknown");

		scsi_host_put(sh);
		return 0;
	}
	/*
	 * Otherwise, locate struct Scsi_Host from the original passed
	 * pSCSI Host ID and enable for phba mode
	 */
	sh = pscsi_get_sh(phv->phv_host_id);
	if (!(sh)) {
		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
			" phv_host_id: %d\n", phv->phv_host_id);
		return -1;
	}
	/*
	 * Usually the SCSI LLD will use the hostt->can_queue value to define
	 * its HBA TCQ depth.  Some other drivers (like 2.6 megaraid) don't set
	 * this at all and set sh->can_queue at runtime.
	 */
	hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
		sh->hostt->can_queue : sh->can_queue;

	atomic_set(&hba->left_queue_depth, hba_depth);
	atomic_set(&hba->max_queue_depth, hba_depth);

	phv->phv_lld_host = sh;
	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;

	printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
		hba->hba_id, (sh->hostt->name) ?
		(sh->hostt->name) : "Unknown");

	return 1;
}

static void pscsi_tape_read_blocksize(struct se_device *dev,
		struct scsi_device *sdev)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(12, GFP_KERNEL);
	if (!buf)
		return;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = MODE_SENSE;
	cdb[4] = 0x0c; /* 12 bytes */

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
			HZ, 1, NULL);
	if (ret)
		goto out_free;

	/*
	 * If MODE_SENSE still returns zero, set the default value to 1024.
	 */
	sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
	if (!sdev->sector_size)
		sdev->sector_size = 1024;
out_free:
	kfree(buf);
}

static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	unsigned char *buf;

	if (sdev->inquiry_len < INQUIRY_LEN)
		return;

	buf = sdev->inquiry;
	if (!buf)
		return;
	/*
	 * Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
	 */
	memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
	memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
	memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
}

static int
pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf;
	int ret;

	buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
	if (!buf)
		return -1;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x80; /* Unit Serial Number */
	cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
	cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
			INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
	if (ret)
		goto out_free;

	snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);

	wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;

	kfree(buf);
	return 0;

out_free:
	kfree(buf);
	return -1;
}

static void
pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
		struct t10_wwn *wwn)
{
	unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
	int ident_len, page_len, off = 4, ret;
	struct t10_vpd *vpd;

	buf = kzalloc(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, GFP_KERNEL);
	if (!buf)
		return;

	memset(cdb, 0, MAX_COMMAND_SIZE);
	cdb[0] = INQUIRY;
	cdb[1] = 0x01; /* Query VPD */
	cdb[2] = 0x83; /* Device Identifier */
	cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
	cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);

	ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
			INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
			NULL, HZ, 1, NULL);
	if (ret)
		goto out;

	page_len = (buf[2] << 8) | buf[3];
	while (page_len > 0) {
		/* Grab a pointer to the Identification descriptor */
		page_83 = &buf[off];
		ident_len = page_83[3];
		if (!ident_len) {
			printk(KERN_ERR "page_83[3]: identifier"
					" length zero!\n");
			break;
		}
		printk(KERN_INFO "T10 VPD Identifier Length: %d\n", ident_len);

		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
		if (!vpd) {
			printk(KERN_ERR "Unable to allocate memory for"
					" struct t10_vpd\n");
			goto out;
		}
		INIT_LIST_HEAD(&vpd->vpd_list);

		transport_set_vpd_proto_id(vpd, page_83);
		transport_set_vpd_assoc(vpd, page_83);

		if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}
		if (transport_set_vpd_ident(vpd, page_83) < 0) {
			off += (ident_len + 4);
			page_len -= (ident_len + 4);
			kfree(vpd);
			continue;
		}

		list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
		off += (ident_len + 4);
		page_len -= (ident_len + 4);
	}

out:
	kfree(buf);
}

/*	pscsi_add_device_to_list():
 *
 *
 */
static struct se_device *pscsi_add_device_to_list(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	struct pscsi_dev_virt *pdv,
	struct scsi_device *sd,
	int dev_flags)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct request_queue *q;
	struct queue_limits *limits;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	if (!sd->queue_depth) {
		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;

		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
			" queue_depth to %d\n", sd->channel, sd->id,
			sd->lun, sd->queue_depth);
	}
	/*
	 * Setup the local scope queue_limits from struct request_queue->limits
	 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
	 */
	q = sd->request_queue;
	limits = &dev_limits.limits;
	limits->logical_block_size = sd->sector_size;
	limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
				  queue_max_hw_sectors(q) : sd->host->max_sectors;
	limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
				  queue_max_sectors(q) : sd->host->max_sectors;
	dev_limits.hw_queue_depth = sd->queue_depth;
	dev_limits.queue_depth = sd->queue_depth;
	/*
	 * Setup our standard INQUIRY info into se_dev->t10_wwn
	 */
	pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);

	/*
	 * Set the pointer pdv->pdv_sd from the passed struct scsi_device,
	 * which has already been referenced with Linux SCSI code with
	 * scsi_device_get() in this file's pscsi_create_virtdevice().
	 *
	 * The passthrough operations called by the transport_add_device_*
	 * function below will require this pointer to be set for passthrough
	 * ops.
	 *
	 * For the shutdown case in pscsi_free_device(), this struct
	 * scsi_device reference is released with Linux SCSI code
	 * scsi_device_put() and the pdv->pdv_sd cleared.
	 */
	pdv->pdv_sd = sd;

	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
				se_dev, dev_flags, (void *)pdv,
				&dev_limits, NULL, NULL);
	if (!(dev)) {
		pdv->pdv_sd = NULL;
		return NULL;
	}

	/*
	 * Locate VPD WWN Information used for various purposes within
	 * the Storage Engine.
	 */
	if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
		/*
		 * If VPD Unit Serial returned GOOD status, try
		 * VPD Device Identification page (0x83).
		 */
		pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
	}

	/*
	 * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
	 */
	if (sd->type == TYPE_TAPE)
		pscsi_tape_read_blocksize(dev, sd);

	return dev;
}

static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct pscsi_dev_virt *pdv;

	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
	if (!(pdv)) {
		printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
		return NULL;
	}
	pdv->pdv_se_hba = hba;

	printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
	return (void *)pdv;
}

/*
 * Called with struct Scsi_Host->host_lock held.
 */
static struct se_device *pscsi_create_type_disk(
	struct scsi_device *sd,
	struct pscsi_dev_virt *pdv,
	struct se_subsystem_dev *se_dev,
	struct se_hba *hba)
{
	struct se_device *dev;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	struct block_device *bd;
	u32 dev_flags = 0;

	if (scsi_device_get(sd)) {
		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return NULL;
	}
	spin_unlock_irq(sh->host_lock);
	/*
	 * Claim exclusive struct block_device access to struct scsi_device
	 * for TYPE_DISK using supplied udev_path
	 */
	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
	if (IS_ERR(bd)) {
		printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
		scsi_device_put(sd);
		return NULL;
	}
	pdv->pdv_bd = bd;

	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
	if (!(dev)) {
		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
		scsi_device_put(sd);
		return NULL;
	}
	printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);

	return dev;
}

/*
 * Called with struct Scsi_Host->host_lock held.
 */
static struct se_device *pscsi_create_type_rom(
	struct scsi_device *sd,
	struct pscsi_dev_virt *pdv,
	struct se_subsystem_dev *se_dev,
	struct se_hba *hba)
{
	struct se_device *dev;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	u32 dev_flags = 0;

	if (scsi_device_get(sd)) {
		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
			sh->host_no, sd->channel, sd->id, sd->lun);
		spin_unlock_irq(sh->host_lock);
		return NULL;
	}
	spin_unlock_irq(sh->host_lock);

	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
	if (!(dev)) {
		scsi_device_put(sd);
		return NULL;
	}
	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
		sd->channel, sd->id, sd->lun);

	return dev;
}

/*
 * Called with struct Scsi_Host->host_lock held.
 */
static struct se_device *pscsi_create_type_other(
	struct scsi_device *sd,
	struct pscsi_dev_virt *pdv,
	struct se_subsystem_dev *se_dev,
	struct se_hba *hba)
{
	struct se_device *dev;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
	struct Scsi_Host *sh = sd->host;
	u32 dev_flags = 0;

	spin_unlock_irq(sh->host_lock);
	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
	if (!(dev))
		return NULL;

	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
		sd->channel, sd->id, sd->lun);

	return dev;
}

static struct se_device *pscsi_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
	struct se_device *dev;
	struct scsi_device *sd;
	struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
	struct Scsi_Host *sh = phv->phv_lld_host;
	int legacy_mode_enable = 0;

	if (!(pdv)) {
		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
				" parameter\n");
		return NULL;
	}
	/*
	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
	 */
	if (!(sh)) {
		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
			printk(KERN_ERR "pSCSI: Unable to locate struct"
				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
			return NULL;
		}
		/*
		 * For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
		 * reference, we enforce that udev_path has been set
		 */
		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
			printk(KERN_ERR "pSCSI: udev_path attribute has not"
				" been set before ENABLE=1\n");
			return NULL;
		}
		/*
		 * If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
		 * use the original TCM hba ID to reference Linux/SCSI Host No
		 * and enable for PHV_LLD_SCSI_HOST_NO mode.
		 */
		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
			spin_lock(&hba->device_lock);
			if (!(list_empty(&hba->hba_dev_list))) {
				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
					" with active devices\n");
				spin_unlock(&hba->device_lock);
				return NULL;
			}
			spin_unlock(&hba->device_lock);

			if (pscsi_pmode_enable_hba(hba, 1) != 1)
				return NULL;

			legacy_mode_enable = 1;
			hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
			sh = phv->phv_lld_host;
		} else {
			sh = pscsi_get_sh(pdv->pdv_host_id);
			if (!(sh)) {
				printk(KERN_ERR "pSCSI: Unable to locate"
					" pdv_host_id: %d\n", pdv->pdv_host_id);
				return NULL;
			}
		}
	} else {
		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
				" struct Scsi_Host exists\n");
			return NULL;
		}
	}

	spin_lock_irq(sh->host_lock);
	list_for_each_entry(sd, &sh->__devices, siblings) {
		if ((pdv->pdv_channel_id != sd->channel) ||
		    (pdv->pdv_target_id != sd->id) ||
		    (pdv->pdv_lun_id != sd->lun))
			continue;
		/*
		 * Functions will release the held struct scsi_host->host_lock
		 * before calling pscsi_add_device_to_list() to register
		 * struct scsi_device with target_core_mod.
		 */
		switch (sd->type) {
		case TYPE_DISK:
			dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
			break;
		case TYPE_ROM:
			dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
			break;
		default:
			dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
			break;
		}

		if (!(dev)) {
			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
				scsi_host_put(sh);
			else if (legacy_mode_enable) {
				pscsi_pmode_enable_hba(hba, 0);
				hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
			}
			pdv->pdv_sd = NULL;
			return NULL;
		}
		return dev;
	}
	spin_unlock_irq(sh->host_lock);

	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);

	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
		scsi_host_put(sh);
	else if (legacy_mode_enable) {
		pscsi_pmode_enable_hba(hba, 0);
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
	}

	return NULL;
}

/*	pscsi_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void pscsi_free_device(void *p)
{
	struct pscsi_dev_virt *pdv = p;
	struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	if (sd) {
		/*
		 * Release exclusive pSCSI internal struct block_device claim for
		 * struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
		 */
		if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
			blkdev_put(pdv->pdv_bd,
				   FMODE_WRITE|FMODE_READ|FMODE_EXCL);
			pdv->pdv_bd = NULL;
		}
		/*
		 * For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
		 * to struct Scsi_Host now.
		 */
		if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
		    (phv->phv_lld_host != NULL))
			scsi_host_put(phv->phv_lld_host);

		if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
			scsi_device_put(sd);

		pdv->pdv_sd = NULL;
	}

	kfree(pdv);
}

static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
{
	return container_of(task, struct pscsi_plugin_task, pscsi_task);
}

/*	pscsi_transport_complete():
 *
 *
 */
static int pscsi_transport_complete(struct se_task *task)
{
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;
	int result;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	unsigned char *cdb = &pt->pscsi_cdb[0];

	result = pt->pscsi_result;
	/*
	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
	 * forced.
	 */
	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
	     (status_byte(result) << 1) == SAM_STAT_GOOD) {
		if (!TASK_CMD(task)->se_deve)
			goto after_mode_sense;

		if (TASK_CMD(task)->se_deve->lun_flags &
				TRANSPORT_LUNFLAGS_READ_ONLY) {
			unsigned char *buf = (unsigned char *)
				T_TASK(task->task_se_cmd)->t_task_buf;

			if (cdb[0] == MODE_SENSE_10) {
				if (!(buf[3] & 0x80))
					buf[3] |= 0x80;
			} else {
				if (!(buf[2] & 0x80))
					buf[2] |= 0x80;
			}
		}
	}
after_mode_sense:

	if (sd->type != TYPE_TAPE)
		goto after_mode_select;

	/*
	 * Hack to correctly obtain the initiator requested blocksize for
	 * TYPE_TAPE.  Since this value is dependent upon each tape media,
	 * struct scsi_device->sector_size will not contain the correct value
	 * by default, so we go ahead and set it so
	 * TRANSPORT(dev)->get_blockdev() returns the correct value to the
	 * storage engine.
	 */
	if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
	      (status_byte(result) << 1) == SAM_STAT_GOOD) {
		unsigned char *buf;
		struct scatterlist *sg = task->task_sg;
		u16 bdl;
		u32 blocksize;

		buf = sg_virt(&sg[0]);
		if (!(buf)) {
			printk(KERN_ERR "Unable to get buf for scatterlist\n");
			goto after_mode_select;
		}

		if (cdb[0] == MODE_SELECT)
			bdl = (buf[3]);
		else
			bdl = (buf[6] << 8) | (buf[7]);

		if (!bdl)
			goto after_mode_select;

		if (cdb[0] == MODE_SELECT)
			blocksize = (buf[9] << 16) | (buf[10] << 8) |
					(buf[11]);
		else
			blocksize = (buf[13] << 16) | (buf[14] << 8) |
					(buf[15]);

		sd->sector_size = blocksize;
	}
after_mode_select:

	if (status_byte(result) & CHECK_CONDITION)
		return 1;

	return 0;
}

static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
{
	struct pscsi_plugin_task *pt;
	unsigned char *cdb = T_TASK(cmd)->t_task_cdb;

	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
	if (!pt) {
		printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
		return NULL;
	}

	/*
	 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
	 * allocate the extended CDB buffer for per struct se_task context
	 * pt->pscsi_cdb now.
	 */
	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {

		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
		if (!(pt->pscsi_cdb)) {
			printk(KERN_ERR "pSCSI: Unable to allocate extended"
					" pt->pscsi_cdb\n");
			kfree(pt);
			return NULL;
		}
	} else
		pt->pscsi_cdb = &pt->__pscsi_cdb[0];

	return &pt->pscsi_task;
}

static inline void pscsi_blk_init_request(
	struct se_task *task,
	struct pscsi_plugin_task *pt,
	struct request *req,
	int bidi_read)
{
	/*
	 * Defined as "scsi command" in include/linux/blkdev.h.
	 */
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	/*
	 * For the extra BIDI-COMMAND READ struct request we do not
	 * need to setup the remaining structure members
	 */
	if (bidi_read)
		return;
	/*
	 * Setup the done function pointer for struct request,
	 * also set the end_io_data pointer to struct se_task.
	 */
	req->end_io = pscsi_req_done;
	req->end_io_data = (void *)task;
	/*
	 * Load the referenced struct se_task's SCSI CDB into
	 * include/linux/blkdev.h:struct request->cmd
	 */
	req->cmd_len = scsi_command_size(pt->pscsi_cdb);
	req->cmd = &pt->pscsi_cdb[0];
	/*
	 * Setup pointer for outgoing sense data.
	 */
	req->sense = (void *)&pt->pscsi_sense[0];
	req->sense_len = 0;
}

/*
 * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
 */
static int pscsi_blk_get_request(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;

	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
			(task->task_data_direction == DMA_TO_DEVICE),
			GFP_KERNEL);
	if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
		printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
			IS_ERR(pt->pscsi_req));
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	/*
	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
	 * and setup rq callback, CDB and sense.
	 */
	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);

	return 0;
}

/*	pscsi_do_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int pscsi_do_task(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	/*
	 * Set the struct request->timeout value based on peripheral
	 * device type from SCSI.
	 */
	if (pdv->pdv_sd->type == TYPE_DISK)
		pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
	else
		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;

	pt->pscsi_req->retries = PS_RETRY;
	/*
	 * Queue the struct request into the struct scsi_device->request_queue.
	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
	 * descriptor
	 */
	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
			(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
			pscsi_req_done);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

static void pscsi_free_task(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct se_cmd *cmd = task->task_se_cmd;

	/*
	 * Release the extended CDB allocation from pscsi_alloc_task()
	 * if one exists.
	 */
	if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
		kfree(pt->pscsi_cdb);
	/*
	 * We do not release the bio(s) here associated with this task, as
	 * this is handled by bio_put() and pscsi_bi_endio().
	 */
	kfree(pt);
}

enum {
	Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
	Opt_scsi_lun_id, Opt_err
};

static match_table_t tokens = {
	{Opt_scsi_host_id, "scsi_host_id=%d"},
	{Opt_scsi_channel_id, "scsi_channel_id=%d"},
	{Opt_scsi_target_id, "scsi_target_id=%d"},
	{Opt_scsi_lun_id, "scsi_lun_id=%d"},
	{Opt_err, NULL}
};

static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_scsi_host_id:
			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
					" scsi_host_id while phv_mode =="
					" PHV_LLD_SCSI_HOST_NO\n",
					phv->phv_host_id);
				ret = -EINVAL;
				goto out;
			}
			match_int(args, &arg);
			pdv->pdv_host_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
			break;
		case Opt_scsi_channel_id:
			match_int(args, &arg);
			pdv->pdv_channel_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_channel_id);
			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
			break;
		case Opt_scsi_target_id:
			match_int(args, &arg);
			pdv->pdv_target_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
				" ID: %d\n", phv->phv_host_id,
				pdv->pdv_target_id);
			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
			break;
		case Opt_scsi_lun_id:
			match_int(args, &arg);
			pdv->pdv_lun_id = arg;
			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
			pdv->pdv_flags |= PDF_HAS_LUN_ID;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t pscsi_check_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev)
{
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;

	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
			" scsi_lun_id= parameters\n");
		return -1;
	}

	return 0;
}

static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct pscsi_hba_virt *phv = hba->hba_ptr;
	struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
	struct scsi_device *sd = pdv->pdv_sd;
	unsigned char host_id[16];
	ssize_t bl;
	int i;

	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
		snprintf(host_id, 16, "%d", pdv->pdv_host_id);
	else
		snprintf(host_id, 16, "PHBA Mode");

	bl = sprintf(b, "SCSI Device Bus Location:"
		" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
		pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
		host_id);

	if (sd) {
		bl += sprintf(b + bl, " ");
		bl += sprintf(b + bl, "Vendor: ");
		for (i = 0; i < 8; i++) {
			if (ISPRINT(sd->vendor[i]))	/* printable character? */
				bl += sprintf(b + bl, "%c", sd->vendor[i]);
			else
				bl += sprintf(b + bl, " ");
		}
		bl += sprintf(b + bl, " Model: ");
		for (i = 0; i < 16; i++) {
			if (ISPRINT(sd->model[i]))	/* printable character? */
				bl += sprintf(b + bl, "%c", sd->model[i]);
			else
				bl += sprintf(b + bl, " ");
		}
		bl += sprintf(b + bl, " Rev: ");
		for (i = 0; i < 4; i++) {
			if (ISPRINT(sd->rev[i]))	/* printable character? */
				bl += sprintf(b + bl, "%c", sd->rev[i]);
			else
				bl += sprintf(b + bl, " ");
		}
		bl += sprintf(b + bl, "\n");
	}
	return bl;
}

static void pscsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
{
	struct bio *bio;
	/*
	 * Use bio_kmalloc() following the comment for bio -> struct request
	 * in block/blk-core.c:blk_make_request()
	 */
	bio = bio_kmalloc(GFP_KERNEL, sg_num);
	if (!(bio)) {
		printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
		return NULL;
	}
	bio->bi_end_io = pscsi_bi_endio;

	return bio;
}

#if 0
#define DEBUG_PSCSI(x...) printk(x)
#else
#define DEBUG_PSCSI(x...)
#endif

static int __pscsi_map_task_SG(
	struct se_task *task,
	struct scatterlist *task_sg,
	u32 task_sg_num,
	int bidi_read)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
	struct page *page;
	struct scatterlist *sg;
	u32 data_len = task->task_size, i, len, bytes, off;
	int nr_pages = (task->task_size + task_sg[0].offset +
			PAGE_SIZE - 1) >> PAGE_SHIFT;
	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
	int rw = (task->task_data_direction == DMA_TO_DEVICE);

	if (!task->task_size)
		return 0;
	/*
	 * For SCF_SCSI_DATA_SG_IO_CDB, use fs/bio.c:bio_add_page() to setup
	 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
	 * struct scatterlist memory.  The struct se_task->task_sg[] currently
	 * needs to be attached to struct bios for submission to Linux/SCSI
	 * using struct request to struct scsi_device->request_queue.
	 *
	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
	 * is ported to upstream SCSI passthrough functionality that accepts
	 * struct scatterlist->page_link or struct page as a parameter.
	 */
	DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);

	for_each_sg(task_sg, sg, task_sg_num, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
			page, len, off);

		while (len > 0 && data_len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!(bio)) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;
				/*
				 * Calls bio_kmalloc() and sets bio->bi_end_io()
				 */
				bio = pscsi_get_bio(pdv, nr_vecs);
				if (!(bio))
					goto fail;

				if (rw)
					bio->bi_rw |= REQ_WRITE;

				DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
					" dir: %s nr_vecs: %d\n", bio,
					(rw) ? "rw" : "r", nr_vecs);
				/*
				 * Set *hbio pointer to handle the case:
				 * nr_pages > BIO_MAX_PAGES, where additional
				 * bios need to be added to complete a given
				 * struct se_task
				 */
				if (!hbio)
					hbio = tbio = bio;
				else
					tbio = tbio->bi_next = bio;
			}

			DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
				" bio: %p page: %p len: %d off: %d\n", i, bio,
				page, len, off);

			rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
					bio, page, bytes, off);
			if (rc != bytes)
				goto fail;

			DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
				bio->bi_vcnt, nr_vecs);

			if (bio->bi_vcnt > nr_vecs) {
				DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
					" %d i: %d bio: %p, allocating another"
					" bio\n", bio->bi_vcnt, i, bio);
				/*
				 * Clear the pointer so that another bio will
				 * be allocated with pscsi_get_bio() above, the
				 * current bio has already been set *tbio and
				 * bio->bi_next.
				 */
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}
	/*
	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
	 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
	 */
	if (!(bidi_read)) {
		/*
		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
		 * allocate the pSCSI task a struct request.
		 */
		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
					hbio, GFP_KERNEL);
		if (!(pt->pscsi_req)) {
			printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
			goto fail;
		}
		/*
		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
		 * and setup rq callback, CDB and sense.
		 */
		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);

		return task->task_sg_num;
	}
	/*
	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
	 * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
	 */
	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
					hbio, GFP_KERNEL);
	if (!(pt->pscsi_req->next_rq)) {
		printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
		goto fail;
	}
	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);

	return task->task_sg_num;
fail:
	while (hbio) {
		bio = hbio;
		hbio = hbio->bi_next;
		bio->bi_next = NULL;
		bio_endio(bio, 0);
	}
	return ret;
}

static int pscsi_map_task_SG(struct se_task *task)
{
	int ret;

	/*
	 * Setup the main struct request for the task->task_sg[] payload
	 */
	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
	if (ret >= 0 && task->task_sg_bidi) {
		/*
		 * If present, set up the extra BIDI-COMMAND SCSI READ
		 * struct request and payload.
		 */
		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
					task->task_sg_num, 1);
	}

	if (ret < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	return 0;
}

/*	pscsi_map_task_non_SG():
 *
 *
 */
static int pscsi_map_task_non_SG(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
	int ret = 0;

	if (pscsi_blk_get_request(task) < 0)
		return PYX_TRANSPORT_LU_COMM_FAILURE;

	if (!task->task_size)
		return 0;

	ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
			pt->pscsi_req, T_TASK(cmd)->t_task_buf,
			task->task_size, GFP_KERNEL);
	if (ret < 0) {
		printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}
	return 0;
}

static int pscsi_CDB_none(struct se_task *task)
{
	return pscsi_blk_get_request(task);
}

/*	pscsi_get_cdb():
 *
 *
 */
static unsigned char *pscsi_get_cdb(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	return pt->pscsi_cdb;
}

/*	pscsi_get_sense_buffer():
 *
 *
 */
static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
{
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	return (unsigned char *)&pt->pscsi_sense[0];
}

/*	pscsi_get_device_rev():
 *
 *
 */
static u32 pscsi_get_device_rev(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	return (sd->scsi_level - 1) ?
		sd->scsi_level - 1 : 1;
}

/*	pscsi_get_device_type():
 *
 *
 */
static u32 pscsi_get_device_type(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = dev->dev_ptr;
	struct scsi_device *sd = pdv->pdv_sd;

	return sd->type;
}

static sector_t pscsi_get_blocks(struct se_device *dev)
{
	struct pscsi_dev_virt *pdv = dev->dev_ptr;

	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
		return pdv->pdv_bd->bd_part->nr_sects;

	dump_stack();
	return 0;
}

/*	pscsi_handle_SAM_STATUS_failures():
 *
 *
 */
static inline void pscsi_process_SAM_status(
	struct se_task *task,
	struct pscsi_plugin_task *pt)
{
	task->task_scsi_status = status_byte(pt->pscsi_result);
	if ((task->task_scsi_status)) {
		task->task_scsi_status <<= 1;
		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
			pt->pscsi_result);
	}

	switch (host_byte(pt->pscsi_result)) {
	case DID_OK:
		transport_complete_task(task, (!task->task_scsi_status));
		break;
	default:
		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
			pt->pscsi_result);
		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
		task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		TASK_CMD(task)->transport_error_status =
					PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
		transport_complete_task(task, 0);
		break;
	}

	return;
}

static void pscsi_req_done(struct request *req, int uptodate)
{
	struct se_task *task = req->end_io_data;
	struct pscsi_plugin_task *pt = PSCSI_TASK(task);

	pt->pscsi_result = req->errors;
	pt->pscsi_resid = req->resid_len;

	pscsi_process_SAM_status(task, pt);
	/*
	 * Release BIDI-READ if present
	 */
	if (req->next_rq != NULL)
		__blk_put_request(req->q, req->next_rq);

	__blk_put_request(req->q, req);
	pt->pscsi_req = NULL;
}

static struct se_subsystem_api pscsi_template = {
	.name			= "pscsi",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV,
	.cdb_none		= pscsi_CDB_none,
	.map_task_non_SG	= pscsi_map_task_non_SG,
	.map_task_SG		= pscsi_map_task_SG,
	.attach_hba		= pscsi_attach_hba,
	.detach_hba		= pscsi_detach_hba,
	.pmode_enable_hba	= pscsi_pmode_enable_hba,
	.allocate_virtdevice	= pscsi_allocate_virtdevice,
	.create_virtdevice	= pscsi_create_virtdevice,
	.free_device		= pscsi_free_device,
	.transport_complete	= pscsi_transport_complete,
	.alloc_task		= pscsi_alloc_task,
	.do_task		= pscsi_do_task,
	.free_task		= pscsi_free_task,
	.check_configfs_dev_params = pscsi_check_configfs_dev_params,
	.set_configfs_dev_params = pscsi_set_configfs_dev_params,
	.show_configfs_dev_params = pscsi_show_configfs_dev_params,
	.get_cdb		= pscsi_get_cdb,
	.get_sense_buffer	= pscsi_get_sense_buffer,
	.get_device_rev		= pscsi_get_device_rev,
	.get_device_type	= pscsi_get_device_type,
	.get_blocks		= pscsi_get_blocks,
};

static int __init pscsi_module_init(void)
{
	return transport_subsystem_register(&pscsi_template);
}

static void pscsi_module_exit(void)
{
	transport_subsystem_release(&pscsi_template);
}

MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(pscsi_module_init);
module_exit(pscsi_module_exit);