/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	if (!port)
		return;
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}

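/*
 * Standard INQUIRY data, see spc4r17 section 6.4.2.  A rough sketch of
 * the payload built below, assuming the caller has already filled in
 * byte 0 with the peripheral device type:
 *
 *   byte 1       RMB (set for TYPE_TAPE)
 *   byte 2       VERSION = 0x05 (SPC-3)
 *   byte 3       RESPONSE DATA FORMAT = 2
 *   byte 4       ADDITIONAL LENGTH = 31 (36 bytes of standard data)
 *   byte 5       SCCS / TPGS / 3PC / PROTECT flags
 *   byte 7       CmdQue
 *   bytes 8-15   T10 VENDOR IDENTIFICATION ("LIO-ORG ")
 *   bytes 16-31  PRODUCT IDENTIFICATION
 *   bytes 32-35  PRODUCT REVISION LEVEL
 */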
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun->lun_sep, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the transport supports VERIFY + PASS.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			buf[5] |= 0x1;
	}

	buf[7] = 0x2; /* CmdQue=1 */

	memcpy(&buf[8], "LIO-ORG ", 8);
	memset(&buf[16], 0x20, 16);
	memcpy(&buf[16], dev->t10_wwn.model,
	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

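/*
 * Illustrative example of the hex nibble packing done below: for a
 * (hypothetical) vpd_unit_serial of "23-456" the hex digits 2, 3, 4, 5
 * and 6 end up packed as
 *
 *   buf[0] |= 0x02;  (low nibble only; the high nibble already holds
 *                     the last nibble of the IEEE Company ID)
 *   buf[1]  = 0x34;
 *   buf[2]  = 0x56;
 *
 * Non-hex characters such as '-' are simply skipped.
 */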
void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
				      unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT SERIAL
	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
	 * per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

static bool
spc_check_dev_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (spc_check_dev_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 max_sectors;
	int have_tp = 0;
	int opt, min;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	max_sectors = min(dev->dev_attrib.fabric_max_sectors,
			  dev->dev_attrib.hw_max_sectors);
	put_unaligned_be32(max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h.  If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again.  We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40;

	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, GOOD, len);
	return ret;
}

static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	p[2] = 2;
	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
	 * Code      Description
	 *  0h       Restricted reordering
	 *  1h       Unrestricted reordering allowed
	 *  2h to 7h Reserved
	 *  8h to Fh Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS.  Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * of the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type)
			p[5] |= 0x80;
	}

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

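/*
 * Caching mode page (08h).  Only WCE is reported, based on the backend
 * write cache state; read-ahead is reported as disabled (DRA set).
 */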
static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (spc_check_dev_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types..
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

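/*
 * MODE SENSE emulation.  A quick sketch of the header sizes assumed
 * below: MODE SENSE(6) returns a 4 byte header with a one byte MODE
 * DATA LENGTH field, while MODE SENSE(10) returns an 8 byte header
 * with a two byte MODE DATA LENGTH field, so the DEVICE-SPECIFIC
 * PARAMETER byte lives at offset 2 or 3 respectively and the final
 * MODE DATA LENGTH value excludes the length field itself.
 */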
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
	    (cmd->se_deve &&
	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
		spc_modesense_write_protect(&buf[length], type);

	if ((spc_check_dev_wce(dev)) &&
	    (dev->dev_attrib.emulate_fua_write > 0))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
						blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
						blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, length);
	return 0;
}

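/*
 * MODE SELECT emulation.  None of the emulated mode pages expose
 * changeable parameters, so the page sent by the initiator is
 * regenerated with the matching handler above and compared byte for
 * byte; any mismatch is rejected as an invalid parameter list.
 */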
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	int ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;

		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;

		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

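/*
 * REPORT LUNS response layout: an 8 byte header whose first 4 bytes
 * carry the LUN LIST LENGTH in bytes, followed by one 8 byte entry per
 * reported LUN.  For example, two mapped LUNs yield a LUN LIST LENGTH
 * of 16 and a 24 byte response.
 */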
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((offset + 8) > cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(cmd);

	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = MSG_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);