/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}
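
/*
 * Illustrative note on the helper above (derived from this code, not from
 * SPC itself): byte 5 of the standard INQUIRY data ends up as 0x80 (SCCS
 * set, TPGS = 00b) when no target port group is attached to the LUN, or as
 * 0x80 OR'ed with the group's ALUA access type, which is assumed to already
 * be encoded in the TPGS bit positions (bits 5:4), e.g. 0x90 for implicit
 * ALUA.
 */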

sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS.  Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	buf[7] = 0x2; /* CmdQue=1 */

	memcpy(&buf[8], "LIO-ORG ", 8);
	memset(&buf[16], 0x20, 16);
	memcpy(&buf[16], dev->t10_wwn.model,
	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
				      unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on VPD=0x80 PRODUCT SERIAL
	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
	 * per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				  &dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);

	if (1) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = lun->lun_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((lun->lun_rtpi >> 8) & 0xff);
		buf[off++] = (lun->lun_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		tg_pt_gp = lun->lun_tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&lun->lun_tg_pt_gp_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* logical unit supports type 1 and type 3 protection */
	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
		buf[4] |= (0x3 << 3);
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (target_check_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	int have_tp = 0;
	int opt, min;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h.  If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again.  We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40 | 0x20;

	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t		page;
	sense_reason_t	(*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, GOOD, len);
	return ret;
}

static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int
spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	p[2] = 2;
	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 *                  Table 368 -- QUEUE ALGORITHM MODIFIER field
	 *                  Code       Description
	 *                  0h         Restricted reordering
	 *                  1h         Unrestricted reordering allowed
	 *                  2h to 7h   Reserved
	 *                  8h to Fh   Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * of the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (target_check_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t		page;
	uint8_t		subpage;
	int		(*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types.
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;
	bool read_only = target_lun_is_rdonly(cmd);

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
		spc_modesense_write_protect(&buf[length], type);

	/*
	 * SBC only allows us to enable FUA and DPO together.  Fortunately
	 * DPO is explicitly specified as a hint, so a noop is a perfectly
	 * valid implementation.
	 */
	if (target_check_fua(dev))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, length);
	return 0;
}

static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;

		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;

		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct scsi_lun slun;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8;
	__be32 len;

	buf = transport_kmap_data_sg(cmd);
	if (cmd->data_length && !buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess)
		goto done;

	nacl = sess->se_node_acl;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if (offset >= cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, &slun);
		memcpy(buf + offset, &slun,
		       min(8u, cmd->data_length - offset));
		offset += 8;
	}
	rcu_read_unlock();

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/*
	 * If no LUNs are accessible, report virtual LUN 0.
	 */
	if (lun_count == 0) {
		int_to_scsilun(0, &slun);
		if (cmd->data_length > 8)
			memcpy(buf + offset, &slun,
			       min(8u, cmd->data_length - offset));
		lun_count = 1;
	}

	if (buf) {
		len = cpu_to_be32(lun_count * 8);
		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);