/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

/*
 * Fill the SCCS and TPGS bits of standard INQUIRY byte 5 from the LUN's
 * current ALUA target port group (if one is attached).
 */
static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	spin_lock(&lun->lun_tg_pt_gp_lock);
	tg_pt_gp = lun->lun_tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&lun->lun_tg_pt_gp_lock);
}

/*
 * Emulate standard (EVPD=0) INQUIRY data into @buf.  Returns 0 on
 * success; the caller is responsible for copying @buf to the data
 * segment and completing the command.
 */
sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS.  Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	buf[7] = 0x2; /* CmdQue=1 */

	/*
	 * ASCII data fields described as being left-aligned shall have any
	 * unused bytes at the end of the field (i.e., highest offset) and the
	 * unused bytes shall be filled with ASCII space characters (20h).
	 */
	memset(&buf[8], 0x20,
	       INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
	memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
	memcpy(&buf[16], dev->t10_wwn.model,
	       strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len;

	/* Only report a serial when one was configured via ConfigFS */
	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
				      unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based from VPD=0x80 PRODUCT SERIAL
	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure
	 * per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		/* Skip any non-hex characters (e.g. '-' separators) */
		if (val < 0)
			continue;

		/* Pack two hex digits per output byte: high nibble first */
		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	/* left align Vendor ID and pad with spaces */
	memset(&buf[off+4], 0x20, 8);
	memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);

	if (1) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = lun->lun_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		put_unaligned_be16(lun->lun_rtpi, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		spin_lock(&lun->lun_tg_pt_gp_lock);
		tg_pt_gp = lun->lun_tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&lun->lun_tg_pt_gp_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&lun->lun_tg_pt_gp_lock);

		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(tg_pt_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(lu_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing, $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT> in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing, $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* logical unit supports type 1 and type 3 protection */
	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
		buf[4] |= (0x3 << 3);
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (target_check_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd,
unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 mtl = 0;
	int have_tp = 0, opt, min;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a
	 * different page length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 *
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
	 * enforcing maximum HW scatter-gather-list entry limit
	 */
	if (cmd->se_tfo->max_data_sg_nents) {
		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
		       dev->dev_attrib.block_size;
	}
	put_unaligned_be32(min_not_zero(mtl, dev->dev_attrib.hw_max_sectors), &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	/* MEDIUM ROTATION RATE: 1 == non-rotational (SSD) */
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4.  If the DP bit is set to
	 * zero, then the page length shall be set to 0004h.  If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again.  We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40 | 0x20;

	/*
	 * The unmap_zeroes_data set means that the underlying device supports
	 * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
	 * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
	 * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
	 * See sbc4r36 6.6.4.
	 */
	if (((dev->dev_attrib.emulate_tpu != 0) ||
	     (dev->dev_attrib.emulate_tpws != 0)) &&
	    (dev->dev_attrib.unmap_zeroes_data != 0))
		buf[5] |= 0x04;

	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

/* Dispatch table mapping EVPD page codes to their emulation handlers */
static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

/*
 * INQUIRY entry point: emulate either standard INQUIRY data (EVPD=0) or
 * one of the supported EVPD pages, then copy the result into the
 * command's data segment.
 */
static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (dev == rcu_access_pointer(tpg->tpg_virt_lun0->lun_se_dev))
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		/* EVPD=0 requires PAGE CODE to be zero, per SPC */
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, GOOD, len);
	return ret;
}

/* Read-Write Error Recovery mode page (01h); returns page length in bytes */
static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
return 12;
}

/* Control mode page (0Ah); returns page length in bytes */
static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	/* GLTSD: No implicit save of log parameters */
	p[2] = (1 << 1);
	if (target_sense_desc_format(dev))
		/* D_SENSE: Descriptor format sense data for 64bit sectors */
		p[2] |= (1 << 2);

	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 *                    Table 368 -- QUEUE ALGORITHM MODIFIER field
	 *                         Code      Description
	 *                          0h       Restricted reordering
	 *                          1h       Unrestricted reordering allowed
	 *                          2h to 7h    Reserved
	 *                          8h to Fh    Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner. Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * to the number of commands completed with one of those status codes.
	 */
	p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
	       (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

/* Caching mode page (08h); returns page length in bytes */
static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (target_check_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

/* Informational Exceptions Control mode page (1Ch); returns page length */
static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

/* Dispatch table mapping (page, subpage) to mode page emulation handlers */
static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate =
spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types..
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

/* Short (8-byte) block descriptor; returns bytes written (9 = 1 + 4 + 4) */
static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

/* LONGLBA (16-byte) block descriptor, used when blocks exceed 32 bits */
static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	/* Fall back to the short descriptor when the LBA count fits */
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

/*
 * MODE SENSE (6) / MODE SENSE (10) entry point: build the mode parameter
 * header, optional block descriptor, and the requested mode page(s).
 */
static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
		spc_modesense_write_protect(&buf[length], type);

	/*
	 * SBC only allows us to enable FUA and DPO together.  Fortunately
	 * DPO is explicitly specified as a hint, so a noop is a perfectly
	 * valid implementation.
	 */
	if (target_check_fua(dev))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				/* MODE SENSE (6) length byte caps at 255 */
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, length);
	return 0;
}

/*
 * MODE SELECT (6) / MODE SELECT (10) entry point: validate that the
 * initiator-supplied page contents match our emulated (non-changeable)
 * page data.
 */
static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/* Only the Page Format (PF=1) parameter layout is supported */
	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	/* SPF bit (0x40) selects the subpage format */
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	/* No changeable values, so the payload must match exactly */
	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

/*
 * REQUEST SENSE entry point: report (and clear) any pending Unit
 * Attention condition, or NO SENSE if there is none.
 */
static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	memset(buf, 0, SE_SENSE_BUF);

	/* DESC bit (descriptor format on request) is not supported */
	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
					ua_asc, ua_ascq);
	else
		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

/*
 * REPORT LUNS entry point.
 * NOTE(review): definition continues past the end of this chunk.
 */
sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct scsi_lun slun;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8;
__be32 len; 1220 1221 buf = transport_kmap_data_sg(cmd); 1222 if (cmd->data_length && !buf) 1223 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1224 1225 /* 1226 * If no struct se_session pointer is present, this struct se_cmd is 1227 * coming via a target_core_mod PASSTHROUGH op, and not through 1228 * a $FABRIC_MOD. In that case, report LUN=0 only. 1229 */ 1230 if (!sess) 1231 goto done; 1232 1233 nacl = sess->se_node_acl; 1234 1235 rcu_read_lock(); 1236 hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) { 1237 /* 1238 * We determine the correct LUN LIST LENGTH even once we 1239 * have reached the initial allocation length. 1240 * See SPC2-R20 7.19. 1241 */ 1242 lun_count++; 1243 if (offset >= cmd->data_length) 1244 continue; 1245 1246 int_to_scsilun(deve->mapped_lun, &slun); 1247 memcpy(buf + offset, &slun, 1248 min(8u, cmd->data_length - offset)); 1249 offset += 8; 1250 } 1251 rcu_read_unlock(); 1252 1253 /* 1254 * See SPC3 r07, page 159. 1255 */ 1256 done: 1257 /* 1258 * If no LUNs are accessible, report virtual LUN 0. 
1259 */ 1260 if (lun_count == 0) { 1261 int_to_scsilun(0, &slun); 1262 if (cmd->data_length > 8) 1263 memcpy(buf + offset, &slun, 1264 min(8u, cmd->data_length - offset)); 1265 lun_count = 1; 1266 } 1267 1268 if (buf) { 1269 len = cpu_to_be32(lun_count * 8); 1270 memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length)); 1271 transport_kunmap_data_sg(cmd); 1272 } 1273 1274 target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8); 1275 return 0; 1276 } 1277 EXPORT_SYMBOL(spc_emulate_report_luns); 1278 1279 static sense_reason_t 1280 spc_emulate_testunitready(struct se_cmd *cmd) 1281 { 1282 target_complete_cmd(cmd, GOOD); 1283 return 0; 1284 } 1285 1286 sense_reason_t 1287 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) 1288 { 1289 struct se_device *dev = cmd->se_dev; 1290 unsigned char *cdb = cmd->t_task_cdb; 1291 1292 if (!dev->dev_attrib.emulate_pr && 1293 ((cdb[0] == PERSISTENT_RESERVE_IN) || 1294 (cdb[0] == PERSISTENT_RESERVE_OUT) || 1295 (cdb[0] == RELEASE || cdb[0] == RELEASE_10) || 1296 (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) { 1297 return TCM_UNSUPPORTED_SCSI_OPCODE; 1298 } 1299 1300 switch (cdb[0]) { 1301 case MODE_SELECT: 1302 *size = cdb[4]; 1303 cmd->execute_cmd = spc_emulate_modeselect; 1304 break; 1305 case MODE_SELECT_10: 1306 *size = get_unaligned_be16(&cdb[7]); 1307 cmd->execute_cmd = spc_emulate_modeselect; 1308 break; 1309 case MODE_SENSE: 1310 *size = cdb[4]; 1311 cmd->execute_cmd = spc_emulate_modesense; 1312 break; 1313 case MODE_SENSE_10: 1314 *size = get_unaligned_be16(&cdb[7]); 1315 cmd->execute_cmd = spc_emulate_modesense; 1316 break; 1317 case LOG_SELECT: 1318 case LOG_SENSE: 1319 *size = get_unaligned_be16(&cdb[7]); 1320 break; 1321 case PERSISTENT_RESERVE_IN: 1322 *size = get_unaligned_be16(&cdb[7]); 1323 cmd->execute_cmd = target_scsi3_emulate_pr_in; 1324 break; 1325 case PERSISTENT_RESERVE_OUT: 1326 *size = get_unaligned_be32(&cdb[5]); 1327 cmd->execute_cmd = target_scsi3_emulate_pr_out; 1328 break; 1329 case 
RELEASE: 1330 case RELEASE_10: 1331 if (cdb[0] == RELEASE_10) 1332 *size = get_unaligned_be16(&cdb[7]); 1333 else 1334 *size = cmd->data_length; 1335 1336 cmd->execute_cmd = target_scsi2_reservation_release; 1337 break; 1338 case RESERVE: 1339 case RESERVE_10: 1340 /* 1341 * The SPC-2 RESERVE does not contain a size in the SCSI CDB. 1342 * Assume the passthrough or $FABRIC_MOD will tell us about it. 1343 */ 1344 if (cdb[0] == RESERVE_10) 1345 *size = get_unaligned_be16(&cdb[7]); 1346 else 1347 *size = cmd->data_length; 1348 1349 cmd->execute_cmd = target_scsi2_reservation_reserve; 1350 break; 1351 case REQUEST_SENSE: 1352 *size = cdb[4]; 1353 cmd->execute_cmd = spc_emulate_request_sense; 1354 break; 1355 case INQUIRY: 1356 *size = get_unaligned_be16(&cdb[3]); 1357 1358 /* 1359 * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 1360 * See spc4r17 section 5.3 1361 */ 1362 cmd->sam_task_attr = TCM_HEAD_TAG; 1363 cmd->execute_cmd = spc_emulate_inquiry; 1364 break; 1365 case SECURITY_PROTOCOL_IN: 1366 case SECURITY_PROTOCOL_OUT: 1367 *size = get_unaligned_be32(&cdb[6]); 1368 break; 1369 case EXTENDED_COPY: 1370 *size = get_unaligned_be32(&cdb[10]); 1371 cmd->execute_cmd = target_do_xcopy; 1372 break; 1373 case RECEIVE_COPY_RESULTS: 1374 *size = get_unaligned_be32(&cdb[10]); 1375 cmd->execute_cmd = target_do_receive_copy_results; 1376 break; 1377 case READ_ATTRIBUTE: 1378 case WRITE_ATTRIBUTE: 1379 *size = get_unaligned_be32(&cdb[10]); 1380 break; 1381 case RECEIVE_DIAGNOSTIC: 1382 case SEND_DIAGNOSTIC: 1383 *size = get_unaligned_be16(&cdb[3]); 1384 break; 1385 case WRITE_BUFFER: 1386 *size = get_unaligned_be24(&cdb[6]); 1387 break; 1388 case REPORT_LUNS: 1389 cmd->execute_cmd = spc_emulate_report_luns; 1390 *size = get_unaligned_be32(&cdb[6]); 1391 /* 1392 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS 1393 * See spc4r17 section 5.3 1394 */ 1395 cmd->sam_task_attr = TCM_HEAD_TAG; 1396 break; 1397 case TEST_UNIT_READY: 1398 cmd->execute_cmd = 
spc_emulate_testunitready; 1399 *size = 0; 1400 break; 1401 case MAINTENANCE_IN: 1402 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 1403 /* 1404 * MAINTENANCE_IN from SCC-2 1405 * Check for emulated MI_REPORT_TARGET_PGS 1406 */ 1407 if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) { 1408 cmd->execute_cmd = 1409 target_emulate_report_target_port_groups; 1410 } 1411 *size = get_unaligned_be32(&cdb[6]); 1412 } else { 1413 /* 1414 * GPCMD_SEND_KEY from multi media commands 1415 */ 1416 *size = get_unaligned_be16(&cdb[8]); 1417 } 1418 break; 1419 case MAINTENANCE_OUT: 1420 if (dev->transport->get_device_type(dev) != TYPE_ROM) { 1421 /* 1422 * MAINTENANCE_OUT from SCC-2 1423 * Check for emulated MO_SET_TARGET_PGS. 1424 */ 1425 if (cdb[1] == MO_SET_TARGET_PGS) { 1426 cmd->execute_cmd = 1427 target_emulate_set_target_port_groups; 1428 } 1429 *size = get_unaligned_be32(&cdb[6]); 1430 } else { 1431 /* 1432 * GPCMD_SEND_KEY from multi media commands 1433 */ 1434 *size = get_unaligned_be16(&cdb[8]); 1435 } 1436 break; 1437 default: 1438 return TCM_UNSUPPORTED_SCSI_OPCODE; 1439 } 1440 1441 return 0; 1442 } 1443 EXPORT_SYMBOL(spc_parse_cdb); 1444