// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static void spc_fill_alua_data(struct se_lun *lun, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	rcu_read_lock();
	tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	rcu_read_unlock();
}

static u16
spc_find_scsi_transport_vd(int proto_id)
{
	switch (proto_id) {
	case SCSI_PROTOCOL_FCP:
		return SCSI_VERSION_DESCRIPTOR_FCP4;
	case SCSI_PROTOCOL_ISCSI:
		return SCSI_VERSION_DESCRIPTOR_ISCSI;
	case SCSI_PROTOCOL_SAS:
		return SCSI_VERSION_DESCRIPTOR_SAS3;
	case SCSI_PROTOCOL_SBP:
		return SCSI_VERSION_DESCRIPTOR_SBP3;
	case SCSI_PROTOCOL_SRP:
		return SCSI_VERSION_DESCRIPTOR_SRP;
	default:
		pr_warn("Cannot find VERSION DESCRIPTOR value for unknown SCSI"
			" transport PROTOCOL IDENTIFIER %#x\n", proto_id);
		return 0;
	}
}
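
/*
 * Editor's note (illustrative summary, not from the original source): the
 * standard INQUIRY payload built by spc_emulate_inquiry_std() below ends up
 * laid out roughly as follows for a typical disk LUN:
 *
 *   byte 0       PERIPHERAL DEVICE TYPE (from the backend)
 *   byte 2       0x06                    VERSION == SPC-4
 *   byte 3       0x02                    RESPONSE DATA FORMAT
 *   byte 5       SCCS/TPGS/3PC/PROTECT   ALUA + copy offload + T10-PI flags
 *   byte 7       0x02                    CmdQue
 *   bytes 8-15   T10 VENDOR IDENTIFICATION (space padded)
 *   bytes 16-31  PRODUCT IDENTIFICATION    (space padded)
 *   bytes 32-35  PRODUCT REVISION LEVEL    (space padded)
 *   bytes 58+    VERSION DESCRIPTORs       (SAM-5, transport, SPC-4, SBC-3)
 */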

sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = lun->lun_tpg;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x06; /* SPC-4 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS.  Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	/*
	 * Set MULTIP bit to indicate presence of multiple SCSI target ports
	 */
	if (dev->export_count > 1)
		buf[6] |= 0x10;

	buf[7] = 0x2; /* CmdQue=1 */

	/*
	 * ASCII data fields described as being left-aligned shall have any
	 * unused bytes at the end of the field (i.e., highest offset) and the
	 * unused bytes shall be filled with ASCII space characters (20h).
	 */
	memset(&buf[8], 0x20,
	       INQUIRY_VENDOR_LEN + INQUIRY_MODEL_LEN + INQUIRY_REVISION_LEN);
	memcpy(&buf[8], dev->t10_wwn.vendor,
	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
	memcpy(&buf[16], dev->t10_wwn.model,
	       strnlen(dev->t10_wwn.model, INQUIRY_MODEL_LEN));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       strnlen(dev->t10_wwn.revision, INQUIRY_REVISION_LEN));

	/*
	 * Set the VERSION DESCRIPTOR fields
	 */
	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SAM5, &buf[58]);
	put_unaligned_be16(spc_find_scsi_transport_vd(tpg->proto_id), &buf[60]);
	put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SPC4, &buf[62]);
	if (cmd->se_dev->transport->get_device_type(dev) == TYPE_DISK)
		put_unaligned_be16(SCSI_VERSION_DESCRIPTOR_SBC3, &buf[64]);

	buf[4] = 91; /* Set additional length to 91 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

/*
 * Generate NAA IEEE Registered Extended designator
 */
void spc_gen_naa_6h_vendor_specific(struct se_device *dev,
				    unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	u32 company_id = dev->t10_wwn.company_id;
	int cnt, off = 0;
	bool next = true;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off] = 0x6 << 4;

	/* IEEE COMPANY_ID */
	buf[off++] |= (company_id >> 20) & 0xf;
	buf[off++] = (company_id >> 12) & 0xff;
	buf[off++] = (company_id >> 4) & 0xff;
	buf[off] = (company_id & 0xf) << 4;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT
	 * SERIAL NUMBER set via vpd_unit_serial in target_core_configfs.c to
	 * ensure per-device uniqueness.
	 */
	for (cnt = off + 13; *p && off < cnt; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[off++] |= val;
		} else {
			next = true;
			buf[off] = val << 4;
		}
	}
}
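
/*
 * Editor's note (illustrative example, not from the original source; the
 * values are hypothetical): with t10_wwn.company_id = 0x001405 and a
 * vpd_unit_serial beginning with "8a4f3c2b1d0e9f87...",
 * spc_gen_naa_6h_vendor_specific() emits a 16-byte designator that reads
 * back as naa.60014058a4f3c2b1d0e9f87..., i.e. the 0x6 NAA nibble, the
 * 24-bit IEEE company id, then up to 25 hex digits taken from the unit
 * serial (non-hex characters in the serial string are skipped).
 */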

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/* NAA IEEE Registered Extended designator */
	spc_gen_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL)
		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				&dev->t10_wwn.unit_serial[0]);
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	/* left align Vendor ID and pad with spaces */
	memset(&buf[off+4], 0x20, INQUIRY_VENDOR_LEN);
	memcpy(&buf[off+4], dev->t10_wwn.vendor,
	       strnlen(dev->t10_wwn.vendor, INQUIRY_VENDOR_LEN));
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);

	if (1) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = lun->lun_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		put_unaligned_be16(lun->lun_tpg->tpg_rtpi, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		rcu_read_lock();
		tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
		if (!tg_pt_gp) {
			rcu_read_unlock();
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		rcu_read_unlock();

		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(tg_pt_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		put_unaligned_be16(lu_gp_id, &buf[off]);
		off += 2;
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] = tpg->proto_id << 4;
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* logical unit supports type 1 and type 3 protection */
	if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
	    (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
	    (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
		buf[4] |= (0x3 << 3);
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (target_check_wce(dev))
		buf[6] = 0x01;

	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u32 mtl = 0;
	int have_tp = 0, opt, min;
	u32 io_max_blocks;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we are expected to report a
	 * different page length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;

	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 *
	 * XXX: Currently assumes single PAGE_SIZE per scatterlist for fabrics
	 * enforcing maximum HW scatter-gather-list entry limit
	 */
	if (cmd->se_tfo->max_data_sg_nents) {
		mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE) /
		      dev->dev_attrib.block_size;
	}
	io_max_blocks = mult_frac(dev->dev_attrib.hw_max_sectors,
				  dev->dev_attrib.hw_block_size,
				  dev->dev_attrib.block_size);
	put_unaligned_be32(min_not_zero(mtl, io_max_blocks), &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}
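
/*
 * Editor's note (worked example for the MAXIMUM TRANSFER LENGTH math above;
 * the numbers are made up): with hw_max_sectors = 16384, hw_block_size = 512
 * and an exported block_size of 4096, io_max_blocks =
 * mult_frac(16384, 512, 4096) = 2048 logical blocks.  If the fabric also
 * reports max_data_sg_nents = 2048 with 4K pages, mtl = (2048 * 4096) / 4096
 * = 2048, and min_not_zero() places the smaller non-zero limit in buf[8..11].
 */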

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h. If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again. We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40 | 0x20;

	/*
	 * The unmap_zeroes_data attribute being set means that the underlying
	 * device supports REQ_OP_DISCARD and has the discard_zeroes_data bit
	 * set. This satisfies the SBC requirements for LBPRZ, meaning that a
	 * subsequent read will return zeroes after an UNMAP or WRITE SAME (16)
	 * to an LBA. See sbc4r36 6.6.4.
	 */
	if (((dev->dev_attrib.emulate_tpu != 0) ||
	     (dev->dev_attrib.emulate_tpws != 0)) &&
	    (dev->dev_attrib.unmap_zeroes_data != 0))
		buf[5] |= 0x04;

	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};
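
/*
 * Editor's note (illustrative): a new VPD page is exposed by adding its
 * emulation routine to the table above, e.g. a purely hypothetical
 *   { .page = 0xb7, .emulate = spc_emulate_evpd_b7 },
 * entry.  spc_emulate_evpd_00() then reports it automatically, since the
 * supported-pages list is generated from ARRAY_SIZE(evpd_handlers).
 */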

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_debug("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, len);
	return ret;
}

static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	p[0] = 0x0a;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	/* GLTSD: No implicit save of log parameters */
	p[2] = (1 << 1);
	if (target_sense_desc_format(dev))
		/* D_SENSE: Descriptor format sense data for 64bit sectors */
		p[2] |= (1 << 2);

	/*
	 * From spc4r23, 7.4.7 Control mode page
	 *
	 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
	 * restrictions on the algorithm used for reordering commands
	 * having the SIMPLE task attribute (see SAM-4).
	 *
	 * Table 368 -- QUEUE ALGORITHM MODIFIER field
	 *  Code        Description
	 *  0h          Restricted reordering
	 *  1h          Unrestricted reordering allowed
	 *  2h to 7h    Reserved
	 *  8h to Fh    Vendor specific
	 *
	 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
	 * the device server shall order the processing sequence of commands
	 * having the SIMPLE task attribute such that data integrity is maintained
	 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
	 * requests is halted at any time, the final value of all data observable
	 * on the medium shall be the same as if all the commands had been processed
	 * with the ORDERED task attribute).
	 *
	 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
	 * device server may reorder the processing sequence of commands having the
	 * SIMPLE task attribute in any manner.  Any data integrity exposures related to
	 * command sequence order shall be explicitly handled by the application client
	 * through the selection of appropriate commands and task attributes.
	 */
	p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
	 *
	 * 00b: The logical unit shall clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when a com-
	 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
	 * status.
	 *
	 * 10b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall not establish a unit attention condition when
	 * a command is completed with BUSY, TASK SET FULL, or RESERVATION
	 * CONFLICT status.
	 *
	 * 11b: The logical unit shall not clear any unit attention condition
	 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
	 * status and shall establish a unit attention condition for the
	 * initiator port associated with the I_T nexus on which the BUSY,
	 * TASK SET FULL, or RESERVATION CONFLICT status is being returned.
	 * Depending on the status, the additional sense code shall be set to
	 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
	 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
	 * command, a unit attention condition shall be established only once
	 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
	 * of the number of commands completed with one of those status codes.
	 */
	switch (dev->dev_attrib.emulate_ua_intlck_ctrl) {
	case TARGET_UA_INTLCK_CTRL_ESTABLISH_UA:
		p[4] = 0x30;
		break;
	case TARGET_UA_INTLCK_CTRL_NO_CLEAR:
		p[4] = 0x20;
		break;
	default:	/* TARGET_UA_INTLCK_CTRL_CLEAR */
		p[4] = 0x00;
		break;
	}
	/*
	 * From spc4r17, section 7.4.6 Control mode Page
	 *
	 * Task Aborted Status (TAS) bit set to zero.
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (target_check_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types.
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}
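
/*
 * Editor's note (illustrative, not from the original source): the handlers
 * above fill in single mode pages; spc_emulate_modesense() below wraps them
 * in the mode parameter header and optional block descriptor:
 *
 *   MODE SENSE (6):   1-byte MODE DATA LENGTH, MEDIUM TYPE,
 *                     DEVICE-SPECIFIC PARAMETER, BLOCK DESCRIPTOR LENGTH,
 *                     then the short block descriptor and mode page(s).
 *   MODE SENSE (10):  2-byte MODE DATA LENGTH, MEDIUM TYPE,
 *                     DEVICE-SPECIFIC PARAMETER, LONGLBA, reserved,
 *                     2-byte BLOCK DESCRIPTOR LENGTH, descriptor, page(s).
 */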

static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if (cmd->se_lun->lun_access_ro || target_lun_is_rdonly(cmd))
		spc_modesense_write_protect(&buf[length], type);

	/*
	 * SBC only allows us to enable FUA and DPO together.  Fortunately
	 * DPO is explicitly specified as a hint, so a noop is a perfectly
	 * valid implementation.
	 */
	if (target_check_fua(dev))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, length);
	return 0;
}

static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ? buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, SAM_STAT_GOOD);
	return ret;
}

static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))
		scsi_build_sense_buffer(desc_format, buf, UNIT_ATTENTION,
					ua_asc, ua_ascq);
	else
		scsi_build_sense_buffer(desc_format, buf, NO_SENSE, 0x0, 0x0);

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
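
/*
 * Editor's note (illustrative): REPORT LUNS parameter data, as built by
 * spc_emulate_report_luns() below, is an 8-byte header whose first four
 * bytes carry the LUN LIST LENGTH (lun_count * 8), followed by one 8-byte
 * LUN entry per mapped LUN starting at offset 8.  A session with two mapped
 * LUNs therefore returns 8 + 2 * 8 = 24 bytes of parameter data.
 */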

sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	struct se_node_acl *nacl;
	struct scsi_lun slun;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8;
	__be32 len;

	buf = transport_kmap_data_sg(cmd);
	if (cmd->data_length && !buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess)
		goto done;

	nacl = sess->se_node_acl;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if (offset >= cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, &slun);
		memcpy(buf + offset, &slun,
		       min(8u, cmd->data_length - offset));
		offset += 8;
	}
	rcu_read_unlock();

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	/*
	 * If no LUNs are accessible, report virtual LUN 0.
	 */
	if (lun_count == 0) {
		int_to_scsilun(0, &slun);
		if (cmd->data_length > 8)
			memcpy(buf + offset, &slun,
			       min(8u, cmd->data_length - offset));
		lun_count = 1;
	}

	if (buf) {
		len = cpu_to_be32(lun_count * 8);
		memcpy(buf, &len, min_t(int, sizeof len, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 8 + lun_count * 8);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static void set_dpofua_usage_bits(u8 *usage_bits, struct se_device *dev)
{
	if (!target_check_fua(dev))
		usage_bits[1] &= ~0x18;
	else
		usage_bits[1] |= 0x18;
}

static void set_dpofua_usage_bits32(u8 *usage_bits, struct se_device *dev)
{
	if (!target_check_fua(dev))
		usage_bits[10] &= ~0x18;
	else
		usage_bits[10] |= 0x18;
}
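
/*
 * Editor's note (illustrative): the descriptors below feed REPORT SUPPORTED
 * OPERATION CODES.  usage_bits is the CDB USAGE DATA for one command: byte 0
 * repeats the opcode and every other byte is a bitmap of the CDB bits the
 * device server actually evaluates (0xff = whole byte used, 0x00 = ignored).
 * The set_dpofua_usage_bits*() helpers above set or clear the DPO/FUA bits
 * (0x18) depending on whether the backend honours FUA.
 */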

static struct target_opcode_descriptor tcm_opcode_read6 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_6,
	.cdb_size = 6,
	.usage_bits = {READ_6, 0x1f, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_read10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_10,
	.cdb_size = 10,
	.usage_bits = {READ_10, 0xf8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_read12 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_12,
	.cdb_size = 12,
	.usage_bits = {READ_12, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_read16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_16,
	.cdb_size = 16,
	.usage_bits = {READ_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write6 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_6,
	.cdb_size = 6,
	.usage_bits = {WRITE_6, 0x1f, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_write10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_10,
	.cdb_size = 10,
	.usage_bits = {WRITE_10, 0xf8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write_verify10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_VERIFY,
	.cdb_size = 10,
	.usage_bits = {WRITE_VERIFY, 0xf0, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write12 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_12,
	.cdb_size = 12,
	.usage_bits = {WRITE_12, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_16, 0xf8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_write_verify16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_VERIFY_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_VERIFY_16, 0xf0, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.update_usage_bits = set_dpofua_usage_bits,
};

static bool tcm_is_ws_enabled(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;

	return (dev->dev_attrib.emulate_tpws && !!ops->execute_unmap) ||
	       !!ops->execute_write_same;
}

static struct target_opcode_descriptor tcm_opcode_write_same32 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = VARIABLE_LENGTH_CMD,
	.service_action = WRITE_SAME_32,
	.cdb_size = 32,
	.usage_bits = {VARIABLE_LENGTH_CMD, SCSI_CONTROL_MASK, 0x00, 0x00,
		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0x18,
		       0x00, WRITE_SAME_32, 0xe8, 0x00,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0xff, 0xff, 0xff, 0xff},
	.enabled = tcm_is_ws_enabled,
	.update_usage_bits = set_dpofua_usage_bits32,
};

static bool tcm_is_caw_enabled(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_caw;
}

static struct target_opcode_descriptor tcm_opcode_compare_write = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = COMPARE_AND_WRITE,
	.cdb_size = 16,
	.usage_bits = {COMPARE_AND_WRITE, 0x18, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, 0x00,
		       0x00, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_caw_enabled,
	.update_usage_bits = set_dpofua_usage_bits,
};

static struct target_opcode_descriptor tcm_opcode_read_capacity = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = READ_CAPACITY,
	.cdb_size = 10,
	.usage_bits = {READ_CAPACITY, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, 0x00,
		       0x01, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_read_capacity16 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = SERVICE_ACTION_IN_16,
	.service_action = SAI_READ_CAPACITY_16,
	.cdb_size = 16,
	.usage_bits = {SERVICE_ACTION_IN_16, SAI_READ_CAPACITY_16, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

static bool tcm_is_rep_ref_enabled(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (list_empty(&dev->t10_alua.lba_map_list)) {
		spin_unlock(&dev->t10_alua.lba_map_lock);
		return false;
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return true;
}

static struct target_opcode_descriptor tcm_opcode_read_report_refferals = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = SERVICE_ACTION_IN_16,
	.service_action = SAI_REPORT_REFERRALS,
	.cdb_size = 16,
	.usage_bits = {SERVICE_ACTION_IN_16, SAI_REPORT_REFERRALS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_rep_ref_enabled,
};

static struct target_opcode_descriptor tcm_opcode_sync_cache = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = SYNCHRONIZE_CACHE,
	.cdb_size = 10,
	.usage_bits = {SYNCHRONIZE_CACHE, 0x02, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_sync_cache16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = SYNCHRONIZE_CACHE_16,
	.cdb_size = 16,
	.usage_bits = {SYNCHRONIZE_CACHE_16, 0x02, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};

static bool tcm_is_unmap_enabled(struct se_cmd *cmd)
{
	struct sbc_ops *ops = cmd->protocol_data;
	struct se_device *dev = cmd->se_dev;

	return ops->execute_unmap && dev->dev_attrib.emulate_tpu;
}

static struct target_opcode_descriptor tcm_opcode_unmap = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = UNMAP,
	.cdb_size = 10,
	.usage_bits = {UNMAP, 0x00, 0x00, 0x00,
		       0x00, 0x00, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_unmap_enabled,
};

static struct target_opcode_descriptor tcm_opcode_write_same = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_SAME,
	.cdb_size = 10,
	.usage_bits = {WRITE_SAME, 0xe8, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_ws_enabled,
};

static struct target_opcode_descriptor tcm_opcode_write_same16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = WRITE_SAME_16,
	.cdb_size = 16,
	.usage_bits = {WRITE_SAME_16, 0xe8, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
	.enabled = tcm_is_ws_enabled,
};

static struct target_opcode_descriptor tcm_opcode_verify = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = VERIFY,
	.cdb_size = 10,
	.usage_bits = {VERIFY, 0x00, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_verify16 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = VERIFY_16,
	.cdb_size = 16,
	.usage_bits = {VERIFY_16, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, SCSI_GROUP_NUMBER_MASK, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_start_stop = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = START_STOP,
	.cdb_size = 6,
	.usage_bits = {START_STOP, 0x01, 0x00, 0x00,
		       0x01, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_select = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SELECT,
	.cdb_size = 6,
	.usage_bits = {MODE_SELECT, 0x10, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_select10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SELECT_10,
	.cdb_size = 10,
	.usage_bits = {MODE_SELECT_10, 0x10, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SENSE,
	.cdb_size = 6,
	.usage_bits = {MODE_SENSE, 0x08, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_mode_sense10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = MODE_SENSE_10,
	.cdb_size = 10,
	.usage_bits = {MODE_SENSE_10, 0x18, 0xff, 0xff,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_pri_read_keys = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_KEYS,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_KEYS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_pri_read_resrv = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_RESERVATION,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_RESERVATION, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static bool tcm_is_pr_enabled(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_pr;
}

static struct target_opcode_descriptor tcm_opcode_pri_read_caps = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_REPORT_CAPABILITIES,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_REPORT_CAPABILITIES, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pri_read_full_status = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_IN,
	.service_action = PRI_READ_FULL_STATUS,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_IN, PRI_READ_FULL_STATUS, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_register = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RESERVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RESERVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_release = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_RELEASE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_RELEASE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_clear = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_CLEAR,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_CLEAR, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_preempt = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_preempt_abort = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_PREEMPT_AND_ABORT,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_PREEMPT_AND_ABORT, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_reg_ign_exist = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
	.cdb_size = 10,
	.usage_bits = {
		PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_IGNORE_EXISTING_KEY,
		0xff, 0x00,
		0x00, 0xff, 0xff, 0xff,
		0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static struct target_opcode_descriptor tcm_opcode_pro_register_move = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = PERSISTENT_RESERVE_OUT,
	.service_action = PRO_REGISTER_AND_MOVE,
	.cdb_size = 10,
	.usage_bits = {PERSISTENT_RESERVE_OUT, PRO_REGISTER_AND_MOVE, 0xff, 0x00,
		       0x00, 0xff, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_pr_enabled,
};

static bool tcm_is_scsi2_reservations_enabled(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_pr;
}

static struct target_opcode_descriptor tcm_opcode_release = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE,
	.cdb_size = 6,
	.usage_bits = {RELEASE, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_scsi2_reservations_enabled,
};

static struct target_opcode_descriptor tcm_opcode_release10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RELEASE_10,
	.cdb_size = 10,
	.usage_bits = {RELEASE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_scsi2_reservations_enabled,
};

static struct target_opcode_descriptor tcm_opcode_reserve = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE,
	.cdb_size = 6,
	.usage_bits = {RESERVE, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_scsi2_reservations_enabled,
};

static struct target_opcode_descriptor tcm_opcode_reserve10 = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = RESERVE_10,
	.cdb_size = 10,
	.usage_bits = {RESERVE_10, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0xff,
		       0xff, SCSI_CONTROL_MASK},
	.enabled = tcm_is_scsi2_reservations_enabled,
};

static struct target_opcode_descriptor tcm_opcode_request_sense = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REQUEST_SENSE,
	.cdb_size = 6,
	.usage_bits = {REQUEST_SENSE, 0x00, 0x00, 0x00,
		       0xff, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_inquiry = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = INQUIRY,
	.cdb_size = 6,
	.usage_bits = {INQUIRY, 0x01, 0xff, 0xff,
		       0xff, SCSI_CONTROL_MASK},
};

static bool tcm_is_3pc_enabled(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_3pc;
}

static struct target_opcode_descriptor tcm_opcode_extended_copy_lid1 = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = EXTENDED_COPY,
	.cdb_size = 16,
	.usage_bits = {EXTENDED_COPY, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

static struct target_opcode_descriptor tcm_opcode_rcv_copy_res_op_params = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = RECEIVE_COPY_RESULTS,
	.service_action = RCR_SA_OPERATING_PARAMETERS,
	.cdb_size = 16,
	.usage_bits = {RECEIVE_COPY_RESULTS, RCR_SA_OPERATING_PARAMETERS,
		       0x00, 0x00,
		       0x00, 0x00, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_3pc_enabled,
};

static struct target_opcode_descriptor tcm_opcode_report_luns = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = REPORT_LUNS,
	.cdb_size = 12,
	.usage_bits = {REPORT_LUNS, 0x00, 0xff, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_test_unit_ready = {
	.support = SCSI_SUPPORT_FULL,
	.opcode = TEST_UNIT_READY,
	.cdb_size = 6,
	.usage_bits = {TEST_UNIT_READY, 0x00, 0x00, 0x00,
		       0x00, SCSI_CONTROL_MASK},
};

static struct target_opcode_descriptor tcm_opcode_report_target_pgs = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, 0xE0 | MI_REPORT_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
};

static bool spc_rsoc_enabled(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	return dev->dev_attrib.emulate_rsoc;
}

static struct target_opcode_descriptor tcm_opcode_report_supp_opcodes = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_IN,
	.service_action = MI_REPORT_SUPPORTED_OPERATION_CODES,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_IN, MI_REPORT_SUPPORTED_OPERATION_CODES,
		       0x87, 0xff,
		       0xff, 0xff, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = spc_rsoc_enabled,
};

static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd)
{
	struct t10_alua_tg_pt_gp *l_tg_pt_gp;
	struct se_lun *l_lun = cmd->se_lun;

	rcu_read_lock();
	l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
	if (!l_tg_pt_gp) {
static bool tcm_is_set_tpg_enabled(struct se_cmd *cmd)
{
	struct t10_alua_tg_pt_gp *l_tg_pt_gp;
	struct se_lun *l_lun = cmd->se_lun;

	rcu_read_lock();
	l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
	if (!l_tg_pt_gp) {
		rcu_read_unlock();
		return false;
	}
	if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
		rcu_read_unlock();
		return false;
	}
	rcu_read_unlock();

	return true;
}

static struct target_opcode_descriptor tcm_opcode_set_tpg = {
	.support = SCSI_SUPPORT_FULL,
	.serv_action_valid = 1,
	.opcode = MAINTENANCE_OUT,
	.service_action = MO_SET_TARGET_PGS,
	.cdb_size = 12,
	.usage_bits = {MAINTENANCE_OUT, MO_SET_TARGET_PGS, 0x00, 0x00,
		       0x00, 0x00, 0xff, 0xff,
		       0xff, 0xff, 0x00, SCSI_CONTROL_MASK},
	.enabled = tcm_is_set_tpg_enabled,
};

static struct target_opcode_descriptor *tcm_supported_opcodes[] = {
	&tcm_opcode_read6,
	&tcm_opcode_read10,
	&tcm_opcode_read12,
	&tcm_opcode_read16,
	&tcm_opcode_write6,
	&tcm_opcode_write10,
	&tcm_opcode_write_verify10,
	&tcm_opcode_write12,
	&tcm_opcode_write16,
	&tcm_opcode_write_verify16,
	&tcm_opcode_write_same32,
	&tcm_opcode_compare_write,
	&tcm_opcode_read_capacity,
	&tcm_opcode_read_capacity16,
	&tcm_opcode_read_report_refferals,
	&tcm_opcode_sync_cache,
	&tcm_opcode_sync_cache16,
	&tcm_opcode_unmap,
	&tcm_opcode_write_same,
	&tcm_opcode_write_same16,
	&tcm_opcode_verify,
	&tcm_opcode_verify16,
	&tcm_opcode_start_stop,
	&tcm_opcode_mode_select,
	&tcm_opcode_mode_select10,
	&tcm_opcode_mode_sense,
	&tcm_opcode_mode_sense10,
	&tcm_opcode_pri_read_keys,
	&tcm_opcode_pri_read_resrv,
	&tcm_opcode_pri_read_caps,
	&tcm_opcode_pri_read_full_status,
	&tcm_opcode_pro_register,
	&tcm_opcode_pro_reserve,
	&tcm_opcode_pro_release,
	&tcm_opcode_pro_clear,
	&tcm_opcode_pro_preempt,
	&tcm_opcode_pro_preempt_abort,
	&tcm_opcode_pro_reg_ign_exist,
	&tcm_opcode_pro_register_move,
	&tcm_opcode_release,
	&tcm_opcode_release10,
	&tcm_opcode_reserve,
	&tcm_opcode_reserve10,
	&tcm_opcode_request_sense,
	&tcm_opcode_inquiry,
	&tcm_opcode_extended_copy_lid1,
	&tcm_opcode_rcv_copy_res_op_params,
	&tcm_opcode_report_luns,
	&tcm_opcode_test_unit_ready,
	&tcm_opcode_report_target_pgs,
	&tcm_opcode_report_supp_opcodes,
	&tcm_opcode_set_tpg,
};

static int
spc_rsoc_encode_command_timeouts_descriptor(unsigned char *buf, u8 ctdp,
					    struct target_opcode_descriptor *descr)
{
	if (!ctdp)
		return 0;

	put_unaligned_be16(0xa, buf);
	buf[3] = descr->specific_timeout;
	put_unaligned_be32(descr->nominal_timeout, &buf[4]);
	put_unaligned_be32(descr->recommended_timeout, &buf[8]);

	return 12;
}

static int
spc_rsoc_encode_command_descriptor(unsigned char *buf, u8 ctdp,
				   struct target_opcode_descriptor *descr)
{
	int td_size = 0;

	buf[0] = descr->opcode;

	put_unaligned_be16(descr->service_action, &buf[2]);

	buf[5] = (ctdp << 1) | descr->serv_action_valid;
	put_unaligned_be16(descr->cdb_size, &buf[6]);

	td_size = spc_rsoc_encode_command_timeouts_descriptor(&buf[8], ctdp,
							      descr);

	return 8 + td_size;
}

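/*
 * Encode the one_command parameter data format: byte 1 carries the CTDP bit
 * and the SUPPORT field, bytes 2-3 the CDB SIZE, followed by the CDB USAGE
 * DATA and, when CTDP is set, a command timeouts descriptor. A NULL descr
 * means the requested opcode is not supported and only the two byte header
 * is returned.
 */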
static int
spc_rsoc_encode_one_command_descriptor(unsigned char *buf, u8 ctdp,
				       struct target_opcode_descriptor *descr,
				       struct se_device *dev)
{
	int td_size = 0;

	if (!descr) {
		buf[1] = (ctdp << 7) | SCSI_SUPPORT_NOT_SUPPORTED;
		return 2;
	}

	buf[1] = (ctdp << 7) | SCSI_SUPPORT_FULL;
	put_unaligned_be16(descr->cdb_size, &buf[2]);
	memcpy(&buf[4], descr->usage_bits, descr->cdb_size);
	if (descr->update_usage_bits)
		descr->update_usage_bits(&buf[4], dev);

	td_size = spc_rsoc_encode_command_timeouts_descriptor(
			&buf[4 + descr->cdb_size], ctdp, descr);

	return 4 + descr->cdb_size + td_size;
}

static sense_reason_t
spc_rsoc_get_descr(struct se_cmd *cmd, struct target_opcode_descriptor **opcode)
{
	struct target_opcode_descriptor *descr;
	struct se_session *sess = cmd->se_sess;
	unsigned char *cdb = cmd->t_task_cdb;
	u8 opts = cdb[2] & 0x3;
	u8 requested_opcode;
	u16 requested_sa;
	int i;

	requested_opcode = cdb[3];
	requested_sa = ((u16)cdb[4]) << 8 | cdb[5];
	*opcode = NULL;

	if (opts > 3) {
		pr_debug("TARGET_CORE[%s]: Invalid REPORT SUPPORTED OPERATION CODES"
			 " with unsupported REPORTING OPTIONS %#x for 0x%08llx from %s\n",
			 cmd->se_tfo->fabric_name, opts,
			 cmd->se_lun->unpacked_lun,
			 sess->se_node_acl->initiatorname);
		return TCM_INVALID_CDB_FIELD;
	}

	for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) {
		descr = tcm_supported_opcodes[i];
		if (descr->opcode != requested_opcode)
			continue;

		switch (opts) {
		case 0x1:
			/*
			 * If the REQUESTED OPERATION CODE field specifies an
			 * operation code for which the device server implements
			 * service actions, then the device server shall
			 * terminate the command with CHECK CONDITION status,
			 * with the sense key set to ILLEGAL REQUEST, and the
			 * additional sense code set to INVALID FIELD IN CDB.
			 */
			if (descr->serv_action_valid)
				return TCM_INVALID_CDB_FIELD;

			if (!descr->enabled || descr->enabled(cmd))
				*opcode = descr;
			break;
		case 0x2:
			/*
			 * If the REQUESTED OPERATION CODE field specifies an
			 * operation code for which the device server does not
			 * implement service actions, then the device server
			 * shall terminate the command with CHECK CONDITION
			 * status, with the sense key set to ILLEGAL REQUEST,
			 * and the additional sense code set to INVALID FIELD IN CDB.
			 */
			if (descr->serv_action_valid &&
			    descr->service_action == requested_sa) {
				if (!descr->enabled || descr->enabled(cmd))
					*opcode = descr;
			} else if (!descr->serv_action_valid)
				return TCM_INVALID_CDB_FIELD;
			break;
		case 0x3:
			/*
			 * The command support data for the operation code and
			 * service action as specified in the REQUESTED OPERATION
			 * CODE field and REQUESTED SERVICE ACTION field shall
			 * be returned in the one_command parameter data format.
			 */
			if (descr->service_action == requested_sa)
				if (!descr->enabled || descr->enabled(cmd))
					*opcode = descr;
			break;
		}
	}

	return 0;
}

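/*
 * Emulate MAINTENANCE_IN + MI_REPORT_SUPPORTED_OPERATION_CODES. Reporting
 * option 0 returns the all_commands list built from tcm_supported_opcodes[],
 * while options 1-3 return a single one_command descriptor for the requested
 * opcode and service action.
 */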
2146 */ 2147 if (descr->service_action == requested_sa) 2148 if (!descr->enabled || descr->enabled(cmd)) 2149 *opcode = descr; 2150 break; 2151 } 2152 } 2153 2154 return 0; 2155 } 2156 2157 static sense_reason_t 2158 spc_emulate_report_supp_op_codes(struct se_cmd *cmd) 2159 { 2160 int descr_num = ARRAY_SIZE(tcm_supported_opcodes); 2161 struct target_opcode_descriptor *descr = NULL; 2162 unsigned char *cdb = cmd->t_task_cdb; 2163 u8 rctd = (cdb[2] >> 7) & 0x1; 2164 unsigned char *buf = NULL; 2165 int response_length = 0; 2166 u8 opts = cdb[2] & 0x3; 2167 unsigned char *rbuf; 2168 sense_reason_t ret = 0; 2169 int i; 2170 2171 if (!cmd->se_dev->dev_attrib.emulate_rsoc) 2172 return TCM_UNSUPPORTED_SCSI_OPCODE; 2173 2174 rbuf = transport_kmap_data_sg(cmd); 2175 if (cmd->data_length && !rbuf) { 2176 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2177 goto out; 2178 } 2179 2180 if (opts == 0) 2181 response_length = 4 + (8 + rctd * 12) * descr_num; 2182 else { 2183 ret = spc_rsoc_get_descr(cmd, &descr); 2184 if (ret) 2185 goto out; 2186 2187 if (descr) 2188 response_length = 4 + descr->cdb_size + rctd * 12; 2189 else 2190 response_length = 2; 2191 } 2192 2193 buf = kzalloc(response_length, GFP_KERNEL); 2194 if (!buf) { 2195 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 2196 goto out; 2197 } 2198 response_length = 0; 2199 2200 if (opts == 0) { 2201 response_length += 4; 2202 2203 for (i = 0; i < ARRAY_SIZE(tcm_supported_opcodes); i++) { 2204 descr = tcm_supported_opcodes[i]; 2205 if (descr->enabled && !descr->enabled(cmd)) 2206 continue; 2207 2208 response_length += spc_rsoc_encode_command_descriptor( 2209 &buf[response_length], rctd, descr); 2210 } 2211 put_unaligned_be32(response_length - 3, buf); 2212 } else { 2213 response_length = spc_rsoc_encode_one_command_descriptor( 2214 &buf[response_length], rctd, descr, 2215 cmd->se_dev); 2216 } 2217 2218 memcpy(rbuf, buf, min_t(u32, response_length, cmd->data_length)); 2219 out: 2220 kfree(buf); 2221 transport_kunmap_data_sg(cmd); 2222 2223 if (!ret) 2224 target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, response_length); 2225 return ret; 2226 } 2227 2228 sense_reason_t 2229 spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) 2230 { 2231 struct se_device *dev = cmd->se_dev; 2232 unsigned char *cdb = cmd->t_task_cdb; 2233 2234 if (!dev->dev_attrib.emulate_pr && 2235 ((cdb[0] == PERSISTENT_RESERVE_IN) || 2236 (cdb[0] == PERSISTENT_RESERVE_OUT) || 2237 (cdb[0] == RELEASE || cdb[0] == RELEASE_10) || 2238 (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) { 2239 return TCM_UNSUPPORTED_SCSI_OPCODE; 2240 } 2241 2242 switch (cdb[0]) { 2243 case MODE_SELECT: 2244 *size = cdb[4]; 2245 cmd->execute_cmd = spc_emulate_modeselect; 2246 break; 2247 case MODE_SELECT_10: 2248 *size = get_unaligned_be16(&cdb[7]); 2249 cmd->execute_cmd = spc_emulate_modeselect; 2250 break; 2251 case MODE_SENSE: 2252 *size = cdb[4]; 2253 cmd->execute_cmd = spc_emulate_modesense; 2254 break; 2255 case MODE_SENSE_10: 2256 *size = get_unaligned_be16(&cdb[7]); 2257 cmd->execute_cmd = spc_emulate_modesense; 2258 break; 2259 case LOG_SELECT: 2260 case LOG_SENSE: 2261 *size = get_unaligned_be16(&cdb[7]); 2262 break; 2263 case PERSISTENT_RESERVE_IN: 2264 *size = get_unaligned_be16(&cdb[7]); 2265 cmd->execute_cmd = target_scsi3_emulate_pr_in; 2266 break; 2267 case PERSISTENT_RESERVE_OUT: 2268 *size = get_unaligned_be32(&cdb[5]); 2269 cmd->execute_cmd = target_scsi3_emulate_pr_out; 2270 break; 2271 case RELEASE: 2272 case RELEASE_10: 2273 if (cdb[0] == RELEASE_10) 2274 *size = 
sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = get_unaligned_be16(&cdb[7]);
		break;
	case PERSISTENT_RESERVE_IN:
		*size = get_unaligned_be16(&cdb[7]);
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = get_unaligned_be32(&cdb[5]);
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = get_unaligned_be16(&cdb[7]);
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = get_unaligned_be16(&cdb[3]);

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = get_unaligned_be32(&cdb[6]);
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = get_unaligned_be32(&cdb[10]);
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = get_unaligned_be16(&cdb[3]);
		break;
	case WRITE_BUFFER:
		*size = get_unaligned_be24(&cdb[6]);
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = get_unaligned_be32(&cdb[6]);
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			if ((cdb[1] & 0x1f) ==
			    MI_REPORT_SUPPORTED_OPERATION_CODES)
				cmd->execute_cmd =
					spc_emulate_report_supp_op_codes;
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);