/*******************************************************************************
 * Filename:  target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>

#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq;
/*
 * From target_core_spc.c
 */
extern void spc_parse_naa_6h_vendor_specific(struct se_device *, unsigned char *);
/*
 * From target_core_device.c
 */
extern struct mutex g_device_mutex;
extern struct list_head g_device_list;
/*
 * From target_core_configfs.c
 */
extern struct configfs_subsystem *target_core_subsystem[];

static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}
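
/*
 * For reference: target_xcopy_gen_naa_ieee() above emits the first four
 * bytes of an SPC-4 NAA IEEE Registered Extended (NAA type 6) designator,
 * and spc_parse_naa_6h_vendor_specific() fills in the remaining vendor
 * specific bytes.  A sketch of the resulting 16-byte layout (the 0x001405
 * IEEE company ID follows from the constants written above):
 *
 *   byte 0:     NAA type (0x6, upper nibble) | company ID bits 23:20
 *   bytes 1-2:  company ID bits 19:4                    (0x01, 0x40)
 *   byte 3:     company ID bits 3:0 (0x5, upper nibble) | vendor bits
 *   bytes 3-15: VENDOR SPECIFIC IDENTIFIER + EXTENSION
 */
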
static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					bool src)
{
	struct se_device *se_dev;
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = configfs_depend_item(subsys,
				&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
			" se_dev->se_dev_group: %p\n", subsys, se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_err("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}

static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
				unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
				(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for the matching 16 byte length of a NAA IEEE Registered
	 * Extended designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for the NAA IEEE Registered Extended designator header..
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
				(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
					" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
				" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}
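
/*
 * For reference, a sketch of the 32-byte Identification descriptor target
 * descriptor (type code 0xe4) consumed by target_xcopy_parse_tiddesc_e4()
 * above, following SPC-4:
 *
 *   byte 0:      DESCRIPTOR TYPE CODE (0xe4)
 *   byte 1:      PERIPHERAL DEVICE TYPE
 *   bytes 2-3:   RELATIVE INITIATOR PORT IDENTIFIER
 *   byte 4:      CODE SET (must be 0x1, binary)
 *   byte 5:      ASSOCIATION (must be LUN) | DESIGNATOR TYPE (must be 0x3, NAA)
 *   byte 7:      DESIGNATOR LENGTH (must be 16)
 *   bytes 8-23:  NAA IEEE Registered Extended designator
 *   bytes 24-31: reserved / device type specific parameters (unused here)
 */
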
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY supports a maximum of two src/dest target"
			" descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for a 0xe4 identification descriptor target descriptor,
		 * matched against the local VPD 0x83 NAA designator..
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			src = !src;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
					" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);

	if (rc < 0)
		goto out;

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}

static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
				" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}
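
/*
 * For reference, a sketch of the block -> block segment descriptor
 * (type code 0x02) decoded by target_xcopy_parse_segdesc_02() above,
 * per SPC-4:
 *
 *   byte 0:      DESCRIPTOR TYPE CODE (0x02)
 *   byte 1:      DC bit (0x02) + CAT bit (0x01)
 *   bytes 2-3:   DESCRIPTOR LENGTH (0x18, counted from byte 4)
 *   bytes 4-5:   SOURCE TARGET DESCRIPTOR INDEX (stdi)
 *   bytes 6-7:   DESTINATION TARGET DESCRIPTOR INDEX (dtdi)
 *   bytes 10-11: NUMBER OF BLOCKS (nolb)
 *   bytes 12-19: SOURCE BLOCK DEVICE LOGICAL BLOCK ADDRESS
 *   bytes 20-27: DESTINATION BLOCK DEVICE LOGICAL BLOCK ADDRESS
 *
 * When DC=1 the code above additionally reads a 24-bit dbl value from
 * bytes 29-31.
 */
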
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */

struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};

static struct se_port xcopy_pt_port;
static struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static u32 xcopy_pt_get_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct configfs_subsystem *subsys = target_core_subsystem[0];
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for subsys: %p"
		  " remote_dev: %p remote_dev->dev_group: %p\n",
		  subsys, remote_dev, &remote_dev->dev_group.cg_item);

	configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	kfree(xpt_cmd);
}

static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_task_tag		= xcopy_pt_get_tag,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt ops
 */

int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_port, 0, sizeof(struct se_port));
	INIT_LIST_HEAD(&xcopy_pt_port.sep_alua_list);
	INIT_LIST_HEAD(&xcopy_pt_port.sep_list);
	mutex_init(&xcopy_pt_port.sep_tg_pt_md_mutex);

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_port.sep_tpg = &xcopy_pt_tpg;
	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}
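
/*
 * Design note (a reading of the code above, not a normative contract): the
 * xcopy_pt fabric is a minimal in-kernel initiator.  Its se_port,
 * se_portal_group, se_session and se_node_acl singletons exist only so the
 * internal READ and WRITE passthrough commands carry a valid fabric context;
 * queue_data_in and queue_status can be no-ops because nothing is returned
 * to a real initiator.  Completion is instead signaled via check_stop_free,
 * which fires xpt_passthrough_sem for the waiter in
 * target_xcopy_issue_pt_cmd() below.
 */
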
static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation
		 * when the CDB is received on the local source port: blocks are
		 * READ locally, then WRITTEN out the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote DEST xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PUSH\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation
		 * when the CDB is received on the local destination port: blocks
		 * are READ from the remote source port, then WRITTEN to the
		 * local destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
			pt_cmd->se_lun->lun_sep = &xcopy_pt_port;
			pr_debug("Setup emulated remote SRC xcopy_pt_port: %p to"
				" cmd->se_lun->lun_sep for X-COPY data PULL\n",
				pt_cmd->se_lun->lun_sep);
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	}
}

static int target_xcopy_init_pt_lun(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	struct se_cmd *pt_cmd,
	bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring the local port for
	 * reservations.  The pt_cmd->se_lun pointer will be setup from within
	 * target_xcopy_setup_pt_port()
	 */
	if (!remote_port) {
		pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;
		return 0;
	}

	pt_cmd->se_lun = &se_dev->xcopy_lun;
	pt_cmd->se_dev = se_dev;

	pr_debug("Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev);
	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD | SCF_CMD_XCOPY_PASSTHROUGH;

	pr_debug("Setup emulated se_dev: %p to pt_cmd->se_lun->lun_se_dev\n",
		pt_cmd->se_lun->lun_se_dev);

	return 0;
}

static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin for
	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
	 */
	rc = target_xcopy_init_pt_lun(xpt_cmd, xop, se_dev, cmd, remote_port);
	if (rc < 0) {
		ret = rc;
		goto out;
	}
	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release SGLs + physical memory allocated by
		 * transport_generic_get_mem()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}
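
/*
 * A sketch of the SGL ownership hand-off implied by the alloc_mem flag
 * above (derived from the call sites below, not a normative contract):
 *
 *   READ:  target_xcopy_setup_pt_cmd(..., alloc_mem=true)
 *            -> target_alloc_sgl() allocates t_data_sg
 *            -> pointers saved into xop->xop_data_sg / xop_data_nents
 *   WRITE: target_xcopy_setup_pt_cmd(..., alloc_mem=false)
 *            -> transport_generic_map_mem_to_cmd() maps the same SGLs
 *               zero-copy into the WRITE command
 *
 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC marks which command currently owns the
 * pages, so transport_generic_free_cmd() releases them exactly once.
 */
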
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}
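
/*
 * Flow sketch for the synchronous passthrough above: for the READ
 * (DMA_FROM_DEVICE) case transport_generic_new_cmd() kicks off execution
 * itself, while the WRITE (DMA_TO_DEVICE) case stops in the write-pending
 * state and needs the explicit target_execute_cmd() call, since no real
 * fabric exists to drive a data-out phase.  Either way, the caller then
 * sleeps on xpt_passthrough_sem until xcopy_pt_check_stop_free() completes
 * it.
 */
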
static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}

static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		/*
		 * If the failure happened before the t_data_sg hand-off in
		 * target_xcopy_setup_pt_cmd(), reset the memory + clear the
		 * flag so that the core releases this memory on error during
		 * the X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}
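
/*
 * For reference, the 16-byte CDB layout built by the two helpers above
 * (standard SBC READ_16 / WRITE_16 encoding):
 *
 *   byte 0:      OPERATION CODE (READ_16 = 0x88, WRITE_16 = 0x8a)
 *   bytes 2-9:   LOGICAL BLOCK ADDRESS (big-endian 64-bit)
 *   bytes 10-13: TRANSFER LENGTH in logical blocks (big-endian 32-bit)
 *
 * The remaining bytes are left zero by the memset.
 */
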
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up the XCOPY I/O into hw_max_sectors sized chunks, using the
	 * smaller of the src_dev + dst_dev hw_max_sectors, further capped at
	 * XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
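
/*
 * Worked example of the chunking above, under assumed (hypothetical)
 * attributes: src hw_max_sectors = 2048, dst hw_max_sectors = 1024,
 * XCOPY_MAX_SECTORS = 1024, segment nolb = 2560:
 *
 *   max_sectors = min(2048, 1024) = 1024, so max_nolb = 1024
 *   iteration 1: cur_nolb = min(2560, 1024) = 1024, nolb -> 1536
 *   iteration 2: cur_nolb = min(1536, 1024) = 1024, nolb -> 512
 *   iteration 3: cur_nolb = min(512, 1024)  = 512,  nolb -> 0
 *
 * i.e. three READ/WRITE passthrough pairs, 2560 blocks copied in total.
 */
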
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
			xop->src_dev->dev_attrib.block_size,
			xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}
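
/*
 * For reference, the EXTENDED COPY (LID1) parameter list header decoded at
 * the top of target_do_xcopy() above, per SPC-4:
 *
 *   byte 0:      LIST IDENTIFIER
 *   byte 1:      STR + LIST ID USAGE (bits 4:3) + PRIORITY
 *   bytes 2-3:   TARGET DESCRIPTOR LIST LENGTH (tdll)
 *   bytes 8-11:  SEGMENT DESCRIPTOR LIST LENGTH (sdll)
 *   bytes 12-15: INLINE DATA LENGTH (must be zero here)
 *   byte 16..:   target descriptors, followed by segment descriptors
 */
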
not supported\n"); 1069 return TCM_INVALID_CDB_FIELD; 1070 } 1071 1072 switch (sa) { 1073 case RCR_SA_OPERATING_PARAMETERS: 1074 rc = target_rcr_operating_parameters(se_cmd); 1075 break; 1076 case RCR_SA_COPY_STATUS: 1077 case RCR_SA_RECEIVE_DATA: 1078 case RCR_SA_FAILED_SEGMENT_DETAILS: 1079 default: 1080 pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa); 1081 return TCM_INVALID_CDB_FIELD; 1082 } 1083 1084 return rc; 1085 } 1086