/*******************************************************************************
 * Filename:  target_core_xcopy.c
 *
 * This file contains support for SPC-4 Extended-Copy offload with generic
 * TCM backends.
 *
 * Copyright (c) 2011-2013 Datera, Inc. All rights reserved.
 *
 * Author:
 * Nicholas A. Bellinger <nab@daterainc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ******************************************************************************/

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/configfs.h>
#include <linux/ratelimit.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static struct workqueue_struct *xcopy_wq;

static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
{
	int off = 0;

	buf[off++] = (0x6 << 4);
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
	return 0;
}

static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					bool src)
{
	struct se_device *se_dev;
	unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
	int rc;

	if (src)
		dev_wwn = &xop->dst_tid_wwn[0];
	else
		dev_wwn = &xop->src_tid_wwn[0];

	mutex_lock(&g_device_mutex);
	list_for_each_entry(se_dev, &g_device_list, g_dev_node) {

		if (!se_dev->dev_attrib.emulate_3pc)
			continue;

		memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
		target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);

		rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
		if (rc != 0)
			continue;

		if (src) {
			xop->dst_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
				" se_dev\n", xop->dst_dev);
		} else {
			xop->src_dev = se_dev;
			pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
				" se_dev\n", xop->src_dev);
		}

		rc = target_depend_item(&se_dev->dev_group.cg_item);
		if (rc != 0) {
			pr_err("configfs_depend_item attempt failed:"
				" %d for se_dev: %p\n", rc, se_dev);
			mutex_unlock(&g_device_mutex);
			return rc;
		}

		pr_debug("Called configfs_depend_item for se_dev: %p"
			" se_dev->se_dev_group: %p\n", se_dev,
			&se_dev->dev_group);

		mutex_unlock(&g_device_mutex);
		return 0;
	}
	mutex_unlock(&g_device_mutex);

	pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
	return -EINVAL;
}
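/*
 * Parse a single identification descriptor CSCD (type 0xe4) target
 * descriptor.  The byte offsets consumed below are:
 *
 *   bytes 2-3: RELATIVE INITIATOR PORT IDENTIFIER
 *   byte  4:   CODE SET (only 0x1, binary, is accepted)
 *   byte  5:   ASSOCIATION (only LUN) + DESIGNATOR TYPE (only 0x3, NAA)
 *   byte  7:   DESIGNATOR LENGTH (must be 16 for NAA IEEE Registered Extended)
 *   bytes 8+:  the 16-byte NAA designator itself
 *
 * The extracted WWN is saved into xop->src_tid_wwn or xop->dst_tid_wwn,
 * and when it matches the local device's generated NAA WWN, the op origin
 * (X-COPY PUSH vs PULL) and the local se_device pointer are recorded.
 */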
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p, bool src)
{
	unsigned char *desc = p;
	unsigned short ript;
	u8 desig_len;
	/*
	 * Extract RELATIVE INITIATOR PORT IDENTIFIER
	 */
	ript = get_unaligned_be16(&desc[2]);
	pr_debug("XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", ript);
	/*
	 * Check for supported code set, association, and designator type
	 */
	if ((desc[4] & 0x0f) != 0x1) {
		pr_err("XCOPY 0xe4: code set of non binary type not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x30) != 0x00) {
		pr_err("XCOPY 0xe4: association other than LUN not supported\n");
		return -EINVAL;
	}
	if ((desc[5] & 0x0f) != 0x3) {
		pr_err("XCOPY 0xe4: designator type unsupported: 0x%02x\n",
				(desc[5] & 0x0f));
		return -EINVAL;
	}
	/*
	 * Check for matching 16 byte length for NAA IEEE Registered Extended
	 * Assigned designator
	 */
	desig_len = desc[7];
	if (desig_len != 16) {
		pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
		return -EINVAL;
	}
	pr_debug("XCOPY 0xe4: desig_len: %d\n", (int)desig_len);
	/*
	 * Check for NAA IEEE Registered Extended Assigned header..
	 */
	if ((desc[8] & 0xf0) != 0x60) {
		pr_err("XCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n",
				(desc[8] & 0xf0));
		return -EINVAL;
	}

	if (src) {
		memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the source designator matches the local device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->src_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_SOURCE_RECV_OP;
			xop->src_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
					" received xop\n", xop->src_dev);
		}
	} else {
		memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
		/*
		 * Determine if the destination designator matches the local
		 * device
		 */
		if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
				XCOPY_NAA_IEEE_REGEX_LEN)) {
			xop->op_origin = XCOL_DEST_RECV_OP;
			xop->dst_dev = se_cmd->se_dev;
			pr_debug("XCOPY 0xe4: Set xop->dst_dev: %p from destination"
				" received xop\n", xop->dst_dev);
		}
	}

	return 0;
}
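/*
 * Walk the EXTENDED_COPY target descriptor list.  Only identification
 * descriptor CSCDs (type 0xe4) are supported, and at most two of them,
 * assumed to appear in source -> destination order.  Once both WWNs are
 * known, the non-local device is looked up by NAA WWN and pinned via
 * target_depend_item().  Returns the number of descriptors parsed, or
 * -EINVAL with *sense_ret set on failure.
 */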
static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned short tdll, sense_reason_t *sense_ret)
{
	struct se_device *local_dev = se_cmd->se_dev;
	unsigned char *desc = p;
	int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
	unsigned short start = 0;
	bool src = true;

	*sense_ret = TCM_INVALID_PARAMETER_LIST;

	if (offset != 0) {
		pr_err("XCOPY target descriptor list length is not"
			" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
		return -EINVAL;
	}
	if (tdll > 64) {
		pr_err("XCOPY target descriptor supports a maximum of two"
			" src/dest descriptors, tdll: %hu too large..\n", tdll);
		return -EINVAL;
	}
	/*
	 * Generate an IEEE Registered Extended designator based upon the
	 * se_device the XCOPY was received upon..
	 */
	memset(&xop->local_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
	target_xcopy_gen_naa_ieee(local_dev, &xop->local_dev_wwn[0]);

	while (start < tdll) {
		/*
		 * Check for target descriptor identification type 0xe4,
		 * matched using VPD 0x83 WWPN comparison..
		 */
		switch (desc[0]) {
		case 0xe4:
			rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
							&desc[0], src);
			if (rc != 0)
				goto out;
			/*
			 * Assume target descriptors are in source -> destination order..
			 */
			src = !src;
			start += XCOPY_TARGET_DESC_LEN;
			desc += XCOPY_TARGET_DESC_LEN;
			ret++;
			break;
		default:
			pr_err("XCOPY unsupported descriptor type code:"
					" 0x%02x\n", desc[0]);
			goto out;
		}
	}

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
	else
		rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
	/*
	 * If a matching IEEE NAA 0x83 descriptor for the requested device
	 * is not located on this node, return COPY_ABORTED with ASC/ASCQ
	 * 0x0d/0x02 - COPY_TARGET_DEVICE_NOT_REACHABLE to request the
	 * initiator to fall back to a normal copy method.
	 */
	if (rc < 0) {
		*sense_ret = TCM_COPY_TARGET_DEVICE_NOT_REACHABLE;
		goto out;
	}

	pr_debug("XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->src_dev, &xop->src_tid_wwn[0]);
	pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
		 xop->dst_dev, &xop->dst_tid_wwn[0]);

	return ret;

out:
	return -EINVAL;
}

static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op *xop,
					unsigned char *p)
{
	unsigned char *desc = p;
	int dc = (desc[1] & 0x02);
	unsigned short desc_len;

	desc_len = get_unaligned_be16(&desc[2]);
	if (desc_len != 0x18) {
		pr_err("XCOPY segment desc 0x02: Illegal desc_len:"
				" %hu\n", desc_len);
		return -EINVAL;
	}

	xop->stdi = get_unaligned_be16(&desc[4]);
	xop->dtdi = get_unaligned_be16(&desc[6]);
	pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
		desc_len, xop->stdi, xop->dtdi, dc);

	xop->nolb = get_unaligned_be16(&desc[10]);
	xop->src_lba = get_unaligned_be64(&desc[12]);
	xop->dst_lba = get_unaligned_be64(&desc[20]);
	pr_debug("XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n",
		xop->nolb, (unsigned long long)xop->src_lba,
		(unsigned long long)xop->dst_lba);

	if (dc != 0) {
		xop->dbl = (desc[29] & 0xff) << 16;
		xop->dbl |= (desc[30] & 0xff) << 8;
		xop->dbl |= desc[31] & 0xff;

		pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
	}
	return 0;
}

static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
				struct xcopy_op *xop, unsigned char *p,
				unsigned int sdll)
{
	unsigned char *desc = p;
	unsigned int start = 0;
	int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;

	if (offset != 0) {
		pr_err("XCOPY segment descriptor list length is not"
			" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
		return -EINVAL;
	}

	while (start < sdll) {
		/*
		 * Check segment descriptor type code for block -> block
		 */
		switch (desc[0]) {
		case 0x02:
			rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc);
			if (rc < 0)
				goto out;

			ret++;
			start += XCOPY_SEGMENT_DESC_LEN;
			desc += XCOPY_SEGMENT_DESC_LEN;
			break;
		default:
			pr_err("XCOPY unsupported segment descriptor"
				" type: 0x%02x\n", desc[0]);
			goto out;
		}
	}

	return ret;

out:
	return -EINVAL;
}

/*
 * Start xcopy_pt ops
 */
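/*
 * Internal passthrough command wrapper, tying a backend se_cmd to its
 * originating xcopy_op.  xpt_passthrough_sem is completed from the
 * ->check_stop_free() callback once the backend has finished executing
 * the internal READ or WRITE, which lets target_xcopy_issue_pt_cmd()
 * wait for completion synchronously.
 */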
struct xcopy_pt_cmd {
	bool remote_port;
	struct se_cmd se_cmd;
	struct xcopy_op *xcopy_op;
	struct completion xpt_passthrough_sem;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};

struct se_portal_group xcopy_pt_tpg;
static struct se_session xcopy_pt_sess;
static struct se_node_acl xcopy_pt_nacl;

static char *xcopy_pt_get_fabric_name(void)
{
	return "xcopy-pt";
}

static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
	struct se_device *remote_dev;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP)
		remote_dev = xop->dst_dev;
	else
		remote_dev = xop->src_dev;

	pr_debug("Calling configfs_undepend_item for"
		  " remote_dev: %p remote_dev->dev_group: %p\n",
		  remote_dev, &remote_dev->dev_group.cg_item);

	target_undepend_item(&remote_dev->dev_group.cg_item);
}

static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	kfree(xpt_cmd);
}

static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
{
	struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
				struct xcopy_pt_cmd, se_cmd);

	complete(&xpt_cmd->xpt_passthrough_sem);
	return 0;
}

static int xcopy_pt_write_pending(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd)
{
	return 0;
}

static int xcopy_pt_queue_status(struct se_cmd *se_cmd)
{
	return 0;
}

static const struct target_core_fabric_ops xcopy_pt_tfo = {
	.get_fabric_name	= xcopy_pt_get_fabric_name,
	.get_cmd_state		= xcopy_pt_get_cmd_state,
	.release_cmd		= xcopy_pt_release_cmd,
	.check_stop_free	= xcopy_pt_check_stop_free,
	.write_pending		= xcopy_pt_write_pending,
	.write_pending_status	= xcopy_pt_write_pending_status,
	.queue_data_in		= xcopy_pt_queue_data_in,
	.queue_status		= xcopy_pt_queue_status,
};

/*
 * End xcopy_pt_ops
 */

int target_xcopy_setup_pt(void)
{
	xcopy_wq = alloc_workqueue("xcopy_wq", WQ_MEM_RECLAIM, 0);
	if (!xcopy_wq) {
		pr_err("Unable to allocate xcopy_wq\n");
		return -ENOMEM;
	}

	memset(&xcopy_pt_tpg, 0, sizeof(struct se_portal_group));
	INIT_LIST_HEAD(&xcopy_pt_tpg.se_tpg_node);
	INIT_LIST_HEAD(&xcopy_pt_tpg.acl_node_list);
	INIT_LIST_HEAD(&xcopy_pt_tpg.tpg_sess_list);

	xcopy_pt_tpg.se_tpg_tfo = &xcopy_pt_tfo;

	memset(&xcopy_pt_nacl, 0, sizeof(struct se_node_acl));
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list);
	INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list);
	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
	INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
	spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);

	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;

	xcopy_pt_sess.se_tpg = &xcopy_pt_tpg;
	xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;

	return 0;
}

void target_xcopy_release_pt(void)
{
	if (xcopy_wq)
		destroy_workqueue(xcopy_wq);
}
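/*
 * Wire up the passthrough command's LUN + port so that reservations are
 * honored on the correct side of the copy.  For the remote side only the
 * remote_port flag is recorded here; its se_lun/se_dev are assigned from
 * the located se_device's xcopy_lun in target_xcopy_init_pt_lun().  For
 * the local side, the LUN and device are inherited from the originating
 * EXTENDED_COPY command itself.
 */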
static void target_xcopy_setup_pt_port(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	bool remote_port)
{
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	struct se_cmd *pt_cmd = &xpt_cmd->se_cmd;

	if (xop->op_origin == XCOL_SOURCE_RECV_OP) {
		/*
		 * Honor destination port reservations for X-COPY PUSH emulation,
		 * where the CDB is received on the local source port; blocks are
		 * READ locally, then WRITTEN to the remote destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local SRC port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local SRC port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	} else {
		/*
		 * Honor source port reservations for X-COPY PULL emulation,
		 * where the CDB is received on the local destination port;
		 * blocks are READ from the remote source port, then WRITTEN
		 * to the local destination port.
		 */
		if (remote_port) {
			xpt_cmd->remote_port = remote_port;
		} else {
			pt_cmd->se_lun = ec_cmd->se_lun;
			pt_cmd->se_dev = ec_cmd->se_dev;

			pr_debug("Honoring local DST port from ec_cmd->se_dev:"
				" %p\n", pt_cmd->se_dev);
			pr_debug("Honoring local DST port from ec_cmd->se_lun: %p\n",
				pt_cmd->se_lun);
		}
	}
}

static void target_xcopy_init_pt_lun(struct se_device *se_dev,
		struct se_cmd *pt_cmd, bool remote_port)
{
	/*
	 * Don't allocate + init a pt_cmd->se_lun if honoring local port for
	 * reservations.  The pt_cmd->se_lun pointer will be set up from within
	 * target_xcopy_setup_pt_port()
	 */
	if (remote_port) {
		pr_debug("Setup emulated se_dev: %p from se_dev\n",
			pt_cmd->se_dev);
		pt_cmd->se_lun = &se_dev->xcopy_lun;
		pt_cmd->se_dev = se_dev;
	}

	pt_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}

static int target_xcopy_setup_pt_cmd(
	struct xcopy_pt_cmd *xpt_cmd,
	struct xcopy_op *xop,
	struct se_device *se_dev,
	unsigned char *cdb,
	bool remote_port,
	bool alloc_mem)
{
	struct se_cmd *cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;
	int ret = 0, rc;
	/*
	 * Setup LUN+port to honor reservations based upon xop->op_origin for
	 * X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
	 */
	target_xcopy_init_pt_lun(se_dev, cmd, remote_port);

	xpt_cmd->xcopy_op = xop;
	target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);

	cmd->tag = 0;
	sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
	if (sense_rc) {
		ret = -EINVAL;
		goto out;
	}

	if (alloc_mem) {
		rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				      cmd->data_length, false, false);
		if (rc < 0) {
			ret = rc;
			goto out;
		}
		/*
		 * Set this bit so that transport_free_pages() allows the
		 * caller to release SGLs + physical memory allocated by
		 * transport_generic_get_mem()..
		 */
		cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	} else {
		/*
		 * Here the previously allocated SGLs for the internal READ
		 * are mapped zero-copy to the internal WRITE.
		 */
		sense_rc = transport_generic_map_mem_to_cmd(cmd,
					xop->xop_data_sg, xop->xop_data_nents,
					NULL, 0);
		if (sense_rc) {
			ret = -EINVAL;
			goto out;
		}

		pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
			 " %u\n", cmd->t_data_sg, cmd->t_data_nents);
	}

	return 0;

out:
	return ret;
}
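/*
 * Dispatch a fully set up passthrough command to the backend and wait
 * for xcopy_pt_check_stop_free() to signal completion.  WRITEs must be
 * kicked explicitly with target_execute_cmd(), since the stubbed
 * ->write_pending() never initiates a data-out phase of its own.
 * Returns -EINVAL for any non-GOOD SCSI status.
 */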
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
{
	struct se_cmd *se_cmd = &xpt_cmd->se_cmd;
	sense_reason_t sense_rc;

	sense_rc = transport_generic_new_cmd(se_cmd);
	if (sense_rc)
		return -EINVAL;

	if (se_cmd->data_direction == DMA_TO_DEVICE)
		target_execute_cmd(se_cmd);

	wait_for_completion_interruptible(&xpt_cmd->xpt_passthrough_sem);

	pr_debug("target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n",
			se_cmd->scsi_status);

	return (se_cmd->scsi_status) ? -EINVAL : 0;
}

static int target_xcopy_read_source(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *src_dev,
	sector_t src_lba,
	u32 src_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (src_sectors * src_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = READ_16;
	put_unaligned_be64(src_lba, &cdb[2]);
	put_unaligned_be32(src_sectors, &cdb[10]);
	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)src_lba, src_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->src_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
				remote_port, true);
	if (rc < 0) {
		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	xop->xop_data_sg = se_cmd->t_data_sg;
	xop->xop_data_nents = se_cmd->t_data_nents;
	pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
		" memory\n", xop->xop_data_sg, xop->xop_data_nents);

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}
	/*
	 * Clear off the allocated t_data_sg that has been saved for
	 * zero-copy WRITE submission reuse in struct xcopy_op..
	 */
	se_cmd->t_data_sg = NULL;
	se_cmd->t_data_nents = 0;

	return 0;
}
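/*
 * Build and issue the internal WRITE_16 to the destination device,
 * reusing the SGLs saved in xop->xop_data_sg by the preceding READ
 * (alloc_mem = false).  On setup failure, SGL ownership is handed back
 * to the source command so that the core can release the pages.
 */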
static int target_xcopy_write_destination(
	struct se_cmd *ec_cmd,
	struct xcopy_op *xop,
	struct se_device *dst_dev,
	sector_t dst_lba,
	u32 dst_sectors)
{
	struct xcopy_pt_cmd *xpt_cmd;
	struct se_cmd *se_cmd;
	u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
	int rc;
	unsigned char cdb[16];
	bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);

	xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
	if (!xpt_cmd) {
		pr_err("Unable to allocate xcopy_pt_cmd\n");
		return -ENOMEM;
	}
	init_completion(&xpt_cmd->xpt_passthrough_sem);
	se_cmd = &xpt_cmd->se_cmd;

	memset(&cdb[0], 0, 16);
	cdb[0] = WRITE_16;
	put_unaligned_be64(dst_lba, &cdb[2]);
	put_unaligned_be32(dst_sectors, &cdb[10]);
	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
		(unsigned long long)dst_lba, dst_sectors, length);

	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
	xop->dst_pt_cmd = xpt_cmd;

	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
				remote_port, false);
	if (rc < 0) {
		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
		/*
		 * If the failure happened before the t_mem_list hand-off in
		 * target_xcopy_setup_pt_cmd(), reset the memory + clear the
		 * flag so that the core releases this memory on error during
		 * X-COPY WRITE I/O.
		 */
		src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		src_cmd->t_data_sg = xop->xop_data_sg;
		src_cmd->t_data_nents = xop->xop_data_nents;

		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
	if (rc < 0) {
		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
		transport_generic_free_cmd(se_cmd, 0);
		return rc;
	}

	return 0;
}
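/*
 * Workqueue handler that performs the actual copy: loop READ -> WRITE
 * in chunks of at most max_sectors blocks until all nolb blocks have
 * been copied, then complete the originating EXTENDED_COPY command with
 * GOOD status, or CHECK_CONDITION on failure.
 */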
static void target_xcopy_do_work(struct work_struct *work)
{
	struct xcopy_op *xop = container_of(work, struct xcopy_op, xop_work);
	struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev;
	struct se_cmd *ec_cmd = xop->xop_se_cmd;
	sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba;
	unsigned int max_sectors;
	int rc;
	unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0;

	end_lba = src_lba + nolb;
	/*
	 * Break up the XCOPY I/O into chunks based on the smaller
	 * hw_max_sectors of src_dev and dst_dev, capped at XCOPY_MAX_SECTORS.
	 */
	max_sectors = min(src_dev->dev_attrib.hw_max_sectors,
			  dst_dev->dev_attrib.hw_max_sectors);
	max_sectors = min_t(u32, max_sectors, XCOPY_MAX_SECTORS);

	max_nolb = min_t(u16, max_sectors, ((u16)(~0U)));

	pr_debug("target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n",
			nolb, max_nolb, (unsigned long long)end_lba);
	pr_debug("target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n",
			(unsigned long long)src_lba, (unsigned long long)dst_lba);

	while (src_lba < end_lba) {
		cur_nolb = min(nolb, max_nolb);

		pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
			" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);

		rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, cur_nolb);
		if (rc < 0)
			goto out;

		src_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented READ src_lba to %llu\n",
				(unsigned long long)src_lba);

		pr_debug("target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu,"
			" cur_nolb: %hu\n", dst_dev, (unsigned long long)dst_lba, cur_nolb);

		rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
						dst_lba, cur_nolb);
		if (rc < 0) {
			transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
			goto out;
		}

		dst_lba += cur_nolb;
		pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
				(unsigned long long)dst_lba);

		copied_nolb += cur_nolb;
		nolb -= cur_nolb;

		transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
		xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;

		transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
	}

	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);

	pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
		(unsigned long long)src_lba, (unsigned long long)dst_lba);
	pr_debug("target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n",
		copied_nolb, copied_nolb * dst_dev->dev_attrib.block_size);

	pr_debug("target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n");
	target_complete_cmd(ec_cmd, SAM_STAT_GOOD);
	return;

out:
	xcopy_pt_undepend_remotedev(xop);
	kfree(xop);
	/*
	 * Don't override an error scsi status if it has already been set
	 */
	if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
		pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
			" CHECK_CONDITION -> sending response\n", rc);
		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	}
	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
}
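/*
 * Entry point for an EXTENDED_COPY (LID1) CDB.  The kmapped parameter
 * list header is consumed as follows:
 *
 *   byte  0:     LIST IDENTIFIER
 *   byte  1:     LIST ID USAGE (bits 4:3)
 *   bytes 2-3:   TARGET DESCRIPTOR LIST LENGTH (tdll)
 *   bytes 8-11:  SEGMENT DESCRIPTOR LIST LENGTH (sdll)
 *   bytes 12-15: INLINE DATA LENGTH (must be zero)
 *   bytes 16+:   target descriptors, followed by segment descriptors
 *
 * Once both descriptor lists parse cleanly, the copy itself is deferred
 * to xcopy_wq and the command returns without waiting for completion.
 */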
sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
{
	struct se_device *dev = se_cmd->se_dev;
	struct xcopy_op *xop = NULL;
	unsigned char *p = NULL, *seg_desc;
	unsigned int list_id, list_id_usage, sdll, inline_dl, sa;
	sense_reason_t ret = TCM_INVALID_PARAMETER_LIST;
	int rc;
	unsigned short tdll;

	if (!dev->dev_attrib.emulate_3pc) {
		pr_err("EXTENDED_COPY operation explicitly disabled\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	sa = se_cmd->t_task_cdb[1] & 0x1f;
	if (sa != 0x00) {
		pr_err("EXTENDED_COPY(LID4) not supported\n");
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
	if (!xop) {
		pr_err("Unable to allocate xcopy_op\n");
		return TCM_OUT_OF_RESOURCES;
	}
	xop->xop_se_cmd = se_cmd;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n");
		kfree(xop);
		return TCM_OUT_OF_RESOURCES;
	}

	list_id = p[0];
	list_id_usage = (p[1] & 0x18) >> 3;

	/*
	 * Determine TARGET DESCRIPTOR LIST LENGTH + SEGMENT DESCRIPTOR LIST LENGTH
	 */
	tdll = get_unaligned_be16(&p[2]);
	sdll = get_unaligned_be32(&p[8]);

	inline_dl = get_unaligned_be32(&p[12]);
	if (inline_dl != 0) {
		pr_err("XCOPY with non zero inline data length\n");
		goto out;
	}

	pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
		" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
		tdll, sdll, inline_dl);

	rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
	if (rc <= 0)
		goto out;

	if (xop->src_dev->dev_attrib.block_size !=
	    xop->dst_dev->dev_attrib.block_size) {
		pr_err("XCOPY: Non matching src_dev block_size: %u + dst_dev"
		       " block_size: %u currently unsupported\n",
			xop->src_dev->dev_attrib.block_size,
			xop->dst_dev->dev_attrib.block_size);
		xcopy_pt_undepend_remotedev(xop);
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto out;
	}

	pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
				rc * XCOPY_TARGET_DESC_LEN);
	seg_desc = &p[16];
	seg_desc += (rc * XCOPY_TARGET_DESC_LEN);

	rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
	if (rc <= 0) {
		xcopy_pt_undepend_remotedev(xop);
		goto out;
	}
	transport_kunmap_data_sg(se_cmd);

	pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
				rc * XCOPY_SEGMENT_DESC_LEN);
	INIT_WORK(&xop->xop_work, target_xcopy_do_work);
	queue_work(xcopy_wq, &xop->xop_work);
	return TCM_NO_SENSE;

out:
	if (p)
		transport_kunmap_data_sg(se_cmd);
	kfree(xop);
	return ret;
}

static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd)
{
	unsigned char *p;

	p = transport_kmap_data_sg(se_cmd);
	if (!p) {
		pr_err("transport_kmap_data_sg failed in"
		       " target_rcr_operating_parameters\n");
		return TCM_OUT_OF_RESOURCES;
	}

	if (se_cmd->data_length < 54) {
		pr_err("Receive Copy Results Op Parameters length"
		       " too small: %u\n", se_cmd->data_length);
		transport_kunmap_data_sg(se_cmd);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * Set SNLID=1 (Supports no List ID)
	 */
	p[4] = 0x1;
	/*
	 * MAXIMUM TARGET DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_TARGET_DESC_COUNT, &p[8]);
	/*
	 * MAXIMUM SEGMENT DESCRIPTOR COUNT
	 */
	put_unaligned_be16(RCR_OP_MAX_SG_DESC_COUNT, &p[10]);
	/*
	 * MAXIMUM DESCRIPTOR LIST LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_DESC_LIST_LEN, &p[12]);
	/*
	 * MAXIMUM SEGMENT LENGTH
	 */
	put_unaligned_be32(RCR_OP_MAX_SEGMENT_LEN, &p[16]);
	/*
	 * MAXIMUM INLINE DATA LENGTH for SA 0x04 (NOT SUPPORTED)
	 */
	put_unaligned_be32(0x0, &p[20]);
	/*
	 * HELD DATA LIMIT
	 */
	put_unaligned_be32(0x0, &p[24]);
	/*
	 * MAXIMUM STREAM DEVICE TRANSFER SIZE
	 */
	put_unaligned_be32(0x0, &p[28]);
	/*
	 * TOTAL CONCURRENT COPIES
	 */
	put_unaligned_be16(RCR_OP_TOTAL_CONCURR_COPIES, &p[34]);
	/*
	 * MAXIMUM CONCURRENT COPIES
	 */
	p[36] = RCR_OP_MAX_CONCURR_COPIES;
	/*
	 * DATA SEGMENT GRANULARITY (log 2)
	 */
	p[37] = RCR_OP_DATA_SEG_GRAN_LOG2;
	/*
	 * INLINE DATA GRANULARITY (log 2)
	 */
	p[38] = RCR_OP_INLINE_DATA_GRAN_LOG2;
	/*
	 * HELD DATA GRANULARITY (log 2)
	 */
	p[39] = RCR_OP_HELD_DATA_GRAN_LOG2;
	/*
	 * IMPLEMENTED DESCRIPTOR LIST LENGTH
	 */
	p[43] = 0x2;
	/*
	 * List of implemented descriptor type codes (ordered)
	 */
	p[44] = 0x02; /* Copy Block to Block device */
	p[45] = 0xe4; /* Identification descriptor target descriptor */

	/*
	 * AVAILABLE DATA (n-3)
	 */
	put_unaligned_be32(42, &p[0]);

	transport_kunmap_data_sg(se_cmd);
	target_complete_cmd(se_cmd, GOOD);

	return TCM_NO_SENSE;
}
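/*
 * Entry point for RECEIVE_COPY_RESULTS.  Only the OPERATING PARAMETERS
 * service action is implemented; COPY STATUS, RECEIVE DATA and FAILED
 * SEGMENT DETAILS are rejected, as is any non-zero list identifier,
 * consistent with the SNLID=1 reported above.
 */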
sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd)
{
	unsigned char *cdb = &se_cmd->t_task_cdb[0];
	int sa = (cdb[1] & 0x1f), list_id = cdb[2];
	sense_reason_t rc = TCM_NO_SENSE;

	pr_debug("Entering target_do_receive_copy_results: SA: 0x%02x, List ID:"
		" 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length);

	if (list_id != 0) {
		pr_err("Receive Copy Results with non zero list identifier"
		       " not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	switch (sa) {
	case RCR_SA_OPERATING_PARAMETERS:
		rc = target_rcr_operating_parameters(se_cmd);
		break;
	case RCR_SA_COPY_STATUS:
	case RCR_SA_RECEIVE_DATA:
	case RCR_SA_FAILED_SEGMENT_DETAILS:
	default:
		pr_err("Unsupported SA for receive copy results: 0x%02x\n", sa);
		return TCM_INVALID_CDB_FIELD;
	}

	return rc;
}