/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
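
/*
 * Example (illustrative values): writing a string such as
 *
 *	"wwnn=0x10000090fdc81000,wwpn=0x10000090fdc81001,roles=3,fcaddr=0x20"
 *
 * to one of the sysfs control files below is parsed by
 * fcloop_parse_options() into a struct fcloop_ctrl_options: the wwnn/wwpn
 * (and lpwwnn/lpwwpn) tokens as 64-bit numbers, roles as a decimal int and
 * fcaddr as hex, with the matching NVMF_OPT_* bits accumulated in ->mask
 * so callers can verify that all required options were supplied.
 */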

static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);

	return;
}
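
/*
 * Initiator-side i/o state machine (see the inistate checks above and
 * below): INI_IO_START moves to INI_IO_ACTIVE when the receive work runs,
 * or to INI_IO_ABORTED if fcloop_fcp_abort() won the race; the completion
 * work sets INI_IO_COMPLETED. The abort receive work uses these states to
 * decide whether the host "done" callback is still owed or whether only
 * the abort reference needs to be dropped.
 */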

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * Call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
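
/*
 * fcloop_fcp_copy_data() walks the target-side and host-side scatterlists
 * in lock step: the first loop only skips "offset" bytes of the io_sg list
 * (e.g. with 4K elements, an offset of 6K starts the copy 2K into the
 * second element), then each pass of the second loop moves
 * min(io_len, data_len, length) bytes and advances whichever list was
 * exhausted.
 */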

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF
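
/*
 * The two templates below register fcloop both as an nvme-fc LLDD on the
 * initiator side and as an nvmet-fc target transport, pointing each
 * transport's callbacks at the loopback routines above. The *_priv_sz
 * fields make the transports allocate the fcloop per-object structures
 * alongside their own.
 */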

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
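
/*
 * The store handlers below implement the sysfs control interface set up
 * in fcloop_init() (/sys/class/fcloop/ctl/add_local_port and friends):
 * each one parses the option string written by user space and registers
 * or unregisters the corresponding port with the nvme-fc host or nvmet-fc
 * target transport.
 */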

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
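
/*
 * A remote port and a target port created with the same wwnn/wwpn resolve
 * to the same fcloop_nport above, which is how the initiator-side and
 * target-side views of one loopback "link" get tied together when the
 * second of the two is created.
 */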

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;	/* was erroneously "static" */
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}
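
/*
 * Minimal usage sketch (illustrative wwnn/wwpn values; an nvmet port and
 * subsystem must be configured separately before i/o will flow, and a
 * roles= value may also be supplied):
 *
 *	echo "wwnn=0x1000000000000001,wwpn=0x1000000000000002" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x2000000000000001,wwpn=0x2000000000000002" > \
 *		/sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x2000000000000001,wwpn=0x2000000000000002,lpwwnn=0x1000000000000001,lpwwpn=0x1000000000000002" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 *
 * The matching del_*_port files take "wwnn=...,wwpn=..." strings.
 */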

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");