1 /* 2 * SBP2 target driver (SCSI over IEEE1394 in target mode) 3 * 4 * Copyright (C) 2011 Chris Boot <bootc@bootc.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software Foundation, 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 #define KMSG_COMPONENT "sbp_target" 22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 23 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/init.h> 27 #include <linux/types.h> 28 #include <linux/string.h> 29 #include <linux/configfs.h> 30 #include <linux/ctype.h> 31 #include <linux/firewire.h> 32 #include <linux/firewire-constants.h> 33 #include <scsi/scsi.h> 34 #include <scsi/scsi_tcq.h> 35 #include <target/target_core_base.h> 36 #include <target/target_core_backend.h> 37 #include <target/target_core_fabric.h> 38 #include <target/target_core_fabric_configfs.h> 39 #include <target/target_core_configfs.h> 40 #include <target/configfs_macros.h> 41 #include <asm/unaligned.h> 42 43 #include "sbp_target.h" 44 45 /* Local pointer to allocated TCM configfs fabric module */ 46 static struct target_fabric_configfs *sbp_fabric_configfs; 47 48 /* FireWire address region for management and command block address handlers */ 49 static const struct fw_address_region sbp_register_region = { 50 .start = CSR_REGISTER_BASE + 0x10000, 51 .end = 0x1000000000000ULL, 52 }; 53 54 static const u32 sbp_unit_directory_template[] = { 55 0x1200609e, /* unit_specifier_id: NCITS/T10 */ 56 0x13010483, /* unit_sw_version: 1155D Rev 4 */ 57 0x3800609e, /* command_set_specifier_id: NCITS/T10 */ 58 0x390104d8, /* command_set: SPC-2 */ 59 0x3b000000, /* command_set_revision: 0 */ 60 0x3c000001, /* firmware_revision: 1 */ 61 }; 62 63 #define SESSION_MAINTENANCE_INTERVAL HZ 64 65 static atomic_t login_id = ATOMIC_INIT(0); 66 67 static void session_maintenance_work(struct work_struct *); 68 static int sbp_run_transaction(struct fw_card *, int, int, int, int, 69 unsigned long long, void *, size_t); 70 71 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) 72 { 73 int ret; 74 __be32 high, low; 75 76 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, 77 req->node_addr, req->generation, req->speed, 78 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4, 79 &high, sizeof(high)); 80 if (ret != RCODE_COMPLETE) 81 return ret; 82 83 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, 84 req->node_addr, req->generation, req->speed, 85 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4, 86 &low, sizeof(low)); 87 if (ret != RCODE_COMPLETE) 88 return ret; 89 90 *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low); 91 92 return RCODE_COMPLETE; 93 } 94 95 static struct sbp_session *sbp_session_find_by_guid( 96 struct sbp_tpg *tpg, u64 guid) 97 { 98 struct se_session *se_sess; 99 struct sbp_session *sess, *found = NULL; 100 101 spin_lock_bh(&tpg->se_tpg.session_lock); 102 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 103 sess = se_sess->fabric_sess_ptr; 104 if (sess->guid == guid) 105 found = sess; 106 } 107 spin_unlock_bh(&tpg->se_tpg.session_lock); 108 109 return found; 110 } 111 112 static struct sbp_login_descriptor *sbp_login_find_by_lun( 113 struct sbp_session *session, struct se_lun *lun) 114 { 115 struct sbp_login_descriptor *login, *found = NULL; 116 117 spin_lock_bh(&session->lock); 118 list_for_each_entry(login, &session->login_list, link) { 119 if (login->lun == lun) 120 found = login; 121 } 122 spin_unlock_bh(&session->lock); 123 124 return found; 125 } 126 127 static int sbp_login_count_all_by_lun( 128 struct sbp_tpg *tpg, 129 struct se_lun *lun, 130 int exclusive) 131 { 132 struct se_session *se_sess; 133 struct sbp_session *sess; 134 struct sbp_login_descriptor *login; 135 int count = 0; 136 137 spin_lock_bh(&tpg->se_tpg.session_lock); 138 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 139 sess = se_sess->fabric_sess_ptr; 140 141 spin_lock_bh(&sess->lock); 142 list_for_each_entry(login, &sess->login_list, link) { 143 if (login->lun != lun) 144 continue; 145 146 if (!exclusive || login->exclusive) 147 count++; 148 } 149 spin_unlock_bh(&sess->lock); 150 } 151 spin_unlock_bh(&tpg->se_tpg.session_lock); 152 153 return count; 154 } 155 156 static struct sbp_login_descriptor *sbp_login_find_by_id( 157 struct sbp_tpg *tpg, int login_id) 158 { 159 struct se_session *se_sess; 160 struct sbp_session *sess; 161 struct sbp_login_descriptor *login, *found = NULL; 162 163 spin_lock_bh(&tpg->se_tpg.session_lock); 164 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 165 sess = se_sess->fabric_sess_ptr; 166 167 spin_lock_bh(&sess->lock); 168 list_for_each_entry(login, &sess->login_list, link) { 169 if (login->login_id == login_id) 170 found = login; 171 } 172 spin_unlock_bh(&sess->lock); 173 } 174 spin_unlock_bh(&tpg->se_tpg.session_lock); 175 176 return found; 177 } 178 179 static struct se_lun *sbp_get_lun_from_tpg(struct sbp_tpg *tpg, int lun) 180 { 181 struct se_portal_group *se_tpg = &tpg->se_tpg; 182 struct se_lun *se_lun; 183 184 if (lun >= TRANSPORT_MAX_LUNS_PER_TPG) 185 return ERR_PTR(-EINVAL); 186 187 spin_lock(&se_tpg->tpg_lun_lock); 188 se_lun = se_tpg->tpg_lun_list[lun]; 189 190 if (se_lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) 191 se_lun = ERR_PTR(-ENODEV); 192 193 spin_unlock(&se_tpg->tpg_lun_lock); 194 195 return se_lun; 196 } 197 198 static struct sbp_session *sbp_session_create( 199 struct sbp_tpg *tpg, 200 u64 guid) 201 { 202 struct sbp_session *sess; 203 int ret; 204 char guid_str[17]; 205 struct se_node_acl *se_nacl; 206 207 sess = kmalloc(sizeof(*sess), GFP_KERNEL); 208 if (!sess) { 209 pr_err("failed to allocate session descriptor\n"); 210 return ERR_PTR(-ENOMEM); 211 } 212 213 sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); 214 if (IS_ERR(sess->se_sess)) { 215 pr_err("failed to init se_session\n"); 216 217 ret = PTR_ERR(sess->se_sess); 218 kfree(sess); 219 return ERR_PTR(ret); 220 } 221 222 snprintf(guid_str, sizeof(guid_str), "%016llx", guid); 223 224 se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str); 225 if (!se_nacl) { 226 pr_warn("Node ACL not found for %s\n", guid_str); 227 228 transport_free_session(sess->se_sess); 229 kfree(sess); 230 231 return ERR_PTR(-EPERM); 232 } 233 234 sess->se_sess->se_node_acl = se_nacl; 235 236 spin_lock_init(&sess->lock); 237 INIT_LIST_HEAD(&sess->login_list); 238 INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); 239 240 sess->guid = guid; 241 242 transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess); 243 244 return sess; 245 } 246 247 static void sbp_session_release(struct sbp_session *sess, bool cancel_work) 248 { 249 spin_lock_bh(&sess->lock); 250 if (!list_empty(&sess->login_list)) { 251 spin_unlock_bh(&sess->lock); 252 return; 253 } 254 spin_unlock_bh(&sess->lock); 255 256 if (cancel_work) 257 cancel_delayed_work_sync(&sess->maint_work); 258 259 transport_deregister_session_configfs(sess->se_sess); 260 transport_deregister_session(sess->se_sess); 261 262 if (sess->card) 263 fw_card_put(sess->card); 264 265 kfree(sess); 266 } 267 268 static void sbp_target_agent_unregister(struct sbp_target_agent *); 269 270 static void sbp_login_release(struct sbp_login_descriptor *login, 271 bool cancel_work) 272 { 273 struct sbp_session *sess = login->sess; 274 275 /* FIXME: abort/wait on tasks */ 276 277 sbp_target_agent_unregister(login->tgt_agt); 278 279 if (sess) { 280 spin_lock_bh(&sess->lock); 281 list_del(&login->link); 282 spin_unlock_bh(&sess->lock); 283 284 sbp_session_release(sess, cancel_work); 285 } 286 287 kfree(login); 288 } 289 290 static struct sbp_target_agent *sbp_target_agent_register( 291 struct sbp_login_descriptor *); 292 293 static void sbp_management_request_login( 294 struct sbp_management_agent *agent, struct sbp_management_request *req, 295 int *status_data_size) 296 { 297 struct sbp_tport *tport = agent->tport; 298 struct sbp_tpg *tpg = tport->tpg; 299 struct se_lun *se_lun; 300 int ret; 301 u64 guid; 302 struct sbp_session *sess; 303 struct sbp_login_descriptor *login; 304 struct sbp_login_response_block *response; 305 int login_response_len; 306 307 se_lun = sbp_get_lun_from_tpg(tpg, 308 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); 309 if (IS_ERR(se_lun)) { 310 pr_notice("login to unknown LUN: %d\n", 311 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); 312 313 req->status.status = cpu_to_be32( 314 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 315 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP)); 316 return; 317 } 318 319 ret = read_peer_guid(&guid, req); 320 if (ret != RCODE_COMPLETE) { 321 pr_warn("failed to read peer GUID: %d\n", ret); 322 323 req->status.status = cpu_to_be32( 324 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 325 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 326 return; 327 } 328 329 pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", 330 se_lun->unpacked_lun, guid); 331 332 sess = sbp_session_find_by_guid(tpg, guid); 333 if (sess) { 334 login = sbp_login_find_by_lun(sess, se_lun); 335 if (login) { 336 pr_notice("initiator already logged-in\n"); 337 338 /* 339 * SBP-2 R4 says we should return access denied, but 340 * that can confuse initiators. Instead we need to 341 * treat this like a reconnect, but send the login 342 * response block like a fresh login. 343 * 344 * This is required particularly in the case of Apple 345 * devices booting off the FireWire target, where 346 * the firmware has an active login to the target. When 347 * the OS takes control of the session it issues its own 348 * LOGIN rather than a RECONNECT. To avoid the machine 349 * waiting until the reconnect_hold expires, we can skip 350 * the ACCESS_DENIED errors to speed things up. 351 */ 352 353 goto already_logged_in; 354 } 355 } 356 357 /* 358 * check exclusive bit in login request 359 * reject with access_denied if any logins present 360 */ 361 if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && 362 sbp_login_count_all_by_lun(tpg, se_lun, 0)) { 363 pr_warn("refusing exclusive login with other active logins\n"); 364 365 req->status.status = cpu_to_be32( 366 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 367 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 368 return; 369 } 370 371 /* 372 * check exclusive bit in any existing login descriptor 373 * reject with access_denied if any exclusive logins present 374 */ 375 if (sbp_login_count_all_by_lun(tpg, se_lun, 1)) { 376 pr_warn("refusing login while another exclusive login present\n"); 377 378 req->status.status = cpu_to_be32( 379 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 380 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 381 return; 382 } 383 384 /* 385 * check we haven't exceeded the number of allowed logins 386 * reject with resources_unavailable if we have 387 */ 388 if (sbp_login_count_all_by_lun(tpg, se_lun, 0) >= 389 tport->max_logins_per_lun) { 390 pr_warn("max number of logins reached\n"); 391 392 req->status.status = cpu_to_be32( 393 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 394 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 395 return; 396 } 397 398 if (!sess) { 399 sess = sbp_session_create(tpg, guid); 400 if (IS_ERR(sess)) { 401 switch (PTR_ERR(sess)) { 402 case -EPERM: 403 ret = SBP_STATUS_ACCESS_DENIED; 404 break; 405 default: 406 ret = SBP_STATUS_RESOURCES_UNAVAIL; 407 break; 408 } 409 410 req->status.status = cpu_to_be32( 411 STATUS_BLOCK_RESP( 412 STATUS_RESP_REQUEST_COMPLETE) | 413 STATUS_BLOCK_SBP_STATUS(ret)); 414 return; 415 } 416 417 sess->node_id = req->node_addr; 418 sess->card = fw_card_get(req->card); 419 sess->generation = req->generation; 420 sess->speed = req->speed; 421 422 schedule_delayed_work(&sess->maint_work, 423 SESSION_MAINTENANCE_INTERVAL); 424 } 425 426 /* only take the latest reconnect_hold into account */ 427 sess->reconnect_hold = min( 428 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)), 429 tport->max_reconnect_timeout) - 1; 430 431 login = kmalloc(sizeof(*login), GFP_KERNEL); 432 if (!login) { 433 pr_err("failed to allocate login descriptor\n"); 434 435 sbp_session_release(sess, true); 436 437 req->status.status = cpu_to_be32( 438 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 439 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 440 return; 441 } 442 443 login->sess = sess; 444 login->lun = se_lun; 445 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); 446 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); 447 login->login_id = atomic_inc_return(&login_id); 448 449 login->tgt_agt = sbp_target_agent_register(login); 450 if (IS_ERR(login->tgt_agt)) { 451 ret = PTR_ERR(login->tgt_agt); 452 pr_err("failed to map command block handler: %d\n", ret); 453 454 sbp_session_release(sess, true); 455 kfree(login); 456 457 req->status.status = cpu_to_be32( 458 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 459 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 460 return; 461 } 462 463 spin_lock_bh(&sess->lock); 464 list_add_tail(&login->link, &sess->login_list); 465 spin_unlock_bh(&sess->lock); 466 467 already_logged_in: 468 response = kzalloc(sizeof(*response), GFP_KERNEL); 469 if (!response) { 470 pr_err("failed to allocate login response block\n"); 471 472 sbp_login_release(login, true); 473 474 req->status.status = cpu_to_be32( 475 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 476 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 477 return; 478 } 479 480 login_response_len = clamp_val( 481 LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)), 482 12, sizeof(*response)); 483 response->misc = cpu_to_be32( 484 ((login_response_len & 0xffff) << 16) | 485 (login->login_id & 0xffff)); 486 response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff); 487 addr_to_sbp2_pointer(login->tgt_agt->handler.offset, 488 &response->command_block_agent); 489 490 ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST, 491 sess->node_id, sess->generation, sess->speed, 492 sbp2_pointer_to_addr(&req->orb.ptr2), response, 493 login_response_len); 494 if (ret != RCODE_COMPLETE) { 495 pr_debug("failed to write login response block: %x\n", ret); 496 497 kfree(response); 498 sbp_login_release(login, true); 499 500 req->status.status = cpu_to_be32( 501 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 502 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 503 return; 504 } 505 506 kfree(response); 507 508 req->status.status = cpu_to_be32( 509 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 510 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 511 } 512 513 static void sbp_management_request_query_logins( 514 struct sbp_management_agent *agent, struct sbp_management_request *req, 515 int *status_data_size) 516 { 517 pr_notice("QUERY LOGINS not implemented\n"); 518 /* FIXME: implement */ 519 520 req->status.status = cpu_to_be32( 521 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 522 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 523 } 524 525 static void sbp_management_request_reconnect( 526 struct sbp_management_agent *agent, struct sbp_management_request *req, 527 int *status_data_size) 528 { 529 struct sbp_tport *tport = agent->tport; 530 struct sbp_tpg *tpg = tport->tpg; 531 int ret; 532 u64 guid; 533 struct sbp_login_descriptor *login; 534 535 ret = read_peer_guid(&guid, req); 536 if (ret != RCODE_COMPLETE) { 537 pr_warn("failed to read peer GUID: %d\n", ret); 538 539 req->status.status = cpu_to_be32( 540 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 541 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 542 return; 543 } 544 545 pr_notice("mgt_agent RECONNECT from %016llx\n", guid); 546 547 login = sbp_login_find_by_id(tpg, 548 RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc))); 549 550 if (!login) { 551 pr_err("mgt_agent RECONNECT unknown login ID\n"); 552 553 req->status.status = cpu_to_be32( 554 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 555 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 556 return; 557 } 558 559 if (login->sess->guid != guid) { 560 pr_err("mgt_agent RECONNECT login GUID doesn't match\n"); 561 562 req->status.status = cpu_to_be32( 563 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 564 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 565 return; 566 } 567 568 spin_lock_bh(&login->sess->lock); 569 if (login->sess->card) 570 fw_card_put(login->sess->card); 571 572 /* update the node details */ 573 login->sess->generation = req->generation; 574 login->sess->node_id = req->node_addr; 575 login->sess->card = fw_card_get(req->card); 576 login->sess->speed = req->speed; 577 spin_unlock_bh(&login->sess->lock); 578 579 req->status.status = cpu_to_be32( 580 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 581 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 582 } 583 584 static void sbp_management_request_logout( 585 struct sbp_management_agent *agent, struct sbp_management_request *req, 586 int *status_data_size) 587 { 588 struct sbp_tport *tport = agent->tport; 589 struct sbp_tpg *tpg = tport->tpg; 590 int id; 591 struct sbp_login_descriptor *login; 592 593 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 594 595 login = sbp_login_find_by_id(tpg, id); 596 if (!login) { 597 pr_warn("cannot find login: %d\n", id); 598 599 req->status.status = cpu_to_be32( 600 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 601 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN)); 602 return; 603 } 604 605 pr_info("mgt_agent LOGOUT from LUN %d session %d\n", 606 login->lun->unpacked_lun, login->login_id); 607 608 if (req->node_addr != login->sess->node_id) { 609 pr_warn("logout from different node ID\n"); 610 611 req->status.status = cpu_to_be32( 612 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 613 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 614 return; 615 } 616 617 sbp_login_release(login, true); 618 619 req->status.status = cpu_to_be32( 620 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 621 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 622 } 623 624 static void session_check_for_reset(struct sbp_session *sess) 625 { 626 bool card_valid = false; 627 628 spin_lock_bh(&sess->lock); 629 630 if (sess->card) { 631 spin_lock_irq(&sess->card->lock); 632 card_valid = (sess->card->local_node != NULL); 633 spin_unlock_irq(&sess->card->lock); 634 635 if (!card_valid) { 636 fw_card_put(sess->card); 637 sess->card = NULL; 638 } 639 } 640 641 if (!card_valid || (sess->generation != sess->card->generation)) { 642 pr_info("Waiting for reconnect from node: %016llx\n", 643 sess->guid); 644 645 sess->node_id = -1; 646 sess->reconnect_expires = get_jiffies_64() + 647 ((sess->reconnect_hold + 1) * HZ); 648 } 649 650 spin_unlock_bh(&sess->lock); 651 } 652 653 static void session_reconnect_expired(struct sbp_session *sess) 654 { 655 struct sbp_login_descriptor *login, *temp; 656 LIST_HEAD(login_list); 657 658 pr_info("Reconnect timer expired for node: %016llx\n", sess->guid); 659 660 spin_lock_bh(&sess->lock); 661 list_for_each_entry_safe(login, temp, &sess->login_list, link) { 662 login->sess = NULL; 663 list_move_tail(&login->link, &login_list); 664 } 665 spin_unlock_bh(&sess->lock); 666 667 list_for_each_entry_safe(login, temp, &login_list, link) { 668 list_del(&login->link); 669 sbp_login_release(login, false); 670 } 671 672 sbp_session_release(sess, false); 673 } 674 675 static void session_maintenance_work(struct work_struct *work) 676 { 677 struct sbp_session *sess = container_of(work, struct sbp_session, 678 maint_work.work); 679 680 /* could be called while tearing down the session */ 681 spin_lock_bh(&sess->lock); 682 if (list_empty(&sess->login_list)) { 683 spin_unlock_bh(&sess->lock); 684 return; 685 } 686 spin_unlock_bh(&sess->lock); 687 688 if (sess->node_id != -1) { 689 /* check for bus reset and make node_id invalid */ 690 session_check_for_reset(sess); 691 692 schedule_delayed_work(&sess->maint_work, 693 SESSION_MAINTENANCE_INTERVAL); 694 } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) { 695 /* still waiting for reconnect */ 696 schedule_delayed_work(&sess->maint_work, 697 SESSION_MAINTENANCE_INTERVAL); 698 } else { 699 /* reconnect timeout has expired */ 700 session_reconnect_expired(sess); 701 } 702 } 703 704 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data, 705 struct sbp_target_agent *agent) 706 { 707 int state; 708 709 switch (tcode) { 710 case TCODE_READ_QUADLET_REQUEST: 711 pr_debug("tgt_agent AGENT_STATE READ\n"); 712 713 spin_lock_bh(&agent->lock); 714 state = agent->state; 715 spin_unlock_bh(&agent->lock); 716 717 *(__be32 *)data = cpu_to_be32(state); 718 719 return RCODE_COMPLETE; 720 721 case TCODE_WRITE_QUADLET_REQUEST: 722 /* ignored */ 723 return RCODE_COMPLETE; 724 725 default: 726 return RCODE_TYPE_ERROR; 727 } 728 } 729 730 static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data, 731 struct sbp_target_agent *agent) 732 { 733 switch (tcode) { 734 case TCODE_WRITE_QUADLET_REQUEST: 735 pr_debug("tgt_agent AGENT_RESET\n"); 736 spin_lock_bh(&agent->lock); 737 agent->state = AGENT_STATE_RESET; 738 spin_unlock_bh(&agent->lock); 739 return RCODE_COMPLETE; 740 741 default: 742 return RCODE_TYPE_ERROR; 743 } 744 } 745 746 static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data, 747 struct sbp_target_agent *agent) 748 { 749 struct sbp2_pointer *ptr = data; 750 751 switch (tcode) { 752 case TCODE_WRITE_BLOCK_REQUEST: 753 spin_lock_bh(&agent->lock); 754 if (agent->state != AGENT_STATE_SUSPENDED && 755 agent->state != AGENT_STATE_RESET) { 756 spin_unlock_bh(&agent->lock); 757 pr_notice("Ignoring ORB_POINTER write while active.\n"); 758 return RCODE_CONFLICT_ERROR; 759 } 760 agent->state = AGENT_STATE_ACTIVE; 761 spin_unlock_bh(&agent->lock); 762 763 agent->orb_pointer = sbp2_pointer_to_addr(ptr); 764 agent->doorbell = false; 765 766 pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", 767 agent->orb_pointer); 768 769 queue_work(system_unbound_wq, &agent->work); 770 771 return RCODE_COMPLETE; 772 773 case TCODE_READ_BLOCK_REQUEST: 774 pr_debug("tgt_agent ORB_POINTER READ\n"); 775 spin_lock_bh(&agent->lock); 776 addr_to_sbp2_pointer(agent->orb_pointer, ptr); 777 spin_unlock_bh(&agent->lock); 778 return RCODE_COMPLETE; 779 780 default: 781 return RCODE_TYPE_ERROR; 782 } 783 } 784 785 static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data, 786 struct sbp_target_agent *agent) 787 { 788 switch (tcode) { 789 case TCODE_WRITE_QUADLET_REQUEST: 790 spin_lock_bh(&agent->lock); 791 if (agent->state != AGENT_STATE_SUSPENDED) { 792 spin_unlock_bh(&agent->lock); 793 pr_debug("Ignoring DOORBELL while active.\n"); 794 return RCODE_CONFLICT_ERROR; 795 } 796 agent->state = AGENT_STATE_ACTIVE; 797 spin_unlock_bh(&agent->lock); 798 799 agent->doorbell = true; 800 801 pr_debug("tgt_agent DOORBELL\n"); 802 803 queue_work(system_unbound_wq, &agent->work); 804 805 return RCODE_COMPLETE; 806 807 case TCODE_READ_QUADLET_REQUEST: 808 return RCODE_COMPLETE; 809 810 default: 811 return RCODE_TYPE_ERROR; 812 } 813 } 814 815 static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card, 816 int tcode, void *data, struct sbp_target_agent *agent) 817 { 818 switch (tcode) { 819 case TCODE_WRITE_QUADLET_REQUEST: 820 pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n"); 821 /* ignored as we don't send unsolicited status */ 822 return RCODE_COMPLETE; 823 824 case TCODE_READ_QUADLET_REQUEST: 825 return RCODE_COMPLETE; 826 827 default: 828 return RCODE_TYPE_ERROR; 829 } 830 } 831 832 static void tgt_agent_rw(struct fw_card *card, struct fw_request *request, 833 int tcode, int destination, int source, int generation, 834 unsigned long long offset, void *data, size_t length, 835 void *callback_data) 836 { 837 struct sbp_target_agent *agent = callback_data; 838 struct sbp_session *sess = agent->login->sess; 839 int sess_gen, sess_node, rcode; 840 841 spin_lock_bh(&sess->lock); 842 sess_gen = sess->generation; 843 sess_node = sess->node_id; 844 spin_unlock_bh(&sess->lock); 845 846 if (generation != sess_gen) { 847 pr_notice("ignoring request with wrong generation\n"); 848 rcode = RCODE_TYPE_ERROR; 849 goto out; 850 } 851 852 if (source != sess_node) { 853 pr_notice("ignoring request from foreign node (%x != %x)\n", 854 source, sess_node); 855 rcode = RCODE_TYPE_ERROR; 856 goto out; 857 } 858 859 /* turn offset into the offset from the start of the block */ 860 offset -= agent->handler.offset; 861 862 if (offset == 0x00 && length == 4) { 863 /* AGENT_STATE */ 864 rcode = tgt_agent_rw_agent_state(card, tcode, data, agent); 865 } else if (offset == 0x04 && length == 4) { 866 /* AGENT_RESET */ 867 rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent); 868 } else if (offset == 0x08 && length == 8) { 869 /* ORB_POINTER */ 870 rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent); 871 } else if (offset == 0x10 && length == 4) { 872 /* DOORBELL */ 873 rcode = tgt_agent_rw_doorbell(card, tcode, data, agent); 874 } else if (offset == 0x14 && length == 4) { 875 /* UNSOLICITED_STATUS_ENABLE */ 876 rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode, 877 data, agent); 878 } else { 879 rcode = RCODE_ADDRESS_ERROR; 880 } 881 882 out: 883 fw_send_response(card, request, rcode); 884 } 885 886 static void sbp_handle_command(struct sbp_target_request *); 887 static int sbp_send_status(struct sbp_target_request *); 888 static void sbp_free_request(struct sbp_target_request *); 889 890 static void tgt_agent_process_work(struct work_struct *work) 891 { 892 struct sbp_target_request *req = 893 container_of(work, struct sbp_target_request, work); 894 895 pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n", 896 req->orb_pointer, 897 sbp2_pointer_to_addr(&req->orb.next_orb), 898 sbp2_pointer_to_addr(&req->orb.data_descriptor), 899 be32_to_cpu(req->orb.misc)); 900 901 if (req->orb_pointer >> 32) 902 pr_debug("ORB with high bits set\n"); 903 904 switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) { 905 case 0:/* Format specified by this standard */ 906 sbp_handle_command(req); 907 return; 908 case 1: /* Reserved for future standardization */ 909 case 2: /* Vendor-dependent */ 910 req->status.status |= cpu_to_be32( 911 STATUS_BLOCK_RESP( 912 STATUS_RESP_REQUEST_COMPLETE) | 913 STATUS_BLOCK_DEAD(0) | 914 STATUS_BLOCK_LEN(1) | 915 STATUS_BLOCK_SBP_STATUS( 916 SBP_STATUS_REQ_TYPE_NOTSUPP)); 917 sbp_send_status(req); 918 sbp_free_request(req); 919 return; 920 case 3: /* Dummy ORB */ 921 req->status.status |= cpu_to_be32( 922 STATUS_BLOCK_RESP( 923 STATUS_RESP_REQUEST_COMPLETE) | 924 STATUS_BLOCK_DEAD(0) | 925 STATUS_BLOCK_LEN(1) | 926 STATUS_BLOCK_SBP_STATUS( 927 SBP_STATUS_DUMMY_ORB_COMPLETE)); 928 sbp_send_status(req); 929 sbp_free_request(req); 930 return; 931 default: 932 BUG(); 933 } 934 } 935 936 /* used to double-check we haven't been issued an AGENT_RESET */ 937 static inline bool tgt_agent_check_active(struct sbp_target_agent *agent) 938 { 939 bool active; 940 941 spin_lock_bh(&agent->lock); 942 active = (agent->state == AGENT_STATE_ACTIVE); 943 spin_unlock_bh(&agent->lock); 944 945 return active; 946 } 947 948 static void tgt_agent_fetch_work(struct work_struct *work) 949 { 950 struct sbp_target_agent *agent = 951 container_of(work, struct sbp_target_agent, work); 952 struct sbp_session *sess = agent->login->sess; 953 struct sbp_target_request *req; 954 int ret; 955 bool doorbell = agent->doorbell; 956 u64 next_orb = agent->orb_pointer; 957 958 while (next_orb && tgt_agent_check_active(agent)) { 959 req = kzalloc(sizeof(*req), GFP_KERNEL); 960 if (!req) { 961 spin_lock_bh(&agent->lock); 962 agent->state = AGENT_STATE_DEAD; 963 spin_unlock_bh(&agent->lock); 964 return; 965 } 966 967 req->login = agent->login; 968 req->orb_pointer = next_orb; 969 970 req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH( 971 req->orb_pointer >> 32)); 972 req->status.orb_low = cpu_to_be32( 973 req->orb_pointer & 0xfffffffc); 974 975 /* read in the ORB */ 976 ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST, 977 sess->node_id, sess->generation, sess->speed, 978 req->orb_pointer, &req->orb, sizeof(req->orb)); 979 if (ret != RCODE_COMPLETE) { 980 pr_debug("tgt_orb fetch failed: %x\n", ret); 981 req->status.status |= cpu_to_be32( 982 STATUS_BLOCK_SRC( 983 STATUS_SRC_ORB_FINISHED) | 984 STATUS_BLOCK_RESP( 985 STATUS_RESP_TRANSPORT_FAILURE) | 986 STATUS_BLOCK_DEAD(1) | 987 STATUS_BLOCK_LEN(1) | 988 STATUS_BLOCK_SBP_STATUS( 989 SBP_STATUS_UNSPECIFIED_ERROR)); 990 spin_lock_bh(&agent->lock); 991 agent->state = AGENT_STATE_DEAD; 992 spin_unlock_bh(&agent->lock); 993 994 sbp_send_status(req); 995 sbp_free_request(req); 996 return; 997 } 998 999 /* check the next_ORB field */ 1000 if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) { 1001 next_orb = 0; 1002 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( 1003 STATUS_SRC_ORB_FINISHED)); 1004 } else { 1005 next_orb = sbp2_pointer_to_addr(&req->orb.next_orb); 1006 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( 1007 STATUS_SRC_ORB_CONTINUING)); 1008 } 1009 1010 if (tgt_agent_check_active(agent) && !doorbell) { 1011 INIT_WORK(&req->work, tgt_agent_process_work); 1012 queue_work(system_unbound_wq, &req->work); 1013 } else { 1014 /* don't process this request, just check next_ORB */ 1015 sbp_free_request(req); 1016 } 1017 1018 spin_lock_bh(&agent->lock); 1019 doorbell = agent->doorbell = false; 1020 1021 /* check if we should carry on processing */ 1022 if (next_orb) 1023 agent->orb_pointer = next_orb; 1024 else 1025 agent->state = AGENT_STATE_SUSPENDED; 1026 1027 spin_unlock_bh(&agent->lock); 1028 }; 1029 } 1030 1031 static struct sbp_target_agent *sbp_target_agent_register( 1032 struct sbp_login_descriptor *login) 1033 { 1034 struct sbp_target_agent *agent; 1035 int ret; 1036 1037 agent = kmalloc(sizeof(*agent), GFP_KERNEL); 1038 if (!agent) 1039 return ERR_PTR(-ENOMEM); 1040 1041 spin_lock_init(&agent->lock); 1042 1043 agent->handler.length = 0x20; 1044 agent->handler.address_callback = tgt_agent_rw; 1045 agent->handler.callback_data = agent; 1046 1047 agent->login = login; 1048 agent->state = AGENT_STATE_RESET; 1049 INIT_WORK(&agent->work, tgt_agent_fetch_work); 1050 agent->orb_pointer = 0; 1051 agent->doorbell = false; 1052 1053 ret = fw_core_add_address_handler(&agent->handler, 1054 &sbp_register_region); 1055 if (ret < 0) { 1056 kfree(agent); 1057 return ERR_PTR(ret); 1058 } 1059 1060 return agent; 1061 } 1062 1063 static void sbp_target_agent_unregister(struct sbp_target_agent *agent) 1064 { 1065 fw_core_remove_address_handler(&agent->handler); 1066 cancel_work_sync(&agent->work); 1067 kfree(agent); 1068 } 1069 1070 /* 1071 * Simple wrapper around fw_run_transaction that retries the transaction several 1072 * times in case of failure, with an exponential backoff. 1073 */ 1074 static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id, 1075 int generation, int speed, unsigned long long offset, 1076 void *payload, size_t length) 1077 { 1078 int attempt, ret, delay; 1079 1080 for (attempt = 1; attempt <= 5; attempt++) { 1081 ret = fw_run_transaction(card, tcode, destination_id, 1082 generation, speed, offset, payload, length); 1083 1084 switch (ret) { 1085 case RCODE_COMPLETE: 1086 case RCODE_TYPE_ERROR: 1087 case RCODE_ADDRESS_ERROR: 1088 case RCODE_GENERATION: 1089 return ret; 1090 1091 default: 1092 delay = 5 * attempt * attempt; 1093 usleep_range(delay, delay * 2); 1094 } 1095 } 1096 1097 return ret; 1098 } 1099 1100 /* 1101 * Wrapper around sbp_run_transaction that gets the card, destination, 1102 * generation and speed out of the request's session. 1103 */ 1104 static int sbp_run_request_transaction(struct sbp_target_request *req, 1105 int tcode, unsigned long long offset, void *payload, 1106 size_t length) 1107 { 1108 struct sbp_login_descriptor *login = req->login; 1109 struct sbp_session *sess = login->sess; 1110 struct fw_card *card; 1111 int node_id, generation, speed, ret; 1112 1113 spin_lock_bh(&sess->lock); 1114 card = fw_card_get(sess->card); 1115 node_id = sess->node_id; 1116 generation = sess->generation; 1117 speed = sess->speed; 1118 spin_unlock_bh(&sess->lock); 1119 1120 ret = sbp_run_transaction(card, tcode, node_id, generation, speed, 1121 offset, payload, length); 1122 1123 fw_card_put(card); 1124 1125 return ret; 1126 } 1127 1128 static int sbp_fetch_command(struct sbp_target_request *req) 1129 { 1130 int ret, cmd_len, copy_len; 1131 1132 cmd_len = scsi_command_size(req->orb.command_block); 1133 1134 req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL); 1135 if (!req->cmd_buf) 1136 return -ENOMEM; 1137 1138 memcpy(req->cmd_buf, req->orb.command_block, 1139 min_t(int, cmd_len, sizeof(req->orb.command_block))); 1140 1141 if (cmd_len > sizeof(req->orb.command_block)) { 1142 pr_debug("sbp_fetch_command: filling in long command\n"); 1143 copy_len = cmd_len - sizeof(req->orb.command_block); 1144 1145 ret = sbp_run_request_transaction(req, 1146 TCODE_READ_BLOCK_REQUEST, 1147 req->orb_pointer + sizeof(req->orb), 1148 req->cmd_buf + sizeof(req->orb.command_block), 1149 copy_len); 1150 if (ret != RCODE_COMPLETE) 1151 return -EIO; 1152 } 1153 1154 return 0; 1155 } 1156 1157 static int sbp_fetch_page_table(struct sbp_target_request *req) 1158 { 1159 int pg_tbl_sz, ret; 1160 struct sbp_page_table_entry *pg_tbl; 1161 1162 if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc))) 1163 return 0; 1164 1165 pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) * 1166 sizeof(struct sbp_page_table_entry); 1167 1168 pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL); 1169 if (!pg_tbl) 1170 return -ENOMEM; 1171 1172 ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST, 1173 sbp2_pointer_to_addr(&req->orb.data_descriptor), 1174 pg_tbl, pg_tbl_sz); 1175 if (ret != RCODE_COMPLETE) { 1176 kfree(pg_tbl); 1177 return -EIO; 1178 } 1179 1180 req->pg_tbl = pg_tbl; 1181 return 0; 1182 } 1183 1184 static void sbp_calc_data_length_direction(struct sbp_target_request *req, 1185 u32 *data_len, enum dma_data_direction *data_dir) 1186 { 1187 int data_size, direction, idx; 1188 1189 data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); 1190 direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc)); 1191 1192 if (!data_size) { 1193 *data_len = 0; 1194 *data_dir = DMA_NONE; 1195 return; 1196 } 1197 1198 *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1199 1200 if (req->pg_tbl) { 1201 *data_len = 0; 1202 for (idx = 0; idx < data_size; idx++) { 1203 *data_len += be16_to_cpu( 1204 req->pg_tbl[idx].segment_length); 1205 } 1206 } else { 1207 *data_len = data_size; 1208 } 1209 } 1210 1211 static void sbp_handle_command(struct sbp_target_request *req) 1212 { 1213 struct sbp_login_descriptor *login = req->login; 1214 struct sbp_session *sess = login->sess; 1215 int ret, unpacked_lun; 1216 u32 data_length; 1217 enum dma_data_direction data_dir; 1218 1219 ret = sbp_fetch_command(req); 1220 if (ret) { 1221 pr_debug("sbp_handle_command: fetch command failed: %d\n", ret); 1222 goto err; 1223 } 1224 1225 ret = sbp_fetch_page_table(req); 1226 if (ret) { 1227 pr_debug("sbp_handle_command: fetch page table failed: %d\n", 1228 ret); 1229 goto err; 1230 } 1231 1232 unpacked_lun = req->login->lun->unpacked_lun; 1233 sbp_calc_data_length_direction(req, &data_length, &data_dir); 1234 1235 pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", 1236 req->orb_pointer, unpacked_lun, data_length, data_dir); 1237 1238 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, 1239 req->sense_buf, unpacked_lun, data_length, 1240 TCM_SIMPLE_TAG, data_dir, 0)) 1241 goto err; 1242 1243 return; 1244 1245 err: 1246 req->status.status |= cpu_to_be32( 1247 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 1248 STATUS_BLOCK_DEAD(0) | 1249 STATUS_BLOCK_LEN(1) | 1250 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1251 sbp_send_status(req); 1252 sbp_free_request(req); 1253 } 1254 1255 /* 1256 * DMA_TO_DEVICE = read from initiator (SCSI WRITE) 1257 * DMA_FROM_DEVICE = write to initiator (SCSI READ) 1258 */ 1259 static int sbp_rw_data(struct sbp_target_request *req) 1260 { 1261 struct sbp_session *sess = req->login->sess; 1262 int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id, 1263 generation, num_pte, length, tfr_length, 1264 rcode = RCODE_COMPLETE; 1265 struct sbp_page_table_entry *pte; 1266 unsigned long long offset; 1267 struct fw_card *card; 1268 struct sg_mapping_iter iter; 1269 1270 if (req->se_cmd.data_direction == DMA_FROM_DEVICE) { 1271 tcode = TCODE_WRITE_BLOCK_REQUEST; 1272 sg_miter_flags = SG_MITER_FROM_SG; 1273 } else { 1274 tcode = TCODE_READ_BLOCK_REQUEST; 1275 sg_miter_flags = SG_MITER_TO_SG; 1276 } 1277 1278 max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc)); 1279 speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc)); 1280 1281 pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc)); 1282 if (pg_size) { 1283 pr_err("sbp_run_transaction: page size ignored\n"); 1284 pg_size = 0x100 << pg_size; 1285 } 1286 1287 spin_lock_bh(&sess->lock); 1288 card = fw_card_get(sess->card); 1289 node_id = sess->node_id; 1290 generation = sess->generation; 1291 spin_unlock_bh(&sess->lock); 1292 1293 if (req->pg_tbl) { 1294 pte = req->pg_tbl; 1295 num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); 1296 1297 offset = 0; 1298 length = 0; 1299 } else { 1300 pte = NULL; 1301 num_pte = 0; 1302 1303 offset = sbp2_pointer_to_addr(&req->orb.data_descriptor); 1304 length = req->se_cmd.data_length; 1305 } 1306 1307 sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents, 1308 sg_miter_flags); 1309 1310 while (length || num_pte) { 1311 if (!length) { 1312 offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 | 1313 be32_to_cpu(pte->segment_base_lo); 1314 length = be16_to_cpu(pte->segment_length); 1315 1316 pte++; 1317 num_pte--; 1318 } 1319 1320 sg_miter_next(&iter); 1321 1322 tfr_length = min3(length, max_payload, (int)iter.length); 1323 1324 /* FIXME: take page_size into account */ 1325 1326 rcode = sbp_run_transaction(card, tcode, node_id, 1327 generation, speed, 1328 offset, iter.addr, tfr_length); 1329 1330 if (rcode != RCODE_COMPLETE) 1331 break; 1332 1333 length -= tfr_length; 1334 offset += tfr_length; 1335 iter.consumed = tfr_length; 1336 } 1337 1338 sg_miter_stop(&iter); 1339 fw_card_put(card); 1340 1341 if (rcode == RCODE_COMPLETE) { 1342 WARN_ON(length != 0); 1343 return 0; 1344 } else { 1345 return -EIO; 1346 } 1347 } 1348 1349 static int sbp_send_status(struct sbp_target_request *req) 1350 { 1351 int ret, length; 1352 struct sbp_login_descriptor *login = req->login; 1353 1354 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; 1355 1356 ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, 1357 login->status_fifo_addr, &req->status, length); 1358 if (ret != RCODE_COMPLETE) { 1359 pr_debug("sbp_send_status: write failed: 0x%x\n", ret); 1360 return -EIO; 1361 } 1362 1363 pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n", 1364 req->orb_pointer); 1365 1366 return 0; 1367 } 1368 1369 static void sbp_sense_mangle(struct sbp_target_request *req) 1370 { 1371 struct se_cmd *se_cmd = &req->se_cmd; 1372 u8 *sense = req->sense_buf; 1373 u8 *status = req->status.data; 1374 1375 WARN_ON(se_cmd->scsi_sense_length < 18); 1376 1377 switch (sense[0] & 0x7f) { /* sfmt */ 1378 case 0x70: /* current, fixed */ 1379 status[0] = 0 << 6; 1380 break; 1381 case 0x71: /* deferred, fixed */ 1382 status[0] = 1 << 6; 1383 break; 1384 case 0x72: /* current, descriptor */ 1385 case 0x73: /* deferred, descriptor */ 1386 default: 1387 /* 1388 * TODO: SBP-3 specifies what we should do with descriptor 1389 * format sense data 1390 */ 1391 pr_err("sbp_send_sense: unknown sense format: 0x%x\n", 1392 sense[0]); 1393 req->status.status |= cpu_to_be32( 1394 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1395 STATUS_BLOCK_DEAD(0) | 1396 STATUS_BLOCK_LEN(1) | 1397 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED)); 1398 return; 1399 } 1400 1401 status[0] |= se_cmd->scsi_status & 0x3f;/* status */ 1402 status[1] = 1403 (sense[0] & 0x80) | /* valid */ 1404 ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */ 1405 (sense[2] & 0x0f); /* sense_key */ 1406 status[2] = se_cmd->scsi_asc; /* sense_code */ 1407 status[3] = se_cmd->scsi_ascq; /* sense_qualifier */ 1408 1409 /* information */ 1410 status[4] = sense[3]; 1411 status[5] = sense[4]; 1412 status[6] = sense[5]; 1413 status[7] = sense[6]; 1414 1415 /* CDB-dependent */ 1416 status[8] = sense[8]; 1417 status[9] = sense[9]; 1418 status[10] = sense[10]; 1419 status[11] = sense[11]; 1420 1421 /* fru */ 1422 status[12] = sense[14]; 1423 1424 /* sense_key-dependent */ 1425 status[13] = sense[15]; 1426 status[14] = sense[16]; 1427 status[15] = sense[17]; 1428 1429 req->status.status |= cpu_to_be32( 1430 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1431 STATUS_BLOCK_DEAD(0) | 1432 STATUS_BLOCK_LEN(5) | 1433 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 1434 } 1435 1436 static int sbp_send_sense(struct sbp_target_request *req) 1437 { 1438 struct se_cmd *se_cmd = &req->se_cmd; 1439 1440 if (se_cmd->scsi_sense_length) { 1441 sbp_sense_mangle(req); 1442 } else { 1443 req->status.status |= cpu_to_be32( 1444 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1445 STATUS_BLOCK_DEAD(0) | 1446 STATUS_BLOCK_LEN(1) | 1447 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 1448 } 1449 1450 return sbp_send_status(req); 1451 } 1452 1453 static void sbp_free_request(struct sbp_target_request *req) 1454 { 1455 kfree(req->pg_tbl); 1456 kfree(req->cmd_buf); 1457 kfree(req); 1458 } 1459 1460 static void sbp_mgt_agent_process(struct work_struct *work) 1461 { 1462 struct sbp_management_agent *agent = 1463 container_of(work, struct sbp_management_agent, work); 1464 struct sbp_management_request *req = agent->request; 1465 int ret; 1466 int status_data_len = 0; 1467 1468 /* fetch the ORB from the initiator */ 1469 ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST, 1470 req->node_addr, req->generation, req->speed, 1471 agent->orb_offset, &req->orb, sizeof(req->orb)); 1472 if (ret != RCODE_COMPLETE) { 1473 pr_debug("mgt_orb fetch failed: %x\n", ret); 1474 goto out; 1475 } 1476 1477 pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n", 1478 sbp2_pointer_to_addr(&req->orb.ptr1), 1479 sbp2_pointer_to_addr(&req->orb.ptr2), 1480 be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length), 1481 sbp2_pointer_to_addr(&req->orb.status_fifo)); 1482 1483 if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) || 1484 ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) { 1485 pr_err("mgt_orb bad request\n"); 1486 goto out; 1487 } 1488 1489 switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) { 1490 case MANAGEMENT_ORB_FUNCTION_LOGIN: 1491 sbp_management_request_login(agent, req, &status_data_len); 1492 break; 1493 1494 case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS: 1495 sbp_management_request_query_logins(agent, req, 1496 &status_data_len); 1497 break; 1498 1499 case MANAGEMENT_ORB_FUNCTION_RECONNECT: 1500 sbp_management_request_reconnect(agent, req, &status_data_len); 1501 break; 1502 1503 case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD: 1504 pr_notice("SET PASSWORD not implemented\n"); 1505 1506 req->status.status = cpu_to_be32( 1507 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1508 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1509 1510 break; 1511 1512 case MANAGEMENT_ORB_FUNCTION_LOGOUT: 1513 sbp_management_request_logout(agent, req, &status_data_len); 1514 break; 1515 1516 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK: 1517 pr_notice("ABORT TASK not implemented\n"); 1518 1519 req->status.status = cpu_to_be32( 1520 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1521 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1522 1523 break; 1524 1525 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET: 1526 pr_notice("ABORT TASK SET not implemented\n"); 1527 1528 req->status.status = cpu_to_be32( 1529 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1530 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1531 1532 break; 1533 1534 case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET: 1535 pr_notice("LOGICAL UNIT RESET not implemented\n"); 1536 1537 req->status.status = cpu_to_be32( 1538 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1539 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1540 1541 break; 1542 1543 case MANAGEMENT_ORB_FUNCTION_TARGET_RESET: 1544 pr_notice("TARGET RESET not implemented\n"); 1545 1546 req->status.status = cpu_to_be32( 1547 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1548 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1549 1550 break; 1551 1552 default: 1553 pr_notice("unknown management function 0x%x\n", 1554 MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))); 1555 1556 req->status.status = cpu_to_be32( 1557 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1558 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1559 1560 break; 1561 } 1562 1563 req->status.status |= cpu_to_be32( 1564 STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */ 1565 STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) | 1566 STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32)); 1567 req->status.orb_low = cpu_to_be32(agent->orb_offset); 1568 1569 /* write the status block back to the initiator */ 1570 ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST, 1571 req->node_addr, req->generation, req->speed, 1572 sbp2_pointer_to_addr(&req->orb.status_fifo), 1573 &req->status, 8 + status_data_len); 1574 if (ret != RCODE_COMPLETE) { 1575 pr_debug("mgt_orb status write failed: %x\n", ret); 1576 goto out; 1577 } 1578 1579 out: 1580 fw_card_put(req->card); 1581 kfree(req); 1582 1583 spin_lock_bh(&agent->lock); 1584 agent->state = MANAGEMENT_AGENT_STATE_IDLE; 1585 spin_unlock_bh(&agent->lock); 1586 } 1587 1588 static void sbp_mgt_agent_rw(struct fw_card *card, 1589 struct fw_request *request, int tcode, int destination, int source, 1590 int generation, unsigned long long offset, void *data, size_t length, 1591 void *callback_data) 1592 { 1593 struct sbp_management_agent *agent = callback_data; 1594 struct sbp2_pointer *ptr = data; 1595 int rcode = RCODE_ADDRESS_ERROR; 1596 1597 if (!agent->tport->enable) 1598 goto out; 1599 1600 if ((offset != agent->handler.offset) || (length != 8)) 1601 goto out; 1602 1603 if (tcode == TCODE_WRITE_BLOCK_REQUEST) { 1604 struct sbp_management_request *req; 1605 int prev_state; 1606 1607 spin_lock_bh(&agent->lock); 1608 prev_state = agent->state; 1609 agent->state = MANAGEMENT_AGENT_STATE_BUSY; 1610 spin_unlock_bh(&agent->lock); 1611 1612 if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) { 1613 pr_notice("ignoring management request while busy\n"); 1614 rcode = RCODE_CONFLICT_ERROR; 1615 goto out; 1616 } 1617 1618 req = kzalloc(sizeof(*req), GFP_ATOMIC); 1619 if (!req) { 1620 rcode = RCODE_CONFLICT_ERROR; 1621 goto out; 1622 } 1623 1624 req->card = fw_card_get(card); 1625 req->generation = generation; 1626 req->node_addr = source; 1627 req->speed = fw_get_request_speed(request); 1628 1629 agent->orb_offset = sbp2_pointer_to_addr(ptr); 1630 agent->request = req; 1631 1632 queue_work(system_unbound_wq, &agent->work); 1633 rcode = RCODE_COMPLETE; 1634 } else if (tcode == TCODE_READ_BLOCK_REQUEST) { 1635 addr_to_sbp2_pointer(agent->orb_offset, ptr); 1636 rcode = RCODE_COMPLETE; 1637 } else { 1638 rcode = RCODE_TYPE_ERROR; 1639 } 1640 1641 out: 1642 fw_send_response(card, request, rcode); 1643 } 1644 1645 static struct sbp_management_agent *sbp_management_agent_register( 1646 struct sbp_tport *tport) 1647 { 1648 int ret; 1649 struct sbp_management_agent *agent; 1650 1651 agent = kmalloc(sizeof(*agent), GFP_KERNEL); 1652 if (!agent) 1653 return ERR_PTR(-ENOMEM); 1654 1655 spin_lock_init(&agent->lock); 1656 agent->tport = tport; 1657 agent->handler.length = 0x08; 1658 agent->handler.address_callback = sbp_mgt_agent_rw; 1659 agent->handler.callback_data = agent; 1660 agent->state = MANAGEMENT_AGENT_STATE_IDLE; 1661 INIT_WORK(&agent->work, sbp_mgt_agent_process); 1662 agent->orb_offset = 0; 1663 agent->request = NULL; 1664 1665 ret = fw_core_add_address_handler(&agent->handler, 1666 &sbp_register_region); 1667 if (ret < 0) { 1668 kfree(agent); 1669 return ERR_PTR(ret); 1670 } 1671 1672 return agent; 1673 } 1674 1675 static void sbp_management_agent_unregister(struct sbp_management_agent *agent) 1676 { 1677 fw_core_remove_address_handler(&agent->handler); 1678 cancel_work_sync(&agent->work); 1679 kfree(agent); 1680 } 1681 1682 static int sbp_check_true(struct se_portal_group *se_tpg) 1683 { 1684 return 1; 1685 } 1686 1687 static int sbp_check_false(struct se_portal_group *se_tpg) 1688 { 1689 return 0; 1690 } 1691 1692 static char *sbp_get_fabric_name(void) 1693 { 1694 return "sbp"; 1695 } 1696 1697 static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) 1698 { 1699 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1700 struct sbp_tport *tport = tpg->tport; 1701 1702 return &tport->tport_name[0]; 1703 } 1704 1705 static u16 sbp_get_tag(struct se_portal_group *se_tpg) 1706 { 1707 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1708 return tpg->tport_tpgt; 1709 } 1710 1711 static u32 sbp_get_default_depth(struct se_portal_group *se_tpg) 1712 { 1713 return 1; 1714 } 1715 1716 static struct se_node_acl *sbp_alloc_fabric_acl(struct se_portal_group *se_tpg) 1717 { 1718 struct sbp_nacl *nacl; 1719 1720 nacl = kzalloc(sizeof(struct sbp_nacl), GFP_KERNEL); 1721 if (!nacl) { 1722 pr_err("Unable to allocate struct sbp_nacl\n"); 1723 return NULL; 1724 } 1725 1726 return &nacl->se_node_acl; 1727 } 1728 1729 static void sbp_release_fabric_acl( 1730 struct se_portal_group *se_tpg, 1731 struct se_node_acl *se_nacl) 1732 { 1733 struct sbp_nacl *nacl = 1734 container_of(se_nacl, struct sbp_nacl, se_node_acl); 1735 kfree(nacl); 1736 } 1737 1738 static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg) 1739 { 1740 return 1; 1741 } 1742 1743 static void sbp_release_cmd(struct se_cmd *se_cmd) 1744 { 1745 struct sbp_target_request *req = container_of(se_cmd, 1746 struct sbp_target_request, se_cmd); 1747 1748 sbp_free_request(req); 1749 } 1750 1751 static int sbp_shutdown_session(struct se_session *se_sess) 1752 { 1753 return 0; 1754 } 1755 1756 static void sbp_close_session(struct se_session *se_sess) 1757 { 1758 return; 1759 } 1760 1761 static u32 sbp_sess_get_index(struct se_session *se_sess) 1762 { 1763 return 0; 1764 } 1765 1766 static int sbp_write_pending(struct se_cmd *se_cmd) 1767 { 1768 struct sbp_target_request *req = container_of(se_cmd, 1769 struct sbp_target_request, se_cmd); 1770 int ret; 1771 1772 ret = sbp_rw_data(req); 1773 if (ret) { 1774 req->status.status |= cpu_to_be32( 1775 STATUS_BLOCK_RESP( 1776 STATUS_RESP_TRANSPORT_FAILURE) | 1777 STATUS_BLOCK_DEAD(0) | 1778 STATUS_BLOCK_LEN(1) | 1779 STATUS_BLOCK_SBP_STATUS( 1780 SBP_STATUS_UNSPECIFIED_ERROR)); 1781 sbp_send_status(req); 1782 return ret; 1783 } 1784 1785 target_execute_cmd(se_cmd); 1786 return 0; 1787 } 1788 1789 static int sbp_write_pending_status(struct se_cmd *se_cmd) 1790 { 1791 return 0; 1792 } 1793 1794 static void sbp_set_default_node_attrs(struct se_node_acl *nacl) 1795 { 1796 return; 1797 } 1798 1799 static u32 sbp_get_task_tag(struct se_cmd *se_cmd) 1800 { 1801 struct sbp_target_request *req = container_of(se_cmd, 1802 struct sbp_target_request, se_cmd); 1803 1804 /* only used for printk until we do TMRs */ 1805 return (u32)req->orb_pointer; 1806 } 1807 1808 static int sbp_get_cmd_state(struct se_cmd *se_cmd) 1809 { 1810 return 0; 1811 } 1812 1813 static int sbp_queue_data_in(struct se_cmd *se_cmd) 1814 { 1815 struct sbp_target_request *req = container_of(se_cmd, 1816 struct sbp_target_request, se_cmd); 1817 int ret; 1818 1819 ret = sbp_rw_data(req); 1820 if (ret) { 1821 req->status.status |= cpu_to_be32( 1822 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 1823 STATUS_BLOCK_DEAD(0) | 1824 STATUS_BLOCK_LEN(1) | 1825 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1826 sbp_send_status(req); 1827 return ret; 1828 } 1829 1830 return sbp_send_sense(req); 1831 } 1832 1833 /* 1834 * Called after command (no data transfer) or after the write (to device) 1835 * operation is completed 1836 */ 1837 static int sbp_queue_status(struct se_cmd *se_cmd) 1838 { 1839 struct sbp_target_request *req = container_of(se_cmd, 1840 struct sbp_target_request, se_cmd); 1841 1842 return sbp_send_sense(req); 1843 } 1844 1845 static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) 1846 { 1847 } 1848 1849 static void sbp_aborted_task(struct se_cmd *se_cmd) 1850 { 1851 return; 1852 } 1853 1854 static int sbp_check_stop_free(struct se_cmd *se_cmd) 1855 { 1856 struct sbp_target_request *req = container_of(se_cmd, 1857 struct sbp_target_request, se_cmd); 1858 1859 transport_generic_free_cmd(&req->se_cmd, 0); 1860 return 1; 1861 } 1862 1863 /* 1864 * Handlers for Serial Bus Protocol 2/3 (SBP-2 / SBP-3) 1865 */ 1866 static u8 sbp_get_fabric_proto_ident(struct se_portal_group *se_tpg) 1867 { 1868 /* 1869 * Return a IEEE 1394 SCSI Protocol identifier for loopback operations 1870 * This is defined in section 7.5.1 Table 362 in spc4r17 1871 */ 1872 return SCSI_PROTOCOL_SBP; 1873 } 1874 1875 static u32 sbp_get_pr_transport_id( 1876 struct se_portal_group *se_tpg, 1877 struct se_node_acl *se_nacl, 1878 struct t10_pr_registration *pr_reg, 1879 int *format_code, 1880 unsigned char *buf) 1881 { 1882 int ret; 1883 1884 /* 1885 * Set PROTOCOL IDENTIFIER to 3h for SBP 1886 */ 1887 buf[0] = SCSI_PROTOCOL_SBP; 1888 /* 1889 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI 1890 * over IEEE 1394 1891 */ 1892 ret = hex2bin(&buf[8], se_nacl->initiatorname, 8); 1893 if (ret < 0) 1894 pr_debug("sbp transport_id: invalid hex string\n"); 1895 1896 /* 1897 * The IEEE 1394 Transport ID is a hardcoded 24-byte length 1898 */ 1899 return 24; 1900 } 1901 1902 static u32 sbp_get_pr_transport_id_len( 1903 struct se_portal_group *se_tpg, 1904 struct se_node_acl *se_nacl, 1905 struct t10_pr_registration *pr_reg, 1906 int *format_code) 1907 { 1908 *format_code = 0; 1909 /* 1910 * From spc4r17, 7.5.4.4 TransportID for initiator ports using SCSI 1911 * over IEEE 1394 1912 * 1913 * The SBP Transport ID is a hardcoded 24-byte length 1914 */ 1915 return 24; 1916 } 1917 1918 /* 1919 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above 1920 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations. 1921 */ 1922 static char *sbp_parse_pr_out_transport_id( 1923 struct se_portal_group *se_tpg, 1924 const char *buf, 1925 u32 *out_tid_len, 1926 char **port_nexus_ptr) 1927 { 1928 /* 1929 * Assume the FORMAT CODE 00b from spc4r17, 7.5.4.4 TransportID 1930 * for initiator ports using SCSI over SBP Serial SCSI Protocol 1931 * 1932 * The TransportID for a IEEE 1394 Initiator Port is of fixed size of 1933 * 24 bytes, and IEEE 1394 does not contain a I_T nexus identifier, 1934 * so we return the **port_nexus_ptr set to NULL. 1935 */ 1936 *port_nexus_ptr = NULL; 1937 *out_tid_len = 24; 1938 1939 return (char *)&buf[8]; 1940 } 1941 1942 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) 1943 { 1944 int i, count = 0; 1945 1946 spin_lock(&tpg->tpg_lun_lock); 1947 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 1948 struct se_lun *se_lun = tpg->tpg_lun_list[i]; 1949 1950 if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE) 1951 continue; 1952 1953 count++; 1954 } 1955 spin_unlock(&tpg->tpg_lun_lock); 1956 1957 return count; 1958 } 1959 1960 static int sbp_update_unit_directory(struct sbp_tport *tport) 1961 { 1962 int num_luns, num_entries, idx = 0, mgt_agt_addr, ret, i; 1963 u32 *data; 1964 1965 if (tport->unit_directory.data) { 1966 fw_core_remove_descriptor(&tport->unit_directory); 1967 kfree(tport->unit_directory.data); 1968 tport->unit_directory.data = NULL; 1969 } 1970 1971 if (!tport->enable || !tport->tpg) 1972 return 0; 1973 1974 num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg); 1975 1976 /* 1977 * Number of entries in the final unit directory: 1978 * - all of those in the template 1979 * - management_agent 1980 * - unit_characteristics 1981 * - reconnect_timeout 1982 * - unit unique ID 1983 * - one for each LUN 1984 * 1985 * MUST NOT include leaf or sub-directory entries 1986 */ 1987 num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns; 1988 1989 if (tport->directory_id != -1) 1990 num_entries++; 1991 1992 /* allocate num_entries + 4 for the header and unique ID leaf */ 1993 data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL); 1994 if (!data) 1995 return -ENOMEM; 1996 1997 /* directory_length */ 1998 data[idx++] = num_entries << 16; 1999 2000 /* directory_id */ 2001 if (tport->directory_id != -1) 2002 data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id; 2003 2004 /* unit directory template */ 2005 memcpy(&data[idx], sbp_unit_directory_template, 2006 sizeof(sbp_unit_directory_template)); 2007 idx += ARRAY_SIZE(sbp_unit_directory_template); 2008 2009 /* management_agent */ 2010 mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4; 2011 data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff); 2012 2013 /* unit_characteristics */ 2014 data[idx++] = 0x3a000000 | 2015 (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) | 2016 SBP_ORB_FETCH_SIZE; 2017 2018 /* reconnect_timeout */ 2019 data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff); 2020 2021 /* unit unique ID (leaf is just after LUNs) */ 2022 data[idx++] = 0x8d000000 | (num_luns + 1); 2023 2024 spin_lock(&tport->tpg->se_tpg.tpg_lun_lock); 2025 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 2026 struct se_lun *se_lun = tport->tpg->se_tpg.tpg_lun_list[i]; 2027 struct se_device *dev; 2028 int type; 2029 2030 if (se_lun->lun_status == TRANSPORT_LUN_STATUS_FREE) 2031 continue; 2032 2033 spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock); 2034 2035 dev = se_lun->lun_se_dev; 2036 type = dev->transport->get_device_type(dev); 2037 2038 /* logical_unit_number */ 2039 data[idx++] = 0x14000000 | 2040 ((type << 16) & 0x1f0000) | 2041 (se_lun->unpacked_lun & 0xffff); 2042 2043 spin_lock(&tport->tpg->se_tpg.tpg_lun_lock); 2044 } 2045 spin_unlock(&tport->tpg->se_tpg.tpg_lun_lock); 2046 2047 /* unit unique ID leaf */ 2048 data[idx++] = 2 << 16; 2049 data[idx++] = tport->guid >> 32; 2050 data[idx++] = tport->guid; 2051 2052 tport->unit_directory.length = idx; 2053 tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24; 2054 tport->unit_directory.data = data; 2055 2056 ret = fw_core_add_descriptor(&tport->unit_directory); 2057 if (ret < 0) { 2058 kfree(tport->unit_directory.data); 2059 tport->unit_directory.data = NULL; 2060 } 2061 2062 return ret; 2063 } 2064 2065 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn) 2066 { 2067 const char *cp; 2068 char c, nibble; 2069 int pos = 0, err; 2070 2071 *wwn = 0; 2072 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) { 2073 c = *cp; 2074 if (c == '\n' && cp[1] == '\0') 2075 continue; 2076 if (c == '\0') { 2077 err = 2; 2078 if (pos != 16) 2079 goto fail; 2080 return cp - name; 2081 } 2082 err = 3; 2083 if (isdigit(c)) 2084 nibble = c - '0'; 2085 else if (isxdigit(c)) 2086 nibble = tolower(c) - 'a' + 10; 2087 else 2088 goto fail; 2089 *wwn = (*wwn << 4) | nibble; 2090 pos++; 2091 } 2092 err = 4; 2093 fail: 2094 printk(KERN_INFO "err %u len %zu pos %u\n", 2095 err, cp - name, pos); 2096 return -1; 2097 } 2098 2099 static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn) 2100 { 2101 return snprintf(buf, len, "%016llx", wwn); 2102 } 2103 2104 static struct se_node_acl *sbp_make_nodeacl( 2105 struct se_portal_group *se_tpg, 2106 struct config_group *group, 2107 const char *name) 2108 { 2109 struct se_node_acl *se_nacl, *se_nacl_new; 2110 struct sbp_nacl *nacl; 2111 u64 guid = 0; 2112 u32 nexus_depth = 1; 2113 2114 if (sbp_parse_wwn(name, &guid) < 0) 2115 return ERR_PTR(-EINVAL); 2116 2117 se_nacl_new = sbp_alloc_fabric_acl(se_tpg); 2118 if (!se_nacl_new) 2119 return ERR_PTR(-ENOMEM); 2120 2121 /* 2122 * se_nacl_new may be released by core_tpg_add_initiator_node_acl() 2123 * when converting a NodeACL from demo mode -> explict 2124 */ 2125 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, 2126 name, nexus_depth); 2127 if (IS_ERR(se_nacl)) { 2128 sbp_release_fabric_acl(se_tpg, se_nacl_new); 2129 return se_nacl; 2130 } 2131 2132 nacl = container_of(se_nacl, struct sbp_nacl, se_node_acl); 2133 nacl->guid = guid; 2134 sbp_format_wwn(nacl->iport_name, SBP_NAMELEN, guid); 2135 2136 return se_nacl; 2137 } 2138 2139 static void sbp_drop_nodeacl(struct se_node_acl *se_acl) 2140 { 2141 struct sbp_nacl *nacl = 2142 container_of(se_acl, struct sbp_nacl, se_node_acl); 2143 2144 core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); 2145 kfree(nacl); 2146 } 2147 2148 static int sbp_post_link_lun( 2149 struct se_portal_group *se_tpg, 2150 struct se_lun *se_lun) 2151 { 2152 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2153 2154 return sbp_update_unit_directory(tpg->tport); 2155 } 2156 2157 static void sbp_pre_unlink_lun( 2158 struct se_portal_group *se_tpg, 2159 struct se_lun *se_lun) 2160 { 2161 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2162 struct sbp_tport *tport = tpg->tport; 2163 int ret; 2164 2165 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) 2166 tport->enable = 0; 2167 2168 ret = sbp_update_unit_directory(tport); 2169 if (ret < 0) 2170 pr_err("unlink LUN: failed to update unit directory\n"); 2171 } 2172 2173 static struct se_portal_group *sbp_make_tpg( 2174 struct se_wwn *wwn, 2175 struct config_group *group, 2176 const char *name) 2177 { 2178 struct sbp_tport *tport = 2179 container_of(wwn, struct sbp_tport, tport_wwn); 2180 2181 struct sbp_tpg *tpg; 2182 unsigned long tpgt; 2183 int ret; 2184 2185 if (strstr(name, "tpgt_") != name) 2186 return ERR_PTR(-EINVAL); 2187 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) 2188 return ERR_PTR(-EINVAL); 2189 2190 if (tport->tpg) { 2191 pr_err("Only one TPG per Unit is possible.\n"); 2192 return ERR_PTR(-EBUSY); 2193 } 2194 2195 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 2196 if (!tpg) { 2197 pr_err("Unable to allocate struct sbp_tpg\n"); 2198 return ERR_PTR(-ENOMEM); 2199 } 2200 2201 tpg->tport = tport; 2202 tpg->tport_tpgt = tpgt; 2203 tport->tpg = tpg; 2204 2205 /* default attribute values */ 2206 tport->enable = 0; 2207 tport->directory_id = -1; 2208 tport->mgt_orb_timeout = 15; 2209 tport->max_reconnect_timeout = 5; 2210 tport->max_logins_per_lun = 1; 2211 2212 tport->mgt_agt = sbp_management_agent_register(tport); 2213 if (IS_ERR(tport->mgt_agt)) { 2214 ret = PTR_ERR(tport->mgt_agt); 2215 goto out_free_tpg; 2216 } 2217 2218 ret = core_tpg_register(&sbp_fabric_configfs->tf_ops, wwn, 2219 &tpg->se_tpg, (void *)tpg, 2220 TRANSPORT_TPG_TYPE_NORMAL); 2221 if (ret < 0) 2222 goto out_unreg_mgt_agt; 2223 2224 return &tpg->se_tpg; 2225 2226 out_unreg_mgt_agt: 2227 sbp_management_agent_unregister(tport->mgt_agt); 2228 out_free_tpg: 2229 tport->tpg = NULL; 2230 kfree(tpg); 2231 return ERR_PTR(ret); 2232 } 2233 2234 static void sbp_drop_tpg(struct se_portal_group *se_tpg) 2235 { 2236 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2237 struct sbp_tport *tport = tpg->tport; 2238 2239 core_tpg_deregister(se_tpg); 2240 sbp_management_agent_unregister(tport->mgt_agt); 2241 tport->tpg = NULL; 2242 kfree(tpg); 2243 } 2244 2245 static struct se_wwn *sbp_make_tport( 2246 struct target_fabric_configfs *tf, 2247 struct config_group *group, 2248 const char *name) 2249 { 2250 struct sbp_tport *tport; 2251 u64 guid = 0; 2252 2253 if (sbp_parse_wwn(name, &guid) < 0) 2254 return ERR_PTR(-EINVAL); 2255 2256 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 2257 if (!tport) { 2258 pr_err("Unable to allocate struct sbp_tport\n"); 2259 return ERR_PTR(-ENOMEM); 2260 } 2261 2262 tport->guid = guid; 2263 sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid); 2264 2265 return &tport->tport_wwn; 2266 } 2267 2268 static void sbp_drop_tport(struct se_wwn *wwn) 2269 { 2270 struct sbp_tport *tport = 2271 container_of(wwn, struct sbp_tport, tport_wwn); 2272 2273 kfree(tport); 2274 } 2275 2276 static ssize_t sbp_wwn_show_attr_version( 2277 struct target_fabric_configfs *tf, 2278 char *page) 2279 { 2280 return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION); 2281 } 2282 2283 TF_WWN_ATTR_RO(sbp, version); 2284 2285 static struct configfs_attribute *sbp_wwn_attrs[] = { 2286 &sbp_wwn_version.attr, 2287 NULL, 2288 }; 2289 2290 static ssize_t sbp_tpg_show_directory_id( 2291 struct se_portal_group *se_tpg, 2292 char *page) 2293 { 2294 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2295 struct sbp_tport *tport = tpg->tport; 2296 2297 if (tport->directory_id == -1) 2298 return sprintf(page, "implicit\n"); 2299 else 2300 return sprintf(page, "%06x\n", tport->directory_id); 2301 } 2302 2303 static ssize_t sbp_tpg_store_directory_id( 2304 struct se_portal_group *se_tpg, 2305 const char *page, 2306 size_t count) 2307 { 2308 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2309 struct sbp_tport *tport = tpg->tport; 2310 unsigned long val; 2311 2312 if (tport->enable) { 2313 pr_err("Cannot change the directory_id on an active target.\n"); 2314 return -EBUSY; 2315 } 2316 2317 if (strstr(page, "implicit") == page) { 2318 tport->directory_id = -1; 2319 } else { 2320 if (kstrtoul(page, 16, &val) < 0) 2321 return -EINVAL; 2322 if (val > 0xffffff) 2323 return -EINVAL; 2324 2325 tport->directory_id = val; 2326 } 2327 2328 return count; 2329 } 2330 2331 static ssize_t sbp_tpg_show_enable( 2332 struct se_portal_group *se_tpg, 2333 char *page) 2334 { 2335 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2336 struct sbp_tport *tport = tpg->tport; 2337 return sprintf(page, "%d\n", tport->enable); 2338 } 2339 2340 static ssize_t sbp_tpg_store_enable( 2341 struct se_portal_group *se_tpg, 2342 const char *page, 2343 size_t count) 2344 { 2345 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2346 struct sbp_tport *tport = tpg->tport; 2347 unsigned long val; 2348 int ret; 2349 2350 if (kstrtoul(page, 0, &val) < 0) 2351 return -EINVAL; 2352 if ((val != 0) && (val != 1)) 2353 return -EINVAL; 2354 2355 if (tport->enable == val) 2356 return count; 2357 2358 if (val) { 2359 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { 2360 pr_err("Cannot enable a target with no LUNs!\n"); 2361 return -EINVAL; 2362 } 2363 } else { 2364 /* XXX: force-shutdown sessions instead? */ 2365 spin_lock_bh(&se_tpg->session_lock); 2366 if (!list_empty(&se_tpg->tpg_sess_list)) { 2367 spin_unlock_bh(&se_tpg->session_lock); 2368 return -EBUSY; 2369 } 2370 spin_unlock_bh(&se_tpg->session_lock); 2371 } 2372 2373 tport->enable = val; 2374 2375 ret = sbp_update_unit_directory(tport); 2376 if (ret < 0) { 2377 pr_err("Could not update Config ROM\n"); 2378 return ret; 2379 } 2380 2381 return count; 2382 } 2383 2384 TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR); 2385 TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR); 2386 2387 static struct configfs_attribute *sbp_tpg_base_attrs[] = { 2388 &sbp_tpg_directory_id.attr, 2389 &sbp_tpg_enable.attr, 2390 NULL, 2391 }; 2392 2393 static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout( 2394 struct se_portal_group *se_tpg, 2395 char *page) 2396 { 2397 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2398 struct sbp_tport *tport = tpg->tport; 2399 return sprintf(page, "%d\n", tport->mgt_orb_timeout); 2400 } 2401 2402 static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout( 2403 struct se_portal_group *se_tpg, 2404 const char *page, 2405 size_t count) 2406 { 2407 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2408 struct sbp_tport *tport = tpg->tport; 2409 unsigned long val; 2410 int ret; 2411 2412 if (kstrtoul(page, 0, &val) < 0) 2413 return -EINVAL; 2414 if ((val < 1) || (val > 127)) 2415 return -EINVAL; 2416 2417 if (tport->mgt_orb_timeout == val) 2418 return count; 2419 2420 tport->mgt_orb_timeout = val; 2421 2422 ret = sbp_update_unit_directory(tport); 2423 if (ret < 0) 2424 return ret; 2425 2426 return count; 2427 } 2428 2429 static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout( 2430 struct se_portal_group *se_tpg, 2431 char *page) 2432 { 2433 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2434 struct sbp_tport *tport = tpg->tport; 2435 return sprintf(page, "%d\n", tport->max_reconnect_timeout); 2436 } 2437 2438 static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout( 2439 struct se_portal_group *se_tpg, 2440 const char *page, 2441 size_t count) 2442 { 2443 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2444 struct sbp_tport *tport = tpg->tport; 2445 unsigned long val; 2446 int ret; 2447 2448 if (kstrtoul(page, 0, &val) < 0) 2449 return -EINVAL; 2450 if ((val < 1) || (val > 32767)) 2451 return -EINVAL; 2452 2453 if (tport->max_reconnect_timeout == val) 2454 return count; 2455 2456 tport->max_reconnect_timeout = val; 2457 2458 ret = sbp_update_unit_directory(tport); 2459 if (ret < 0) 2460 return ret; 2461 2462 return count; 2463 } 2464 2465 static ssize_t sbp_tpg_attrib_show_max_logins_per_lun( 2466 struct se_portal_group *se_tpg, 2467 char *page) 2468 { 2469 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2470 struct sbp_tport *tport = tpg->tport; 2471 return sprintf(page, "%d\n", tport->max_logins_per_lun); 2472 } 2473 2474 static ssize_t sbp_tpg_attrib_store_max_logins_per_lun( 2475 struct se_portal_group *se_tpg, 2476 const char *page, 2477 size_t count) 2478 { 2479 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2480 struct sbp_tport *tport = tpg->tport; 2481 unsigned long val; 2482 2483 if (kstrtoul(page, 0, &val) < 0) 2484 return -EINVAL; 2485 if ((val < 1) || (val > 127)) 2486 return -EINVAL; 2487 2488 /* XXX: also check against current count? */ 2489 2490 tport->max_logins_per_lun = val; 2491 2492 return count; 2493 } 2494 2495 TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR); 2496 TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR); 2497 TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR); 2498 2499 static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { 2500 &sbp_tpg_attrib_mgt_orb_timeout.attr, 2501 &sbp_tpg_attrib_max_reconnect_timeout.attr, 2502 &sbp_tpg_attrib_max_logins_per_lun.attr, 2503 NULL, 2504 }; 2505 2506 static struct target_core_fabric_ops sbp_ops = { 2507 .get_fabric_name = sbp_get_fabric_name, 2508 .get_fabric_proto_ident = sbp_get_fabric_proto_ident, 2509 .tpg_get_wwn = sbp_get_fabric_wwn, 2510 .tpg_get_tag = sbp_get_tag, 2511 .tpg_get_default_depth = sbp_get_default_depth, 2512 .tpg_get_pr_transport_id = sbp_get_pr_transport_id, 2513 .tpg_get_pr_transport_id_len = sbp_get_pr_transport_id_len, 2514 .tpg_parse_pr_out_transport_id = sbp_parse_pr_out_transport_id, 2515 .tpg_check_demo_mode = sbp_check_true, 2516 .tpg_check_demo_mode_cache = sbp_check_true, 2517 .tpg_check_demo_mode_write_protect = sbp_check_false, 2518 .tpg_check_prod_mode_write_protect = sbp_check_false, 2519 .tpg_alloc_fabric_acl = sbp_alloc_fabric_acl, 2520 .tpg_release_fabric_acl = sbp_release_fabric_acl, 2521 .tpg_get_inst_index = sbp_tpg_get_inst_index, 2522 .release_cmd = sbp_release_cmd, 2523 .shutdown_session = sbp_shutdown_session, 2524 .close_session = sbp_close_session, 2525 .sess_get_index = sbp_sess_get_index, 2526 .write_pending = sbp_write_pending, 2527 .write_pending_status = sbp_write_pending_status, 2528 .set_default_node_attributes = sbp_set_default_node_attrs, 2529 .get_task_tag = sbp_get_task_tag, 2530 .get_cmd_state = sbp_get_cmd_state, 2531 .queue_data_in = sbp_queue_data_in, 2532 .queue_status = sbp_queue_status, 2533 .queue_tm_rsp = sbp_queue_tm_rsp, 2534 .aborted_task = sbp_aborted_task, 2535 .check_stop_free = sbp_check_stop_free, 2536 2537 .fabric_make_wwn = sbp_make_tport, 2538 .fabric_drop_wwn = sbp_drop_tport, 2539 .fabric_make_tpg = sbp_make_tpg, 2540 .fabric_drop_tpg = sbp_drop_tpg, 2541 .fabric_post_link = sbp_post_link_lun, 2542 .fabric_pre_unlink = sbp_pre_unlink_lun, 2543 .fabric_make_np = NULL, 2544 .fabric_drop_np = NULL, 2545 .fabric_make_nodeacl = sbp_make_nodeacl, 2546 .fabric_drop_nodeacl = sbp_drop_nodeacl, 2547 }; 2548 2549 static int sbp_register_configfs(void) 2550 { 2551 struct target_fabric_configfs *fabric; 2552 int ret; 2553 2554 fabric = target_fabric_configfs_init(THIS_MODULE, "sbp"); 2555 if (IS_ERR(fabric)) { 2556 pr_err("target_fabric_configfs_init() failed\n"); 2557 return PTR_ERR(fabric); 2558 } 2559 2560 fabric->tf_ops = sbp_ops; 2561 2562 /* 2563 * Setup default attribute lists for various fabric->tf_cit_tmpl 2564 */ 2565 fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = sbp_wwn_attrs; 2566 fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = sbp_tpg_base_attrs; 2567 fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = sbp_tpg_attrib_attrs; 2568 fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; 2569 fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; 2570 fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; 2571 fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; 2572 fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; 2573 fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL; 2574 2575 ret = target_fabric_configfs_register(fabric); 2576 if (ret < 0) { 2577 pr_err("target_fabric_configfs_register() failed for SBP\n"); 2578 return ret; 2579 } 2580 2581 sbp_fabric_configfs = fabric; 2582 2583 return 0; 2584 }; 2585 2586 static void sbp_deregister_configfs(void) 2587 { 2588 if (!sbp_fabric_configfs) 2589 return; 2590 2591 target_fabric_configfs_deregister(sbp_fabric_configfs); 2592 sbp_fabric_configfs = NULL; 2593 }; 2594 2595 static int __init sbp_init(void) 2596 { 2597 int ret; 2598 2599 ret = sbp_register_configfs(); 2600 if (ret < 0) 2601 return ret; 2602 2603 return 0; 2604 }; 2605 2606 static void __exit sbp_exit(void) 2607 { 2608 sbp_deregister_configfs(); 2609 }; 2610 2611 MODULE_DESCRIPTION("FireWire SBP fabric driver"); 2612 MODULE_LICENSE("GPL"); 2613 module_init(sbp_init); 2614 module_exit(sbp_exit); 2615