/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>	/* usleep_range(), used by sbp_run_transaction() */
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/configfs_macros.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

static const struct target_core_fabric_ops sbp_ops;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start = CSR_REGISTER_BASE + 0x10000,
	.end = 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
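	/*
	 * No early break: the login path creates at most one session per
	 * initiator GUID, so the last match is also the only one.
	 */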
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}

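/*
 * Free the session once its login list is empty.  Callers on the
 * session-maintenance path pass cancel_work = false, since cancelling
 * the delayed work synchronously from within that work would deadlock.
 */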
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *);

static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators.  Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target.
			 * When the OS takes control of the session it issues
			 * its own LOGIN rather than a RECONNECT.  To avoid
			 * the machine waiting until the reconnect_hold
			 * expires, we can skip the ACCESS_DENIED errors to
			 * speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

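		/* this also drops the session if it holds no other logins */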
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

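/*
 * LOGOUT releases a single login; the session itself goes away with its
 * last login.  Only the node that logged in may log out.
 */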
static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}

static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	int state;

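	/* reads return the current agent state; quadlet writes are ignored */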
	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
		    agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
	int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
	int tcode, int destination, int source, int generation,
	unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

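	/* sample the session's bus generation and node ID under its lock */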
	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}

static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
	struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with an increasing delay between
 * attempts.
 */
static int sbp_run_transaction(struct fw_card *card, int tcode,
		int destination_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}

static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

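	/* direction bit set: data flows from the target back to the initiator */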
	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}

/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}

static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}

static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
	struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

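/* remove the address handler and wait for any in-flight ORB work */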
static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
		struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
		struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
		struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
		struct sbp_target_request, se_cmd);

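	/* freeing the command ends up back in sbp_release_cmd() */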
	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}

static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 * MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
			err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}

static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

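	/* the management agent address handler lives for the TPG's lifetime */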
static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	pr_info("err %d len %td pos %d\n", err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
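
/*
 * configfs usage sketch (path illustrative, assuming the generic target
 * configfs layout):
 *
 *   mkdir /sys/kernel/config/target/sbp/0001f5fffe123456/tpgt_1
 *
 * The group name must be "tpgt_" followed by a decimal tag, and only one
 * TPG may exist per unit (WWN).
 */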
static struct se_portal_group *sbp_make_tpg(
		struct se_wwn *wwn,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
		struct target_fabric_configfs *tf,
		struct config_group *group,
		const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}

static ssize_t sbp_wwn_show_attr_version(
		struct target_fabric_configfs *tf,
		char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

TF_WWN_ATTR_RO(sbp, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_version.attr,
	NULL,
};

static ssize_t sbp_tpg_show_directory_id(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	if (tport->directory_id == -1)
		return sprintf(page, "implicit\n");
	else
		return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_store_directory_id(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (tport->enable) {
		pr_err("Cannot change the directory_id on an active target.\n");
		return -EBUSY;
	}

	if (strstr(page, "implicit") == page) {
		tport->directory_id = -1;
	} else {
		if (kstrtoul(page, 16, &val) < 0)
			return -EINVAL;
		if (val > 0xffffff)
			return -EINVAL;

		tport->directory_id = val;
	}

	return count;
}
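
/*
 * Writing 1 to "enable" publishes the unit directory in the local config
 * ROM, making the target visible to FireWire initiators; writing 0
 * removes it again.  Enabling requires at least one LUN, and disabling is
 * refused while sessions are still logged in.
 */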
static ssize_t sbp_tpg_show_enable(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}

static ssize_t sbp_tpg_store_enable(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}

TF_TPG_BASE_ATTR(sbp, directory_id, S_IRUGO | S_IWUSR);
TF_TPG_BASE_ATTR(sbp, enable, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_directory_id.attr,
	&sbp_tpg_enable.attr,
	NULL,
};

static ssize_t sbp_tpg_attrib_show_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_store_mgt_orb_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	if (tport->mgt_orb_timeout == val)
		return count;

	tport->mgt_orb_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_store_max_reconnect_timeout(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 32767))
		return -EINVAL;

	if (tport->max_reconnect_timeout == val)
		return count;

	tport->max_reconnect_timeout = val;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t sbp_tpg_attrib_show_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		char *page)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}

static ssize_t sbp_tpg_attrib_store_max_logins_per_lun(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;

	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val < 1) || (val > 127))
		return -EINVAL;

	/* XXX: also check against current count? */

	tport->max_logins_per_lun = val;

	return count;
}

TF_TPG_ATTRIB_ATTR(sbp, mgt_orb_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_reconnect_timeout, S_IRUGO | S_IWUSR);
TF_TPG_ATTRIB_ATTR(sbp, max_logins_per_lun, S_IRUGO | S_IWUSR);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_mgt_orb_timeout.attr,
	&sbp_tpg_attrib_max_reconnect_timeout.attr,
	&sbp_tpg_attrib_max_logins_per_lun.attr,
	NULL,
};

static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};

static int __init sbp_init(void)
{
	return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
	target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);
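
/*
 * End-to-end usage sketch (illustrative; assumes the generic target
 * configfs layout, a WWN of 0001f5fffe123456, and an already-configured
 * backstore device directory at $BACKSTORE):
 *
 *   modprobe sbp_target
 *   cd /sys/kernel/config/target
 *   mkdir -p sbp/0001f5fffe123456/tpgt_1/lun/lun_0
 *   ln -s $BACKSTORE sbp/0001f5fffe123456/tpgt_1/lun/lun_0/link
 *   echo 1 > sbp/0001f5fffe123456/tpgt_1/enable
 */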