/*
 * SBP2 target driver (SCSI over IEEE1394 in target mode)
 *
 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define KMSG_COMPONENT "sbp_target"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include <asm/unaligned.h>

#include "sbp_target.h"

static const struct target_core_fabric_ops sbp_ops;

/* FireWire address region for management and command block address handlers */
static const struct fw_address_region sbp_register_region = {
	.start	= CSR_REGISTER_BASE + 0x10000,
	.end	= 0x1000000000000ULL,
};

static const u32 sbp_unit_directory_template[] = {
	0x1200609e, /* unit_specifier_id: NCITS/T10 */
	0x13010483, /* unit_sw_version: 1155D Rev 4 */
	0x3800609e, /* command_set_specifier_id: NCITS/T10 */
	0x390104d8, /* command_set: SPC-2 */
	0x3b000000, /* command_set_revision: 0 */
	0x3c000001, /* firmware_revision: 1 */
};

#define SESSION_MAINTENANCE_INTERVAL HZ

static atomic_t login_id = ATOMIC_INIT(0);

static void session_maintenance_work(struct work_struct *);
static int sbp_run_transaction(struct fw_card *, int, int, int, int,
		unsigned long long, void *, size_t);

static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
{
	int ret;
	__be32 high, low;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
			&high, sizeof(high));
	if (ret != RCODE_COMPLETE)
		return ret;

	ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
			req->node_addr, req->generation, req->speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
			&low, sizeof(low));
	if (ret != RCODE_COMPLETE)
		return ret;

	*guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);

	return RCODE_COMPLETE;
}

static struct sbp_session *sbp_session_find_by_guid(
		struct sbp_tpg *tpg, u64 guid)
{
	struct se_session *se_sess;
	struct sbp_session *sess, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;
		if (sess->guid == guid)
			found = sess;
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}
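
/*
 * Lookup helpers for login descriptors. They walk the TPG's session
 * list and/or a session's login list under the corresponding locks,
 * and return a bare pointer without taking a reference; callers
 * effectively rely on the management agent handling one request at
 * a time.
 */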
static struct sbp_login_descriptor *sbp_login_find_by_lun(
		struct sbp_session *session, u32 unpacked_lun)
{
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&session->lock);
	list_for_each_entry(login, &session->login_list, link) {
		if (login->login_lun == unpacked_lun)
			found = login;
	}
	spin_unlock_bh(&session->lock);

	return found;
}

static int sbp_login_count_all_by_lun(
		struct sbp_tpg *tpg,
		u32 unpacked_lun,
		int exclusive)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	int count = 0;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_lun != unpacked_lun)
				continue;

			if (!exclusive || login->exclusive)
				count++;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return count;
}

static struct sbp_login_descriptor *sbp_login_find_by_id(
		struct sbp_tpg *tpg, int login_id)
{
	struct se_session *se_sess;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login, *found = NULL;

	spin_lock_bh(&tpg->se_tpg.session_lock);
	list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
		sess = se_sess->fabric_sess_ptr;

		spin_lock_bh(&sess->lock);
		list_for_each_entry(login, &sess->login_list, link) {
			if (login->login_id == login_id)
				found = login;
		}
		spin_unlock_bh(&sess->lock);
	}
	spin_unlock_bh(&tpg->se_tpg.session_lock);

	return found;
}

static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
{
	struct se_portal_group *se_tpg = &tpg->se_tpg;
	struct se_lun *se_lun;

	rcu_read_lock();
	hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
		if (se_lun->unpacked_lun == login_lun) {
			rcu_read_unlock();
			*err = 0;
			return login_lun;
		}
	}
	rcu_read_unlock();

	*err = -ENODEV;
	return login_lun;
}

static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];
	struct se_node_acl *se_nacl;

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");

		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg, guid_str);
	if (!se_nacl) {
		pr_warn("Node ACL not found for %s\n", guid_str);

		transport_free_session(sess->se_sess);
		kfree(sess);

		return ERR_PTR(-EPERM);
	}

	sess->se_sess->se_node_acl = se_nacl;

	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);

	sess->guid = guid;

	transport_register_session(&tpg->se_tpg, se_nacl, sess->se_sess, sess);

	return sess;
}
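
/*
 * Drop a session once its last login has gone. Safe to call with
 * logins still present: it returns early in that case. cancel_work
 * must be false when called from the maintenance work item itself,
 * since cancel_delayed_work_sync() would deadlock on the running work.
 */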
static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
{
	spin_lock_bh(&sess->lock);
	if (!list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (cancel_work)
		cancel_delayed_work_sync(&sess->maint_work);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);

	if (sess->card)
		fw_card_put(sess->card);

	kfree(sess);
}

static void sbp_target_agent_unregister(struct sbp_target_agent *);

static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *);
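
/*
 * Handle a LOGIN management ORB: validate the requested LUN, read the
 * initiator's GUID from its config ROM, enforce the exclusive-login
 * and max_logins_per_lun policies, then create (or reuse) the session
 * and write the login response block back to the initiator.
 */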
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
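
/*
 * Like the LOGIN handler above, the remaining management request
 * handlers only fill in req->status.status; sbp_mgt_agent_process()
 * adds the common SRC/LEN/ORB-offset fields and writes the finished
 * status block to the initiator's status FIFO.
 */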
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}

static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
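
/*
 * LOGOUT: look the login up by ID and check that the request comes
 * from the node that owns it before tearing the login down.
 */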
static void sbp_management_request_logout(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int id;
	struct sbp_login_descriptor *login;

	id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));

	login = sbp_login_find_by_id(tpg, id);
	if (!login) {
		pr_warn("cannot find login: %d\n", id);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
		return;
	}

	pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
		login->login_lun, login->login_id);

	if (req->node_addr != login->sess->node_id) {
		pr_warn("logout from different node ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	sbp_login_release(login, true);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}

static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
				sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}

static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}

static void session_maintenance_work(struct work_struct *work)
{
	struct sbp_session *sess = container_of(work, struct sbp_session,
			maint_work.work);

	/* could be called while tearing down the session */
	spin_lock_bh(&sess->lock);
	if (list_empty(&sess->login_list)) {
		spin_unlock_bh(&sess->lock);
		return;
	}
	spin_unlock_bh(&sess->lock);

	if (sess->node_id != -1) {
		/* check for bus reset and make node_id invalid */
		session_check_for_reset(sess);

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
		/* still waiting for reconnect */
		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	} else {
		/* reconnect timeout has expired */
		session_reconnect_expired(sess);
	}
}
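
/*
 * Per-register handlers for the command block agent. Offsets within
 * the agent's 0x20-byte address block (dispatched in tgt_agent_rw()):
 * 0x00 AGENT_STATE, 0x04 AGENT_RESET, 0x08 ORB_POINTER,
 * 0x10 DOORBELL, 0x14 UNSOLICITED_STATUS_ENABLE.
 */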
static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	int state;

	switch (tcode) {
	case TCODE_READ_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_STATE READ\n");

		spin_lock_bh(&agent->lock);
		state = agent->state;
		spin_unlock_bh(&agent->lock);

		*(__be32 *)data = cpu_to_be32(state);

		return RCODE_COMPLETE;

	case TCODE_WRITE_QUADLET_REQUEST:
		/* ignored */
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent AGENT_RESET\n");
		spin_lock_bh(&agent->lock);
		agent->state = AGENT_STATE_RESET;
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
		    agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
				agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
		struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}

static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
		int tcode, void *data, struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
		/* ignored as we don't send unsolicited status */
		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
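
/*
 * Address handler for the whole command block agent register block.
 * Requests are accepted only from the logged-in node and the current
 * bus generation; anything else is rejected before dispatch.
 */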
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *data, size_t length,
		void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static void sbp_handle_command(struct sbp_target_request *);
static int sbp_send_status(struct sbp_target_request *);
static void sbp_free_request(struct sbp_target_request *);

static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
	case 0: /* Format specified by this standard */
		sbp_handle_command(req);
		return;
	case 1: /* Reserved for future standardization */
	case 2: /* Vendor-dependent */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_REQ_TYPE_NOTSUPP));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	case 3: /* Dummy ORB */
		req->status.status |= cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_DEAD(0) |
				STATUS_BLOCK_LEN(1) |
				STATUS_BLOCK_SBP_STATUS(
					SBP_STATUS_DUMMY_ORB_COMPLETE));
		sbp_send_status(req);
		sbp_free_request(req);
		return;
	default:
		BUG();
	}
}

/* used to double-check we haven't been issued an AGENT_RESET */
static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
{
	bool active;

	spin_lock_bh(&agent->lock);
	active = (agent->state == AGENT_STATE_ACTIVE);
	spin_unlock_bh(&agent->lock);

	return active;
}
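
/*
 * Fetch ORBs starting at the current ORB_POINTER and hand each one to
 * tgt_agent_process_work(). After a DOORBELL the first ORB has already
 * been processed, so only its next_ORB field is followed.
 */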
static void tgt_agent_fetch_work(struct work_struct *work)
{
	struct sbp_target_agent *agent =
		container_of(work, struct sbp_target_agent, work);
	struct sbp_session *sess = agent->login->sess;
	struct sbp_target_request *req;
	int ret;
	bool doorbell = agent->doorbell;
	u64 next_orb = agent->orb_pointer;

	while (next_orb && tgt_agent_check_active(agent)) {
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);
			return;
		}

		req->login = agent->login;
		req->orb_pointer = next_orb;

		req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
					req->orb_pointer >> 32));
		req->status.orb_low = cpu_to_be32(
				req->orb_pointer & 0xfffffffc);

		/* read in the ORB */
		ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
				sess->node_id, sess->generation, sess->speed,
				req->orb_pointer, &req->orb, sizeof(req->orb));
		if (ret != RCODE_COMPLETE) {
			pr_debug("tgt_orb fetch failed: %x\n", ret);
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED) |
					STATUS_BLOCK_RESP(
						STATUS_RESP_TRANSPORT_FAILURE) |
					STATUS_BLOCK_DEAD(1) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_UNSPECIFIED_ERROR));
			spin_lock_bh(&agent->lock);
			agent->state = AGENT_STATE_DEAD;
			spin_unlock_bh(&agent->lock);

			sbp_send_status(req);
			sbp_free_request(req);
			return;
		}

		/* check the next_ORB field */
		if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
			next_orb = 0;
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_FINISHED));
		} else {
			next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
			req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
						STATUS_SRC_ORB_CONTINUING));
		}

		if (tgt_agent_check_active(agent) && !doorbell) {
			INIT_WORK(&req->work, tgt_agent_process_work);
			queue_work(system_unbound_wq, &req->work);
		} else {
			/* don't process this request, just check next_ORB */
			sbp_free_request(req);
		}

		spin_lock_bh(&agent->lock);
		doorbell = agent->doorbell = false;

		/* check if we should carry on processing */
		if (next_orb)
			agent->orb_pointer = next_orb;
		else
			agent->state = AGENT_STATE_SUSPENDED;

		spin_unlock_bh(&agent->lock);
	}
}

static struct sbp_target_agent *sbp_target_agent_register(
		struct sbp_login_descriptor *login)
{
	struct sbp_target_agent *agent;
	int ret;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);

	agent->handler.length = 0x20;
	agent->handler.address_callback = tgt_agent_rw;
	agent->handler.callback_data = agent;

	agent->login = login;
	agent->state = AGENT_STATE_RESET;
	INIT_WORK(&agent->work, tgt_agent_fetch_work);
	agent->orb_pointer = 0;
	agent->doorbell = false;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}

static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

/*
 * Simple wrapper around fw_run_transaction that retries the transaction
 * several times in case of failure, with a quadratic backoff.
 */
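
/*
 * The delay grows as 5 * attempt^2 microseconds, i.e. 5, 20, 45 and
 * 80 us between successive attempts; RCODE_COMPLETE and the permanent
 * errors (type, address, generation) are returned immediately.
 */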
static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
		int generation, int speed, unsigned long long offset,
		void *payload, size_t length)
{
	int attempt, ret, delay;

	for (attempt = 1; attempt <= 5; attempt++) {
		ret = fw_run_transaction(card, tcode, destination_id,
				generation, speed, offset, payload, length);

		switch (ret) {
		case RCODE_COMPLETE:
		case RCODE_TYPE_ERROR:
		case RCODE_ADDRESS_ERROR:
		case RCODE_GENERATION:
			return ret;

		default:
			delay = 5 * attempt * attempt;
			usleep_range(delay, delay * 2);
		}
	}

	return ret;
}

/*
 * Wrapper around sbp_run_transaction that gets the card, destination,
 * generation and speed out of the request's session.
 */
static int sbp_run_request_transaction(struct sbp_target_request *req,
		int tcode, unsigned long long offset, void *payload,
		size_t length)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	struct fw_card *card;
	int node_id, generation, speed, ret;

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	speed = sess->speed;
	spin_unlock_bh(&sess->lock);

	ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
			offset, payload, length);

	fw_card_put(card);

	return ret;
}

static int sbp_fetch_command(struct sbp_target_request *req)
{
	int ret, cmd_len, copy_len;

	cmd_len = scsi_command_size(req->orb.command_block);

	req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
	if (!req->cmd_buf)
		return -ENOMEM;

	memcpy(req->cmd_buf, req->orb.command_block,
		min_t(int, cmd_len, sizeof(req->orb.command_block)));

	if (cmd_len > sizeof(req->orb.command_block)) {
		pr_debug("sbp_fetch_command: filling in long command\n");
		copy_len = cmd_len - sizeof(req->orb.command_block);

		ret = sbp_run_request_transaction(req,
				TCODE_READ_BLOCK_REQUEST,
				req->orb_pointer + sizeof(req->orb),
				req->cmd_buf + sizeof(req->orb.command_block),
				copy_len);
		if (ret != RCODE_COMPLETE)
			return -EIO;
	}

	return 0;
}

static int sbp_fetch_page_table(struct sbp_target_request *req)
{
	int pg_tbl_sz, ret;
	struct sbp_page_table_entry *pg_tbl;

	if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
		return 0;

	pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
		sizeof(struct sbp_page_table_entry);

	pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
	if (!pg_tbl)
		return -ENOMEM;

	ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			pg_tbl, pg_tbl_sz);
	if (ret != RCODE_COMPLETE) {
		kfree(pg_tbl);
		return -EIO;
	}

	req->pg_tbl = pg_tbl;
	return 0;
}
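
/*
 * Derive the transfer length and DMA direction from the command block
 * ORB: data_size counts page table elements when a page table is
 * present, otherwise it is the byte count itself.
 */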
static void sbp_calc_data_length_direction(struct sbp_target_request *req,
	u32 *data_len, enum dma_data_direction *data_dir)
{
	int data_size, direction, idx;

	data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
	direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));

	if (!data_size) {
		*data_len = 0;
		*data_dir = DMA_NONE;
		return;
	}

	*data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->pg_tbl) {
		*data_len = 0;
		for (idx = 0; idx < data_size; idx++) {
			*data_len += be16_to_cpu(
					req->pg_tbl[idx].segment_length);
		}
	} else {
		*data_len = data_size;
	}
}

static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}
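
/*
 * Move the data for a command with FireWire read/write block
 * transactions against the initiator's memory, either from a single
 * contiguous descriptor or segment by segment from the fetched page
 * table, in chunks of at most max_payload bytes.
 */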
/*
 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_rw_data: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}

static int sbp_send_status(struct sbp_target_request *req)
{
	int ret, length;
	struct sbp_login_descriptor *login = req->login;

	length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;

	ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
			login->status_fifo_addr, &req->status, length);
	if (ret != RCODE_COMPLETE) {
		pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
		return -EIO;
	}

	pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
			req->orb_pointer);

	return 0;
}

static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) {		/* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_sense_mangle: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;	/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
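
/*
 * Build the final status block: mangle any autosense data into the
 * SBP-2 status format via sbp_sense_mangle(), or report plain OK,
 * then write it to the initiator's status FIFO.
 */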
static int sbp_send_sense(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;

	if (se_cmd->scsi_sense_length) {
		sbp_sense_mangle(req);
	} else {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
	}

	return sbp_send_status(req);
}

static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}
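
/*
 * Work item behind the management agent: fetch the management ORB,
 * dispatch on its function, then write the status block to the status
 * FIFO named in the ORB and return the agent to the IDLE state.
 */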
static void sbp_mgt_agent_process(struct work_struct *work)
{
	struct sbp_management_agent *agent =
		container_of(work, struct sbp_management_agent, work);
	struct sbp_management_request *req = agent->request;
	int ret;
	int status_data_len = 0;

	/* fetch the ORB from the initiator */
	ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		agent->orb_offset, &req->orb, sizeof(req->orb));
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb fetch failed: %x\n", ret);
		goto out;
	}

	pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
		sbp2_pointer_to_addr(&req->orb.ptr1),
		sbp2_pointer_to_addr(&req->orb.ptr2),
		be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
		sbp2_pointer_to_addr(&req->orb.status_fifo));

	if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
		ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
		pr_err("mgt_orb bad request\n");
		goto out;
	}

	switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
	case MANAGEMENT_ORB_FUNCTION_LOGIN:
		sbp_management_request_login(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
		sbp_management_request_query_logins(agent, req,
				&status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_RECONNECT:
		sbp_management_request_reconnect(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
		pr_notice("SET PASSWORD not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGOUT:
		sbp_management_request_logout(agent, req, &status_data_len);
		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
		pr_notice("ABORT TASK not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
		pr_notice("ABORT TASK SET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
		pr_notice("LOGICAL UNIT RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
		pr_notice("TARGET RESET not implemented\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;

	default:
		pr_notice("unknown management function 0x%x\n",
			MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));

		break;
	}

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
		STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
		STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
	req->status.orb_low = cpu_to_be32(agent->orb_offset);

	/* write the status block back to the initiator */
	ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
		req->node_addr, req->generation, req->speed,
		sbp2_pointer_to_addr(&req->orb.status_fifo),
		&req->status, 8 + status_data_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("mgt_orb status write failed: %x\n", ret);
		goto out;
	}

out:
	fw_card_put(req->card);
	kfree(req);

	spin_lock_bh(&agent->lock);
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	spin_unlock_bh(&agent->lock);
}

static void sbp_mgt_agent_rw(struct fw_card *card,
	struct fw_request *request, int tcode, int destination, int source,
	int generation, unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_management_agent *agent = callback_data;
	struct sbp2_pointer *ptr = data;
	int rcode = RCODE_ADDRESS_ERROR;

	if (!agent->tport->enable)
		goto out;

	if ((offset != agent->handler.offset) || (length != 8))
		goto out;

	if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
		struct sbp_management_request *req;
		int prev_state;

		spin_lock_bh(&agent->lock);
		prev_state = agent->state;
		agent->state = MANAGEMENT_AGENT_STATE_BUSY;
		spin_unlock_bh(&agent->lock);

		if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
			pr_notice("ignoring management request while busy\n");
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req = kzalloc(sizeof(*req), GFP_ATOMIC);
		if (!req) {
			rcode = RCODE_CONFLICT_ERROR;
			goto out;
		}

		req->card = fw_card_get(card);
		req->generation = generation;
		req->node_addr = source;
		req->speed = fw_get_request_speed(request);

		agent->orb_offset = sbp2_pointer_to_addr(ptr);
		agent->request = req;

		queue_work(system_unbound_wq, &agent->work);
		rcode = RCODE_COMPLETE;
	} else if (tcode == TCODE_READ_BLOCK_REQUEST) {
		addr_to_sbp2_pointer(agent->orb_offset, ptr);
		rcode = RCODE_COMPLETE;
	} else {
		rcode = RCODE_TYPE_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}

static struct sbp_management_agent *sbp_management_agent_register(
		struct sbp_tport *tport)
{
	int ret;
	struct sbp_management_agent *agent;

	agent = kmalloc(sizeof(*agent), GFP_KERNEL);
	if (!agent)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&agent->lock);
	agent->tport = tport;
	agent->handler.length = 0x08;
	agent->handler.address_callback = sbp_mgt_agent_rw;
	agent->handler.callback_data = agent;
	agent->state = MANAGEMENT_AGENT_STATE_IDLE;
	INIT_WORK(&agent->work, sbp_mgt_agent_process);
	agent->orb_offset = 0;
	agent->request = NULL;

	ret = fw_core_add_address_handler(&agent->handler,
			&sbp_register_region);
	if (ret < 0) {
		kfree(agent);
		return ERR_PTR(ret);
	}

	return agent;
}
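
/*
 * Removing the address handler first ensures no new request can queue
 * work once the pending work item has been cancelled.
 */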
static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}

static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *sbp_get_fabric_name(void)
{
	return "sbp";
}

static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}

static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void sbp_close_session(struct se_session *se_sess)
{
	return;
}

static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}

static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}

/*
 * Called after command (no data transfer) or after the write (to device)
 * operation is completed
 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	return sbp_send_sense(req);
}

static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}

static void sbp_aborted_task(struct se_cmd *se_cmd)
{
	return;
}
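
/*
 * Called on command completion: drop the fabric's reference here;
 * sbp_release_cmd() frees the request when the last se_cmd reference
 * goes away.
 */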
static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}

static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}
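
/*
 * Rebuild the SBP unit directory in the local config ROM. Called
 * whenever LUNs are linked or unlinked and when the target is enabled
 * or disabled; a disabled or TPG-less target just has its descriptor
 * removed.
 */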
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 * MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}

static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
{
	const char *cp;
	char c, nibble;
	int pos = 0, err;

	*wwn = 0;
	for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
		c = *cp;
		if (c == '\n' && cp[1] == '\0')
			continue;
		if (c == '\0') {
			err = 2;
			if (pos != 16)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		*wwn = (*wwn << 4) | nibble;
		pos++;
	}
	err = 4;
fail:
	printk(KERN_INFO "err %u len %zu pos %u\n",
		err, cp - name, pos);
	return -1;
}

static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
{
	return snprintf(buf, len, "%016llx", wwn);
}

static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}

static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}

static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
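
/*
 * Only a single TPG per unit is supported; it owns the management
 * agent and carries the tport's default attribute values.
 */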
static struct se_portal_group *sbp_make_tpg(
                struct se_wwn *wwn,
                struct config_group *group,
                const char *name)
{
        struct sbp_tport *tport =
                container_of(wwn, struct sbp_tport, tport_wwn);

        struct sbp_tpg *tpg;
        unsigned long tpgt;
        int ret;

        if (strstr(name, "tpgt_") != name)
                return ERR_PTR(-EINVAL);
        if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
                return ERR_PTR(-EINVAL);

        if (tport->tpg) {
                pr_err("Only one TPG per Unit is possible.\n");
                return ERR_PTR(-EBUSY);
        }

        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
        if (!tpg) {
                pr_err("Unable to allocate struct sbp_tpg\n");
                return ERR_PTR(-ENOMEM);
        }

        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
        tport->tpg = tpg;

        /* default attribute values */
        tport->enable = 0;
        tport->directory_id = -1;
        tport->mgt_orb_timeout = 15;
        tport->max_reconnect_timeout = 5;
        tport->max_logins_per_lun = 1;

        tport->mgt_agt = sbp_management_agent_register(tport);
        if (IS_ERR(tport->mgt_agt)) {
                ret = PTR_ERR(tport->mgt_agt);
                goto out_free_tpg;
        }

        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
        if (ret < 0)
                goto out_unreg_mgt_agt;

        return &tpg->se_tpg;

out_unreg_mgt_agt:
        sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
        tport->tpg = NULL;
        kfree(tpg);
        return ERR_PTR(ret);
}

static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;

        core_tpg_deregister(se_tpg);
        sbp_management_agent_unregister(tport->mgt_agt);
        tport->tpg = NULL;
        kfree(tpg);
}

static struct se_wwn *sbp_make_tport(
                struct target_fabric_configfs *tf,
                struct config_group *group,
                const char *name)
{
        struct sbp_tport *tport;
        u64 guid = 0;

        if (sbp_parse_wwn(name, &guid) < 0)
                return ERR_PTR(-EINVAL);

        tport = kzalloc(sizeof(*tport), GFP_KERNEL);
        if (!tport) {
                pr_err("Unable to allocate struct sbp_tport\n");
                return ERR_PTR(-ENOMEM);
        }

        tport->guid = guid;
        sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

        return &tport->tport_wwn;
}

static void sbp_drop_tport(struct se_wwn *wwn)
{
        struct sbp_tport *tport =
                container_of(wwn, struct sbp_tport, tport_wwn);

        kfree(tport);
}

static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
        return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}

CONFIGFS_ATTR_RO(sbp_wwn_, version);

static struct configfs_attribute *sbp_wwn_attrs[] = {
        &sbp_wwn_attr_version,
        NULL,
};

static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;

        if (tport->directory_id == -1)
                return sprintf(page, "implicit\n");
        else
                return sprintf(page, "%06x\n", tport->directory_id);
}

static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        unsigned long val;

        if (tport->enable) {
                pr_err("Cannot change the directory_id on an active target.\n");
                return -EBUSY;
        }

        if (strstr(page, "implicit") == page) {
                tport->directory_id = -1;
        } else {
                if (kstrtoul(page, 16, &val) < 0)
                        return -EINVAL;
                if (val > 0xffffff)
                        return -EINVAL;

                tport->directory_id = val;
        }

        return count;
}

static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        return sprintf(page, "%d\n", tport->enable);
}
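
/*
 * Writing "1" builds and publishes the SBP unit directory in the local
 * config ROM; this fails unless at least one LUN has been linked into
 * the TPG.  Writing "0" is refused with -EBUSY while initiator sessions
 * are still logged in.
 */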
static ssize_t sbp_tpg_enable_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        unsigned long val;
        int ret;

        if (kstrtoul(page, 0, &val) < 0)
                return -EINVAL;
        if ((val != 0) && (val != 1))
                return -EINVAL;

        if (tport->enable == val)
                return count;

        if (val) {
                if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
                        pr_err("Cannot enable a target with no LUNs!\n");
                        return -EINVAL;
                }
        } else {
                /* XXX: force-shutdown sessions instead? */
                spin_lock_bh(&se_tpg->session_lock);
                if (!list_empty(&se_tpg->tpg_sess_list)) {
                        spin_unlock_bh(&se_tpg->session_lock);
                        return -EBUSY;
                }
                spin_unlock_bh(&se_tpg->session_lock);
        }

        tport->enable = val;

        ret = sbp_update_unit_directory(tport);
        if (ret < 0) {
                pr_err("Could not update Config ROM\n");
                return ret;
        }

        return count;
}

CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

static struct configfs_attribute *sbp_tpg_base_attrs[] = {
        &sbp_tpg_attr_directory_id,
        &sbp_tpg_attr_enable,
        NULL,
};

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
                char *page)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}

static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        unsigned long val;
        int ret;

        if (kstrtoul(page, 0, &val) < 0)
                return -EINVAL;
        if ((val < 1) || (val > 127))
                return -EINVAL;

        if (tport->mgt_orb_timeout == val)
                return count;

        tport->mgt_orb_timeout = val;

        ret = sbp_update_unit_directory(tport);
        if (ret < 0)
                return ret;

        return count;
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
                char *page)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}

static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        unsigned long val;
        int ret;

        if (kstrtoul(page, 0, &val) < 0)
                return -EINVAL;
        if ((val < 1) || (val > 32767))
                return -EINVAL;

        if (tport->max_reconnect_timeout == val)
                return count;

        tport->max_reconnect_timeout = val;

        ret = sbp_update_unit_directory(tport);
        if (ret < 0)
                return ret;

        return count;
}

static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
                char *page)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        return sprintf(page, "%d\n", tport->max_logins_per_lun);
}
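
/*
 * Note: changing max_logins_per_lun is not validated against logins
 * that already exist; it only constrains subsequent login requests
 * (see the XXX below).
 */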
static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
        struct sbp_tport *tport = tpg->tport;
        unsigned long val;

        if (kstrtoul(page, 0, &val) < 0)
                return -EINVAL;
        if ((val < 1) || (val > 127))
                return -EINVAL;

        /* XXX: also check against current count? */

        tport->max_logins_per_lun = val;

        return count;
}

CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
        &sbp_tpg_attrib_attr_mgt_orb_timeout,
        &sbp_tpg_attrib_attr_max_reconnect_timeout,
        &sbp_tpg_attrib_attr_max_logins_per_lun,
        NULL,
};

static const struct target_core_fabric_ops sbp_ops = {
        .module = THIS_MODULE,
        .name = "sbp",
        .get_fabric_name = sbp_get_fabric_name,
        .tpg_get_wwn = sbp_get_fabric_wwn,
        .tpg_get_tag = sbp_get_tag,
        .tpg_check_demo_mode = sbp_check_true,
        .tpg_check_demo_mode_cache = sbp_check_true,
        .tpg_check_demo_mode_write_protect = sbp_check_false,
        .tpg_check_prod_mode_write_protect = sbp_check_false,
        .tpg_get_inst_index = sbp_tpg_get_inst_index,
        .release_cmd = sbp_release_cmd,
        .shutdown_session = sbp_shutdown_session,
        .close_session = sbp_close_session,
        .sess_get_index = sbp_sess_get_index,
        .write_pending = sbp_write_pending,
        .write_pending_status = sbp_write_pending_status,
        .set_default_node_attributes = sbp_set_default_node_attrs,
        .get_cmd_state = sbp_get_cmd_state,
        .queue_data_in = sbp_queue_data_in,
        .queue_status = sbp_queue_status,
        .queue_tm_rsp = sbp_queue_tm_rsp,
        .aborted_task = sbp_aborted_task,
        .check_stop_free = sbp_check_stop_free,

        .fabric_make_wwn = sbp_make_tport,
        .fabric_drop_wwn = sbp_drop_tport,
        .fabric_make_tpg = sbp_make_tpg,
        .fabric_drop_tpg = sbp_drop_tpg,
        .fabric_post_link = sbp_post_link_lun,
        .fabric_pre_unlink = sbp_pre_unlink_lun,
        .fabric_make_np = NULL,
        .fabric_drop_np = NULL,
        .fabric_init_nodeacl = sbp_init_nodeacl,

        .tfc_wwn_attrs = sbp_wwn_attrs,
        .tfc_tpg_base_attrs = sbp_tpg_base_attrs,
        .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs,
};

static int __init sbp_init(void)
{
        return target_register_template(&sbp_ops);
}

static void __exit sbp_exit(void)
{
        target_unregister_template(&sbp_ops);
}

MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
module_init(sbp_init);
module_exit(sbp_exit);
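
/*
 * Example configfs usage (illustrative only; the paths assume the
 * standard /sys/kernel/config mount point, the GUID is made up, and at
 * least one LUN must first be linked from a target_core backstore under
 * .../tpgt_1/lun/ before the target can be enabled):
 *
 *   mkdir /sys/kernel/config/target/sbp/0011223344556677
 *   mkdir /sys/kernel/config/target/sbp/0011223344556677/tpgt_1
 *   echo 1 > /sys/kernel/config/target/sbp/0011223344556677/tpgt_1/enable
 */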