1 /* 2 * SBP2 target driver (SCSI over IEEE1394 in target mode) 3 * 4 * Copyright (C) 2011 Chris Boot <bootc@bootc.net> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2 of the License, or 9 * (at your option) any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; if not, write to the Free Software Foundation, 18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 19 */ 20 21 #define KMSG_COMPONENT "sbp_target" 22 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 23 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/init.h> 27 #include <linux/types.h> 28 #include <linux/string.h> 29 #include <linux/configfs.h> 30 #include <linux/ctype.h> 31 #include <linux/delay.h> 32 #include <linux/firewire.h> 33 #include <linux/firewire-constants.h> 34 #include <scsi/scsi_proto.h> 35 #include <scsi/scsi_tcq.h> 36 #include <target/target_core_base.h> 37 #include <target/target_core_backend.h> 38 #include <target/target_core_fabric.h> 39 #include <asm/unaligned.h> 40 41 #include "sbp_target.h" 42 43 /* FireWire address region for management and command block address handlers */ 44 static const struct fw_address_region sbp_register_region = { 45 .start = CSR_REGISTER_BASE + 0x10000, 46 .end = 0x1000000000000ULL, 47 }; 48 49 static const u32 sbp_unit_directory_template[] = { 50 0x1200609e, /* unit_specifier_id: NCITS/T10 */ 51 0x13010483, /* unit_sw_version: 1155D Rev 4 */ 52 0x3800609e, /* command_set_specifier_id: NCITS/T10 */ 53 0x390104d8, /* command_set: SPC-2 */ 54 0x3b000000, /* command_set_revision: 0 */ 55 0x3c000001, /* firmware_revision: 1 */ 56 }; 57 58 #define SESSION_MAINTENANCE_INTERVAL HZ 59 60 static atomic_t login_id = ATOMIC_INIT(0); 61 62 static void session_maintenance_work(struct work_struct *); 63 static int sbp_run_transaction(struct fw_card *, int, int, int, int, 64 unsigned long long, void *, size_t); 65 66 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) 67 { 68 int ret; 69 __be32 high, low; 70 71 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, 72 req->node_addr, req->generation, req->speed, 73 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4, 74 &high, sizeof(high)); 75 if (ret != RCODE_COMPLETE) 76 return ret; 77 78 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, 79 req->node_addr, req->generation, req->speed, 80 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4, 81 &low, sizeof(low)); 82 if (ret != RCODE_COMPLETE) 83 return ret; 84 85 *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low); 86 87 return RCODE_COMPLETE; 88 } 89 90 static struct sbp_session *sbp_session_find_by_guid( 91 struct sbp_tpg *tpg, u64 guid) 92 { 93 struct se_session *se_sess; 94 struct sbp_session *sess, *found = NULL; 95 96 spin_lock_bh(&tpg->se_tpg.session_lock); 97 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 98 sess = se_sess->fabric_sess_ptr; 99 if (sess->guid == guid) 100 found = sess; 101 } 102 spin_unlock_bh(&tpg->se_tpg.session_lock); 103 104 return found; 105 } 106 107 static struct sbp_login_descriptor *sbp_login_find_by_lun( 108 struct sbp_session *session, u32 unpacked_lun) 109 { 110 struct sbp_login_descriptor *login, *found = NULL; 111 112 spin_lock_bh(&session->lock); 113 list_for_each_entry(login, &session->login_list, link) { 114 if (login->login_lun == unpacked_lun) 115 found = login; 116 } 117 spin_unlock_bh(&session->lock); 118 119 return found; 120 } 121 122 static int sbp_login_count_all_by_lun( 123 struct sbp_tpg *tpg, 124 u32 unpacked_lun, 125 int exclusive) 126 { 127 struct se_session *se_sess; 128 struct sbp_session *sess; 129 struct sbp_login_descriptor *login; 130 int count = 0; 131 132 spin_lock_bh(&tpg->se_tpg.session_lock); 133 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 134 sess = se_sess->fabric_sess_ptr; 135 136 spin_lock_bh(&sess->lock); 137 list_for_each_entry(login, &sess->login_list, link) { 138 if (login->login_lun != unpacked_lun) 139 continue; 140 141 if (!exclusive || login->exclusive) 142 count++; 143 } 144 spin_unlock_bh(&sess->lock); 145 } 146 spin_unlock_bh(&tpg->se_tpg.session_lock); 147 148 return count; 149 } 150 151 static struct sbp_login_descriptor *sbp_login_find_by_id( 152 struct sbp_tpg *tpg, int login_id) 153 { 154 struct se_session *se_sess; 155 struct sbp_session *sess; 156 struct sbp_login_descriptor *login, *found = NULL; 157 158 spin_lock_bh(&tpg->se_tpg.session_lock); 159 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) { 160 sess = se_sess->fabric_sess_ptr; 161 162 spin_lock_bh(&sess->lock); 163 list_for_each_entry(login, &sess->login_list, link) { 164 if (login->login_id == login_id) 165 found = login; 166 } 167 spin_unlock_bh(&sess->lock); 168 } 169 spin_unlock_bh(&tpg->se_tpg.session_lock); 170 171 return found; 172 } 173 174 static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err) 175 { 176 struct se_portal_group *se_tpg = &tpg->se_tpg; 177 struct se_lun *se_lun; 178 179 rcu_read_lock(); 180 hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) { 181 if (se_lun->unpacked_lun == login_lun) { 182 rcu_read_unlock(); 183 *err = 0; 184 return login_lun; 185 } 186 } 187 rcu_read_unlock(); 188 189 *err = -ENODEV; 190 return login_lun; 191 } 192 193 static struct sbp_session *sbp_session_create( 194 struct sbp_tpg *tpg, 195 u64 guid) 196 { 197 struct sbp_session *sess; 198 int ret; 199 char guid_str[17]; 200 201 snprintf(guid_str, sizeof(guid_str), "%016llx", guid); 202 203 sess = kmalloc(sizeof(*sess), GFP_KERNEL); 204 if (!sess) 205 return ERR_PTR(-ENOMEM); 206 207 spin_lock_init(&sess->lock); 208 INIT_LIST_HEAD(&sess->login_list); 209 INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); 210 sess->guid = guid; 211 212 sess->se_sess = target_alloc_session(&tpg->se_tpg, 128, 213 sizeof(struct sbp_target_request), 214 TARGET_PROT_NORMAL, guid_str, 215 sess, NULL); 216 if (IS_ERR(sess->se_sess)) { 217 pr_err("failed to init se_session\n"); 218 ret = PTR_ERR(sess->se_sess); 219 kfree(sess); 220 return ERR_PTR(ret); 221 } 222 223 return sess; 224 } 225 226 static void sbp_session_release(struct sbp_session *sess, bool cancel_work) 227 { 228 spin_lock_bh(&sess->lock); 229 if (!list_empty(&sess->login_list)) { 230 spin_unlock_bh(&sess->lock); 231 return; 232 } 233 spin_unlock_bh(&sess->lock); 234 235 if (cancel_work) 236 cancel_delayed_work_sync(&sess->maint_work); 237 238 transport_deregister_session_configfs(sess->se_sess); 239 transport_deregister_session(sess->se_sess); 240 241 if (sess->card) 242 fw_card_put(sess->card); 243 244 kfree(sess); 245 } 246 247 static void sbp_target_agent_unregister(struct sbp_target_agent *); 248 249 static void sbp_login_release(struct sbp_login_descriptor *login, 250 bool cancel_work) 251 { 252 struct sbp_session *sess = login->sess; 253 254 /* FIXME: abort/wait on tasks */ 255 256 sbp_target_agent_unregister(login->tgt_agt); 257 258 if (sess) { 259 spin_lock_bh(&sess->lock); 260 list_del(&login->link); 261 spin_unlock_bh(&sess->lock); 262 263 sbp_session_release(sess, cancel_work); 264 } 265 266 kfree(login); 267 } 268 269 static struct sbp_target_agent *sbp_target_agent_register( 270 struct sbp_login_descriptor *); 271 272 static void sbp_management_request_login( 273 struct sbp_management_agent *agent, struct sbp_management_request *req, 274 int *status_data_size) 275 { 276 struct sbp_tport *tport = agent->tport; 277 struct sbp_tpg *tpg = tport->tpg; 278 struct sbp_session *sess; 279 struct sbp_login_descriptor *login; 280 struct sbp_login_response_block *response; 281 u64 guid; 282 u32 unpacked_lun; 283 int login_response_len, ret; 284 285 unpacked_lun = sbp_get_lun_from_tpg(tpg, 286 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret); 287 if (ret) { 288 pr_notice("login to unknown LUN: %d\n", 289 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); 290 291 req->status.status = cpu_to_be32( 292 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 293 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP)); 294 return; 295 } 296 297 ret = read_peer_guid(&guid, req); 298 if (ret != RCODE_COMPLETE) { 299 pr_warn("failed to read peer GUID: %d\n", ret); 300 301 req->status.status = cpu_to_be32( 302 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 303 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 304 return; 305 } 306 307 pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n", 308 unpacked_lun, guid); 309 310 sess = sbp_session_find_by_guid(tpg, guid); 311 if (sess) { 312 login = sbp_login_find_by_lun(sess, unpacked_lun); 313 if (login) { 314 pr_notice("initiator already logged-in\n"); 315 316 /* 317 * SBP-2 R4 says we should return access denied, but 318 * that can confuse initiators. Instead we need to 319 * treat this like a reconnect, but send the login 320 * response block like a fresh login. 321 * 322 * This is required particularly in the case of Apple 323 * devices booting off the FireWire target, where 324 * the firmware has an active login to the target. When 325 * the OS takes control of the session it issues its own 326 * LOGIN rather than a RECONNECT. To avoid the machine 327 * waiting until the reconnect_hold expires, we can skip 328 * the ACCESS_DENIED errors to speed things up. 329 */ 330 331 goto already_logged_in; 332 } 333 } 334 335 /* 336 * check exclusive bit in login request 337 * reject with access_denied if any logins present 338 */ 339 if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && 340 sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) { 341 pr_warn("refusing exclusive login with other active logins\n"); 342 343 req->status.status = cpu_to_be32( 344 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 345 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 346 return; 347 } 348 349 /* 350 * check exclusive bit in any existing login descriptor 351 * reject with access_denied if any exclusive logins present 352 */ 353 if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) { 354 pr_warn("refusing login while another exclusive login present\n"); 355 356 req->status.status = cpu_to_be32( 357 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 358 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 359 return; 360 } 361 362 /* 363 * check we haven't exceeded the number of allowed logins 364 * reject with resources_unavailable if we have 365 */ 366 if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >= 367 tport->max_logins_per_lun) { 368 pr_warn("max number of logins reached\n"); 369 370 req->status.status = cpu_to_be32( 371 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 372 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 373 return; 374 } 375 376 if (!sess) { 377 sess = sbp_session_create(tpg, guid); 378 if (IS_ERR(sess)) { 379 switch (PTR_ERR(sess)) { 380 case -EPERM: 381 ret = SBP_STATUS_ACCESS_DENIED; 382 break; 383 default: 384 ret = SBP_STATUS_RESOURCES_UNAVAIL; 385 break; 386 } 387 388 req->status.status = cpu_to_be32( 389 STATUS_BLOCK_RESP( 390 STATUS_RESP_REQUEST_COMPLETE) | 391 STATUS_BLOCK_SBP_STATUS(ret)); 392 return; 393 } 394 395 sess->node_id = req->node_addr; 396 sess->card = fw_card_get(req->card); 397 sess->generation = req->generation; 398 sess->speed = req->speed; 399 400 schedule_delayed_work(&sess->maint_work, 401 SESSION_MAINTENANCE_INTERVAL); 402 } 403 404 /* only take the latest reconnect_hold into account */ 405 sess->reconnect_hold = min( 406 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)), 407 tport->max_reconnect_timeout) - 1; 408 409 login = kmalloc(sizeof(*login), GFP_KERNEL); 410 if (!login) { 411 pr_err("failed to allocate login descriptor\n"); 412 413 sbp_session_release(sess, true); 414 415 req->status.status = cpu_to_be32( 416 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 417 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 418 return; 419 } 420 421 login->sess = sess; 422 login->login_lun = unpacked_lun; 423 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); 424 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); 425 login->login_id = atomic_inc_return(&login_id); 426 427 login->tgt_agt = sbp_target_agent_register(login); 428 if (IS_ERR(login->tgt_agt)) { 429 ret = PTR_ERR(login->tgt_agt); 430 pr_err("failed to map command block handler: %d\n", ret); 431 432 sbp_session_release(sess, true); 433 kfree(login); 434 435 req->status.status = cpu_to_be32( 436 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 437 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 438 return; 439 } 440 441 spin_lock_bh(&sess->lock); 442 list_add_tail(&login->link, &sess->login_list); 443 spin_unlock_bh(&sess->lock); 444 445 already_logged_in: 446 response = kzalloc(sizeof(*response), GFP_KERNEL); 447 if (!response) { 448 pr_err("failed to allocate login response block\n"); 449 450 sbp_login_release(login, true); 451 452 req->status.status = cpu_to_be32( 453 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 454 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL)); 455 return; 456 } 457 458 login_response_len = clamp_val( 459 LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)), 460 12, sizeof(*response)); 461 response->misc = cpu_to_be32( 462 ((login_response_len & 0xffff) << 16) | 463 (login->login_id & 0xffff)); 464 response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff); 465 addr_to_sbp2_pointer(login->tgt_agt->handler.offset, 466 &response->command_block_agent); 467 468 ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST, 469 sess->node_id, sess->generation, sess->speed, 470 sbp2_pointer_to_addr(&req->orb.ptr2), response, 471 login_response_len); 472 if (ret != RCODE_COMPLETE) { 473 pr_debug("failed to write login response block: %x\n", ret); 474 475 kfree(response); 476 sbp_login_release(login, true); 477 478 req->status.status = cpu_to_be32( 479 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 480 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 481 return; 482 } 483 484 kfree(response); 485 486 req->status.status = cpu_to_be32( 487 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 488 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 489 } 490 491 static void sbp_management_request_query_logins( 492 struct sbp_management_agent *agent, struct sbp_management_request *req, 493 int *status_data_size) 494 { 495 pr_notice("QUERY LOGINS not implemented\n"); 496 /* FIXME: implement */ 497 498 req->status.status = cpu_to_be32( 499 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 500 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 501 } 502 503 static void sbp_management_request_reconnect( 504 struct sbp_management_agent *agent, struct sbp_management_request *req, 505 int *status_data_size) 506 { 507 struct sbp_tport *tport = agent->tport; 508 struct sbp_tpg *tpg = tport->tpg; 509 int ret; 510 u64 guid; 511 struct sbp_login_descriptor *login; 512 513 ret = read_peer_guid(&guid, req); 514 if (ret != RCODE_COMPLETE) { 515 pr_warn("failed to read peer GUID: %d\n", ret); 516 517 req->status.status = cpu_to_be32( 518 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 519 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 520 return; 521 } 522 523 pr_notice("mgt_agent RECONNECT from %016llx\n", guid); 524 525 login = sbp_login_find_by_id(tpg, 526 RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc))); 527 528 if (!login) { 529 pr_err("mgt_agent RECONNECT unknown login ID\n"); 530 531 req->status.status = cpu_to_be32( 532 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 533 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 534 return; 535 } 536 537 if (login->sess->guid != guid) { 538 pr_err("mgt_agent RECONNECT login GUID doesn't match\n"); 539 540 req->status.status = cpu_to_be32( 541 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 542 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 543 return; 544 } 545 546 spin_lock_bh(&login->sess->lock); 547 if (login->sess->card) 548 fw_card_put(login->sess->card); 549 550 /* update the node details */ 551 login->sess->generation = req->generation; 552 login->sess->node_id = req->node_addr; 553 login->sess->card = fw_card_get(req->card); 554 login->sess->speed = req->speed; 555 spin_unlock_bh(&login->sess->lock); 556 557 req->status.status = cpu_to_be32( 558 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 559 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 560 } 561 562 static void sbp_management_request_logout( 563 struct sbp_management_agent *agent, struct sbp_management_request *req, 564 int *status_data_size) 565 { 566 struct sbp_tport *tport = agent->tport; 567 struct sbp_tpg *tpg = tport->tpg; 568 int id; 569 struct sbp_login_descriptor *login; 570 571 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 572 573 login = sbp_login_find_by_id(tpg, id); 574 if (!login) { 575 pr_warn("cannot find login: %d\n", id); 576 577 req->status.status = cpu_to_be32( 578 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 579 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN)); 580 return; 581 } 582 583 pr_info("mgt_agent LOGOUT from LUN %d session %d\n", 584 login->login_lun, login->login_id); 585 586 if (req->node_addr != login->sess->node_id) { 587 pr_warn("logout from different node ID\n"); 588 589 req->status.status = cpu_to_be32( 590 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 591 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED)); 592 return; 593 } 594 595 sbp_login_release(login, true); 596 597 req->status.status = cpu_to_be32( 598 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 599 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 600 } 601 602 static void session_check_for_reset(struct sbp_session *sess) 603 { 604 bool card_valid = false; 605 606 spin_lock_bh(&sess->lock); 607 608 if (sess->card) { 609 spin_lock_irq(&sess->card->lock); 610 card_valid = (sess->card->local_node != NULL); 611 spin_unlock_irq(&sess->card->lock); 612 613 if (!card_valid) { 614 fw_card_put(sess->card); 615 sess->card = NULL; 616 } 617 } 618 619 if (!card_valid || (sess->generation != sess->card->generation)) { 620 pr_info("Waiting for reconnect from node: %016llx\n", 621 sess->guid); 622 623 sess->node_id = -1; 624 sess->reconnect_expires = get_jiffies_64() + 625 ((sess->reconnect_hold + 1) * HZ); 626 } 627 628 spin_unlock_bh(&sess->lock); 629 } 630 631 static void session_reconnect_expired(struct sbp_session *sess) 632 { 633 struct sbp_login_descriptor *login, *temp; 634 LIST_HEAD(login_list); 635 636 pr_info("Reconnect timer expired for node: %016llx\n", sess->guid); 637 638 spin_lock_bh(&sess->lock); 639 list_for_each_entry_safe(login, temp, &sess->login_list, link) { 640 login->sess = NULL; 641 list_move_tail(&login->link, &login_list); 642 } 643 spin_unlock_bh(&sess->lock); 644 645 list_for_each_entry_safe(login, temp, &login_list, link) { 646 list_del(&login->link); 647 sbp_login_release(login, false); 648 } 649 650 sbp_session_release(sess, false); 651 } 652 653 static void session_maintenance_work(struct work_struct *work) 654 { 655 struct sbp_session *sess = container_of(work, struct sbp_session, 656 maint_work.work); 657 658 /* could be called while tearing down the session */ 659 spin_lock_bh(&sess->lock); 660 if (list_empty(&sess->login_list)) { 661 spin_unlock_bh(&sess->lock); 662 return; 663 } 664 spin_unlock_bh(&sess->lock); 665 666 if (sess->node_id != -1) { 667 /* check for bus reset and make node_id invalid */ 668 session_check_for_reset(sess); 669 670 schedule_delayed_work(&sess->maint_work, 671 SESSION_MAINTENANCE_INTERVAL); 672 } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) { 673 /* still waiting for reconnect */ 674 schedule_delayed_work(&sess->maint_work, 675 SESSION_MAINTENANCE_INTERVAL); 676 } else { 677 /* reconnect timeout has expired */ 678 session_reconnect_expired(sess); 679 } 680 } 681 682 static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data, 683 struct sbp_target_agent *agent) 684 { 685 int state; 686 687 switch (tcode) { 688 case TCODE_READ_QUADLET_REQUEST: 689 pr_debug("tgt_agent AGENT_STATE READ\n"); 690 691 spin_lock_bh(&agent->lock); 692 state = agent->state; 693 spin_unlock_bh(&agent->lock); 694 695 *(__be32 *)data = cpu_to_be32(state); 696 697 return RCODE_COMPLETE; 698 699 case TCODE_WRITE_QUADLET_REQUEST: 700 /* ignored */ 701 return RCODE_COMPLETE; 702 703 default: 704 return RCODE_TYPE_ERROR; 705 } 706 } 707 708 static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data, 709 struct sbp_target_agent *agent) 710 { 711 switch (tcode) { 712 case TCODE_WRITE_QUADLET_REQUEST: 713 pr_debug("tgt_agent AGENT_RESET\n"); 714 spin_lock_bh(&agent->lock); 715 agent->state = AGENT_STATE_RESET; 716 spin_unlock_bh(&agent->lock); 717 return RCODE_COMPLETE; 718 719 default: 720 return RCODE_TYPE_ERROR; 721 } 722 } 723 724 static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data, 725 struct sbp_target_agent *agent) 726 { 727 struct sbp2_pointer *ptr = data; 728 729 switch (tcode) { 730 case TCODE_WRITE_BLOCK_REQUEST: 731 spin_lock_bh(&agent->lock); 732 if (agent->state != AGENT_STATE_SUSPENDED && 733 agent->state != AGENT_STATE_RESET) { 734 spin_unlock_bh(&agent->lock); 735 pr_notice("Ignoring ORB_POINTER write while active.\n"); 736 return RCODE_CONFLICT_ERROR; 737 } 738 agent->state = AGENT_STATE_ACTIVE; 739 spin_unlock_bh(&agent->lock); 740 741 agent->orb_pointer = sbp2_pointer_to_addr(ptr); 742 agent->doorbell = false; 743 744 pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n", 745 agent->orb_pointer); 746 747 queue_work(system_unbound_wq, &agent->work); 748 749 return RCODE_COMPLETE; 750 751 case TCODE_READ_BLOCK_REQUEST: 752 pr_debug("tgt_agent ORB_POINTER READ\n"); 753 spin_lock_bh(&agent->lock); 754 addr_to_sbp2_pointer(agent->orb_pointer, ptr); 755 spin_unlock_bh(&agent->lock); 756 return RCODE_COMPLETE; 757 758 default: 759 return RCODE_TYPE_ERROR; 760 } 761 } 762 763 static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data, 764 struct sbp_target_agent *agent) 765 { 766 switch (tcode) { 767 case TCODE_WRITE_QUADLET_REQUEST: 768 spin_lock_bh(&agent->lock); 769 if (agent->state != AGENT_STATE_SUSPENDED) { 770 spin_unlock_bh(&agent->lock); 771 pr_debug("Ignoring DOORBELL while active.\n"); 772 return RCODE_CONFLICT_ERROR; 773 } 774 agent->state = AGENT_STATE_ACTIVE; 775 spin_unlock_bh(&agent->lock); 776 777 agent->doorbell = true; 778 779 pr_debug("tgt_agent DOORBELL\n"); 780 781 queue_work(system_unbound_wq, &agent->work); 782 783 return RCODE_COMPLETE; 784 785 case TCODE_READ_QUADLET_REQUEST: 786 return RCODE_COMPLETE; 787 788 default: 789 return RCODE_TYPE_ERROR; 790 } 791 } 792 793 static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card, 794 int tcode, void *data, struct sbp_target_agent *agent) 795 { 796 switch (tcode) { 797 case TCODE_WRITE_QUADLET_REQUEST: 798 pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n"); 799 /* ignored as we don't send unsolicited status */ 800 return RCODE_COMPLETE; 801 802 case TCODE_READ_QUADLET_REQUEST: 803 return RCODE_COMPLETE; 804 805 default: 806 return RCODE_TYPE_ERROR; 807 } 808 } 809 810 static void tgt_agent_rw(struct fw_card *card, struct fw_request *request, 811 int tcode, int destination, int source, int generation, 812 unsigned long long offset, void *data, size_t length, 813 void *callback_data) 814 { 815 struct sbp_target_agent *agent = callback_data; 816 struct sbp_session *sess = agent->login->sess; 817 int sess_gen, sess_node, rcode; 818 819 spin_lock_bh(&sess->lock); 820 sess_gen = sess->generation; 821 sess_node = sess->node_id; 822 spin_unlock_bh(&sess->lock); 823 824 if (generation != sess_gen) { 825 pr_notice("ignoring request with wrong generation\n"); 826 rcode = RCODE_TYPE_ERROR; 827 goto out; 828 } 829 830 if (source != sess_node) { 831 pr_notice("ignoring request from foreign node (%x != %x)\n", 832 source, sess_node); 833 rcode = RCODE_TYPE_ERROR; 834 goto out; 835 } 836 837 /* turn offset into the offset from the start of the block */ 838 offset -= agent->handler.offset; 839 840 if (offset == 0x00 && length == 4) { 841 /* AGENT_STATE */ 842 rcode = tgt_agent_rw_agent_state(card, tcode, data, agent); 843 } else if (offset == 0x04 && length == 4) { 844 /* AGENT_RESET */ 845 rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent); 846 } else if (offset == 0x08 && length == 8) { 847 /* ORB_POINTER */ 848 rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent); 849 } else if (offset == 0x10 && length == 4) { 850 /* DOORBELL */ 851 rcode = tgt_agent_rw_doorbell(card, tcode, data, agent); 852 } else if (offset == 0x14 && length == 4) { 853 /* UNSOLICITED_STATUS_ENABLE */ 854 rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode, 855 data, agent); 856 } else { 857 rcode = RCODE_ADDRESS_ERROR; 858 } 859 860 out: 861 fw_send_response(card, request, rcode); 862 } 863 864 static void sbp_handle_command(struct sbp_target_request *); 865 static int sbp_send_status(struct sbp_target_request *); 866 static void sbp_free_request(struct sbp_target_request *); 867 868 static void tgt_agent_process_work(struct work_struct *work) 869 { 870 struct sbp_target_request *req = 871 container_of(work, struct sbp_target_request, work); 872 873 pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n", 874 req->orb_pointer, 875 sbp2_pointer_to_addr(&req->orb.next_orb), 876 sbp2_pointer_to_addr(&req->orb.data_descriptor), 877 be32_to_cpu(req->orb.misc)); 878 879 if (req->orb_pointer >> 32) 880 pr_debug("ORB with high bits set\n"); 881 882 switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) { 883 case 0:/* Format specified by this standard */ 884 sbp_handle_command(req); 885 return; 886 case 1: /* Reserved for future standardization */ 887 case 2: /* Vendor-dependent */ 888 req->status.status |= cpu_to_be32( 889 STATUS_BLOCK_RESP( 890 STATUS_RESP_REQUEST_COMPLETE) | 891 STATUS_BLOCK_DEAD(0) | 892 STATUS_BLOCK_LEN(1) | 893 STATUS_BLOCK_SBP_STATUS( 894 SBP_STATUS_REQ_TYPE_NOTSUPP)); 895 sbp_send_status(req); 896 return; 897 case 3: /* Dummy ORB */ 898 req->status.status |= cpu_to_be32( 899 STATUS_BLOCK_RESP( 900 STATUS_RESP_REQUEST_COMPLETE) | 901 STATUS_BLOCK_DEAD(0) | 902 STATUS_BLOCK_LEN(1) | 903 STATUS_BLOCK_SBP_STATUS( 904 SBP_STATUS_DUMMY_ORB_COMPLETE)); 905 sbp_send_status(req); 906 return; 907 default: 908 BUG(); 909 } 910 } 911 912 /* used to double-check we haven't been issued an AGENT_RESET */ 913 static inline bool tgt_agent_check_active(struct sbp_target_agent *agent) 914 { 915 bool active; 916 917 spin_lock_bh(&agent->lock); 918 active = (agent->state == AGENT_STATE_ACTIVE); 919 spin_unlock_bh(&agent->lock); 920 921 return active; 922 } 923 924 static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess, 925 struct fw_card *card, u64 next_orb) 926 { 927 struct se_session *se_sess = sess->se_sess; 928 struct sbp_target_request *req; 929 int tag; 930 931 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); 932 if (tag < 0) 933 return ERR_PTR(-ENOMEM); 934 935 req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag]; 936 memset(req, 0, sizeof(*req)); 937 req->se_cmd.map_tag = tag; 938 req->se_cmd.tag = next_orb; 939 940 return req; 941 } 942 943 static void tgt_agent_fetch_work(struct work_struct *work) 944 { 945 struct sbp_target_agent *agent = 946 container_of(work, struct sbp_target_agent, work); 947 struct sbp_session *sess = agent->login->sess; 948 struct sbp_target_request *req; 949 int ret; 950 bool doorbell = agent->doorbell; 951 u64 next_orb = agent->orb_pointer; 952 953 while (next_orb && tgt_agent_check_active(agent)) { 954 req = sbp_mgt_get_req(sess, sess->card, next_orb); 955 if (IS_ERR(req)) { 956 spin_lock_bh(&agent->lock); 957 agent->state = AGENT_STATE_DEAD; 958 spin_unlock_bh(&agent->lock); 959 return; 960 } 961 962 req->login = agent->login; 963 req->orb_pointer = next_orb; 964 965 req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH( 966 req->orb_pointer >> 32)); 967 req->status.orb_low = cpu_to_be32( 968 req->orb_pointer & 0xfffffffc); 969 970 /* read in the ORB */ 971 ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST, 972 sess->node_id, sess->generation, sess->speed, 973 req->orb_pointer, &req->orb, sizeof(req->orb)); 974 if (ret != RCODE_COMPLETE) { 975 pr_debug("tgt_orb fetch failed: %x\n", ret); 976 req->status.status |= cpu_to_be32( 977 STATUS_BLOCK_SRC( 978 STATUS_SRC_ORB_FINISHED) | 979 STATUS_BLOCK_RESP( 980 STATUS_RESP_TRANSPORT_FAILURE) | 981 STATUS_BLOCK_DEAD(1) | 982 STATUS_BLOCK_LEN(1) | 983 STATUS_BLOCK_SBP_STATUS( 984 SBP_STATUS_UNSPECIFIED_ERROR)); 985 spin_lock_bh(&agent->lock); 986 agent->state = AGENT_STATE_DEAD; 987 spin_unlock_bh(&agent->lock); 988 989 sbp_send_status(req); 990 return; 991 } 992 993 /* check the next_ORB field */ 994 if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) { 995 next_orb = 0; 996 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( 997 STATUS_SRC_ORB_FINISHED)); 998 } else { 999 next_orb = sbp2_pointer_to_addr(&req->orb.next_orb); 1000 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( 1001 STATUS_SRC_ORB_CONTINUING)); 1002 } 1003 1004 if (tgt_agent_check_active(agent) && !doorbell) { 1005 INIT_WORK(&req->work, tgt_agent_process_work); 1006 queue_work(system_unbound_wq, &req->work); 1007 } else { 1008 /* don't process this request, just check next_ORB */ 1009 sbp_free_request(req); 1010 } 1011 1012 spin_lock_bh(&agent->lock); 1013 doorbell = agent->doorbell = false; 1014 1015 /* check if we should carry on processing */ 1016 if (next_orb) 1017 agent->orb_pointer = next_orb; 1018 else 1019 agent->state = AGENT_STATE_SUSPENDED; 1020 1021 spin_unlock_bh(&agent->lock); 1022 }; 1023 } 1024 1025 static struct sbp_target_agent *sbp_target_agent_register( 1026 struct sbp_login_descriptor *login) 1027 { 1028 struct sbp_target_agent *agent; 1029 int ret; 1030 1031 agent = kmalloc(sizeof(*agent), GFP_KERNEL); 1032 if (!agent) 1033 return ERR_PTR(-ENOMEM); 1034 1035 spin_lock_init(&agent->lock); 1036 1037 agent->handler.length = 0x20; 1038 agent->handler.address_callback = tgt_agent_rw; 1039 agent->handler.callback_data = agent; 1040 1041 agent->login = login; 1042 agent->state = AGENT_STATE_RESET; 1043 INIT_WORK(&agent->work, tgt_agent_fetch_work); 1044 agent->orb_pointer = 0; 1045 agent->doorbell = false; 1046 1047 ret = fw_core_add_address_handler(&agent->handler, 1048 &sbp_register_region); 1049 if (ret < 0) { 1050 kfree(agent); 1051 return ERR_PTR(ret); 1052 } 1053 1054 return agent; 1055 } 1056 1057 static void sbp_target_agent_unregister(struct sbp_target_agent *agent) 1058 { 1059 fw_core_remove_address_handler(&agent->handler); 1060 cancel_work_sync(&agent->work); 1061 kfree(agent); 1062 } 1063 1064 /* 1065 * Simple wrapper around fw_run_transaction that retries the transaction several 1066 * times in case of failure, with an exponential backoff. 1067 */ 1068 static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id, 1069 int generation, int speed, unsigned long long offset, 1070 void *payload, size_t length) 1071 { 1072 int attempt, ret, delay; 1073 1074 for (attempt = 1; attempt <= 5; attempt++) { 1075 ret = fw_run_transaction(card, tcode, destination_id, 1076 generation, speed, offset, payload, length); 1077 1078 switch (ret) { 1079 case RCODE_COMPLETE: 1080 case RCODE_TYPE_ERROR: 1081 case RCODE_ADDRESS_ERROR: 1082 case RCODE_GENERATION: 1083 return ret; 1084 1085 default: 1086 delay = 5 * attempt * attempt; 1087 usleep_range(delay, delay * 2); 1088 } 1089 } 1090 1091 return ret; 1092 } 1093 1094 /* 1095 * Wrapper around sbp_run_transaction that gets the card, destination, 1096 * generation and speed out of the request's session. 1097 */ 1098 static int sbp_run_request_transaction(struct sbp_target_request *req, 1099 int tcode, unsigned long long offset, void *payload, 1100 size_t length) 1101 { 1102 struct sbp_login_descriptor *login = req->login; 1103 struct sbp_session *sess = login->sess; 1104 struct fw_card *card; 1105 int node_id, generation, speed, ret; 1106 1107 spin_lock_bh(&sess->lock); 1108 card = fw_card_get(sess->card); 1109 node_id = sess->node_id; 1110 generation = sess->generation; 1111 speed = sess->speed; 1112 spin_unlock_bh(&sess->lock); 1113 1114 ret = sbp_run_transaction(card, tcode, node_id, generation, speed, 1115 offset, payload, length); 1116 1117 fw_card_put(card); 1118 1119 return ret; 1120 } 1121 1122 static int sbp_fetch_command(struct sbp_target_request *req) 1123 { 1124 int ret, cmd_len, copy_len; 1125 1126 cmd_len = scsi_command_size(req->orb.command_block); 1127 1128 req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL); 1129 if (!req->cmd_buf) 1130 return -ENOMEM; 1131 1132 memcpy(req->cmd_buf, req->orb.command_block, 1133 min_t(int, cmd_len, sizeof(req->orb.command_block))); 1134 1135 if (cmd_len > sizeof(req->orb.command_block)) { 1136 pr_debug("sbp_fetch_command: filling in long command\n"); 1137 copy_len = cmd_len - sizeof(req->orb.command_block); 1138 1139 ret = sbp_run_request_transaction(req, 1140 TCODE_READ_BLOCK_REQUEST, 1141 req->orb_pointer + sizeof(req->orb), 1142 req->cmd_buf + sizeof(req->orb.command_block), 1143 copy_len); 1144 if (ret != RCODE_COMPLETE) 1145 return -EIO; 1146 } 1147 1148 return 0; 1149 } 1150 1151 static int sbp_fetch_page_table(struct sbp_target_request *req) 1152 { 1153 int pg_tbl_sz, ret; 1154 struct sbp_page_table_entry *pg_tbl; 1155 1156 if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc))) 1157 return 0; 1158 1159 pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) * 1160 sizeof(struct sbp_page_table_entry); 1161 1162 pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL); 1163 if (!pg_tbl) 1164 return -ENOMEM; 1165 1166 ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST, 1167 sbp2_pointer_to_addr(&req->orb.data_descriptor), 1168 pg_tbl, pg_tbl_sz); 1169 if (ret != RCODE_COMPLETE) { 1170 kfree(pg_tbl); 1171 return -EIO; 1172 } 1173 1174 req->pg_tbl = pg_tbl; 1175 return 0; 1176 } 1177 1178 static void sbp_calc_data_length_direction(struct sbp_target_request *req, 1179 u32 *data_len, enum dma_data_direction *data_dir) 1180 { 1181 int data_size, direction, idx; 1182 1183 data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); 1184 direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc)); 1185 1186 if (!data_size) { 1187 *data_len = 0; 1188 *data_dir = DMA_NONE; 1189 return; 1190 } 1191 1192 *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1193 1194 if (req->pg_tbl) { 1195 *data_len = 0; 1196 for (idx = 0; idx < data_size; idx++) { 1197 *data_len += be16_to_cpu( 1198 req->pg_tbl[idx].segment_length); 1199 } 1200 } else { 1201 *data_len = data_size; 1202 } 1203 } 1204 1205 static void sbp_handle_command(struct sbp_target_request *req) 1206 { 1207 struct sbp_login_descriptor *login = req->login; 1208 struct sbp_session *sess = login->sess; 1209 int ret, unpacked_lun; 1210 u32 data_length; 1211 enum dma_data_direction data_dir; 1212 1213 ret = sbp_fetch_command(req); 1214 if (ret) { 1215 pr_debug("sbp_handle_command: fetch command failed: %d\n", ret); 1216 goto err; 1217 } 1218 1219 ret = sbp_fetch_page_table(req); 1220 if (ret) { 1221 pr_debug("sbp_handle_command: fetch page table failed: %d\n", 1222 ret); 1223 goto err; 1224 } 1225 1226 unpacked_lun = req->login->login_lun; 1227 sbp_calc_data_length_direction(req, &data_length, &data_dir); 1228 1229 pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n", 1230 req->orb_pointer, unpacked_lun, data_length, data_dir); 1231 1232 /* only used for printk until we do TMRs */ 1233 req->se_cmd.tag = req->orb_pointer; 1234 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, 1235 req->sense_buf, unpacked_lun, data_length, 1236 TCM_SIMPLE_TAG, data_dir, TARGET_SCF_ACK_KREF)) 1237 goto err; 1238 1239 return; 1240 1241 err: 1242 req->status.status |= cpu_to_be32( 1243 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 1244 STATUS_BLOCK_DEAD(0) | 1245 STATUS_BLOCK_LEN(1) | 1246 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1247 sbp_send_status(req); 1248 } 1249 1250 /* 1251 * DMA_TO_DEVICE = read from initiator (SCSI WRITE) 1252 * DMA_FROM_DEVICE = write to initiator (SCSI READ) 1253 */ 1254 static int sbp_rw_data(struct sbp_target_request *req) 1255 { 1256 struct sbp_session *sess = req->login->sess; 1257 int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id, 1258 generation, num_pte, length, tfr_length, 1259 rcode = RCODE_COMPLETE; 1260 struct sbp_page_table_entry *pte; 1261 unsigned long long offset; 1262 struct fw_card *card; 1263 struct sg_mapping_iter iter; 1264 1265 if (req->se_cmd.data_direction == DMA_FROM_DEVICE) { 1266 tcode = TCODE_WRITE_BLOCK_REQUEST; 1267 sg_miter_flags = SG_MITER_FROM_SG; 1268 } else { 1269 tcode = TCODE_READ_BLOCK_REQUEST; 1270 sg_miter_flags = SG_MITER_TO_SG; 1271 } 1272 1273 max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc)); 1274 speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc)); 1275 1276 pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc)); 1277 if (pg_size) { 1278 pr_err("sbp_run_transaction: page size ignored\n"); 1279 pg_size = 0x100 << pg_size; 1280 } 1281 1282 spin_lock_bh(&sess->lock); 1283 card = fw_card_get(sess->card); 1284 node_id = sess->node_id; 1285 generation = sess->generation; 1286 spin_unlock_bh(&sess->lock); 1287 1288 if (req->pg_tbl) { 1289 pte = req->pg_tbl; 1290 num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); 1291 1292 offset = 0; 1293 length = 0; 1294 } else { 1295 pte = NULL; 1296 num_pte = 0; 1297 1298 offset = sbp2_pointer_to_addr(&req->orb.data_descriptor); 1299 length = req->se_cmd.data_length; 1300 } 1301 1302 sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents, 1303 sg_miter_flags); 1304 1305 while (length || num_pte) { 1306 if (!length) { 1307 offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 | 1308 be32_to_cpu(pte->segment_base_lo); 1309 length = be16_to_cpu(pte->segment_length); 1310 1311 pte++; 1312 num_pte--; 1313 } 1314 1315 sg_miter_next(&iter); 1316 1317 tfr_length = min3(length, max_payload, (int)iter.length); 1318 1319 /* FIXME: take page_size into account */ 1320 1321 rcode = sbp_run_transaction(card, tcode, node_id, 1322 generation, speed, 1323 offset, iter.addr, tfr_length); 1324 1325 if (rcode != RCODE_COMPLETE) 1326 break; 1327 1328 length -= tfr_length; 1329 offset += tfr_length; 1330 iter.consumed = tfr_length; 1331 } 1332 1333 sg_miter_stop(&iter); 1334 fw_card_put(card); 1335 1336 if (rcode == RCODE_COMPLETE) { 1337 WARN_ON(length != 0); 1338 return 0; 1339 } else { 1340 return -EIO; 1341 } 1342 } 1343 1344 static int sbp_send_status(struct sbp_target_request *req) 1345 { 1346 int rc, ret = 0, length; 1347 struct sbp_login_descriptor *login = req->login; 1348 1349 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; 1350 1351 rc = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, 1352 login->status_fifo_addr, &req->status, length); 1353 if (rc != RCODE_COMPLETE) { 1354 pr_debug("sbp_send_status: write failed: 0x%x\n", rc); 1355 ret = -EIO; 1356 goto put_ref; 1357 } 1358 1359 pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n", 1360 req->orb_pointer); 1361 /* 1362 * Drop the extra ACK_KREF reference taken by target_submit_cmd() 1363 * ahead of sbp_check_stop_free() -> transport_generic_free_cmd() 1364 * final se_cmd->cmd_kref put. 1365 */ 1366 put_ref: 1367 target_put_sess_cmd(&req->se_cmd); 1368 return ret; 1369 } 1370 1371 static void sbp_sense_mangle(struct sbp_target_request *req) 1372 { 1373 struct se_cmd *se_cmd = &req->se_cmd; 1374 u8 *sense = req->sense_buf; 1375 u8 *status = req->status.data; 1376 1377 WARN_ON(se_cmd->scsi_sense_length < 18); 1378 1379 switch (sense[0] & 0x7f) { /* sfmt */ 1380 case 0x70: /* current, fixed */ 1381 status[0] = 0 << 6; 1382 break; 1383 case 0x71: /* deferred, fixed */ 1384 status[0] = 1 << 6; 1385 break; 1386 case 0x72: /* current, descriptor */ 1387 case 0x73: /* deferred, descriptor */ 1388 default: 1389 /* 1390 * TODO: SBP-3 specifies what we should do with descriptor 1391 * format sense data 1392 */ 1393 pr_err("sbp_send_sense: unknown sense format: 0x%x\n", 1394 sense[0]); 1395 req->status.status |= cpu_to_be32( 1396 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1397 STATUS_BLOCK_DEAD(0) | 1398 STATUS_BLOCK_LEN(1) | 1399 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED)); 1400 return; 1401 } 1402 1403 status[0] |= se_cmd->scsi_status & 0x3f;/* status */ 1404 status[1] = 1405 (sense[0] & 0x80) | /* valid */ 1406 ((sense[2] & 0xe0) >> 1) | /* mark, eom, ili */ 1407 (sense[2] & 0x0f); /* sense_key */ 1408 status[2] = se_cmd->scsi_asc; /* sense_code */ 1409 status[3] = se_cmd->scsi_ascq; /* sense_qualifier */ 1410 1411 /* information */ 1412 status[4] = sense[3]; 1413 status[5] = sense[4]; 1414 status[6] = sense[5]; 1415 status[7] = sense[6]; 1416 1417 /* CDB-dependent */ 1418 status[8] = sense[8]; 1419 status[9] = sense[9]; 1420 status[10] = sense[10]; 1421 status[11] = sense[11]; 1422 1423 /* fru */ 1424 status[12] = sense[14]; 1425 1426 /* sense_key-dependent */ 1427 status[13] = sense[15]; 1428 status[14] = sense[16]; 1429 status[15] = sense[17]; 1430 1431 req->status.status |= cpu_to_be32( 1432 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1433 STATUS_BLOCK_DEAD(0) | 1434 STATUS_BLOCK_LEN(5) | 1435 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 1436 } 1437 1438 static int sbp_send_sense(struct sbp_target_request *req) 1439 { 1440 struct se_cmd *se_cmd = &req->se_cmd; 1441 1442 if (se_cmd->scsi_sense_length) { 1443 sbp_sense_mangle(req); 1444 } else { 1445 req->status.status |= cpu_to_be32( 1446 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1447 STATUS_BLOCK_DEAD(0) | 1448 STATUS_BLOCK_LEN(1) | 1449 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK)); 1450 } 1451 1452 return sbp_send_status(req); 1453 } 1454 1455 static void sbp_free_request(struct sbp_target_request *req) 1456 { 1457 struct se_cmd *se_cmd = &req->se_cmd; 1458 struct se_session *se_sess = se_cmd->se_sess; 1459 1460 kfree(req->pg_tbl); 1461 kfree(req->cmd_buf); 1462 1463 percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); 1464 } 1465 1466 static void sbp_mgt_agent_process(struct work_struct *work) 1467 { 1468 struct sbp_management_agent *agent = 1469 container_of(work, struct sbp_management_agent, work); 1470 struct sbp_management_request *req = agent->request; 1471 int ret; 1472 int status_data_len = 0; 1473 1474 /* fetch the ORB from the initiator */ 1475 ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST, 1476 req->node_addr, req->generation, req->speed, 1477 agent->orb_offset, &req->orb, sizeof(req->orb)); 1478 if (ret != RCODE_COMPLETE) { 1479 pr_debug("mgt_orb fetch failed: %x\n", ret); 1480 goto out; 1481 } 1482 1483 pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n", 1484 sbp2_pointer_to_addr(&req->orb.ptr1), 1485 sbp2_pointer_to_addr(&req->orb.ptr2), 1486 be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length), 1487 sbp2_pointer_to_addr(&req->orb.status_fifo)); 1488 1489 if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) || 1490 ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) { 1491 pr_err("mgt_orb bad request\n"); 1492 goto out; 1493 } 1494 1495 switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) { 1496 case MANAGEMENT_ORB_FUNCTION_LOGIN: 1497 sbp_management_request_login(agent, req, &status_data_len); 1498 break; 1499 1500 case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS: 1501 sbp_management_request_query_logins(agent, req, 1502 &status_data_len); 1503 break; 1504 1505 case MANAGEMENT_ORB_FUNCTION_RECONNECT: 1506 sbp_management_request_reconnect(agent, req, &status_data_len); 1507 break; 1508 1509 case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD: 1510 pr_notice("SET PASSWORD not implemented\n"); 1511 1512 req->status.status = cpu_to_be32( 1513 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1514 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1515 1516 break; 1517 1518 case MANAGEMENT_ORB_FUNCTION_LOGOUT: 1519 sbp_management_request_logout(agent, req, &status_data_len); 1520 break; 1521 1522 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK: 1523 pr_notice("ABORT TASK not implemented\n"); 1524 1525 req->status.status = cpu_to_be32( 1526 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1527 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1528 1529 break; 1530 1531 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET: 1532 pr_notice("ABORT TASK SET not implemented\n"); 1533 1534 req->status.status = cpu_to_be32( 1535 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1536 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1537 1538 break; 1539 1540 case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET: 1541 pr_notice("LOGICAL UNIT RESET not implemented\n"); 1542 1543 req->status.status = cpu_to_be32( 1544 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1545 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1546 1547 break; 1548 1549 case MANAGEMENT_ORB_FUNCTION_TARGET_RESET: 1550 pr_notice("TARGET RESET not implemented\n"); 1551 1552 req->status.status = cpu_to_be32( 1553 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1554 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1555 1556 break; 1557 1558 default: 1559 pr_notice("unknown management function 0x%x\n", 1560 MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))); 1561 1562 req->status.status = cpu_to_be32( 1563 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 1564 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP)); 1565 1566 break; 1567 } 1568 1569 req->status.status |= cpu_to_be32( 1570 STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */ 1571 STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) | 1572 STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32)); 1573 req->status.orb_low = cpu_to_be32(agent->orb_offset); 1574 1575 /* write the status block back to the initiator */ 1576 ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST, 1577 req->node_addr, req->generation, req->speed, 1578 sbp2_pointer_to_addr(&req->orb.status_fifo), 1579 &req->status, 8 + status_data_len); 1580 if (ret != RCODE_COMPLETE) { 1581 pr_debug("mgt_orb status write failed: %x\n", ret); 1582 goto out; 1583 } 1584 1585 out: 1586 fw_card_put(req->card); 1587 kfree(req); 1588 1589 spin_lock_bh(&agent->lock); 1590 agent->state = MANAGEMENT_AGENT_STATE_IDLE; 1591 spin_unlock_bh(&agent->lock); 1592 } 1593 1594 static void sbp_mgt_agent_rw(struct fw_card *card, 1595 struct fw_request *request, int tcode, int destination, int source, 1596 int generation, unsigned long long offset, void *data, size_t length, 1597 void *callback_data) 1598 { 1599 struct sbp_management_agent *agent = callback_data; 1600 struct sbp2_pointer *ptr = data; 1601 int rcode = RCODE_ADDRESS_ERROR; 1602 1603 if (!agent->tport->enable) 1604 goto out; 1605 1606 if ((offset != agent->handler.offset) || (length != 8)) 1607 goto out; 1608 1609 if (tcode == TCODE_WRITE_BLOCK_REQUEST) { 1610 struct sbp_management_request *req; 1611 int prev_state; 1612 1613 spin_lock_bh(&agent->lock); 1614 prev_state = agent->state; 1615 agent->state = MANAGEMENT_AGENT_STATE_BUSY; 1616 spin_unlock_bh(&agent->lock); 1617 1618 if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) { 1619 pr_notice("ignoring management request while busy\n"); 1620 rcode = RCODE_CONFLICT_ERROR; 1621 goto out; 1622 } 1623 req = kzalloc(sizeof(*req), GFP_ATOMIC); 1624 if (!req) { 1625 rcode = RCODE_CONFLICT_ERROR; 1626 goto out; 1627 } 1628 1629 req->card = fw_card_get(card); 1630 req->generation = generation; 1631 req->node_addr = source; 1632 req->speed = fw_get_request_speed(request); 1633 1634 agent->orb_offset = sbp2_pointer_to_addr(ptr); 1635 agent->request = req; 1636 1637 queue_work(system_unbound_wq, &agent->work); 1638 rcode = RCODE_COMPLETE; 1639 } else if (tcode == TCODE_READ_BLOCK_REQUEST) { 1640 addr_to_sbp2_pointer(agent->orb_offset, ptr); 1641 rcode = RCODE_COMPLETE; 1642 } else { 1643 rcode = RCODE_TYPE_ERROR; 1644 } 1645 1646 out: 1647 fw_send_response(card, request, rcode); 1648 } 1649 1650 static struct sbp_management_agent *sbp_management_agent_register( 1651 struct sbp_tport *tport) 1652 { 1653 int ret; 1654 struct sbp_management_agent *agent; 1655 1656 agent = kmalloc(sizeof(*agent), GFP_KERNEL); 1657 if (!agent) 1658 return ERR_PTR(-ENOMEM); 1659 1660 spin_lock_init(&agent->lock); 1661 agent->tport = tport; 1662 agent->handler.length = 0x08; 1663 agent->handler.address_callback = sbp_mgt_agent_rw; 1664 agent->handler.callback_data = agent; 1665 agent->state = MANAGEMENT_AGENT_STATE_IDLE; 1666 INIT_WORK(&agent->work, sbp_mgt_agent_process); 1667 agent->orb_offset = 0; 1668 agent->request = NULL; 1669 1670 ret = fw_core_add_address_handler(&agent->handler, 1671 &sbp_register_region); 1672 if (ret < 0) { 1673 kfree(agent); 1674 return ERR_PTR(ret); 1675 } 1676 1677 return agent; 1678 } 1679 1680 static void sbp_management_agent_unregister(struct sbp_management_agent *agent) 1681 { 1682 fw_core_remove_address_handler(&agent->handler); 1683 cancel_work_sync(&agent->work); 1684 kfree(agent); 1685 } 1686 1687 static int sbp_check_true(struct se_portal_group *se_tpg) 1688 { 1689 return 1; 1690 } 1691 1692 static int sbp_check_false(struct se_portal_group *se_tpg) 1693 { 1694 return 0; 1695 } 1696 1697 static char *sbp_get_fabric_name(void) 1698 { 1699 return "sbp"; 1700 } 1701 1702 static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg) 1703 { 1704 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1705 struct sbp_tport *tport = tpg->tport; 1706 1707 return &tport->tport_name[0]; 1708 } 1709 1710 static u16 sbp_get_tag(struct se_portal_group *se_tpg) 1711 { 1712 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1713 return tpg->tport_tpgt; 1714 } 1715 1716 static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg) 1717 { 1718 return 1; 1719 } 1720 1721 static void sbp_release_cmd(struct se_cmd *se_cmd) 1722 { 1723 struct sbp_target_request *req = container_of(se_cmd, 1724 struct sbp_target_request, se_cmd); 1725 1726 sbp_free_request(req); 1727 } 1728 1729 static u32 sbp_sess_get_index(struct se_session *se_sess) 1730 { 1731 return 0; 1732 } 1733 1734 static int sbp_write_pending(struct se_cmd *se_cmd) 1735 { 1736 struct sbp_target_request *req = container_of(se_cmd, 1737 struct sbp_target_request, se_cmd); 1738 int ret; 1739 1740 ret = sbp_rw_data(req); 1741 if (ret) { 1742 req->status.status |= cpu_to_be32( 1743 STATUS_BLOCK_RESP( 1744 STATUS_RESP_TRANSPORT_FAILURE) | 1745 STATUS_BLOCK_DEAD(0) | 1746 STATUS_BLOCK_LEN(1) | 1747 STATUS_BLOCK_SBP_STATUS( 1748 SBP_STATUS_UNSPECIFIED_ERROR)); 1749 sbp_send_status(req); 1750 return ret; 1751 } 1752 1753 target_execute_cmd(se_cmd); 1754 return 0; 1755 } 1756 1757 static int sbp_write_pending_status(struct se_cmd *se_cmd) 1758 { 1759 return 0; 1760 } 1761 1762 static void sbp_set_default_node_attrs(struct se_node_acl *nacl) 1763 { 1764 return; 1765 } 1766 1767 static int sbp_get_cmd_state(struct se_cmd *se_cmd) 1768 { 1769 return 0; 1770 } 1771 1772 static int sbp_queue_data_in(struct se_cmd *se_cmd) 1773 { 1774 struct sbp_target_request *req = container_of(se_cmd, 1775 struct sbp_target_request, se_cmd); 1776 int ret; 1777 1778 ret = sbp_rw_data(req); 1779 if (ret) { 1780 req->status.status |= cpu_to_be32( 1781 STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) | 1782 STATUS_BLOCK_DEAD(0) | 1783 STATUS_BLOCK_LEN(1) | 1784 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR)); 1785 sbp_send_status(req); 1786 return ret; 1787 } 1788 1789 return sbp_send_sense(req); 1790 } 1791 1792 /* 1793 * Called after command (no data transfer) or after the write (to device) 1794 * operation is completed 1795 */ 1796 static int sbp_queue_status(struct se_cmd *se_cmd) 1797 { 1798 struct sbp_target_request *req = container_of(se_cmd, 1799 struct sbp_target_request, se_cmd); 1800 1801 return sbp_send_sense(req); 1802 } 1803 1804 static void sbp_queue_tm_rsp(struct se_cmd *se_cmd) 1805 { 1806 } 1807 1808 static void sbp_aborted_task(struct se_cmd *se_cmd) 1809 { 1810 return; 1811 } 1812 1813 static int sbp_check_stop_free(struct se_cmd *se_cmd) 1814 { 1815 struct sbp_target_request *req = container_of(se_cmd, 1816 struct sbp_target_request, se_cmd); 1817 1818 return transport_generic_free_cmd(&req->se_cmd, 0); 1819 } 1820 1821 static int sbp_count_se_tpg_luns(struct se_portal_group *tpg) 1822 { 1823 struct se_lun *lun; 1824 int count = 0; 1825 1826 rcu_read_lock(); 1827 hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) 1828 count++; 1829 rcu_read_unlock(); 1830 1831 return count; 1832 } 1833 1834 static int sbp_update_unit_directory(struct sbp_tport *tport) 1835 { 1836 struct se_lun *lun; 1837 int num_luns, num_entries, idx = 0, mgt_agt_addr, ret; 1838 u32 *data; 1839 1840 if (tport->unit_directory.data) { 1841 fw_core_remove_descriptor(&tport->unit_directory); 1842 kfree(tport->unit_directory.data); 1843 tport->unit_directory.data = NULL; 1844 } 1845 1846 if (!tport->enable || !tport->tpg) 1847 return 0; 1848 1849 num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg); 1850 1851 /* 1852 * Number of entries in the final unit directory: 1853 * - all of those in the template 1854 * - management_agent 1855 * - unit_characteristics 1856 * - reconnect_timeout 1857 * - unit unique ID 1858 * - one for each LUN 1859 * 1860 * MUST NOT include leaf or sub-directory entries 1861 */ 1862 num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns; 1863 1864 if (tport->directory_id != -1) 1865 num_entries++; 1866 1867 /* allocate num_entries + 4 for the header and unique ID leaf */ 1868 data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL); 1869 if (!data) 1870 return -ENOMEM; 1871 1872 /* directory_length */ 1873 data[idx++] = num_entries << 16; 1874 1875 /* directory_id */ 1876 if (tport->directory_id != -1) 1877 data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id; 1878 1879 /* unit directory template */ 1880 memcpy(&data[idx], sbp_unit_directory_template, 1881 sizeof(sbp_unit_directory_template)); 1882 idx += ARRAY_SIZE(sbp_unit_directory_template); 1883 1884 /* management_agent */ 1885 mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4; 1886 data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff); 1887 1888 /* unit_characteristics */ 1889 data[idx++] = 0x3a000000 | 1890 (((tport->mgt_orb_timeout * 2) << 8) & 0xff00) | 1891 SBP_ORB_FETCH_SIZE; 1892 1893 /* reconnect_timeout */ 1894 data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff); 1895 1896 /* unit unique ID (leaf is just after LUNs) */ 1897 data[idx++] = 0x8d000000 | (num_luns + 1); 1898 1899 rcu_read_lock(); 1900 hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) { 1901 struct se_device *dev; 1902 int type; 1903 /* 1904 * rcu_dereference_raw protected by se_lun->lun_group symlink 1905 * reference to se_device->dev_group. 1906 */ 1907 dev = rcu_dereference_raw(lun->lun_se_dev); 1908 type = dev->transport->get_device_type(dev); 1909 1910 /* logical_unit_number */ 1911 data[idx++] = 0x14000000 | 1912 ((type << 16) & 0x1f0000) | 1913 (lun->unpacked_lun & 0xffff); 1914 } 1915 rcu_read_unlock(); 1916 1917 /* unit unique ID leaf */ 1918 data[idx++] = 2 << 16; 1919 data[idx++] = tport->guid >> 32; 1920 data[idx++] = tport->guid; 1921 1922 tport->unit_directory.length = idx; 1923 tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24; 1924 tport->unit_directory.data = data; 1925 1926 ret = fw_core_add_descriptor(&tport->unit_directory); 1927 if (ret < 0) { 1928 kfree(tport->unit_directory.data); 1929 tport->unit_directory.data = NULL; 1930 } 1931 1932 return ret; 1933 } 1934 1935 static ssize_t sbp_parse_wwn(const char *name, u64 *wwn) 1936 { 1937 const char *cp; 1938 char c, nibble; 1939 int pos = 0, err; 1940 1941 *wwn = 0; 1942 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) { 1943 c = *cp; 1944 if (c == '\n' && cp[1] == '\0') 1945 continue; 1946 if (c == '\0') { 1947 err = 2; 1948 if (pos != 16) 1949 goto fail; 1950 return cp - name; 1951 } 1952 err = 3; 1953 if (isdigit(c)) 1954 nibble = c - '0'; 1955 else if (isxdigit(c)) 1956 nibble = tolower(c) - 'a' + 10; 1957 else 1958 goto fail; 1959 *wwn = (*wwn << 4) | nibble; 1960 pos++; 1961 } 1962 err = 4; 1963 fail: 1964 printk(KERN_INFO "err %u len %zu pos %u\n", 1965 err, cp - name, pos); 1966 return -1; 1967 } 1968 1969 static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn) 1970 { 1971 return snprintf(buf, len, "%016llx", wwn); 1972 } 1973 1974 static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name) 1975 { 1976 u64 guid = 0; 1977 1978 if (sbp_parse_wwn(name, &guid) < 0) 1979 return -EINVAL; 1980 return 0; 1981 } 1982 1983 static int sbp_post_link_lun( 1984 struct se_portal_group *se_tpg, 1985 struct se_lun *se_lun) 1986 { 1987 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1988 1989 return sbp_update_unit_directory(tpg->tport); 1990 } 1991 1992 static void sbp_pre_unlink_lun( 1993 struct se_portal_group *se_tpg, 1994 struct se_lun *se_lun) 1995 { 1996 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 1997 struct sbp_tport *tport = tpg->tport; 1998 int ret; 1999 2000 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) 2001 tport->enable = 0; 2002 2003 ret = sbp_update_unit_directory(tport); 2004 if (ret < 0) 2005 pr_err("unlink LUN: failed to update unit directory\n"); 2006 } 2007 2008 static struct se_portal_group *sbp_make_tpg( 2009 struct se_wwn *wwn, 2010 struct config_group *group, 2011 const char *name) 2012 { 2013 struct sbp_tport *tport = 2014 container_of(wwn, struct sbp_tport, tport_wwn); 2015 2016 struct sbp_tpg *tpg; 2017 unsigned long tpgt; 2018 int ret; 2019 2020 if (strstr(name, "tpgt_") != name) 2021 return ERR_PTR(-EINVAL); 2022 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) 2023 return ERR_PTR(-EINVAL); 2024 2025 if (tport->tpg) { 2026 pr_err("Only one TPG per Unit is possible.\n"); 2027 return ERR_PTR(-EBUSY); 2028 } 2029 2030 tpg = kzalloc(sizeof(*tpg), GFP_KERNEL); 2031 if (!tpg) 2032 return ERR_PTR(-ENOMEM); 2033 2034 tpg->tport = tport; 2035 tpg->tport_tpgt = tpgt; 2036 tport->tpg = tpg; 2037 2038 /* default attribute values */ 2039 tport->enable = 0; 2040 tport->directory_id = -1; 2041 tport->mgt_orb_timeout = 15; 2042 tport->max_reconnect_timeout = 5; 2043 tport->max_logins_per_lun = 1; 2044 2045 tport->mgt_agt = sbp_management_agent_register(tport); 2046 if (IS_ERR(tport->mgt_agt)) { 2047 ret = PTR_ERR(tport->mgt_agt); 2048 goto out_free_tpg; 2049 } 2050 2051 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP); 2052 if (ret < 0) 2053 goto out_unreg_mgt_agt; 2054 2055 return &tpg->se_tpg; 2056 2057 out_unreg_mgt_agt: 2058 sbp_management_agent_unregister(tport->mgt_agt); 2059 out_free_tpg: 2060 tport->tpg = NULL; 2061 kfree(tpg); 2062 return ERR_PTR(ret); 2063 } 2064 2065 static void sbp_drop_tpg(struct se_portal_group *se_tpg) 2066 { 2067 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2068 struct sbp_tport *tport = tpg->tport; 2069 2070 core_tpg_deregister(se_tpg); 2071 sbp_management_agent_unregister(tport->mgt_agt); 2072 tport->tpg = NULL; 2073 kfree(tpg); 2074 } 2075 2076 static struct se_wwn *sbp_make_tport( 2077 struct target_fabric_configfs *tf, 2078 struct config_group *group, 2079 const char *name) 2080 { 2081 struct sbp_tport *tport; 2082 u64 guid = 0; 2083 2084 if (sbp_parse_wwn(name, &guid) < 0) 2085 return ERR_PTR(-EINVAL); 2086 2087 tport = kzalloc(sizeof(*tport), GFP_KERNEL); 2088 if (!tport) 2089 return ERR_PTR(-ENOMEM); 2090 2091 tport->guid = guid; 2092 sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid); 2093 2094 return &tport->tport_wwn; 2095 } 2096 2097 static void sbp_drop_tport(struct se_wwn *wwn) 2098 { 2099 struct sbp_tport *tport = 2100 container_of(wwn, struct sbp_tport, tport_wwn); 2101 2102 kfree(tport); 2103 } 2104 2105 static ssize_t sbp_wwn_version_show(struct config_item *item, char *page) 2106 { 2107 return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION); 2108 } 2109 2110 CONFIGFS_ATTR_RO(sbp_wwn_, version); 2111 2112 static struct configfs_attribute *sbp_wwn_attrs[] = { 2113 &sbp_wwn_attr_version, 2114 NULL, 2115 }; 2116 2117 static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page) 2118 { 2119 struct se_portal_group *se_tpg = to_tpg(item); 2120 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2121 struct sbp_tport *tport = tpg->tport; 2122 2123 if (tport->directory_id == -1) 2124 return sprintf(page, "implicit\n"); 2125 else 2126 return sprintf(page, "%06x\n", tport->directory_id); 2127 } 2128 2129 static ssize_t sbp_tpg_directory_id_store(struct config_item *item, 2130 const char *page, size_t count) 2131 { 2132 struct se_portal_group *se_tpg = to_tpg(item); 2133 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2134 struct sbp_tport *tport = tpg->tport; 2135 unsigned long val; 2136 2137 if (tport->enable) { 2138 pr_err("Cannot change the directory_id on an active target.\n"); 2139 return -EBUSY; 2140 } 2141 2142 if (strstr(page, "implicit") == page) { 2143 tport->directory_id = -1; 2144 } else { 2145 if (kstrtoul(page, 16, &val) < 0) 2146 return -EINVAL; 2147 if (val > 0xffffff) 2148 return -EINVAL; 2149 2150 tport->directory_id = val; 2151 } 2152 2153 return count; 2154 } 2155 2156 static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page) 2157 { 2158 struct se_portal_group *se_tpg = to_tpg(item); 2159 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2160 struct sbp_tport *tport = tpg->tport; 2161 return sprintf(page, "%d\n", tport->enable); 2162 } 2163 2164 static ssize_t sbp_tpg_enable_store(struct config_item *item, 2165 const char *page, size_t count) 2166 { 2167 struct se_portal_group *se_tpg = to_tpg(item); 2168 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2169 struct sbp_tport *tport = tpg->tport; 2170 unsigned long val; 2171 int ret; 2172 2173 if (kstrtoul(page, 0, &val) < 0) 2174 return -EINVAL; 2175 if ((val != 0) && (val != 1)) 2176 return -EINVAL; 2177 2178 if (tport->enable == val) 2179 return count; 2180 2181 if (val) { 2182 if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) { 2183 pr_err("Cannot enable a target with no LUNs!\n"); 2184 return -EINVAL; 2185 } 2186 } else { 2187 /* XXX: force-shutdown sessions instead? */ 2188 spin_lock_bh(&se_tpg->session_lock); 2189 if (!list_empty(&se_tpg->tpg_sess_list)) { 2190 spin_unlock_bh(&se_tpg->session_lock); 2191 return -EBUSY; 2192 } 2193 spin_unlock_bh(&se_tpg->session_lock); 2194 } 2195 2196 tport->enable = val; 2197 2198 ret = sbp_update_unit_directory(tport); 2199 if (ret < 0) { 2200 pr_err("Could not update Config ROM\n"); 2201 return ret; 2202 } 2203 2204 return count; 2205 } 2206 2207 CONFIGFS_ATTR(sbp_tpg_, directory_id); 2208 CONFIGFS_ATTR(sbp_tpg_, enable); 2209 2210 static struct configfs_attribute *sbp_tpg_base_attrs[] = { 2211 &sbp_tpg_attr_directory_id, 2212 &sbp_tpg_attr_enable, 2213 NULL, 2214 }; 2215 2216 static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item, 2217 char *page) 2218 { 2219 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2220 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2221 struct sbp_tport *tport = tpg->tport; 2222 return sprintf(page, "%d\n", tport->mgt_orb_timeout); 2223 } 2224 2225 static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item, 2226 const char *page, size_t count) 2227 { 2228 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2229 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2230 struct sbp_tport *tport = tpg->tport; 2231 unsigned long val; 2232 int ret; 2233 2234 if (kstrtoul(page, 0, &val) < 0) 2235 return -EINVAL; 2236 if ((val < 1) || (val > 127)) 2237 return -EINVAL; 2238 2239 if (tport->mgt_orb_timeout == val) 2240 return count; 2241 2242 tport->mgt_orb_timeout = val; 2243 2244 ret = sbp_update_unit_directory(tport); 2245 if (ret < 0) 2246 return ret; 2247 2248 return count; 2249 } 2250 2251 static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item, 2252 char *page) 2253 { 2254 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2255 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2256 struct sbp_tport *tport = tpg->tport; 2257 return sprintf(page, "%d\n", tport->max_reconnect_timeout); 2258 } 2259 2260 static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item, 2261 const char *page, size_t count) 2262 { 2263 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2264 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2265 struct sbp_tport *tport = tpg->tport; 2266 unsigned long val; 2267 int ret; 2268 2269 if (kstrtoul(page, 0, &val) < 0) 2270 return -EINVAL; 2271 if ((val < 1) || (val > 32767)) 2272 return -EINVAL; 2273 2274 if (tport->max_reconnect_timeout == val) 2275 return count; 2276 2277 tport->max_reconnect_timeout = val; 2278 2279 ret = sbp_update_unit_directory(tport); 2280 if (ret < 0) 2281 return ret; 2282 2283 return count; 2284 } 2285 2286 static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item, 2287 char *page) 2288 { 2289 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2290 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2291 struct sbp_tport *tport = tpg->tport; 2292 return sprintf(page, "%d\n", tport->max_logins_per_lun); 2293 } 2294 2295 static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item, 2296 const char *page, size_t count) 2297 { 2298 struct se_portal_group *se_tpg = attrib_to_tpg(item); 2299 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg); 2300 struct sbp_tport *tport = tpg->tport; 2301 unsigned long val; 2302 2303 if (kstrtoul(page, 0, &val) < 0) 2304 return -EINVAL; 2305 if ((val < 1) || (val > 127)) 2306 return -EINVAL; 2307 2308 /* XXX: also check against current count? */ 2309 2310 tport->max_logins_per_lun = val; 2311 2312 return count; 2313 } 2314 2315 CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout); 2316 CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout); 2317 CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun); 2318 2319 static struct configfs_attribute *sbp_tpg_attrib_attrs[] = { 2320 &sbp_tpg_attrib_attr_mgt_orb_timeout, 2321 &sbp_tpg_attrib_attr_max_reconnect_timeout, 2322 &sbp_tpg_attrib_attr_max_logins_per_lun, 2323 NULL, 2324 }; 2325 2326 static const struct target_core_fabric_ops sbp_ops = { 2327 .module = THIS_MODULE, 2328 .name = "sbp", 2329 .get_fabric_name = sbp_get_fabric_name, 2330 .tpg_get_wwn = sbp_get_fabric_wwn, 2331 .tpg_get_tag = sbp_get_tag, 2332 .tpg_check_demo_mode = sbp_check_true, 2333 .tpg_check_demo_mode_cache = sbp_check_true, 2334 .tpg_check_demo_mode_write_protect = sbp_check_false, 2335 .tpg_check_prod_mode_write_protect = sbp_check_false, 2336 .tpg_get_inst_index = sbp_tpg_get_inst_index, 2337 .release_cmd = sbp_release_cmd, 2338 .sess_get_index = sbp_sess_get_index, 2339 .write_pending = sbp_write_pending, 2340 .write_pending_status = sbp_write_pending_status, 2341 .set_default_node_attributes = sbp_set_default_node_attrs, 2342 .get_cmd_state = sbp_get_cmd_state, 2343 .queue_data_in = sbp_queue_data_in, 2344 .queue_status = sbp_queue_status, 2345 .queue_tm_rsp = sbp_queue_tm_rsp, 2346 .aborted_task = sbp_aborted_task, 2347 .check_stop_free = sbp_check_stop_free, 2348 2349 .fabric_make_wwn = sbp_make_tport, 2350 .fabric_drop_wwn = sbp_drop_tport, 2351 .fabric_make_tpg = sbp_make_tpg, 2352 .fabric_drop_tpg = sbp_drop_tpg, 2353 .fabric_post_link = sbp_post_link_lun, 2354 .fabric_pre_unlink = sbp_pre_unlink_lun, 2355 .fabric_make_np = NULL, 2356 .fabric_drop_np = NULL, 2357 .fabric_init_nodeacl = sbp_init_nodeacl, 2358 2359 .tfc_wwn_attrs = sbp_wwn_attrs, 2360 .tfc_tpg_base_attrs = sbp_tpg_base_attrs, 2361 .tfc_tpg_attrib_attrs = sbp_tpg_attrib_attrs, 2362 }; 2363 2364 static int __init sbp_init(void) 2365 { 2366 return target_register_template(&sbp_ops); 2367 }; 2368 2369 static void __exit sbp_exit(void) 2370 { 2371 target_unregister_template(&sbp_ops); 2372 }; 2373 2374 MODULE_DESCRIPTION("FireWire SBP fabric driver"); 2375 MODULE_LICENSE("GPL"); 2376 module_init(sbp_init); 2377 module_exit(sbp_exit); 2378