1 /******************************************************************************* 2 * IBM Virtual SCSI Target Driver 3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. 4 * Santiago Leon (santil@us.ibm.com) IBM Corp. 5 * Linda Xie (lxie@us.ibm.com) IBM Corp. 6 * 7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org> 8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com> 11 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 ****************************************************************************/ 24 25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 26 27 #include <linux/module.h> 28 #include <linux/kernel.h> 29 #include <linux/slab.h> 30 #include <linux/types.h> 31 #include <linux/list.h> 32 #include <linux/string.h> 33 #include <linux/delay.h> 34 35 #include <target/target_core_base.h> 36 #include <target/target_core_fabric.h> 37 38 #include <asm/hvcall.h> 39 #include <asm/vio.h> 40 41 #include <scsi/viosrp.h> 42 43 #include "ibmvscsi_tgt.h" 44 45 #define IBMVSCSIS_VERSION "v0.2" 46 47 #define INITIAL_SRP_LIMIT 800 48 #define DEFAULT_MAX_SECTORS 256 49 50 static uint max_vdma_size = MAX_H_COPY_RDMA; 51 52 static char system_id[SYS_ID_NAME_LEN] = ""; 53 static char partition_name[PARTITION_NAMELEN] = "UNKNOWN"; 54 static uint partition_number = -1; 55 56 /* Adapter list and lock to control it */ 57 static DEFINE_SPINLOCK(ibmvscsis_dev_lock); 58 static LIST_HEAD(ibmvscsis_dev_list); 59 60 static long ibmvscsis_parse_command(struct scsi_info *vscsi, 61 struct viosrp_crq *crq); 62 63 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi); 64 65 static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, 66 struct srp_rsp *rsp) 67 { 68 u32 residual_count = se_cmd->residual_count; 69 70 if (!residual_count) 71 return; 72 73 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 74 if (se_cmd->data_direction == DMA_TO_DEVICE) { 75 /* residual data from an underflow write */ 76 rsp->flags = SRP_RSP_FLAG_DOUNDER; 77 rsp->data_out_res_cnt = cpu_to_be32(residual_count); 78 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 79 /* residual data from an underflow read */ 80 rsp->flags = SRP_RSP_FLAG_DIUNDER; 81 rsp->data_in_res_cnt = cpu_to_be32(residual_count); 82 } 83 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 84 if (se_cmd->data_direction == DMA_TO_DEVICE) { 85 /* residual data from an overflow write */ 86 rsp->flags = SRP_RSP_FLAG_DOOVER; 87 rsp->data_out_res_cnt = cpu_to_be32(residual_count); 88 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 89 /* residual data from an overflow read */ 90 rsp->flags = SRP_RSP_FLAG_DIOVER; 91 rsp->data_in_res_cnt = cpu_to_be32(residual_count); 92 } 93 } 94 } 95 96 /** 97 * connection_broken() - Determine if the connection to the client is good 98 * @vscsi: Pointer to our adapter structure 99 * 100 * This function attempts to send a ping MAD to the client. If the call to 101 * queue the request returns H_CLOSED then the connection has been broken 102 * and the function returns TRUE. 103 * 104 * EXECUTION ENVIRONMENT: 105 * Interrupt or Process environment 106 */ 107 static bool connection_broken(struct scsi_info *vscsi) 108 { 109 struct viosrp_crq *crq; 110 u64 buffer[2] = { 0, 0 }; 111 long h_return_code; 112 bool rc = false; 113 114 /* create a PING crq */ 115 crq = (struct viosrp_crq *)&buffer; 116 crq->valid = VALID_CMD_RESP_EL; 117 crq->format = MESSAGE_IN_CRQ; 118 crq->status = PING; 119 120 h_return_code = h_send_crq(vscsi->dds.unit_id, 121 cpu_to_be64(buffer[MSG_HI]), 122 cpu_to_be64(buffer[MSG_LOW])); 123 124 pr_debug("connection_broken: rc %ld\n", h_return_code); 125 126 if (h_return_code == H_CLOSED) 127 rc = true; 128 129 return rc; 130 } 131 132 /** 133 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue 134 * @vscsi: Pointer to our adapter structure 135 * 136 * This function calls h_free_q then frees the interrupt bit etc. 137 * It must release the lock before doing so because of the time it can take 138 * for h_free_crq in PHYP 139 * NOTE: the caller must make sure that state and or flags will prevent 140 * interrupt handler from scheduling work. 141 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag 142 * we can't do it here, because we don't have the lock 143 * 144 * EXECUTION ENVIRONMENT: 145 * Process level 146 */ 147 static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi) 148 { 149 long qrc; 150 long rc = ADAPT_SUCCESS; 151 int ticks = 0; 152 153 do { 154 qrc = h_free_crq(vscsi->dds.unit_id); 155 switch (qrc) { 156 case H_SUCCESS: 157 break; 158 159 case H_HARDWARE: 160 case H_PARAMETER: 161 dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n", 162 qrc); 163 rc = ERROR; 164 break; 165 166 case H_BUSY: 167 case H_LONG_BUSY_ORDER_1_MSEC: 168 /* msleep not good for small values */ 169 usleep_range(1000, 2000); 170 ticks += 1; 171 break; 172 case H_LONG_BUSY_ORDER_10_MSEC: 173 usleep_range(10000, 20000); 174 ticks += 10; 175 break; 176 case H_LONG_BUSY_ORDER_100_MSEC: 177 msleep(100); 178 ticks += 100; 179 break; 180 case H_LONG_BUSY_ORDER_1_SEC: 181 ssleep(1); 182 ticks += 1000; 183 break; 184 case H_LONG_BUSY_ORDER_10_SEC: 185 ssleep(10); 186 ticks += 10000; 187 break; 188 case H_LONG_BUSY_ORDER_100_SEC: 189 ssleep(100); 190 ticks += 100000; 191 break; 192 default: 193 dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n", 194 qrc); 195 rc = ERROR; 196 break; 197 } 198 199 /* 200 * dont wait more then 300 seconds 201 * ticks are in milliseconds more or less 202 */ 203 if (ticks > 300000 && qrc != H_SUCCESS) { 204 rc = ERROR; 205 dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n"); 206 } 207 } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS); 208 209 pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc); 210 211 return rc; 212 } 213 214 /** 215 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info 216 * @vscsi: Pointer to our adapter structure 217 * @client_closed: True if client closed its queue 218 * 219 * Deletes information specific to the client when the client goes away 220 * 221 * EXECUTION ENVIRONMENT: 222 * Interrupt or Process 223 */ 224 static void ibmvscsis_delete_client_info(struct scsi_info *vscsi, 225 bool client_closed) 226 { 227 vscsi->client_cap = 0; 228 229 /* 230 * Some things we don't want to clear if we're closing the queue, 231 * because some clients don't resend the host handshake when they 232 * get a transport event. 233 */ 234 if (client_closed) 235 vscsi->client_data.os_type = 0; 236 } 237 238 /** 239 * ibmvscsis_free_command_q() - Free Command Queue 240 * @vscsi: Pointer to our adapter structure 241 * 242 * This function calls unregister_command_q, then clears interrupts and 243 * any pending interrupt acknowledgments associated with the command q. 244 * It also clears memory if there is no error. 245 * 246 * PHYP did not meet the PAPR architecture so that we must give up the 247 * lock. This causes a timing hole regarding state change. To close the 248 * hole this routine does accounting on any change that occurred during 249 * the time the lock is not held. 250 * NOTE: must give up and then acquire the interrupt lock, the caller must 251 * make sure that state and or flags will prevent interrupt handler from 252 * scheduling work. 253 * 254 * EXECUTION ENVIRONMENT: 255 * Process level, interrupt lock is held 256 */ 257 static long ibmvscsis_free_command_q(struct scsi_info *vscsi) 258 { 259 int bytes; 260 u32 flags_under_lock; 261 u16 state_under_lock; 262 long rc = ADAPT_SUCCESS; 263 264 if (!(vscsi->flags & CRQ_CLOSED)) { 265 vio_disable_interrupts(vscsi->dma_dev); 266 267 state_under_lock = vscsi->new_state; 268 flags_under_lock = vscsi->flags; 269 vscsi->phyp_acr_state = 0; 270 vscsi->phyp_acr_flags = 0; 271 272 spin_unlock_bh(&vscsi->intr_lock); 273 rc = ibmvscsis_unregister_command_q(vscsi); 274 spin_lock_bh(&vscsi->intr_lock); 275 276 if (state_under_lock != vscsi->new_state) 277 vscsi->phyp_acr_state = vscsi->new_state; 278 279 vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags); 280 281 if (rc == ADAPT_SUCCESS) { 282 bytes = vscsi->cmd_q.size * PAGE_SIZE; 283 memset(vscsi->cmd_q.base_addr, 0, bytes); 284 vscsi->cmd_q.index = 0; 285 vscsi->flags |= CRQ_CLOSED; 286 287 ibmvscsis_delete_client_info(vscsi, false); 288 } 289 290 pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", 291 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, 292 vscsi->phyp_acr_state); 293 } 294 return rc; 295 } 296 297 /** 298 * ibmvscsis_cmd_q_dequeue() - Get valid Command element 299 * @mask: Mask to use in case index wraps 300 * @current_index: Current index into command queue 301 * @base_addr: Pointer to start of command queue 302 * 303 * Returns a pointer to a valid command element or NULL, if the command 304 * queue is empty 305 * 306 * EXECUTION ENVIRONMENT: 307 * Interrupt environment, interrupt lock held 308 */ 309 static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask, 310 uint *current_index, 311 struct viosrp_crq *base_addr) 312 { 313 struct viosrp_crq *ptr; 314 315 ptr = base_addr + *current_index; 316 317 if (ptr->valid) { 318 *current_index = (*current_index + 1) & mask; 319 dma_rmb(); 320 } else { 321 ptr = NULL; 322 } 323 324 return ptr; 325 } 326 327 /** 328 * ibmvscsis_send_init_message() - send initialize message to the client 329 * @vscsi: Pointer to our adapter structure 330 * @format: Which Init Message format to send 331 * 332 * EXECUTION ENVIRONMENT: 333 * Interrupt environment interrupt lock held 334 */ 335 static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format) 336 { 337 struct viosrp_crq *crq; 338 u64 buffer[2] = { 0, 0 }; 339 long rc; 340 341 crq = (struct viosrp_crq *)&buffer; 342 crq->valid = VALID_INIT_MSG; 343 crq->format = format; 344 rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), 345 cpu_to_be64(buffer[MSG_LOW])); 346 347 return rc; 348 } 349 350 /** 351 * ibmvscsis_check_init_msg() - Check init message valid 352 * @vscsi: Pointer to our adapter structure 353 * @format: Pointer to return format of Init Message, if any. 354 * Set to UNUSED_FORMAT if no Init Message in queue. 355 * 356 * Checks if an initialize message was queued by the initiatior 357 * after the queue was created and before the interrupt was enabled. 358 * 359 * EXECUTION ENVIRONMENT: 360 * Process level only, interrupt lock held 361 */ 362 static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format) 363 { 364 struct viosrp_crq *crq; 365 long rc = ADAPT_SUCCESS; 366 367 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index, 368 vscsi->cmd_q.base_addr); 369 if (!crq) { 370 *format = (uint)UNUSED_FORMAT; 371 } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) { 372 *format = (uint)INIT_MSG; 373 crq->valid = INVALIDATE_CMD_RESP_EL; 374 dma_rmb(); 375 376 /* 377 * the caller has ensured no initialize message was 378 * sent after the queue was 379 * created so there should be no other message on the queue. 380 */ 381 crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, 382 &vscsi->cmd_q.index, 383 vscsi->cmd_q.base_addr); 384 if (crq) { 385 *format = (uint)(crq->format); 386 rc = ERROR; 387 crq->valid = INVALIDATE_CMD_RESP_EL; 388 dma_rmb(); 389 } 390 } else { 391 *format = (uint)(crq->format); 392 rc = ERROR; 393 crq->valid = INVALIDATE_CMD_RESP_EL; 394 dma_rmb(); 395 } 396 397 return rc; 398 } 399 400 /** 401 * ibmvscsis_disconnect() - Helper function to disconnect 402 * @work: Pointer to work_struct, gives access to our adapter structure 403 * 404 * An error has occurred or the driver received a Transport event, 405 * and the driver is requesting that the command queue be de-registered 406 * in a safe manner. If there is no outstanding I/O then we can stop the 407 * queue. If we are restarting the queue it will be reflected in the 408 * the state of the adapter. 409 * 410 * EXECUTION ENVIRONMENT: 411 * Process environment 412 */ 413 static void ibmvscsis_disconnect(struct work_struct *work) 414 { 415 struct scsi_info *vscsi = container_of(work, struct scsi_info, 416 proc_work); 417 u16 new_state; 418 bool wait_idle = false; 419 420 spin_lock_bh(&vscsi->intr_lock); 421 new_state = vscsi->new_state; 422 vscsi->new_state = 0; 423 424 pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags, 425 vscsi->state); 426 427 /* 428 * check which state we are in and see if we 429 * should transitition to the new state 430 */ 431 switch (vscsi->state) { 432 /* Should never be called while in this state. */ 433 case NO_QUEUE: 434 /* 435 * Can never transition from this state; 436 * igonore errors and logout. 437 */ 438 case UNCONFIGURING: 439 break; 440 441 /* can transition from this state to UNCONFIGURING */ 442 case ERR_DISCONNECT: 443 if (new_state == UNCONFIGURING) 444 vscsi->state = new_state; 445 break; 446 447 /* 448 * Can transition from this state to to unconfiguring 449 * or err disconnect. 450 */ 451 case ERR_DISCONNECT_RECONNECT: 452 switch (new_state) { 453 case UNCONFIGURING: 454 case ERR_DISCONNECT: 455 vscsi->state = new_state; 456 break; 457 458 case WAIT_IDLE: 459 break; 460 default: 461 break; 462 } 463 break; 464 465 /* can transition from this state to UNCONFIGURING */ 466 case ERR_DISCONNECTED: 467 if (new_state == UNCONFIGURING) 468 vscsi->state = new_state; 469 break; 470 471 case WAIT_ENABLED: 472 switch (new_state) { 473 case UNCONFIGURING: 474 vscsi->state = new_state; 475 vscsi->flags |= RESPONSE_Q_DOWN; 476 vscsi->flags &= ~(SCHEDULE_DISCONNECT | 477 DISCONNECT_SCHEDULED); 478 dma_rmb(); 479 if (vscsi->flags & CFG_SLEEPING) { 480 vscsi->flags &= ~CFG_SLEEPING; 481 complete(&vscsi->unconfig); 482 } 483 break; 484 485 /* should never happen */ 486 case ERR_DISCONNECT: 487 case ERR_DISCONNECT_RECONNECT: 488 case WAIT_IDLE: 489 dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n", 490 vscsi->state); 491 break; 492 } 493 break; 494 495 case WAIT_IDLE: 496 switch (new_state) { 497 case UNCONFIGURING: 498 vscsi->flags |= RESPONSE_Q_DOWN; 499 vscsi->state = new_state; 500 vscsi->flags &= ~(SCHEDULE_DISCONNECT | 501 DISCONNECT_SCHEDULED); 502 ibmvscsis_free_command_q(vscsi); 503 break; 504 case ERR_DISCONNECT: 505 case ERR_DISCONNECT_RECONNECT: 506 vscsi->state = new_state; 507 break; 508 } 509 break; 510 511 /* 512 * Initiator has not done a successful srp login 513 * or has done a successful srp logout ( adapter was not 514 * busy). In the first case there can be responses queued 515 * waiting for space on the initiators response queue (MAD) 516 * The second case the adapter is idle. Assume the worse case, 517 * i.e. the second case. 518 */ 519 case WAIT_CONNECTION: 520 case CONNECTED: 521 case SRP_PROCESSING: 522 wait_idle = true; 523 vscsi->state = new_state; 524 break; 525 526 /* can transition from this state to UNCONFIGURING */ 527 case UNDEFINED: 528 if (new_state == UNCONFIGURING) 529 vscsi->state = new_state; 530 break; 531 default: 532 break; 533 } 534 535 if (wait_idle) { 536 pr_debug("disconnect start wait, active %d, sched %d\n", 537 (int)list_empty(&vscsi->active_q), 538 (int)list_empty(&vscsi->schedule_q)); 539 if (!list_empty(&vscsi->active_q) || 540 !list_empty(&vscsi->schedule_q)) { 541 vscsi->flags |= WAIT_FOR_IDLE; 542 pr_debug("disconnect flags 0x%x\n", vscsi->flags); 543 /* 544 * This routine is can not be called with the interrupt 545 * lock held. 546 */ 547 spin_unlock_bh(&vscsi->intr_lock); 548 wait_for_completion(&vscsi->wait_idle); 549 spin_lock_bh(&vscsi->intr_lock); 550 } 551 pr_debug("disconnect stop wait\n"); 552 553 ibmvscsis_adapter_idle(vscsi); 554 } 555 556 spin_unlock_bh(&vscsi->intr_lock); 557 } 558 559 /** 560 * ibmvscsis_post_disconnect() - Schedule the disconnect 561 * @vscsi: Pointer to our adapter structure 562 * @new_state: State to move to after disconnecting 563 * @flag_bits: Flags to turn on in adapter structure 564 * 565 * If it's already been scheduled, then see if we need to "upgrade" 566 * the new state (if the one passed in is more "severe" than the 567 * previous one). 568 * 569 * PRECONDITION: 570 * interrupt lock is held 571 */ 572 static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state, 573 uint flag_bits) 574 { 575 uint state; 576 577 /* check the validity of the new state */ 578 switch (new_state) { 579 case UNCONFIGURING: 580 case ERR_DISCONNECT: 581 case ERR_DISCONNECT_RECONNECT: 582 case WAIT_IDLE: 583 break; 584 585 default: 586 dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n", 587 new_state); 588 return; 589 } 590 591 vscsi->flags |= flag_bits; 592 593 pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n", 594 new_state, flag_bits, vscsi->flags, vscsi->state); 595 596 if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) { 597 vscsi->flags |= SCHEDULE_DISCONNECT; 598 vscsi->new_state = new_state; 599 600 INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect); 601 (void)queue_work(vscsi->work_q, &vscsi->proc_work); 602 } else { 603 if (vscsi->new_state) 604 state = vscsi->new_state; 605 else 606 state = vscsi->state; 607 608 switch (state) { 609 case NO_QUEUE: 610 case UNCONFIGURING: 611 break; 612 613 case ERR_DISCONNECTED: 614 case ERR_DISCONNECT: 615 case UNDEFINED: 616 if (new_state == UNCONFIGURING) 617 vscsi->new_state = new_state; 618 break; 619 620 case ERR_DISCONNECT_RECONNECT: 621 switch (new_state) { 622 case UNCONFIGURING: 623 case ERR_DISCONNECT: 624 vscsi->new_state = new_state; 625 break; 626 default: 627 break; 628 } 629 break; 630 631 case WAIT_ENABLED: 632 case WAIT_IDLE: 633 case WAIT_CONNECTION: 634 case CONNECTED: 635 case SRP_PROCESSING: 636 vscsi->new_state = new_state; 637 break; 638 639 default: 640 break; 641 } 642 } 643 644 pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n", 645 vscsi->flags, vscsi->new_state); 646 } 647 648 /** 649 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message 650 * @vscsi: Pointer to our adapter structure 651 * 652 * Must be called with interrupt lock held. 653 */ 654 static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) 655 { 656 long rc = ADAPT_SUCCESS; 657 658 switch (vscsi->state) { 659 case NO_QUEUE: 660 case ERR_DISCONNECT: 661 case ERR_DISCONNECT_RECONNECT: 662 case ERR_DISCONNECTED: 663 case UNCONFIGURING: 664 case UNDEFINED: 665 rc = ERROR; 666 break; 667 668 case WAIT_CONNECTION: 669 vscsi->state = CONNECTED; 670 break; 671 672 case WAIT_IDLE: 673 case SRP_PROCESSING: 674 case CONNECTED: 675 case WAIT_ENABLED: 676 default: 677 rc = ERROR; 678 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", 679 vscsi->state); 680 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 681 break; 682 } 683 684 return rc; 685 } 686 687 /** 688 * ibmvscsis_handle_init_msg() - Respond to an Init Message 689 * @vscsi: Pointer to our adapter structure 690 * 691 * Must be called with interrupt lock held. 692 */ 693 static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) 694 { 695 long rc = ADAPT_SUCCESS; 696 697 switch (vscsi->state) { 698 case WAIT_CONNECTION: 699 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); 700 switch (rc) { 701 case H_SUCCESS: 702 vscsi->state = CONNECTED; 703 break; 704 705 case H_PARAMETER: 706 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", 707 rc); 708 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); 709 break; 710 711 case H_DROPPED: 712 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", 713 rc); 714 rc = ERROR; 715 ibmvscsis_post_disconnect(vscsi, 716 ERR_DISCONNECT_RECONNECT, 0); 717 break; 718 719 case H_CLOSED: 720 pr_warn("init_msg: failed to send, rc %ld\n", rc); 721 rc = 0; 722 break; 723 } 724 break; 725 726 case UNDEFINED: 727 rc = ERROR; 728 break; 729 730 case UNCONFIGURING: 731 break; 732 733 case WAIT_ENABLED: 734 case CONNECTED: 735 case SRP_PROCESSING: 736 case WAIT_IDLE: 737 case NO_QUEUE: 738 case ERR_DISCONNECT: 739 case ERR_DISCONNECT_RECONNECT: 740 case ERR_DISCONNECTED: 741 default: 742 rc = ERROR; 743 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", 744 vscsi->state); 745 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 746 break; 747 } 748 749 return rc; 750 } 751 752 /** 753 * ibmvscsis_init_msg() - Respond to an init message 754 * @vscsi: Pointer to our adapter structure 755 * @crq: Pointer to CRQ element containing the Init Message 756 * 757 * EXECUTION ENVIRONMENT: 758 * Interrupt, interrupt lock held 759 */ 760 static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) 761 { 762 long rc = ADAPT_SUCCESS; 763 764 pr_debug("init_msg: state 0x%hx\n", vscsi->state); 765 766 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, 767 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, 768 0); 769 if (rc == H_SUCCESS) { 770 vscsi->client_data.partition_number = 771 be64_to_cpu(*(u64 *)vscsi->map_buf); 772 pr_debug("init_msg, part num %d\n", 773 vscsi->client_data.partition_number); 774 } else { 775 pr_debug("init_msg h_vioctl rc %ld\n", rc); 776 rc = ADAPT_SUCCESS; 777 } 778 779 if (crq->format == INIT_MSG) { 780 rc = ibmvscsis_handle_init_msg(vscsi); 781 } else if (crq->format == INIT_COMPLETE_MSG) { 782 rc = ibmvscsis_handle_init_compl_msg(vscsi); 783 } else { 784 rc = ERROR; 785 dev_err(&vscsi->dev, "init_msg: invalid format %d\n", 786 (uint)crq->format); 787 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 788 } 789 790 return rc; 791 } 792 793 /** 794 * ibmvscsis_establish_new_q() - Establish new CRQ queue 795 * @vscsi: Pointer to our adapter structure 796 * 797 * Must be called with interrupt lock held. 798 */ 799 static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) 800 { 801 long rc = ADAPT_SUCCESS; 802 uint format; 803 804 vscsi->flags &= PRESERVE_FLAG_FIELDS; 805 vscsi->rsp_q_timer.timer_pops = 0; 806 vscsi->debit = 0; 807 vscsi->credit = 0; 808 809 rc = vio_enable_interrupts(vscsi->dma_dev); 810 if (rc) { 811 pr_warn("establish_new_q: failed to enable interrupts, rc %ld\n", 812 rc); 813 return rc; 814 } 815 816 rc = ibmvscsis_check_init_msg(vscsi, &format); 817 if (rc) { 818 dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n", 819 rc); 820 return rc; 821 } 822 823 if (format == UNUSED_FORMAT) { 824 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); 825 switch (rc) { 826 case H_SUCCESS: 827 case H_DROPPED: 828 case H_CLOSED: 829 rc = ADAPT_SUCCESS; 830 break; 831 832 case H_PARAMETER: 833 case H_HARDWARE: 834 break; 835 836 default: 837 vscsi->state = UNDEFINED; 838 rc = H_HARDWARE; 839 break; 840 } 841 } else if (format == INIT_MSG) { 842 rc = ibmvscsis_handle_init_msg(vscsi); 843 } 844 845 return rc; 846 } 847 848 /** 849 * ibmvscsis_reset_queue() - Reset CRQ Queue 850 * @vscsi: Pointer to our adapter structure 851 * 852 * This function calls h_free_q and then calls h_reg_q and does all 853 * of the bookkeeping to get us back to where we can communicate. 854 * 855 * Actually, we don't always call h_free_crq. A problem was discovered 856 * where one partition would close and reopen his queue, which would 857 * cause his partner to get a transport event, which would cause him to 858 * close and reopen his queue, which would cause the original partition 859 * to get a transport event, etc., etc. To prevent this, we don't 860 * actually close our queue if the client initiated the reset, (i.e. 861 * either we got a transport event or we have detected that the client's 862 * queue is gone) 863 * 864 * EXECUTION ENVIRONMENT: 865 * Process environment, called with interrupt lock held 866 */ 867 static void ibmvscsis_reset_queue(struct scsi_info *vscsi) 868 { 869 int bytes; 870 long rc = ADAPT_SUCCESS; 871 872 pr_debug("reset_queue: flags 0x%x\n", vscsi->flags); 873 874 /* don't reset, the client did it for us */ 875 if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) { 876 vscsi->flags &= PRESERVE_FLAG_FIELDS; 877 vscsi->rsp_q_timer.timer_pops = 0; 878 vscsi->debit = 0; 879 vscsi->credit = 0; 880 vscsi->state = WAIT_CONNECTION; 881 vio_enable_interrupts(vscsi->dma_dev); 882 } else { 883 rc = ibmvscsis_free_command_q(vscsi); 884 if (rc == ADAPT_SUCCESS) { 885 vscsi->state = WAIT_CONNECTION; 886 887 bytes = vscsi->cmd_q.size * PAGE_SIZE; 888 rc = h_reg_crq(vscsi->dds.unit_id, 889 vscsi->cmd_q.crq_token, bytes); 890 if (rc == H_CLOSED || rc == H_SUCCESS) { 891 rc = ibmvscsis_establish_new_q(vscsi); 892 } 893 894 if (rc != ADAPT_SUCCESS) { 895 pr_debug("reset_queue: reg_crq rc %ld\n", rc); 896 897 vscsi->state = ERR_DISCONNECTED; 898 vscsi->flags |= RESPONSE_Q_DOWN; 899 ibmvscsis_free_command_q(vscsi); 900 } 901 } else { 902 vscsi->state = ERR_DISCONNECTED; 903 vscsi->flags |= RESPONSE_Q_DOWN; 904 } 905 } 906 } 907 908 /** 909 * ibmvscsis_free_cmd_resources() - Free command resources 910 * @vscsi: Pointer to our adapter structure 911 * @cmd: Command which is not longer in use 912 * 913 * Must be called with interrupt lock held. 914 */ 915 static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, 916 struct ibmvscsis_cmd *cmd) 917 { 918 struct iu_entry *iue = cmd->iue; 919 920 switch (cmd->type) { 921 case TASK_MANAGEMENT: 922 case SCSI_CDB: 923 /* 924 * When the queue goes down this value is cleared, so it 925 * cannot be cleared in this general purpose function. 926 */ 927 if (vscsi->debit) 928 vscsi->debit -= 1; 929 break; 930 case ADAPTER_MAD: 931 vscsi->flags &= ~PROCESSING_MAD; 932 break; 933 case UNSET_TYPE: 934 break; 935 default: 936 dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n", 937 cmd->type); 938 break; 939 } 940 941 cmd->iue = NULL; 942 list_add_tail(&cmd->list, &vscsi->free_cmd); 943 srp_iu_put(iue); 944 945 if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) && 946 list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) { 947 vscsi->flags &= ~WAIT_FOR_IDLE; 948 complete(&vscsi->wait_idle); 949 } 950 } 951 952 /** 953 * ibmvscsis_trans_event() - Handle a Transport Event 954 * @vscsi: Pointer to our adapter structure 955 * @crq: Pointer to CRQ entry containing the Transport Event 956 * 957 * Do the logic to close the I_T nexus. This function may not 958 * behave to specification. 959 * 960 * EXECUTION ENVIRONMENT: 961 * Interrupt, interrupt lock held 962 */ 963 static long ibmvscsis_trans_event(struct scsi_info *vscsi, 964 struct viosrp_crq *crq) 965 { 966 long rc = ADAPT_SUCCESS; 967 968 pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n", 969 (int)crq->format, vscsi->flags, vscsi->state); 970 971 switch (crq->format) { 972 case MIGRATED: 973 case PARTNER_FAILED: 974 case PARTNER_DEREGISTER: 975 ibmvscsis_delete_client_info(vscsi, true); 976 break; 977 978 default: 979 rc = ERROR; 980 dev_err(&vscsi->dev, "trans_event: invalid format %d\n", 981 (uint)crq->format); 982 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 983 RESPONSE_Q_DOWN); 984 break; 985 } 986 987 if (rc == ADAPT_SUCCESS) { 988 switch (vscsi->state) { 989 case NO_QUEUE: 990 case ERR_DISCONNECTED: 991 case UNDEFINED: 992 break; 993 994 case UNCONFIGURING: 995 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); 996 break; 997 998 case WAIT_ENABLED: 999 break; 1000 1001 case WAIT_CONNECTION: 1002 break; 1003 1004 case CONNECTED: 1005 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 1006 (RESPONSE_Q_DOWN | 1007 TRANS_EVENT)); 1008 break; 1009 1010 case SRP_PROCESSING: 1011 if ((vscsi->debit > 0) || 1012 !list_empty(&vscsi->schedule_q) || 1013 !list_empty(&vscsi->waiting_rsp) || 1014 !list_empty(&vscsi->active_q)) { 1015 pr_debug("debit %d, sched %d, wait %d, active %d\n", 1016 vscsi->debit, 1017 (int)list_empty(&vscsi->schedule_q), 1018 (int)list_empty(&vscsi->waiting_rsp), 1019 (int)list_empty(&vscsi->active_q)); 1020 pr_warn("connection lost with outstanding work\n"); 1021 } else { 1022 pr_debug("trans_event: SRP Processing, but no outstanding work\n"); 1023 } 1024 1025 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 1026 (RESPONSE_Q_DOWN | 1027 TRANS_EVENT)); 1028 break; 1029 1030 case ERR_DISCONNECT: 1031 case ERR_DISCONNECT_RECONNECT: 1032 case WAIT_IDLE: 1033 vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); 1034 break; 1035 } 1036 } 1037 1038 rc = vscsi->flags & SCHEDULE_DISCONNECT; 1039 1040 pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n", 1041 vscsi->flags, vscsi->state, rc); 1042 1043 return rc; 1044 } 1045 1046 /** 1047 * ibmvscsis_poll_cmd_q() - Poll Command Queue 1048 * @vscsi: Pointer to our adapter structure 1049 * 1050 * Called to handle command elements that may have arrived while 1051 * interrupts were disabled. 1052 * 1053 * EXECUTION ENVIRONMENT: 1054 * intr_lock must be held 1055 */ 1056 static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi) 1057 { 1058 struct viosrp_crq *crq; 1059 long rc; 1060 bool ack = true; 1061 volatile u8 valid; 1062 1063 pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n", 1064 vscsi->flags, vscsi->state, vscsi->cmd_q.index); 1065 1066 rc = vscsi->flags & SCHEDULE_DISCONNECT; 1067 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 1068 valid = crq->valid; 1069 dma_rmb(); 1070 1071 while (valid) { 1072 poll_work: 1073 vscsi->cmd_q.index = 1074 (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask; 1075 1076 if (!rc) { 1077 rc = ibmvscsis_parse_command(vscsi, crq); 1078 } else { 1079 if ((uint)crq->valid == VALID_TRANS_EVENT) { 1080 /* 1081 * must service the transport layer events even 1082 * in an error state, dont break out until all 1083 * the consecutive transport events have been 1084 * processed 1085 */ 1086 rc = ibmvscsis_trans_event(vscsi, crq); 1087 } else if (vscsi->flags & TRANS_EVENT) { 1088 /* 1089 * if a tranport event has occurred leave 1090 * everything but transport events on the queue 1091 */ 1092 pr_debug("poll_cmd_q, ignoring\n"); 1093 1094 /* 1095 * need to decrement the queue index so we can 1096 * look at the elment again 1097 */ 1098 if (vscsi->cmd_q.index) 1099 vscsi->cmd_q.index -= 1; 1100 else 1101 /* 1102 * index is at 0 it just wrapped. 1103 * have it index last element in q 1104 */ 1105 vscsi->cmd_q.index = vscsi->cmd_q.mask; 1106 break; 1107 } 1108 } 1109 1110 crq->valid = INVALIDATE_CMD_RESP_EL; 1111 1112 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 1113 valid = crq->valid; 1114 dma_rmb(); 1115 } 1116 1117 if (!rc) { 1118 if (ack) { 1119 vio_enable_interrupts(vscsi->dma_dev); 1120 ack = false; 1121 pr_debug("poll_cmd_q, reenabling interrupts\n"); 1122 } 1123 valid = crq->valid; 1124 dma_rmb(); 1125 if (valid) 1126 goto poll_work; 1127 } 1128 1129 pr_debug("Leaving poll_cmd_q: rc %ld\n", rc); 1130 } 1131 1132 /** 1133 * ibmvscsis_free_cmd_qs() - Free elements in queue 1134 * @vscsi: Pointer to our adapter structure 1135 * 1136 * Free all of the elements on all queues that are waiting for 1137 * whatever reason. 1138 * 1139 * PRECONDITION: 1140 * Called with interrupt lock held 1141 */ 1142 static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi) 1143 { 1144 struct ibmvscsis_cmd *cmd, *nxt; 1145 1146 pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n", 1147 (int)list_empty(&vscsi->waiting_rsp), 1148 vscsi->rsp_q_timer.started); 1149 1150 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { 1151 list_del(&cmd->list); 1152 ibmvscsis_free_cmd_resources(vscsi, cmd); 1153 } 1154 } 1155 1156 /** 1157 * ibmvscsis_get_free_cmd() - Get free command from list 1158 * @vscsi: Pointer to our adapter structure 1159 * 1160 * Must be called with interrupt lock held. 1161 */ 1162 static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) 1163 { 1164 struct ibmvscsis_cmd *cmd = NULL; 1165 struct iu_entry *iue; 1166 1167 iue = srp_iu_get(&vscsi->target); 1168 if (iue) { 1169 cmd = list_first_entry_or_null(&vscsi->free_cmd, 1170 struct ibmvscsis_cmd, list); 1171 if (cmd) { 1172 list_del(&cmd->list); 1173 cmd->iue = iue; 1174 cmd->type = UNSET_TYPE; 1175 memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd)); 1176 } else { 1177 srp_iu_put(iue); 1178 } 1179 } 1180 1181 return cmd; 1182 } 1183 1184 /** 1185 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter 1186 * @vscsi: Pointer to our adapter structure 1187 * 1188 * This function is called when the adapter is idle when the driver 1189 * is attempting to clear an error condition. 1190 * The adapter is considered busy if any of its cmd queues 1191 * are non-empty. This function can be invoked 1192 * from the off level disconnect function. 1193 * 1194 * EXECUTION ENVIRONMENT: 1195 * Process environment called with interrupt lock held 1196 */ 1197 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) 1198 { 1199 int free_qs = false; 1200 1201 pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags, 1202 vscsi->state); 1203 1204 /* Only need to free qs if we're disconnecting from client */ 1205 if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT) 1206 free_qs = true; 1207 1208 switch (vscsi->state) { 1209 case UNCONFIGURING: 1210 ibmvscsis_free_command_q(vscsi); 1211 dma_rmb(); 1212 isync(); 1213 if (vscsi->flags & CFG_SLEEPING) { 1214 vscsi->flags &= ~CFG_SLEEPING; 1215 complete(&vscsi->unconfig); 1216 } 1217 break; 1218 case ERR_DISCONNECT_RECONNECT: 1219 ibmvscsis_reset_queue(vscsi); 1220 pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags); 1221 break; 1222 1223 case ERR_DISCONNECT: 1224 ibmvscsis_free_command_q(vscsi); 1225 vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED); 1226 vscsi->flags |= RESPONSE_Q_DOWN; 1227 if (vscsi->tport.enabled) 1228 vscsi->state = ERR_DISCONNECTED; 1229 else 1230 vscsi->state = WAIT_ENABLED; 1231 pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n", 1232 vscsi->flags, vscsi->state); 1233 break; 1234 1235 case WAIT_IDLE: 1236 vscsi->rsp_q_timer.timer_pops = 0; 1237 vscsi->debit = 0; 1238 vscsi->credit = 0; 1239 if (vscsi->flags & TRANS_EVENT) { 1240 vscsi->state = WAIT_CONNECTION; 1241 vscsi->flags &= PRESERVE_FLAG_FIELDS; 1242 } else { 1243 vscsi->state = CONNECTED; 1244 vscsi->flags &= ~DISCONNECT_SCHEDULED; 1245 } 1246 1247 pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n", 1248 vscsi->flags, vscsi->state); 1249 ibmvscsis_poll_cmd_q(vscsi); 1250 break; 1251 1252 case ERR_DISCONNECTED: 1253 vscsi->flags &= ~DISCONNECT_SCHEDULED; 1254 pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n", 1255 vscsi->flags, vscsi->state); 1256 break; 1257 1258 default: 1259 dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n", 1260 vscsi->state); 1261 break; 1262 } 1263 1264 if (free_qs) 1265 ibmvscsis_free_cmd_qs(vscsi); 1266 1267 /* 1268 * There is a timing window where we could lose a disconnect request. 1269 * The known path to this window occurs during the DISCONNECT_RECONNECT 1270 * case above: reset_queue calls free_command_q, which will release the 1271 * interrupt lock. During that time, a new post_disconnect call can be 1272 * made with a "more severe" state (DISCONNECT or UNCONFIGURING). 1273 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect 1274 * will only set the new_state. Now free_command_q reacquires the intr 1275 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_ 1276 * FIELDS), and the disconnect is lost. This is particularly bad when 1277 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs 1278 * forever. 1279 * Fix is that free command queue sets acr state and acr flags if there 1280 * is a change under the lock 1281 * note free command queue writes to this state it clears it 1282 * before releasing the lock, different drivers call the free command 1283 * queue different times so dont initialize above 1284 */ 1285 if (vscsi->phyp_acr_state != 0) { 1286 /* 1287 * set any bits in flags that may have been cleared by 1288 * a call to free command queue in switch statement 1289 * or reset queue 1290 */ 1291 vscsi->flags |= vscsi->phyp_acr_flags; 1292 ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0); 1293 vscsi->phyp_acr_state = 0; 1294 vscsi->phyp_acr_flags = 0; 1295 1296 pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n", 1297 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags, 1298 vscsi->phyp_acr_state); 1299 } 1300 1301 pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n", 1302 vscsi->flags, vscsi->state, vscsi->new_state); 1303 } 1304 1305 /** 1306 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet 1307 * @vscsi: Pointer to our adapter structure 1308 * @cmd: Pointer to command element to use to process the request 1309 * @crq: Pointer to CRQ entry containing the request 1310 * 1311 * Copy the srp information unit from the hosted 1312 * partition using remote dma 1313 * 1314 * EXECUTION ENVIRONMENT: 1315 * Interrupt, interrupt lock held 1316 */ 1317 static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi, 1318 struct ibmvscsis_cmd *cmd, 1319 struct viosrp_crq *crq) 1320 { 1321 struct iu_entry *iue = cmd->iue; 1322 long rc = 0; 1323 u16 len; 1324 1325 len = be16_to_cpu(crq->IU_length); 1326 if ((len > SRP_MAX_IU_LEN) || (len == 0)) { 1327 dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len); 1328 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 1329 return SRP_VIOLATION; 1330 } 1331 1332 rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn, 1333 be64_to_cpu(crq->IU_data_ptr), 1334 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma); 1335 1336 switch (rc) { 1337 case H_SUCCESS: 1338 cmd->init_time = mftb(); 1339 iue->remote_token = crq->IU_data_ptr; 1340 iue->iu_len = len; 1341 pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n", 1342 be64_to_cpu(crq->IU_data_ptr), cmd->init_time); 1343 break; 1344 case H_PERMISSION: 1345 if (connection_broken(vscsi)) 1346 ibmvscsis_post_disconnect(vscsi, 1347 ERR_DISCONNECT_RECONNECT, 1348 (RESPONSE_Q_DOWN | 1349 CLIENT_FAILED)); 1350 else 1351 ibmvscsis_post_disconnect(vscsi, 1352 ERR_DISCONNECT_RECONNECT, 0); 1353 1354 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n", 1355 rc); 1356 break; 1357 case H_DEST_PARM: 1358 case H_SOURCE_PARM: 1359 default: 1360 dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n", 1361 rc); 1362 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 1363 break; 1364 } 1365 1366 return rc; 1367 } 1368 1369 /** 1370 * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram 1371 * @vscsi: Pointer to our adapter structure 1372 * @iue: Information Unit containing the Adapter Info MAD request 1373 * 1374 * EXECUTION ENVIRONMENT: 1375 * Interrupt adapter lock is held 1376 */ 1377 static long ibmvscsis_adapter_info(struct scsi_info *vscsi, 1378 struct iu_entry *iue) 1379 { 1380 struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info; 1381 struct mad_adapter_info_data *info; 1382 uint flag_bits = 0; 1383 dma_addr_t token; 1384 long rc; 1385 1386 mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS); 1387 1388 if (be16_to_cpu(mad->common.length) > sizeof(*info)) { 1389 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1390 return 0; 1391 } 1392 1393 info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token, 1394 GFP_KERNEL); 1395 if (!info) { 1396 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1397 iue->target); 1398 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1399 return 0; 1400 } 1401 1402 /* Get remote info */ 1403 rc = h_copy_rdma(be16_to_cpu(mad->common.length), 1404 vscsi->dds.window[REMOTE].liobn, 1405 be64_to_cpu(mad->buffer), 1406 vscsi->dds.window[LOCAL].liobn, token); 1407 1408 if (rc != H_SUCCESS) { 1409 if (rc == H_PERMISSION) { 1410 if (connection_broken(vscsi)) 1411 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 1412 } 1413 pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n", 1414 rc); 1415 pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n", 1416 be64_to_cpu(mad->buffer), vscsi->flags, flag_bits); 1417 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 1418 flag_bits); 1419 goto free_dma; 1420 } 1421 1422 /* 1423 * Copy client info, but ignore partition number, which we 1424 * already got from phyp - unless we failed to get it from 1425 * phyp (e.g. if we're running on a p5 system). 1426 */ 1427 if (vscsi->client_data.partition_number == 0) 1428 vscsi->client_data.partition_number = 1429 be32_to_cpu(info->partition_number); 1430 strncpy(vscsi->client_data.srp_version, info->srp_version, 1431 sizeof(vscsi->client_data.srp_version)); 1432 strncpy(vscsi->client_data.partition_name, info->partition_name, 1433 sizeof(vscsi->client_data.partition_name)); 1434 vscsi->client_data.mad_version = be32_to_cpu(info->mad_version); 1435 vscsi->client_data.os_type = be32_to_cpu(info->os_type); 1436 1437 /* Copy our info */ 1438 strncpy(info->srp_version, SRP_VERSION, 1439 sizeof(info->srp_version)); 1440 strncpy(info->partition_name, vscsi->dds.partition_name, 1441 sizeof(info->partition_name)); 1442 info->partition_number = cpu_to_be32(vscsi->dds.partition_num); 1443 info->mad_version = cpu_to_be32(MAD_VERSION_1); 1444 info->os_type = cpu_to_be32(LINUX); 1445 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); 1446 info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); 1447 1448 dma_wmb(); 1449 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, 1450 token, vscsi->dds.window[REMOTE].liobn, 1451 be64_to_cpu(mad->buffer)); 1452 switch (rc) { 1453 case H_SUCCESS: 1454 break; 1455 1456 case H_SOURCE_PARM: 1457 case H_DEST_PARM: 1458 case H_PERMISSION: 1459 if (connection_broken(vscsi)) 1460 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 1461 default: 1462 dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", 1463 rc); 1464 ibmvscsis_post_disconnect(vscsi, 1465 ERR_DISCONNECT_RECONNECT, 1466 flag_bits); 1467 break; 1468 } 1469 1470 free_dma: 1471 dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token); 1472 pr_debug("Leaving adapter_info, rc %ld\n", rc); 1473 1474 return rc; 1475 } 1476 1477 /** 1478 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram 1479 * @vscsi: Pointer to our adapter structure 1480 * @iue: Information Unit containing the Capabilities MAD request 1481 * 1482 * NOTE: if you return an error from this routine you must be 1483 * disconnecting or you will cause a hang 1484 * 1485 * EXECUTION ENVIRONMENT: 1486 * Interrupt called with adapter lock held 1487 */ 1488 static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) 1489 { 1490 struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities; 1491 struct capabilities *cap; 1492 struct mad_capability_common *common; 1493 dma_addr_t token; 1494 u16 olen, len, status, min_len, cap_len; 1495 u32 flag; 1496 uint flag_bits = 0; 1497 long rc = 0; 1498 1499 olen = be16_to_cpu(mad->common.length); 1500 /* 1501 * struct capabilities hardcodes a couple capabilities after the 1502 * header, but the capabilities can actually be in any order. 1503 */ 1504 min_len = offsetof(struct capabilities, migration); 1505 if ((olen < min_len) || (olen > PAGE_SIZE)) { 1506 pr_warn("cap_mad: invalid len %d\n", olen); 1507 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1508 return 0; 1509 } 1510 1511 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, 1512 GFP_KERNEL); 1513 if (!cap) { 1514 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1515 iue->target); 1516 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1517 return 0; 1518 } 1519 rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn, 1520 be64_to_cpu(mad->buffer), 1521 vscsi->dds.window[LOCAL].liobn, token); 1522 if (rc == H_SUCCESS) { 1523 strncpy(cap->name, dev_name(&vscsi->dma_dev->dev), 1524 SRP_MAX_LOC_LEN); 1525 1526 len = olen - min_len; 1527 status = VIOSRP_MAD_SUCCESS; 1528 common = (struct mad_capability_common *)&cap->migration; 1529 1530 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) { 1531 pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n", 1532 len, be32_to_cpu(common->cap_type), 1533 be16_to_cpu(common->length)); 1534 1535 cap_len = be16_to_cpu(common->length); 1536 if (cap_len > len) { 1537 dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n"); 1538 status = VIOSRP_MAD_FAILED; 1539 break; 1540 } 1541 1542 if (cap_len == 0) { 1543 dev_err(&vscsi->dev, "cap_mad: cap len is 0\n"); 1544 status = VIOSRP_MAD_FAILED; 1545 break; 1546 } 1547 1548 switch (common->cap_type) { 1549 default: 1550 pr_debug("cap_mad: unsupported capability\n"); 1551 common->server_support = 0; 1552 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED); 1553 cap->flags &= ~flag; 1554 break; 1555 } 1556 1557 len = len - cap_len; 1558 common = (struct mad_capability_common *) 1559 ((char *)common + cap_len); 1560 } 1561 1562 mad->common.status = cpu_to_be16(status); 1563 1564 dma_wmb(); 1565 rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token, 1566 vscsi->dds.window[REMOTE].liobn, 1567 be64_to_cpu(mad->buffer)); 1568 1569 if (rc != H_SUCCESS) { 1570 pr_debug("cap_mad: failed to copy to client, rc %ld\n", 1571 rc); 1572 1573 if (rc == H_PERMISSION) { 1574 if (connection_broken(vscsi)) 1575 flag_bits = (RESPONSE_Q_DOWN | 1576 CLIENT_FAILED); 1577 } 1578 1579 pr_warn("cap_mad: error copying data to client, rc %ld\n", 1580 rc); 1581 ibmvscsis_post_disconnect(vscsi, 1582 ERR_DISCONNECT_RECONNECT, 1583 flag_bits); 1584 } 1585 } 1586 1587 dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token); 1588 1589 pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n", 1590 rc, vscsi->client_cap); 1591 1592 return rc; 1593 } 1594 1595 /** 1596 * ibmvscsis_process_mad() - Service a MAnagement Data gram 1597 * @vscsi: Pointer to our adapter structure 1598 * @iue: Information Unit containing the MAD request 1599 * 1600 * Must be called with interrupt lock held. 1601 */ 1602 static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue) 1603 { 1604 struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; 1605 struct viosrp_empty_iu *empty; 1606 long rc = ADAPT_SUCCESS; 1607 1608 switch (be32_to_cpu(mad->type)) { 1609 case VIOSRP_EMPTY_IU_TYPE: 1610 empty = &vio_iu(iue)->mad.empty_iu; 1611 vscsi->empty_iu_id = be64_to_cpu(empty->buffer); 1612 vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag); 1613 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); 1614 break; 1615 case VIOSRP_ADAPTER_INFO_TYPE: 1616 rc = ibmvscsis_adapter_info(vscsi, iue); 1617 break; 1618 case VIOSRP_CAPABILITIES_TYPE: 1619 rc = ibmvscsis_cap_mad(vscsi, iue); 1620 break; 1621 case VIOSRP_ENABLE_FAST_FAIL: 1622 if (vscsi->state == CONNECTED) { 1623 vscsi->fast_fail = true; 1624 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); 1625 } else { 1626 pr_warn("fast fail mad sent after login\n"); 1627 mad->status = cpu_to_be16(VIOSRP_MAD_FAILED); 1628 } 1629 break; 1630 default: 1631 mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED); 1632 break; 1633 } 1634 1635 return rc; 1636 } 1637 1638 /** 1639 * srp_snd_msg_failed() - Handle an error when sending a response 1640 * @vscsi: Pointer to our adapter structure 1641 * @rc: The return code from the h_send_crq command 1642 * 1643 * Must be called with interrupt lock held. 1644 */ 1645 static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc) 1646 { 1647 ktime_t kt; 1648 1649 if (rc != H_DROPPED) { 1650 ibmvscsis_free_cmd_qs(vscsi); 1651 1652 if (rc == H_CLOSED) 1653 vscsi->flags |= CLIENT_FAILED; 1654 1655 /* don't flag the same problem multiple times */ 1656 if (!(vscsi->flags & RESPONSE_Q_DOWN)) { 1657 vscsi->flags |= RESPONSE_Q_DOWN; 1658 if (!(vscsi->state & (ERR_DISCONNECT | 1659 ERR_DISCONNECT_RECONNECT | 1660 ERR_DISCONNECTED | UNDEFINED))) { 1661 dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n", 1662 vscsi->state, vscsi->flags, rc); 1663 } 1664 ibmvscsis_post_disconnect(vscsi, 1665 ERR_DISCONNECT_RECONNECT, 0); 1666 } 1667 return; 1668 } 1669 1670 /* 1671 * The response queue is full. 1672 * If the server is processing SRP requests, i.e. 1673 * the client has successfully done an 1674 * SRP_LOGIN, then it will wait forever for room in 1675 * the queue. However if the system admin 1676 * is attempting to unconfigure the server then one 1677 * or more children will be in a state where 1678 * they are being removed. So if there is even one 1679 * child being removed then the driver assumes 1680 * the system admin is attempting to break the 1681 * connection with the client and MAX_TIMER_POPS 1682 * is honored. 1683 */ 1684 if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) || 1685 (vscsi->state == SRP_PROCESSING)) { 1686 pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n", 1687 vscsi->flags, (int)vscsi->rsp_q_timer.started, 1688 vscsi->rsp_q_timer.timer_pops); 1689 1690 /* 1691 * Check if the timer is running; if it 1692 * is not then start it up. 1693 */ 1694 if (!vscsi->rsp_q_timer.started) { 1695 if (vscsi->rsp_q_timer.timer_pops < 1696 MAX_TIMER_POPS) { 1697 kt = WAIT_NANO_SECONDS; 1698 } else { 1699 /* 1700 * slide the timeslice if the maximum 1701 * timer pops have already happened 1702 */ 1703 kt = ktime_set(WAIT_SECONDS, 0); 1704 } 1705 1706 vscsi->rsp_q_timer.started = true; 1707 hrtimer_start(&vscsi->rsp_q_timer.timer, kt, 1708 HRTIMER_MODE_REL); 1709 } 1710 } else { 1711 /* 1712 * TBD: Do we need to worry about this? Need to get 1713 * remove working. 1714 */ 1715 /* 1716 * waited a long time and it appears the system admin 1717 * is bring this driver down 1718 */ 1719 vscsi->flags |= RESPONSE_Q_DOWN; 1720 ibmvscsis_free_cmd_qs(vscsi); 1721 /* 1722 * if the driver is already attempting to disconnect 1723 * from the client and has already logged an error 1724 * trace this event but don't put it in the error log 1725 */ 1726 if (!(vscsi->state & (ERR_DISCONNECT | 1727 ERR_DISCONNECT_RECONNECT | 1728 ERR_DISCONNECTED | UNDEFINED))) { 1729 dev_err(&vscsi->dev, "client crq full too long\n"); 1730 ibmvscsis_post_disconnect(vscsi, 1731 ERR_DISCONNECT_RECONNECT, 1732 0); 1733 } 1734 } 1735 } 1736 1737 /** 1738 * ibmvscsis_send_messages() - Send a Response 1739 * @vscsi: Pointer to our adapter structure 1740 * 1741 * Send a response, first checking the waiting queue. Responses are 1742 * sent in order they are received. If the response cannot be sent, 1743 * because the client queue is full, it stays on the waiting queue. 1744 * 1745 * PRECONDITION: 1746 * Called with interrupt lock held 1747 */ 1748 static void ibmvscsis_send_messages(struct scsi_info *vscsi) 1749 { 1750 u64 msg_hi = 0; 1751 /* note do not attmempt to access the IU_data_ptr with this pointer 1752 * it is not valid 1753 */ 1754 struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi; 1755 struct ibmvscsis_cmd *cmd, *nxt; 1756 struct iu_entry *iue; 1757 long rc = ADAPT_SUCCESS; 1758 1759 if (!(vscsi->flags & RESPONSE_Q_DOWN)) { 1760 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { 1761 iue = cmd->iue; 1762 1763 crq->valid = VALID_CMD_RESP_EL; 1764 crq->format = cmd->rsp.format; 1765 1766 if (cmd->flags & CMD_FAST_FAIL) 1767 crq->status = VIOSRP_ADAPTER_FAIL; 1768 1769 crq->IU_length = cpu_to_be16(cmd->rsp.len); 1770 1771 rc = h_send_crq(vscsi->dma_dev->unit_address, 1772 be64_to_cpu(msg_hi), 1773 be64_to_cpu(cmd->rsp.tag)); 1774 1775 pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", 1776 cmd, be64_to_cpu(cmd->rsp.tag), rc); 1777 1778 /* if all ok free up the command element resources */ 1779 if (rc == H_SUCCESS) { 1780 /* some movement has occurred */ 1781 vscsi->rsp_q_timer.timer_pops = 0; 1782 list_del(&cmd->list); 1783 1784 ibmvscsis_free_cmd_resources(vscsi, cmd); 1785 } else { 1786 srp_snd_msg_failed(vscsi, rc); 1787 break; 1788 } 1789 } 1790 1791 if (!rc) { 1792 /* 1793 * The timer could pop with the queue empty. If 1794 * this happens, rc will always indicate a 1795 * success; clear the pop count. 1796 */ 1797 vscsi->rsp_q_timer.timer_pops = 0; 1798 } 1799 } else { 1800 ibmvscsis_free_cmd_qs(vscsi); 1801 } 1802 } 1803 1804 /* Called with intr lock held */ 1805 static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi, 1806 struct ibmvscsis_cmd *cmd, 1807 struct viosrp_crq *crq) 1808 { 1809 struct iu_entry *iue = cmd->iue; 1810 struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; 1811 uint flag_bits = 0; 1812 long rc; 1813 1814 dma_wmb(); 1815 rc = h_copy_rdma(sizeof(struct mad_common), 1816 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, 1817 vscsi->dds.window[REMOTE].liobn, 1818 be64_to_cpu(crq->IU_data_ptr)); 1819 if (!rc) { 1820 cmd->rsp.format = VIOSRP_MAD_FORMAT; 1821 cmd->rsp.len = sizeof(struct mad_common); 1822 cmd->rsp.tag = mad->tag; 1823 list_add_tail(&cmd->list, &vscsi->waiting_rsp); 1824 ibmvscsis_send_messages(vscsi); 1825 } else { 1826 pr_debug("Error sending mad response, rc %ld\n", rc); 1827 if (rc == H_PERMISSION) { 1828 if (connection_broken(vscsi)) 1829 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 1830 } 1831 dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n", 1832 rc); 1833 1834 ibmvscsis_free_cmd_resources(vscsi, cmd); 1835 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 1836 flag_bits); 1837 } 1838 } 1839 1840 /** 1841 * ibmvscsis_mad() - Service a MAnagement Data gram. 1842 * @vscsi: Pointer to our adapter structure 1843 * @crq: Pointer to the CRQ entry containing the MAD request 1844 * 1845 * EXECUTION ENVIRONMENT: 1846 * Interrupt, called with adapter lock held 1847 */ 1848 static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq) 1849 { 1850 struct iu_entry *iue; 1851 struct ibmvscsis_cmd *cmd; 1852 struct mad_common *mad; 1853 long rc = ADAPT_SUCCESS; 1854 1855 switch (vscsi->state) { 1856 /* 1857 * We have not exchanged Init Msgs yet, so this MAD was sent 1858 * before the last Transport Event; client will not be 1859 * expecting a response. 1860 */ 1861 case WAIT_CONNECTION: 1862 pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n", 1863 vscsi->flags); 1864 return ADAPT_SUCCESS; 1865 1866 case SRP_PROCESSING: 1867 case CONNECTED: 1868 break; 1869 1870 /* 1871 * We should never get here while we're in these states. 1872 * Just log an error and get out. 1873 */ 1874 case UNCONFIGURING: 1875 case WAIT_IDLE: 1876 case ERR_DISCONNECT: 1877 case ERR_DISCONNECT_RECONNECT: 1878 default: 1879 dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n", 1880 vscsi->state); 1881 return ADAPT_SUCCESS; 1882 } 1883 1884 cmd = ibmvscsis_get_free_cmd(vscsi); 1885 if (!cmd) { 1886 dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n", 1887 vscsi->debit); 1888 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 1889 return ERROR; 1890 } 1891 iue = cmd->iue; 1892 cmd->type = ADAPTER_MAD; 1893 1894 rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq); 1895 if (!rc) { 1896 mad = (struct mad_common *)&vio_iu(iue)->mad; 1897 1898 pr_debug("mad: type %d\n", be32_to_cpu(mad->type)); 1899 1900 rc = ibmvscsis_process_mad(vscsi, iue); 1901 1902 pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status), 1903 rc); 1904 1905 if (!rc) 1906 ibmvscsis_send_mad_resp(vscsi, cmd, crq); 1907 } else { 1908 ibmvscsis_free_cmd_resources(vscsi, cmd); 1909 } 1910 1911 pr_debug("Leaving mad, rc %ld\n", rc); 1912 return rc; 1913 } 1914 1915 /** 1916 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client 1917 * @vscsi: Pointer to our adapter structure 1918 * @cmd: Pointer to the command for the SRP Login request 1919 * 1920 * EXECUTION ENVIRONMENT: 1921 * Interrupt, interrupt lock held 1922 */ 1923 static long ibmvscsis_login_rsp(struct scsi_info *vscsi, 1924 struct ibmvscsis_cmd *cmd) 1925 { 1926 struct iu_entry *iue = cmd->iue; 1927 struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp; 1928 struct format_code *fmt; 1929 uint flag_bits = 0; 1930 long rc = ADAPT_SUCCESS; 1931 1932 memset(rsp, 0, sizeof(struct srp_login_rsp)); 1933 1934 rsp->opcode = SRP_LOGIN_RSP; 1935 rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit); 1936 rsp->tag = cmd->rsp.tag; 1937 rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); 1938 rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN); 1939 fmt = (struct format_code *)&rsp->buf_fmt; 1940 fmt->buffers = SUPPORTED_FORMATS; 1941 vscsi->credit = 0; 1942 1943 cmd->rsp.len = sizeof(struct srp_login_rsp); 1944 1945 dma_wmb(); 1946 rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn, 1947 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn, 1948 be64_to_cpu(iue->remote_token)); 1949 1950 switch (rc) { 1951 case H_SUCCESS: 1952 break; 1953 1954 case H_PERMISSION: 1955 if (connection_broken(vscsi)) 1956 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; 1957 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n", 1958 rc); 1959 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 1960 flag_bits); 1961 break; 1962 case H_SOURCE_PARM: 1963 case H_DEST_PARM: 1964 default: 1965 dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n", 1966 rc); 1967 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 1968 break; 1969 } 1970 1971 return rc; 1972 } 1973 1974 /** 1975 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client 1976 * @vscsi: Pointer to our adapter structure 1977 * @cmd: Pointer to the command for the SRP Login request 1978 * @reason: The reason the SRP Login is being rejected, per SRP protocol 1979 * 1980 * EXECUTION ENVIRONMENT: 1981 * Interrupt, interrupt lock held 1982 */ 1983 static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi, 1984 struct ibmvscsis_cmd *cmd, u32 reason) 1985 { 1986 struct iu_entry *iue = cmd->iue; 1987 struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej; 1988 struct format_code *fmt; 1989 uint flag_bits = 0; 1990 long rc = ADAPT_SUCCESS; 1991 1992 memset(rej, 0, sizeof(*rej)); 1993 1994 rej->opcode = SRP_LOGIN_REJ; 1995 rej->reason = cpu_to_be32(reason); 1996 rej->tag = cmd->rsp.tag; 1997 fmt = (struct format_code *)&rej->buf_fmt; 1998 fmt->buffers = SUPPORTED_FORMATS; 1999 2000 cmd->rsp.len = sizeof(*rej); 2001 2002 dma_wmb(); 2003 rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn, 2004 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn, 2005 be64_to_cpu(iue->remote_token)); 2006 2007 switch (rc) { 2008 case H_SUCCESS: 2009 break; 2010 case H_PERMISSION: 2011 if (connection_broken(vscsi)) 2012 flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED; 2013 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", 2014 rc); 2015 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 2016 flag_bits); 2017 break; 2018 case H_SOURCE_PARM: 2019 case H_DEST_PARM: 2020 default: 2021 dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n", 2022 rc); 2023 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2024 break; 2025 } 2026 2027 return rc; 2028 } 2029 2030 static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport) 2031 { 2032 char *name = tport->tport_name; 2033 struct ibmvscsis_nexus *nexus; 2034 int rc; 2035 2036 if (tport->ibmv_nexus) { 2037 pr_debug("tport->ibmv_nexus already exists\n"); 2038 return 0; 2039 } 2040 2041 nexus = kzalloc(sizeof(*nexus), GFP_KERNEL); 2042 if (!nexus) { 2043 pr_err("Unable to allocate struct ibmvscsis_nexus\n"); 2044 return -ENOMEM; 2045 } 2046 2047 nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0, 2048 TARGET_PROT_NORMAL, name, nexus, 2049 NULL); 2050 if (IS_ERR(nexus->se_sess)) { 2051 rc = PTR_ERR(nexus->se_sess); 2052 goto transport_init_fail; 2053 } 2054 2055 tport->ibmv_nexus = nexus; 2056 2057 return 0; 2058 2059 transport_init_fail: 2060 kfree(nexus); 2061 return rc; 2062 } 2063 2064 static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport) 2065 { 2066 struct se_session *se_sess; 2067 struct ibmvscsis_nexus *nexus; 2068 2069 nexus = tport->ibmv_nexus; 2070 if (!nexus) 2071 return -ENODEV; 2072 2073 se_sess = nexus->se_sess; 2074 if (!se_sess) 2075 return -ENODEV; 2076 2077 /* 2078 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port 2079 */ 2080 target_wait_for_sess_cmds(se_sess); 2081 transport_deregister_session_configfs(se_sess); 2082 transport_deregister_session(se_sess); 2083 tport->ibmv_nexus = NULL; 2084 kfree(nexus); 2085 2086 return 0; 2087 } 2088 2089 /** 2090 * ibmvscsis_srp_login() - Process an SRP Login Request 2091 * @vscsi: Pointer to our adapter structure 2092 * @cmd: Command element to use to process the SRP Login request 2093 * @crq: Pointer to CRQ entry containing the SRP Login request 2094 * 2095 * EXECUTION ENVIRONMENT: 2096 * Interrupt, called with interrupt lock held 2097 */ 2098 static long ibmvscsis_srp_login(struct scsi_info *vscsi, 2099 struct ibmvscsis_cmd *cmd, 2100 struct viosrp_crq *crq) 2101 { 2102 struct iu_entry *iue = cmd->iue; 2103 struct srp_login_req *req = &vio_iu(iue)->srp.login_req; 2104 struct port_id { 2105 __be64 id_extension; 2106 __be64 io_guid; 2107 } *iport, *tport; 2108 struct format_code *fmt; 2109 u32 reason = 0x0; 2110 long rc = ADAPT_SUCCESS; 2111 2112 iport = (struct port_id *)req->initiator_port_id; 2113 tport = (struct port_id *)req->target_port_id; 2114 fmt = (struct format_code *)&req->req_buf_fmt; 2115 if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN) 2116 reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE; 2117 else if (be32_to_cpu(req->req_it_iu_len) < 64) 2118 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL; 2119 else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) || 2120 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1))) 2121 reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL; 2122 else if (req->req_flags & SRP_MULTICHAN_MULTI) 2123 reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED; 2124 else if (fmt->buffers & (~SUPPORTED_FORMATS)) 2125 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; 2126 else if ((fmt->buffers & SUPPORTED_FORMATS) == 0) 2127 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; 2128 2129 if (vscsi->state == SRP_PROCESSING) 2130 reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED; 2131 2132 rc = ibmvscsis_make_nexus(&vscsi->tport); 2133 if (rc) 2134 reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL; 2135 2136 cmd->rsp.format = VIOSRP_SRP_FORMAT; 2137 cmd->rsp.tag = req->tag; 2138 2139 pr_debug("srp_login: reason 0x%x\n", reason); 2140 2141 if (reason) 2142 rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason); 2143 else 2144 rc = ibmvscsis_login_rsp(vscsi, cmd); 2145 2146 if (!rc) { 2147 if (!reason) 2148 vscsi->state = SRP_PROCESSING; 2149 2150 list_add_tail(&cmd->list, &vscsi->waiting_rsp); 2151 ibmvscsis_send_messages(vscsi); 2152 } else { 2153 ibmvscsis_free_cmd_resources(vscsi, cmd); 2154 } 2155 2156 pr_debug("Leaving srp_login, rc %ld\n", rc); 2157 return rc; 2158 } 2159 2160 /** 2161 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus 2162 * @vscsi: Pointer to our adapter structure 2163 * @cmd: Command element to use to process the Implicit Logout request 2164 * @crq: Pointer to CRQ entry containing the Implicit Logout request 2165 * 2166 * Do the logic to close the I_T nexus. This function may not 2167 * behave to specification. 2168 * 2169 * EXECUTION ENVIRONMENT: 2170 * Interrupt, interrupt lock held 2171 */ 2172 static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi, 2173 struct ibmvscsis_cmd *cmd, 2174 struct viosrp_crq *crq) 2175 { 2176 struct iu_entry *iue = cmd->iue; 2177 struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout; 2178 long rc = ADAPT_SUCCESS; 2179 2180 if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) || 2181 !list_empty(&vscsi->waiting_rsp)) { 2182 dev_err(&vscsi->dev, "i_logout: outstanding work\n"); 2183 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); 2184 } else { 2185 cmd->rsp.format = SRP_FORMAT; 2186 cmd->rsp.tag = log_out->tag; 2187 cmd->rsp.len = sizeof(struct mad_common); 2188 list_add_tail(&cmd->list, &vscsi->waiting_rsp); 2189 ibmvscsis_send_messages(vscsi); 2190 2191 ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); 2192 } 2193 2194 return rc; 2195 } 2196 2197 /* Called with intr lock held */ 2198 static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq) 2199 { 2200 struct ibmvscsis_cmd *cmd; 2201 struct iu_entry *iue; 2202 struct srp_cmd *srp; 2203 struct srp_tsk_mgmt *tsk; 2204 long rc; 2205 2206 if (vscsi->request_limit - vscsi->debit <= 0) { 2207 /* Client has exceeded request limit */ 2208 dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n", 2209 vscsi->request_limit, vscsi->debit); 2210 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2211 return; 2212 } 2213 2214 cmd = ibmvscsis_get_free_cmd(vscsi); 2215 if (!cmd) { 2216 dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n", 2217 vscsi->debit); 2218 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2219 return; 2220 } 2221 iue = cmd->iue; 2222 srp = &vio_iu(iue)->srp.cmd; 2223 2224 rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq); 2225 if (rc) { 2226 ibmvscsis_free_cmd_resources(vscsi, cmd); 2227 return; 2228 } 2229 2230 if (vscsi->state == SRP_PROCESSING) { 2231 switch (srp->opcode) { 2232 case SRP_LOGIN_REQ: 2233 rc = ibmvscsis_srp_login(vscsi, cmd, crq); 2234 break; 2235 2236 case SRP_TSK_MGMT: 2237 tsk = &vio_iu(iue)->srp.tsk_mgmt; 2238 pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag, 2239 tsk->tag); 2240 cmd->rsp.tag = tsk->tag; 2241 vscsi->debit += 1; 2242 cmd->type = TASK_MANAGEMENT; 2243 list_add_tail(&cmd->list, &vscsi->schedule_q); 2244 queue_work(vscsi->work_q, &cmd->work); 2245 break; 2246 2247 case SRP_CMD: 2248 pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag, 2249 srp->tag); 2250 cmd->rsp.tag = srp->tag; 2251 vscsi->debit += 1; 2252 cmd->type = SCSI_CDB; 2253 /* 2254 * We want to keep track of work waiting for 2255 * the workqueue. 2256 */ 2257 list_add_tail(&cmd->list, &vscsi->schedule_q); 2258 queue_work(vscsi->work_q, &cmd->work); 2259 break; 2260 2261 case SRP_I_LOGOUT: 2262 rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq); 2263 break; 2264 2265 case SRP_CRED_RSP: 2266 case SRP_AER_RSP: 2267 default: 2268 ibmvscsis_free_cmd_resources(vscsi, cmd); 2269 dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n", 2270 (uint)srp->opcode); 2271 ibmvscsis_post_disconnect(vscsi, 2272 ERR_DISCONNECT_RECONNECT, 0); 2273 break; 2274 } 2275 } else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) { 2276 rc = ibmvscsis_srp_login(vscsi, cmd, crq); 2277 } else { 2278 ibmvscsis_free_cmd_resources(vscsi, cmd); 2279 dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n", 2280 vscsi->state); 2281 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2282 } 2283 } 2284 2285 /** 2286 * ibmvscsis_ping_response() - Respond to a ping request 2287 * @vscsi: Pointer to our adapter structure 2288 * 2289 * Let the client know that the server is alive and waiting on 2290 * its native I/O stack. 2291 * If any type of error occurs from the call to queue a ping 2292 * response then the client is either not accepting or receiving 2293 * interrupts. Disconnect with an error. 2294 * 2295 * EXECUTION ENVIRONMENT: 2296 * Interrupt, interrupt lock held 2297 */ 2298 static long ibmvscsis_ping_response(struct scsi_info *vscsi) 2299 { 2300 struct viosrp_crq *crq; 2301 u64 buffer[2] = { 0, 0 }; 2302 long rc; 2303 2304 crq = (struct viosrp_crq *)&buffer; 2305 crq->valid = VALID_CMD_RESP_EL; 2306 crq->format = (u8)MESSAGE_IN_CRQ; 2307 crq->status = PING_RESPONSE; 2308 2309 rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]), 2310 cpu_to_be64(buffer[MSG_LOW])); 2311 2312 switch (rc) { 2313 case H_SUCCESS: 2314 break; 2315 case H_CLOSED: 2316 vscsi->flags |= CLIENT_FAILED; 2317 case H_DROPPED: 2318 vscsi->flags |= RESPONSE_Q_DOWN; 2319 case H_REMOTE_PARM: 2320 dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", 2321 rc); 2322 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2323 break; 2324 default: 2325 dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n", 2326 rc); 2327 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); 2328 break; 2329 } 2330 2331 return rc; 2332 } 2333 2334 /** 2335 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue. 2336 * @vscsi: Pointer to our adapter structure 2337 * @crq: Pointer to CRQ element containing the SRP request 2338 * 2339 * This function will return success if the command queue element is valid 2340 * and the srp iu or MAD request it pointed to was also valid. That does 2341 * not mean that an error was not returned to the client. 2342 * 2343 * EXECUTION ENVIRONMENT: 2344 * Interrupt, intr lock held 2345 */ 2346 static long ibmvscsis_parse_command(struct scsi_info *vscsi, 2347 struct viosrp_crq *crq) 2348 { 2349 long rc = ADAPT_SUCCESS; 2350 2351 switch (crq->valid) { 2352 case VALID_CMD_RESP_EL: 2353 switch (crq->format) { 2354 case OS400_FORMAT: 2355 case AIX_FORMAT: 2356 case LINUX_FORMAT: 2357 case MAD_FORMAT: 2358 if (vscsi->flags & PROCESSING_MAD) { 2359 rc = ERROR; 2360 dev_err(&vscsi->dev, "parse_command: already processing mad\n"); 2361 ibmvscsis_post_disconnect(vscsi, 2362 ERR_DISCONNECT_RECONNECT, 2363 0); 2364 } else { 2365 vscsi->flags |= PROCESSING_MAD; 2366 rc = ibmvscsis_mad(vscsi, crq); 2367 } 2368 break; 2369 2370 case SRP_FORMAT: 2371 ibmvscsis_srp_cmd(vscsi, crq); 2372 break; 2373 2374 case MESSAGE_IN_CRQ: 2375 if (crq->status == PING) 2376 ibmvscsis_ping_response(vscsi); 2377 break; 2378 2379 default: 2380 dev_err(&vscsi->dev, "parse_command: invalid format %d\n", 2381 (uint)crq->format); 2382 ibmvscsis_post_disconnect(vscsi, 2383 ERR_DISCONNECT_RECONNECT, 0); 2384 break; 2385 } 2386 break; 2387 2388 case VALID_TRANS_EVENT: 2389 rc = ibmvscsis_trans_event(vscsi, crq); 2390 break; 2391 2392 case VALID_INIT_MSG: 2393 rc = ibmvscsis_init_msg(vscsi, crq); 2394 break; 2395 2396 default: 2397 dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n", 2398 (uint)crq->valid); 2399 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2400 break; 2401 } 2402 2403 /* 2404 * Return only what the interrupt handler cares 2405 * about. Most errors we keep right on trucking. 2406 */ 2407 rc = vscsi->flags & SCHEDULE_DISCONNECT; 2408 2409 return rc; 2410 } 2411 2412 static int read_dma_window(struct scsi_info *vscsi) 2413 { 2414 struct vio_dev *vdev = vscsi->dma_dev; 2415 const __be32 *dma_window; 2416 const __be32 *prop; 2417 2418 /* TODO Using of_parse_dma_window would be better, but it doesn't give 2419 * a way to read multiple windows without already knowing the size of 2420 * a window or the number of windows. 2421 */ 2422 dma_window = (const __be32 *)vio_get_attribute(vdev, 2423 "ibm,my-dma-window", 2424 NULL); 2425 if (!dma_window) { 2426 pr_err("Couldn't find ibm,my-dma-window property\n"); 2427 return -1; 2428 } 2429 2430 vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window); 2431 dma_window++; 2432 2433 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", 2434 NULL); 2435 if (!prop) { 2436 pr_warn("Couldn't find ibm,#dma-address-cells property\n"); 2437 dma_window++; 2438 } else { 2439 dma_window += be32_to_cpu(*prop); 2440 } 2441 2442 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", 2443 NULL); 2444 if (!prop) { 2445 pr_warn("Couldn't find ibm,#dma-size-cells property\n"); 2446 dma_window++; 2447 } else { 2448 dma_window += be32_to_cpu(*prop); 2449 } 2450 2451 /* dma_window should point to the second window now */ 2452 vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window); 2453 2454 return 0; 2455 } 2456 2457 static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name) 2458 { 2459 struct ibmvscsis_tport *tport = NULL; 2460 struct vio_dev *vdev; 2461 struct scsi_info *vscsi; 2462 2463 spin_lock_bh(&ibmvscsis_dev_lock); 2464 list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) { 2465 vdev = vscsi->dma_dev; 2466 if (!strcmp(dev_name(&vdev->dev), name)) { 2467 tport = &vscsi->tport; 2468 break; 2469 } 2470 } 2471 spin_unlock_bh(&ibmvscsis_dev_lock); 2472 2473 return tport; 2474 } 2475 2476 /** 2477 * ibmvscsis_parse_cmd() - Parse SRP Command 2478 * @vscsi: Pointer to our adapter structure 2479 * @cmd: Pointer to command element with SRP command 2480 * 2481 * Parse the srp command; if it is valid then submit it to tcm. 2482 * Note: The return code does not reflect the status of the SCSI CDB. 2483 * 2484 * EXECUTION ENVIRONMENT: 2485 * Process level 2486 */ 2487 static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, 2488 struct ibmvscsis_cmd *cmd) 2489 { 2490 struct iu_entry *iue = cmd->iue; 2491 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; 2492 struct ibmvscsis_nexus *nexus; 2493 u64 data_len = 0; 2494 enum dma_data_direction dir; 2495 int attr = 0; 2496 int rc = 0; 2497 2498 nexus = vscsi->tport.ibmv_nexus; 2499 /* 2500 * additional length in bytes. Note that the SRP spec says that 2501 * additional length is in 4-byte words, but technically the 2502 * additional length field is only the upper 6 bits of the byte. 2503 * The lower 2 bits are reserved. If the lower 2 bits are 0 (as 2504 * all reserved fields should be), then interpreting the byte as 2505 * an int will yield the length in bytes. 2506 */ 2507 if (srp->add_cdb_len & 0x03) { 2508 dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n"); 2509 spin_lock_bh(&vscsi->intr_lock); 2510 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2511 ibmvscsis_free_cmd_resources(vscsi, cmd); 2512 spin_unlock_bh(&vscsi->intr_lock); 2513 return; 2514 } 2515 2516 if (srp_get_desc_table(srp, &dir, &data_len)) { 2517 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", 2518 srp->tag); 2519 goto fail; 2520 } 2521 2522 cmd->rsp.sol_not = srp->sol_not; 2523 2524 switch (srp->task_attr) { 2525 case SRP_SIMPLE_TASK: 2526 attr = TCM_SIMPLE_TAG; 2527 break; 2528 case SRP_ORDERED_TASK: 2529 attr = TCM_ORDERED_TAG; 2530 break; 2531 case SRP_HEAD_TASK: 2532 attr = TCM_HEAD_TAG; 2533 break; 2534 case SRP_ACA_TASK: 2535 attr = TCM_ACA_TAG; 2536 break; 2537 default: 2538 dev_err(&vscsi->dev, "Invalid task attribute %d\n", 2539 srp->task_attr); 2540 goto fail; 2541 } 2542 2543 cmd->se_cmd.tag = be64_to_cpu(srp->tag); 2544 2545 spin_lock_bh(&vscsi->intr_lock); 2546 list_add_tail(&cmd->list, &vscsi->active_q); 2547 spin_unlock_bh(&vscsi->intr_lock); 2548 2549 srp->lun.scsi_lun[0] &= 0x3f; 2550 2551 rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb, 2552 cmd->sense_buf, scsilun_to_int(&srp->lun), 2553 data_len, attr, dir, 0); 2554 if (rc) { 2555 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); 2556 spin_lock_bh(&vscsi->intr_lock); 2557 list_del(&cmd->list); 2558 ibmvscsis_free_cmd_resources(vscsi, cmd); 2559 spin_unlock_bh(&vscsi->intr_lock); 2560 goto fail; 2561 } 2562 return; 2563 2564 fail: 2565 spin_lock_bh(&vscsi->intr_lock); 2566 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2567 spin_unlock_bh(&vscsi->intr_lock); 2568 } 2569 2570 /** 2571 * ibmvscsis_parse_task() - Parse SRP Task Management Request 2572 * @vscsi: Pointer to our adapter structure 2573 * @cmd: Pointer to command element with SRP task management request 2574 * 2575 * Parse the srp task management request; if it is valid then submit it to tcm. 2576 * Note: The return code does not reflect the status of the task management 2577 * request. 2578 * 2579 * EXECUTION ENVIRONMENT: 2580 * Processor level 2581 */ 2582 static void ibmvscsis_parse_task(struct scsi_info *vscsi, 2583 struct ibmvscsis_cmd *cmd) 2584 { 2585 struct iu_entry *iue = cmd->iue; 2586 struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; 2587 int tcm_type; 2588 u64 tag_to_abort = 0; 2589 int rc = 0; 2590 struct ibmvscsis_nexus *nexus; 2591 2592 nexus = vscsi->tport.ibmv_nexus; 2593 2594 cmd->rsp.sol_not = srp_tsk->sol_not; 2595 2596 switch (srp_tsk->tsk_mgmt_func) { 2597 case SRP_TSK_ABORT_TASK: 2598 tcm_type = TMR_ABORT_TASK; 2599 tag_to_abort = be64_to_cpu(srp_tsk->task_tag); 2600 break; 2601 case SRP_TSK_ABORT_TASK_SET: 2602 tcm_type = TMR_ABORT_TASK_SET; 2603 break; 2604 case SRP_TSK_CLEAR_TASK_SET: 2605 tcm_type = TMR_CLEAR_TASK_SET; 2606 break; 2607 case SRP_TSK_LUN_RESET: 2608 tcm_type = TMR_LUN_RESET; 2609 break; 2610 case SRP_TSK_CLEAR_ACA: 2611 tcm_type = TMR_CLEAR_ACA; 2612 break; 2613 default: 2614 dev_err(&vscsi->dev, "unknown task mgmt func %d\n", 2615 srp_tsk->tsk_mgmt_func); 2616 cmd->se_cmd.se_tmr_req->response = 2617 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 2618 rc = -1; 2619 break; 2620 } 2621 2622 if (!rc) { 2623 cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag); 2624 2625 spin_lock_bh(&vscsi->intr_lock); 2626 list_add_tail(&cmd->list, &vscsi->active_q); 2627 spin_unlock_bh(&vscsi->intr_lock); 2628 2629 srp_tsk->lun.scsi_lun[0] &= 0x3f; 2630 2631 pr_debug("calling submit_tmr, func %d\n", 2632 srp_tsk->tsk_mgmt_func); 2633 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL, 2634 scsilun_to_int(&srp_tsk->lun), srp_tsk, 2635 tcm_type, GFP_KERNEL, tag_to_abort, 0); 2636 if (rc) { 2637 dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", 2638 rc); 2639 spin_lock_bh(&vscsi->intr_lock); 2640 list_del(&cmd->list); 2641 spin_unlock_bh(&vscsi->intr_lock); 2642 cmd->se_cmd.se_tmr_req->response = 2643 TMR_FUNCTION_REJECTED; 2644 } 2645 } 2646 2647 if (rc) 2648 transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0); 2649 } 2650 2651 static void ibmvscsis_scheduler(struct work_struct *work) 2652 { 2653 struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd, 2654 work); 2655 struct scsi_info *vscsi = cmd->adapter; 2656 2657 spin_lock_bh(&vscsi->intr_lock); 2658 2659 /* Remove from schedule_q */ 2660 list_del(&cmd->list); 2661 2662 /* Don't submit cmd if we're disconnecting */ 2663 if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) { 2664 ibmvscsis_free_cmd_resources(vscsi, cmd); 2665 2666 /* ibmvscsis_disconnect might be waiting for us */ 2667 if (list_empty(&vscsi->active_q) && 2668 list_empty(&vscsi->schedule_q) && 2669 (vscsi->flags & WAIT_FOR_IDLE)) { 2670 vscsi->flags &= ~WAIT_FOR_IDLE; 2671 complete(&vscsi->wait_idle); 2672 } 2673 2674 spin_unlock_bh(&vscsi->intr_lock); 2675 return; 2676 } 2677 2678 spin_unlock_bh(&vscsi->intr_lock); 2679 2680 switch (cmd->type) { 2681 case SCSI_CDB: 2682 ibmvscsis_parse_cmd(vscsi, cmd); 2683 break; 2684 case TASK_MANAGEMENT: 2685 ibmvscsis_parse_task(vscsi, cmd); 2686 break; 2687 default: 2688 dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n", 2689 cmd->type); 2690 spin_lock_bh(&vscsi->intr_lock); 2691 ibmvscsis_free_cmd_resources(vscsi, cmd); 2692 spin_unlock_bh(&vscsi->intr_lock); 2693 break; 2694 } 2695 } 2696 2697 static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num) 2698 { 2699 struct ibmvscsis_cmd *cmd; 2700 int i; 2701 2702 INIT_LIST_HEAD(&vscsi->free_cmd); 2703 vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd), 2704 GFP_KERNEL); 2705 if (!vscsi->cmd_pool) 2706 return -ENOMEM; 2707 2708 for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num; 2709 i++, cmd++) { 2710 cmd->adapter = vscsi; 2711 INIT_WORK(&cmd->work, ibmvscsis_scheduler); 2712 list_add_tail(&cmd->list, &vscsi->free_cmd); 2713 } 2714 2715 return 0; 2716 } 2717 2718 static void ibmvscsis_free_cmds(struct scsi_info *vscsi) 2719 { 2720 kfree(vscsi->cmd_pool); 2721 vscsi->cmd_pool = NULL; 2722 INIT_LIST_HEAD(&vscsi->free_cmd); 2723 } 2724 2725 /** 2726 * ibmvscsis_service_wait_q() - Service Waiting Queue 2727 * @timer: Pointer to timer which has expired 2728 * 2729 * This routine is called when the timer pops to service the waiting 2730 * queue. Elements on the queue have completed, their responses have been 2731 * copied to the client, but the client's response queue was full so 2732 * the queue message could not be sent. The routine grabs the proper locks 2733 * and calls send messages. 2734 * 2735 * EXECUTION ENVIRONMENT: 2736 * called at interrupt level 2737 */ 2738 static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer) 2739 { 2740 struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer); 2741 struct scsi_info *vscsi = container_of(p_timer, struct scsi_info, 2742 rsp_q_timer); 2743 2744 spin_lock_bh(&vscsi->intr_lock); 2745 p_timer->timer_pops += 1; 2746 p_timer->started = false; 2747 ibmvscsis_send_messages(vscsi); 2748 spin_unlock_bh(&vscsi->intr_lock); 2749 2750 return HRTIMER_NORESTART; 2751 } 2752 2753 static long ibmvscsis_alloctimer(struct scsi_info *vscsi) 2754 { 2755 struct timer_cb *p_timer; 2756 2757 p_timer = &vscsi->rsp_q_timer; 2758 hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2759 2760 p_timer->timer.function = ibmvscsis_service_wait_q; 2761 p_timer->started = false; 2762 p_timer->timer_pops = 0; 2763 2764 return ADAPT_SUCCESS; 2765 } 2766 2767 static void ibmvscsis_freetimer(struct scsi_info *vscsi) 2768 { 2769 struct timer_cb *p_timer; 2770 2771 p_timer = &vscsi->rsp_q_timer; 2772 2773 (void)hrtimer_cancel(&p_timer->timer); 2774 2775 p_timer->started = false; 2776 p_timer->timer_pops = 0; 2777 } 2778 2779 static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) 2780 { 2781 struct scsi_info *vscsi = data; 2782 2783 vio_disable_interrupts(vscsi->dma_dev); 2784 tasklet_schedule(&vscsi->work_task); 2785 2786 return IRQ_HANDLED; 2787 } 2788 2789 /** 2790 * ibmvscsis_enable_change_state() - Set new state based on enabled status 2791 * @vscsi: Pointer to our adapter structure 2792 * 2793 * This function determines our new state now that we are enabled. This 2794 * may involve sending an Init Complete message to the client. 2795 * 2796 * Must be called with interrupt lock held. 2797 */ 2798 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) 2799 { 2800 int bytes; 2801 long rc = ADAPT_SUCCESS; 2802 2803 bytes = vscsi->cmd_q.size * PAGE_SIZE; 2804 rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes); 2805 if (rc == H_CLOSED || rc == H_SUCCESS) { 2806 vscsi->state = WAIT_CONNECTION; 2807 rc = ibmvscsis_establish_new_q(vscsi); 2808 } 2809 2810 if (rc != ADAPT_SUCCESS) { 2811 vscsi->state = ERR_DISCONNECTED; 2812 vscsi->flags |= RESPONSE_Q_DOWN; 2813 } 2814 2815 return rc; 2816 } 2817 2818 /** 2819 * ibmvscsis_create_command_q() - Create Command Queue 2820 * @vscsi: Pointer to our adapter structure 2821 * @num_cmds: Currently unused. In the future, may be used to determine 2822 * the size of the CRQ. 2823 * 2824 * Allocates memory for command queue maps remote memory into an ioba 2825 * initializes the command response queue 2826 * 2827 * EXECUTION ENVIRONMENT: 2828 * Process level only 2829 */ 2830 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) 2831 { 2832 int pages; 2833 struct vio_dev *vdev = vscsi->dma_dev; 2834 2835 /* We might support multiple pages in the future, but just 1 for now */ 2836 pages = 1; 2837 2838 vscsi->cmd_q.size = pages; 2839 2840 vscsi->cmd_q.base_addr = 2841 (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); 2842 if (!vscsi->cmd_q.base_addr) 2843 return -ENOMEM; 2844 2845 vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1; 2846 2847 vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev, 2848 vscsi->cmd_q.base_addr, 2849 PAGE_SIZE, DMA_BIDIRECTIONAL); 2850 if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) { 2851 free_page((unsigned long)vscsi->cmd_q.base_addr); 2852 return -ENOMEM; 2853 } 2854 2855 return 0; 2856 } 2857 2858 /** 2859 * ibmvscsis_destroy_command_q - Destroy Command Queue 2860 * @vscsi: Pointer to our adapter structure 2861 * 2862 * Releases memory for command queue and unmaps mapped remote memory. 2863 * 2864 * EXECUTION ENVIRONMENT: 2865 * Process level only 2866 */ 2867 static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi) 2868 { 2869 dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token, 2870 PAGE_SIZE, DMA_BIDIRECTIONAL); 2871 free_page((unsigned long)vscsi->cmd_q.base_addr); 2872 vscsi->cmd_q.base_addr = NULL; 2873 vscsi->state = NO_QUEUE; 2874 } 2875 2876 static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi, 2877 struct ibmvscsis_cmd *cmd) 2878 { 2879 struct iu_entry *iue = cmd->iue; 2880 struct se_cmd *se_cmd = &cmd->se_cmd; 2881 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; 2882 struct scsi_sense_hdr sshdr; 2883 u8 rc = se_cmd->scsi_status; 2884 2885 if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb))) 2886 if (scsi_normalize_sense(se_cmd->sense_buffer, 2887 se_cmd->scsi_sense_length, &sshdr)) 2888 if (sshdr.sense_key == HARDWARE_ERROR && 2889 (se_cmd->residual_count == 0 || 2890 se_cmd->residual_count == se_cmd->data_length)) { 2891 rc = NO_SENSE; 2892 cmd->flags |= CMD_FAST_FAIL; 2893 } 2894 2895 return rc; 2896 } 2897 2898 /** 2899 * srp_build_response() - Build an SRP response buffer 2900 * @vscsi: Pointer to our adapter structure 2901 * @cmd: Pointer to command for which to send the response 2902 * @len_p: Where to return the length of the IU response sent. This 2903 * is needed to construct the CRQ response. 2904 * 2905 * Build the SRP response buffer and copy it to the client's memory space. 2906 */ 2907 static long srp_build_response(struct scsi_info *vscsi, 2908 struct ibmvscsis_cmd *cmd, uint *len_p) 2909 { 2910 struct iu_entry *iue = cmd->iue; 2911 struct se_cmd *se_cmd = &cmd->se_cmd; 2912 struct srp_rsp *rsp; 2913 uint len; 2914 u32 rsp_code; 2915 char *data; 2916 u32 *tsk_status; 2917 long rc = ADAPT_SUCCESS; 2918 2919 spin_lock_bh(&vscsi->intr_lock); 2920 2921 rsp = &vio_iu(iue)->srp.rsp; 2922 len = sizeof(*rsp); 2923 memset(rsp, 0, len); 2924 data = rsp->data; 2925 2926 rsp->opcode = SRP_RSP; 2927 2928 if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) 2929 rsp->req_lim_delta = cpu_to_be32(vscsi->credit); 2930 else 2931 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 2932 rsp->tag = cmd->rsp.tag; 2933 rsp->flags = 0; 2934 2935 if (cmd->type == SCSI_CDB) { 2936 rsp->status = ibmvscsis_fast_fail(vscsi, cmd); 2937 if (rsp->status) { 2938 pr_debug("build_resp: cmd %p, scsi status %d\n", cmd, 2939 (int)rsp->status); 2940 ibmvscsis_determine_resid(se_cmd, rsp); 2941 if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) { 2942 rsp->sense_data_len = 2943 cpu_to_be32(se_cmd->scsi_sense_length); 2944 rsp->flags |= SRP_RSP_FLAG_SNSVALID; 2945 len += se_cmd->scsi_sense_length; 2946 memcpy(data, se_cmd->sense_buffer, 2947 se_cmd->scsi_sense_length); 2948 } 2949 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 2950 UCSOLNT_RESP_SHIFT; 2951 } else if (cmd->flags & CMD_FAST_FAIL) { 2952 pr_debug("build_resp: cmd %p, fast fail\n", cmd); 2953 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 2954 UCSOLNT_RESP_SHIFT; 2955 } else { 2956 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> 2957 SCSOLNT_RESP_SHIFT; 2958 } 2959 } else { 2960 /* this is task management */ 2961 rsp->status = 0; 2962 rsp->resp_data_len = cpu_to_be32(4); 2963 rsp->flags |= SRP_RSP_FLAG_RSPVALID; 2964 2965 switch (se_cmd->se_tmr_req->response) { 2966 case TMR_FUNCTION_COMPLETE: 2967 case TMR_TASK_DOES_NOT_EXIST: 2968 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE; 2969 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> 2970 SCSOLNT_RESP_SHIFT; 2971 break; 2972 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 2973 case TMR_LUN_DOES_NOT_EXIST: 2974 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED; 2975 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 2976 UCSOLNT_RESP_SHIFT; 2977 break; 2978 case TMR_FUNCTION_FAILED: 2979 case TMR_FUNCTION_REJECTED: 2980 default: 2981 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED; 2982 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 2983 UCSOLNT_RESP_SHIFT; 2984 break; 2985 } 2986 2987 tsk_status = (u32 *)data; 2988 *tsk_status = cpu_to_be32(rsp_code); 2989 data = (char *)(tsk_status + 1); 2990 len += 4; 2991 } 2992 2993 dma_wmb(); 2994 rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, 2995 vscsi->dds.window[REMOTE].liobn, 2996 be64_to_cpu(iue->remote_token)); 2997 2998 switch (rc) { 2999 case H_SUCCESS: 3000 vscsi->credit = 0; 3001 *len_p = len; 3002 break; 3003 case H_PERMISSION: 3004 if (connection_broken(vscsi)) 3005 vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED; 3006 3007 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n", 3008 rc, vscsi->flags, vscsi->state); 3009 break; 3010 case H_SOURCE_PARM: 3011 case H_DEST_PARM: 3012 default: 3013 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n", 3014 rc); 3015 break; 3016 } 3017 3018 spin_unlock_bh(&vscsi->intr_lock); 3019 3020 return rc; 3021 } 3022 3023 static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg, 3024 int nsg, struct srp_direct_buf *md, int nmd, 3025 enum dma_data_direction dir, unsigned int bytes) 3026 { 3027 struct iu_entry *iue = cmd->iue; 3028 struct srp_target *target = iue->target; 3029 struct scsi_info *vscsi = target->ldata; 3030 struct scatterlist *sgp; 3031 dma_addr_t client_ioba, server_ioba; 3032 ulong buf_len; 3033 ulong client_len, server_len; 3034 int md_idx; 3035 long tx_len; 3036 long rc = 0; 3037 3038 if (bytes == 0) 3039 return 0; 3040 3041 sgp = sg; 3042 client_len = 0; 3043 server_len = 0; 3044 md_idx = 0; 3045 tx_len = bytes; 3046 3047 do { 3048 if (client_len == 0) { 3049 if (md_idx >= nmd) { 3050 dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n"); 3051 rc = -EIO; 3052 break; 3053 } 3054 client_ioba = be64_to_cpu(md[md_idx].va); 3055 client_len = be32_to_cpu(md[md_idx].len); 3056 } 3057 if (server_len == 0) { 3058 if (!sgp) { 3059 dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n"); 3060 rc = -EIO; 3061 break; 3062 } 3063 server_ioba = sg_dma_address(sgp); 3064 server_len = sg_dma_len(sgp); 3065 } 3066 3067 buf_len = tx_len; 3068 3069 if (buf_len > client_len) 3070 buf_len = client_len; 3071 3072 if (buf_len > server_len) 3073 buf_len = server_len; 3074 3075 if (buf_len > max_vdma_size) 3076 buf_len = max_vdma_size; 3077 3078 if (dir == DMA_TO_DEVICE) { 3079 /* read from client */ 3080 rc = h_copy_rdma(buf_len, 3081 vscsi->dds.window[REMOTE].liobn, 3082 client_ioba, 3083 vscsi->dds.window[LOCAL].liobn, 3084 server_ioba); 3085 } else { 3086 /* The h_copy_rdma will cause phyp, running in another 3087 * partition, to read memory, so we need to make sure 3088 * the data has been written out, hence these syncs. 3089 */ 3090 /* ensure that everything is in memory */ 3091 isync(); 3092 /* ensure that memory has been made visible */ 3093 dma_wmb(); 3094 rc = h_copy_rdma(buf_len, 3095 vscsi->dds.window[LOCAL].liobn, 3096 server_ioba, 3097 vscsi->dds.window[REMOTE].liobn, 3098 client_ioba); 3099 } 3100 switch (rc) { 3101 case H_SUCCESS: 3102 break; 3103 case H_PERMISSION: 3104 case H_SOURCE_PARM: 3105 case H_DEST_PARM: 3106 if (connection_broken(vscsi)) { 3107 spin_lock_bh(&vscsi->intr_lock); 3108 vscsi->flags |= 3109 (RESPONSE_Q_DOWN | CLIENT_FAILED); 3110 spin_unlock_bh(&vscsi->intr_lock); 3111 } 3112 dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n", 3113 rc); 3114 break; 3115 3116 default: 3117 dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n", 3118 rc); 3119 break; 3120 } 3121 3122 if (!rc) { 3123 tx_len -= buf_len; 3124 if (tx_len) { 3125 client_len -= buf_len; 3126 if (client_len == 0) 3127 md_idx++; 3128 else 3129 client_ioba += buf_len; 3130 3131 server_len -= buf_len; 3132 if (server_len == 0) 3133 sgp = sg_next(sgp); 3134 else 3135 server_ioba += buf_len; 3136 } else { 3137 break; 3138 } 3139 } 3140 } while (!rc); 3141 3142 return rc; 3143 } 3144 3145 /** 3146 * ibmvscsis_handle_crq() - Handle CRQ 3147 * @data: Pointer to our adapter structure 3148 * 3149 * Read the command elements from the command queue and copy the payloads 3150 * associated with the command elements to local memory and execute the 3151 * SRP requests. 3152 * 3153 * Note: this is an edge triggered interrupt. It can not be shared. 3154 */ 3155 static void ibmvscsis_handle_crq(unsigned long data) 3156 { 3157 struct scsi_info *vscsi = (struct scsi_info *)data; 3158 struct viosrp_crq *crq; 3159 long rc; 3160 bool ack = true; 3161 volatile u8 valid; 3162 3163 spin_lock_bh(&vscsi->intr_lock); 3164 3165 pr_debug("got interrupt\n"); 3166 3167 /* 3168 * if we are in a path where we are waiting for all pending commands 3169 * to complete because we received a transport event and anything in 3170 * the command queue is for a new connection, do nothing 3171 */ 3172 if (TARGET_STOP(vscsi)) { 3173 vio_enable_interrupts(vscsi->dma_dev); 3174 3175 pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n", 3176 vscsi->flags, vscsi->state); 3177 spin_unlock_bh(&vscsi->intr_lock); 3178 return; 3179 } 3180 3181 rc = vscsi->flags & SCHEDULE_DISCONNECT; 3182 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 3183 valid = crq->valid; 3184 dma_rmb(); 3185 3186 while (valid) { 3187 /* 3188 * These are edege triggered interrupts. After dropping out of 3189 * the while loop, the code must check for work since an 3190 * interrupt could be lost, and an elment be left on the queue, 3191 * hence the label. 3192 */ 3193 cmd_work: 3194 vscsi->cmd_q.index = 3195 (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask; 3196 3197 if (!rc) { 3198 rc = ibmvscsis_parse_command(vscsi, crq); 3199 } else { 3200 if ((uint)crq->valid == VALID_TRANS_EVENT) { 3201 /* 3202 * must service the transport layer events even 3203 * in an error state, dont break out until all 3204 * the consecutive transport events have been 3205 * processed 3206 */ 3207 rc = ibmvscsis_trans_event(vscsi, crq); 3208 } else if (vscsi->flags & TRANS_EVENT) { 3209 /* 3210 * if a transport event has occurred leave 3211 * everything but transport events on the queue 3212 * 3213 * need to decrement the queue index so we can 3214 * look at the element again 3215 */ 3216 if (vscsi->cmd_q.index) 3217 vscsi->cmd_q.index -= 1; 3218 else 3219 /* 3220 * index is at 0 it just wrapped. 3221 * have it index last element in q 3222 */ 3223 vscsi->cmd_q.index = vscsi->cmd_q.mask; 3224 break; 3225 } 3226 } 3227 3228 crq->valid = INVALIDATE_CMD_RESP_EL; 3229 3230 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 3231 valid = crq->valid; 3232 dma_rmb(); 3233 } 3234 3235 if (!rc) { 3236 if (ack) { 3237 vio_enable_interrupts(vscsi->dma_dev); 3238 ack = false; 3239 pr_debug("handle_crq, reenabling interrupts\n"); 3240 } 3241 valid = crq->valid; 3242 dma_rmb(); 3243 if (valid) 3244 goto cmd_work; 3245 } else { 3246 pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n", 3247 vscsi->flags, vscsi->state, vscsi->cmd_q.index); 3248 } 3249 3250 pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n", 3251 (int)list_empty(&vscsi->schedule_q), vscsi->flags, 3252 vscsi->state); 3253 3254 spin_unlock_bh(&vscsi->intr_lock); 3255 } 3256 3257 static int ibmvscsis_probe(struct vio_dev *vdev, 3258 const struct vio_device_id *id) 3259 { 3260 struct scsi_info *vscsi; 3261 int rc = 0; 3262 long hrc = 0; 3263 char wq_name[24]; 3264 3265 vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL); 3266 if (!vscsi) { 3267 rc = -ENOMEM; 3268 pr_err("probe: allocation of adapter failed\n"); 3269 return rc; 3270 } 3271 3272 vscsi->dma_dev = vdev; 3273 vscsi->dev = vdev->dev; 3274 INIT_LIST_HEAD(&vscsi->schedule_q); 3275 INIT_LIST_HEAD(&vscsi->waiting_rsp); 3276 INIT_LIST_HEAD(&vscsi->active_q); 3277 3278 snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", 3279 dev_name(&vdev->dev)); 3280 3281 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); 3282 3283 rc = read_dma_window(vscsi); 3284 if (rc) 3285 goto free_adapter; 3286 pr_debug("Probe: liobn 0x%x, riobn 0x%x\n", 3287 vscsi->dds.window[LOCAL].liobn, 3288 vscsi->dds.window[REMOTE].liobn); 3289 3290 strcpy(vscsi->eye, "VSCSI "); 3291 strncat(vscsi->eye, vdev->name, MAX_EYE); 3292 3293 vscsi->dds.unit_id = vdev->unit_address; 3294 strncpy(vscsi->dds.partition_name, partition_name, 3295 sizeof(vscsi->dds.partition_name)); 3296 vscsi->dds.partition_num = partition_number; 3297 3298 spin_lock_bh(&ibmvscsis_dev_lock); 3299 list_add_tail(&vscsi->list, &ibmvscsis_dev_list); 3300 spin_unlock_bh(&ibmvscsis_dev_lock); 3301 3302 /* 3303 * TBD: How do we determine # of cmds to request? Do we know how 3304 * many "children" we have? 3305 */ 3306 vscsi->request_limit = INITIAL_SRP_LIMIT; 3307 rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit, 3308 SRP_MAX_IU_LEN); 3309 if (rc) 3310 goto rem_list; 3311 3312 vscsi->target.ldata = vscsi; 3313 3314 rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit); 3315 if (rc) { 3316 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n", 3317 rc, vscsi->request_limit); 3318 goto free_target; 3319 } 3320 3321 /* 3322 * Note: the lock is used in freeing timers, so must initialize 3323 * first so that ordering in case of error is correct. 3324 */ 3325 spin_lock_init(&vscsi->intr_lock); 3326 3327 rc = ibmvscsis_alloctimer(vscsi); 3328 if (rc) { 3329 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc); 3330 goto free_cmds; 3331 } 3332 3333 rc = ibmvscsis_create_command_q(vscsi, 256); 3334 if (rc) { 3335 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n", 3336 rc); 3337 goto free_timer; 3338 } 3339 3340 vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 3341 if (!vscsi->map_buf) { 3342 rc = -ENOMEM; 3343 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n"); 3344 goto destroy_queue; 3345 } 3346 3347 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE, 3348 DMA_BIDIRECTIONAL); 3349 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) { 3350 rc = -ENOMEM; 3351 dev_err(&vscsi->dev, "probe: error mapping command buffer\n"); 3352 goto free_buf; 3353 } 3354 3355 hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, 3356 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, 3357 0); 3358 if (hrc == H_SUCCESS) 3359 vscsi->client_data.partition_number = 3360 be64_to_cpu(*(u64 *)vscsi->map_buf); 3361 /* 3362 * We expect the VIOCTL to fail if we're configured as "any 3363 * client can connect" and the client isn't activated yet. 3364 * We'll make the call again when he sends an init msg. 3365 */ 3366 pr_debug("probe hrc %ld, client partition num %d\n", 3367 hrc, vscsi->client_data.partition_number); 3368 3369 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq, 3370 (unsigned long)vscsi); 3371 3372 init_completion(&vscsi->wait_idle); 3373 init_completion(&vscsi->unconfig); 3374 3375 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); 3376 vscsi->work_q = create_workqueue(wq_name); 3377 if (!vscsi->work_q) { 3378 rc = -ENOMEM; 3379 dev_err(&vscsi->dev, "create_workqueue failed\n"); 3380 goto unmap_buf; 3381 } 3382 3383 rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi); 3384 if (rc) { 3385 rc = -EPERM; 3386 dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc); 3387 goto destroy_WQ; 3388 } 3389 3390 vscsi->state = WAIT_ENABLED; 3391 3392 dev_set_drvdata(&vdev->dev, vscsi); 3393 3394 return 0; 3395 3396 destroy_WQ: 3397 destroy_workqueue(vscsi->work_q); 3398 unmap_buf: 3399 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, 3400 DMA_BIDIRECTIONAL); 3401 free_buf: 3402 kfree(vscsi->map_buf); 3403 destroy_queue: 3404 tasklet_kill(&vscsi->work_task); 3405 ibmvscsis_unregister_command_q(vscsi); 3406 ibmvscsis_destroy_command_q(vscsi); 3407 free_timer: 3408 ibmvscsis_freetimer(vscsi); 3409 free_cmds: 3410 ibmvscsis_free_cmds(vscsi); 3411 free_target: 3412 srp_target_free(&vscsi->target); 3413 rem_list: 3414 spin_lock_bh(&ibmvscsis_dev_lock); 3415 list_del(&vscsi->list); 3416 spin_unlock_bh(&ibmvscsis_dev_lock); 3417 free_adapter: 3418 kfree(vscsi); 3419 3420 return rc; 3421 } 3422 3423 static int ibmvscsis_remove(struct vio_dev *vdev) 3424 { 3425 struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev); 3426 3427 pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); 3428 3429 spin_lock_bh(&vscsi->intr_lock); 3430 ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); 3431 vscsi->flags |= CFG_SLEEPING; 3432 spin_unlock_bh(&vscsi->intr_lock); 3433 wait_for_completion(&vscsi->unconfig); 3434 3435 vio_disable_interrupts(vdev); 3436 free_irq(vdev->irq, vscsi); 3437 destroy_workqueue(vscsi->work_q); 3438 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, 3439 DMA_BIDIRECTIONAL); 3440 kfree(vscsi->map_buf); 3441 tasklet_kill(&vscsi->work_task); 3442 ibmvscsis_destroy_command_q(vscsi); 3443 ibmvscsis_freetimer(vscsi); 3444 ibmvscsis_free_cmds(vscsi); 3445 srp_target_free(&vscsi->target); 3446 spin_lock_bh(&ibmvscsis_dev_lock); 3447 list_del(&vscsi->list); 3448 spin_unlock_bh(&ibmvscsis_dev_lock); 3449 kfree(vscsi); 3450 3451 return 0; 3452 } 3453 3454 static ssize_t system_id_show(struct device *dev, 3455 struct device_attribute *attr, char *buf) 3456 { 3457 return snprintf(buf, PAGE_SIZE, "%s\n", system_id); 3458 } 3459 3460 static ssize_t partition_number_show(struct device *dev, 3461 struct device_attribute *attr, char *buf) 3462 { 3463 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number); 3464 } 3465 3466 static ssize_t unit_address_show(struct device *dev, 3467 struct device_attribute *attr, char *buf) 3468 { 3469 struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev); 3470 3471 return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address); 3472 } 3473 3474 static int ibmvscsis_get_system_info(void) 3475 { 3476 struct device_node *rootdn, *vdevdn; 3477 const char *id, *model, *name; 3478 const uint *num; 3479 3480 rootdn = of_find_node_by_path("/"); 3481 if (!rootdn) 3482 return -ENOENT; 3483 3484 model = of_get_property(rootdn, "model", NULL); 3485 id = of_get_property(rootdn, "system-id", NULL); 3486 if (model && id) 3487 snprintf(system_id, sizeof(system_id), "%s-%s", model, id); 3488 3489 name = of_get_property(rootdn, "ibm,partition-name", NULL); 3490 if (name) 3491 strncpy(partition_name, name, sizeof(partition_name)); 3492 3493 num = of_get_property(rootdn, "ibm,partition-no", NULL); 3494 if (num) 3495 partition_number = of_read_number(num, 1); 3496 3497 of_node_put(rootdn); 3498 3499 vdevdn = of_find_node_by_path("/vdevice"); 3500 if (vdevdn) { 3501 const uint *mvds; 3502 3503 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size", 3504 NULL); 3505 if (mvds) 3506 max_vdma_size = *mvds; 3507 of_node_put(vdevdn); 3508 } 3509 3510 return 0; 3511 } 3512 3513 static char *ibmvscsis_get_fabric_name(void) 3514 { 3515 return "ibmvscsis"; 3516 } 3517 3518 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg) 3519 { 3520 struct ibmvscsis_tport *tport = 3521 container_of(se_tpg, struct ibmvscsis_tport, se_tpg); 3522 3523 return tport->tport_name; 3524 } 3525 3526 static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg) 3527 { 3528 struct ibmvscsis_tport *tport = 3529 container_of(se_tpg, struct ibmvscsis_tport, se_tpg); 3530 3531 return tport->tport_tpgt; 3532 } 3533 3534 static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg) 3535 { 3536 return 1; 3537 } 3538 3539 static int ibmvscsis_check_true(struct se_portal_group *se_tpg) 3540 { 3541 return 1; 3542 } 3543 3544 static int ibmvscsis_check_false(struct se_portal_group *se_tpg) 3545 { 3546 return 0; 3547 } 3548 3549 static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg) 3550 { 3551 return 1; 3552 } 3553 3554 static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd) 3555 { 3556 return target_put_sess_cmd(se_cmd); 3557 } 3558 3559 static void ibmvscsis_release_cmd(struct se_cmd *se_cmd) 3560 { 3561 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3562 se_cmd); 3563 struct scsi_info *vscsi = cmd->adapter; 3564 3565 spin_lock_bh(&vscsi->intr_lock); 3566 /* Remove from active_q */ 3567 list_move_tail(&cmd->list, &vscsi->waiting_rsp); 3568 ibmvscsis_send_messages(vscsi); 3569 spin_unlock_bh(&vscsi->intr_lock); 3570 } 3571 3572 static u32 ibmvscsis_sess_get_index(struct se_session *se_sess) 3573 { 3574 return 0; 3575 } 3576 3577 static int ibmvscsis_write_pending(struct se_cmd *se_cmd) 3578 { 3579 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3580 se_cmd); 3581 struct iu_entry *iue = cmd->iue; 3582 int rc; 3583 3584 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3585 1, 1); 3586 if (rc) { 3587 pr_err("srp_transfer_data() failed: %d\n", rc); 3588 return -EIO; 3589 } 3590 /* 3591 * We now tell TCM to add this WRITE CDB directly into the TCM storage 3592 * object execution queue. 3593 */ 3594 target_execute_cmd(se_cmd); 3595 return 0; 3596 } 3597 3598 static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd) 3599 { 3600 return 0; 3601 } 3602 3603 static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl) 3604 { 3605 } 3606 3607 static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd) 3608 { 3609 return 0; 3610 } 3611 3612 static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) 3613 { 3614 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3615 se_cmd); 3616 struct iu_entry *iue = cmd->iue; 3617 struct scsi_info *vscsi = cmd->adapter; 3618 char *sd; 3619 uint len = 0; 3620 int rc; 3621 3622 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1, 3623 1); 3624 if (rc) { 3625 pr_err("srp_transfer_data failed: %d\n", rc); 3626 sd = se_cmd->sense_buffer; 3627 se_cmd->scsi_sense_length = 18; 3628 memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); 3629 /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ 3630 scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR, 3631 0x08, 0x01); 3632 } 3633 3634 srp_build_response(vscsi, cmd, &len); 3635 cmd->rsp.format = SRP_FORMAT; 3636 cmd->rsp.len = len; 3637 3638 return 0; 3639 } 3640 3641 static int ibmvscsis_queue_status(struct se_cmd *se_cmd) 3642 { 3643 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3644 se_cmd); 3645 struct scsi_info *vscsi = cmd->adapter; 3646 uint len; 3647 3648 pr_debug("queue_status %p\n", se_cmd); 3649 3650 srp_build_response(vscsi, cmd, &len); 3651 cmd->rsp.format = SRP_FORMAT; 3652 cmd->rsp.len = len; 3653 3654 return 0; 3655 } 3656 3657 static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) 3658 { 3659 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3660 se_cmd); 3661 struct scsi_info *vscsi = cmd->adapter; 3662 uint len; 3663 3664 pr_debug("queue_tm_rsp %p, status %d\n", 3665 se_cmd, (int)se_cmd->se_tmr_req->response); 3666 3667 srp_build_response(vscsi, cmd, &len); 3668 cmd->rsp.format = SRP_FORMAT; 3669 cmd->rsp.len = len; 3670 } 3671 3672 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd) 3673 { 3674 /* TBD: What (if anything) should we do here? */ 3675 pr_debug("ibmvscsis_aborted_task %p\n", se_cmd); 3676 } 3677 3678 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf, 3679 struct config_group *group, 3680 const char *name) 3681 { 3682 struct ibmvscsis_tport *tport; 3683 3684 tport = ibmvscsis_lookup_port(name); 3685 if (tport) { 3686 tport->tport_proto_id = SCSI_PROTOCOL_SRP; 3687 pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n", 3688 name, tport, tport->tport_proto_id); 3689 return &tport->tport_wwn; 3690 } 3691 3692 return ERR_PTR(-EINVAL); 3693 } 3694 3695 static void ibmvscsis_drop_tport(struct se_wwn *wwn) 3696 { 3697 struct ibmvscsis_tport *tport = container_of(wwn, 3698 struct ibmvscsis_tport, 3699 tport_wwn); 3700 3701 pr_debug("drop_tport(%s)\n", 3702 config_item_name(&tport->tport_wwn.wwn_group.cg_item)); 3703 } 3704 3705 static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, 3706 struct config_group *group, 3707 const char *name) 3708 { 3709 struct ibmvscsis_tport *tport = 3710 container_of(wwn, struct ibmvscsis_tport, tport_wwn); 3711 int rc; 3712 3713 tport->releasing = false; 3714 3715 rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg, 3716 tport->tport_proto_id); 3717 if (rc) 3718 return ERR_PTR(rc); 3719 3720 return &tport->se_tpg; 3721 } 3722 3723 static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg) 3724 { 3725 struct ibmvscsis_tport *tport = container_of(se_tpg, 3726 struct ibmvscsis_tport, 3727 se_tpg); 3728 3729 tport->releasing = true; 3730 tport->enabled = false; 3731 3732 /* 3733 * Release the virtual I_T Nexus for this ibmvscsis TPG 3734 */ 3735 ibmvscsis_drop_nexus(tport); 3736 /* 3737 * Deregister the se_tpg from TCM.. 3738 */ 3739 core_tpg_deregister(se_tpg); 3740 } 3741 3742 static ssize_t ibmvscsis_wwn_version_show(struct config_item *item, 3743 char *page) 3744 { 3745 return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION); 3746 } 3747 CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version); 3748 3749 static struct configfs_attribute *ibmvscsis_wwn_attrs[] = { 3750 &ibmvscsis_wwn_attr_version, 3751 NULL, 3752 }; 3753 3754 static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item, 3755 char *page) 3756 { 3757 struct se_portal_group *se_tpg = to_tpg(item); 3758 struct ibmvscsis_tport *tport = container_of(se_tpg, 3759 struct ibmvscsis_tport, 3760 se_tpg); 3761 3762 return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0); 3763 } 3764 3765 static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, 3766 const char *page, size_t count) 3767 { 3768 struct se_portal_group *se_tpg = to_tpg(item); 3769 struct ibmvscsis_tport *tport = container_of(se_tpg, 3770 struct ibmvscsis_tport, 3771 se_tpg); 3772 struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport); 3773 unsigned long tmp; 3774 int rc; 3775 long lrc; 3776 3777 rc = kstrtoul(page, 0, &tmp); 3778 if (rc < 0) { 3779 pr_err("Unable to extract srpt_tpg_store_enable\n"); 3780 return -EINVAL; 3781 } 3782 3783 if ((tmp != 0) && (tmp != 1)) { 3784 pr_err("Illegal value for srpt_tpg_store_enable\n"); 3785 return -EINVAL; 3786 } 3787 3788 if (tmp) { 3789 spin_lock_bh(&vscsi->intr_lock); 3790 tport->enabled = true; 3791 lrc = ibmvscsis_enable_change_state(vscsi); 3792 if (lrc) 3793 pr_err("enable_change_state failed, rc %ld state %d\n", 3794 lrc, vscsi->state); 3795 spin_unlock_bh(&vscsi->intr_lock); 3796 } else { 3797 spin_lock_bh(&vscsi->intr_lock); 3798 tport->enabled = false; 3799 /* This simulates the server going down */ 3800 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); 3801 spin_unlock_bh(&vscsi->intr_lock); 3802 } 3803 3804 pr_debug("tpg_enable_store, tmp %ld, state %d\n", tmp, vscsi->state); 3805 3806 return count; 3807 } 3808 CONFIGFS_ATTR(ibmvscsis_tpg_, enable); 3809 3810 static struct configfs_attribute *ibmvscsis_tpg_attrs[] = { 3811 &ibmvscsis_tpg_attr_enable, 3812 NULL, 3813 }; 3814 3815 static const struct target_core_fabric_ops ibmvscsis_ops = { 3816 .module = THIS_MODULE, 3817 .name = "ibmvscsis", 3818 .get_fabric_name = ibmvscsis_get_fabric_name, 3819 .tpg_get_wwn = ibmvscsis_get_fabric_wwn, 3820 .tpg_get_tag = ibmvscsis_get_tag, 3821 .tpg_get_default_depth = ibmvscsis_get_default_depth, 3822 .tpg_check_demo_mode = ibmvscsis_check_true, 3823 .tpg_check_demo_mode_cache = ibmvscsis_check_true, 3824 .tpg_check_demo_mode_write_protect = ibmvscsis_check_false, 3825 .tpg_check_prod_mode_write_protect = ibmvscsis_check_false, 3826 .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index, 3827 .check_stop_free = ibmvscsis_check_stop_free, 3828 .release_cmd = ibmvscsis_release_cmd, 3829 .sess_get_index = ibmvscsis_sess_get_index, 3830 .write_pending = ibmvscsis_write_pending, 3831 .write_pending_status = ibmvscsis_write_pending_status, 3832 .set_default_node_attributes = ibmvscsis_set_default_node_attrs, 3833 .get_cmd_state = ibmvscsis_get_cmd_state, 3834 .queue_data_in = ibmvscsis_queue_data_in, 3835 .queue_status = ibmvscsis_queue_status, 3836 .queue_tm_rsp = ibmvscsis_queue_tm_rsp, 3837 .aborted_task = ibmvscsis_aborted_task, 3838 /* 3839 * Setup function pointers for logic in target_core_fabric_configfs.c 3840 */ 3841 .fabric_make_wwn = ibmvscsis_make_tport, 3842 .fabric_drop_wwn = ibmvscsis_drop_tport, 3843 .fabric_make_tpg = ibmvscsis_make_tpg, 3844 .fabric_drop_tpg = ibmvscsis_drop_tpg, 3845 3846 .tfc_wwn_attrs = ibmvscsis_wwn_attrs, 3847 .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs, 3848 }; 3849 3850 static void ibmvscsis_dev_release(struct device *dev) {}; 3851 3852 static struct class_attribute ibmvscsis_class_attrs[] = { 3853 __ATTR_NULL, 3854 }; 3855 3856 static struct device_attribute dev_attr_system_id = 3857 __ATTR(system_id, S_IRUGO, system_id_show, NULL); 3858 3859 static struct device_attribute dev_attr_partition_number = 3860 __ATTR(partition_number, S_IRUGO, partition_number_show, NULL); 3861 3862 static struct device_attribute dev_attr_unit_address = 3863 __ATTR(unit_address, S_IRUGO, unit_address_show, NULL); 3864 3865 static struct attribute *ibmvscsis_dev_attrs[] = { 3866 &dev_attr_system_id.attr, 3867 &dev_attr_partition_number.attr, 3868 &dev_attr_unit_address.attr, 3869 }; 3870 ATTRIBUTE_GROUPS(ibmvscsis_dev); 3871 3872 static struct class ibmvscsis_class = { 3873 .name = "ibmvscsis", 3874 .dev_release = ibmvscsis_dev_release, 3875 .class_attrs = ibmvscsis_class_attrs, 3876 .dev_groups = ibmvscsis_dev_groups, 3877 }; 3878 3879 static struct vio_device_id ibmvscsis_device_table[] = { 3880 { "v-scsi-host", "IBM,v-scsi-host" }, 3881 { "", "" } 3882 }; 3883 MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table); 3884 3885 static struct vio_driver ibmvscsis_driver = { 3886 .name = "ibmvscsis", 3887 .id_table = ibmvscsis_device_table, 3888 .probe = ibmvscsis_probe, 3889 .remove = ibmvscsis_remove, 3890 }; 3891 3892 /* 3893 * ibmvscsis_init() - Kernel Module initialization 3894 * 3895 * Note: vio_register_driver() registers callback functions, and at least one 3896 * of those callback functions calls TCM - Linux IO Target Subsystem, thus 3897 * the SCSI Target template must be registered before vio_register_driver() 3898 * is called. 3899 */ 3900 static int __init ibmvscsis_init(void) 3901 { 3902 int rc = 0; 3903 3904 rc = ibmvscsis_get_system_info(); 3905 if (rc) { 3906 pr_err("rc %d from get_system_info\n", rc); 3907 goto out; 3908 } 3909 3910 rc = class_register(&ibmvscsis_class); 3911 if (rc) { 3912 pr_err("failed class register\n"); 3913 goto out; 3914 } 3915 3916 rc = target_register_template(&ibmvscsis_ops); 3917 if (rc) { 3918 pr_err("rc %d from target_register_template\n", rc); 3919 goto unregister_class; 3920 } 3921 3922 rc = vio_register_driver(&ibmvscsis_driver); 3923 if (rc) { 3924 pr_err("rc %d from vio_register_driver\n", rc); 3925 goto unregister_target; 3926 } 3927 3928 return 0; 3929 3930 unregister_target: 3931 target_unregister_template(&ibmvscsis_ops); 3932 unregister_class: 3933 class_unregister(&ibmvscsis_class); 3934 out: 3935 return rc; 3936 } 3937 3938 static void __exit ibmvscsis_exit(void) 3939 { 3940 pr_info("Unregister IBM virtual SCSI host driver\n"); 3941 vio_unregister_driver(&ibmvscsis_driver); 3942 target_unregister_template(&ibmvscsis_ops); 3943 class_unregister(&ibmvscsis_class); 3944 } 3945 3946 MODULE_DESCRIPTION("IBMVSCSIS fabric driver"); 3947 MODULE_AUTHOR("Bryant G. Ly and Michael Cyr"); 3948 MODULE_LICENSE("GPL"); 3949 MODULE_VERSION(IBMVSCSIS_VERSION); 3950 module_init(ibmvscsis_init); 3951 module_exit(ibmvscsis_exit); 3952