1 /******************************************************************************* 2 * IBM Virtual SCSI Target Driver 3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. 4 * Santiago Leon (santil@us.ibm.com) IBM Corp. 5 * Linda Xie (lxie@us.ibm.com) IBM Corp. 6 * 7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org> 8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com> 11 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 
22 * 23 ****************************************************************************/ 24 25 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 26 27 #include <linux/module.h> 28 #include <linux/kernel.h> 29 #include <linux/slab.h> 30 #include <linux/types.h> 31 #include <linux/list.h> 32 #include <linux/string.h> 33 #include <linux/delay.h> 34 35 #include <target/target_core_base.h> 36 #include <target/target_core_fabric.h> 37 38 #include <asm/hvcall.h> 39 #include <asm/vio.h> 40 41 #include <scsi/viosrp.h> 42 43 #include "ibmvscsi_tgt.h" 44 45 #define IBMVSCSIS_VERSION "v0.2" 46 47 #define INITIAL_SRP_LIMIT 800 48 #define DEFAULT_MAX_SECTORS 256 49 #define MAX_TXU 1024 * 1024 50 51 static uint max_vdma_size = MAX_H_COPY_RDMA; 52 53 static char system_id[SYS_ID_NAME_LEN] = ""; 54 static char partition_name[PARTITION_NAMELEN] = "UNKNOWN"; 55 static uint partition_number = -1; 56 57 /* Adapter list and lock to control it */ 58 static DEFINE_SPINLOCK(ibmvscsis_dev_lock); 59 static LIST_HEAD(ibmvscsis_dev_list); 60 61 static long ibmvscsis_parse_command(struct scsi_info *vscsi, 62 struct viosrp_crq *crq); 63 64 static void ibmvscsis_adapter_idle(struct scsi_info *vscsi); 65 66 static void ibmvscsis_determine_resid(struct se_cmd *se_cmd, 67 struct srp_rsp *rsp) 68 { 69 u32 residual_count = se_cmd->residual_count; 70 71 if (!residual_count) 72 return; 73 74 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { 75 if (se_cmd->data_direction == DMA_TO_DEVICE) { 76 /* residual data from an underflow write */ 77 rsp->flags = SRP_RSP_FLAG_DOUNDER; 78 rsp->data_out_res_cnt = cpu_to_be32(residual_count); 79 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 80 /* residual data from an underflow read */ 81 rsp->flags = SRP_RSP_FLAG_DIUNDER; 82 rsp->data_in_res_cnt = cpu_to_be32(residual_count); 83 } 84 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { 85 if (se_cmd->data_direction == DMA_TO_DEVICE) { 86 /* residual data from an overflow write */ 87 rsp->flags = SRP_RSP_FLAG_DOOVER; 
88 rsp->data_out_res_cnt = cpu_to_be32(residual_count); 89 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 90 /* residual data from an overflow read */ 91 rsp->flags = SRP_RSP_FLAG_DIOVER; 92 rsp->data_in_res_cnt = cpu_to_be32(residual_count); 93 } 94 } 95 } 96 97 /** 98 * connection_broken() - Determine if the connection to the client is good 99 * @vscsi: Pointer to our adapter structure 100 * 101 * This function attempts to send a ping MAD to the client. If the call to 102 * queue the request returns H_CLOSED then the connection has been broken 103 * and the function returns TRUE. 104 * 105 * EXECUTION ENVIRONMENT: 106 * Interrupt or Process environment 107 */ 108 static bool connection_broken(struct scsi_info *vscsi) 109 { 110 struct viosrp_crq *crq; 111 u64 buffer[2] = { 0, 0 }; 112 long h_return_code; 113 bool rc = false; 114 115 /* create a PING crq */ 116 crq = (struct viosrp_crq *)&buffer; 117 crq->valid = VALID_CMD_RESP_EL; 118 crq->format = MESSAGE_IN_CRQ; 119 crq->status = PING; 120 121 h_return_code = h_send_crq(vscsi->dds.unit_id, 122 cpu_to_be64(buffer[MSG_HI]), 123 cpu_to_be64(buffer[MSG_LOW])); 124 125 dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code); 126 127 if (h_return_code == H_CLOSED) 128 rc = true; 129 130 return rc; 131 } 132 133 /** 134 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue 135 * @vscsi: Pointer to our adapter structure 136 * 137 * This function calls h_free_q then frees the interrupt bit etc. 138 * It must release the lock before doing so because of the time it can take 139 * for h_free_crq in PHYP 140 * NOTE: the caller must make sure that state and or flags will prevent 141 * interrupt handler from scheduling work. 
 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
 * we can't do it here, because we don't have the lock
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
{
	long qrc;
	long rc = ADAPT_SUCCESS;
	int ticks = 0;

	/* retry h_free_crq until PHYP stops reporting busy, or hard error */
	do {
		qrc = h_free_crq(vscsi->dds.unit_id);
		switch (qrc) {
		case H_SUCCESS:
			spin_lock_bh(&vscsi->intr_lock);
			vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
			spin_unlock_bh(&vscsi->intr_lock);
			break;

		case H_HARDWARE:
		case H_PARAMETER:
			dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
				qrc);
			rc = ERROR;
			break;

		case H_BUSY:
		case H_LONG_BUSY_ORDER_1_MSEC:
			/* msleep not good for small values */
			usleep_range(1000, 2000);
			ticks += 1;
			break;
		case H_LONG_BUSY_ORDER_10_MSEC:
			usleep_range(10000, 20000);
			ticks += 10;
			break;
		case H_LONG_BUSY_ORDER_100_MSEC:
			msleep(100);
			ticks += 100;
			break;
		case H_LONG_BUSY_ORDER_1_SEC:
			ssleep(1);
			ticks += 1000;
			break;
		case H_LONG_BUSY_ORDER_10_SEC:
			ssleep(10);
			ticks += 10000;
			break;
		case H_LONG_BUSY_ORDER_100_SEC:
			ssleep(100);
			ticks += 100000;
			break;
		default:
			dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
				qrc);
			rc = ERROR;
			break;
		}

		/*
		 * don't wait more than 300 seconds;
		 * ticks are in milliseconds, more or less
		 */
		if (ticks > 300000 && qrc != H_SUCCESS) {
			rc = ERROR;
			dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
		}
	} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);

	dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);

	return rc;
}

/**
 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 * @vscsi:		Pointer to our adapter structure
 * @client_closed:	True if client closed its queue
 *
 * Deletes information specific to the client when the client goes away
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process
 */
static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
					 bool client_closed)
{
	vscsi->client_cap = 0;

	/*
	 * Some things we don't want to clear if we're closing the queue,
	 * because some clients don't resend the host handshake when they
	 * get a transport event.
	 */
	if (client_closed)
		vscsi->client_data.os_type = 0;
}

/**
 * ibmvscsis_free_command_q() - Free Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls unregister_command_q, then clears interrupts and
 * any pending interrupt acknowledgments associated with the command q.
 * It also clears memory if there is no error.
 *
 * PHYP did not meet the PAPR architecture so that we must give up the
 * lock. This causes a timing hole regarding state change. To close the
 * hole this routine does accounting on any change that occurred during
 * the time the lock is not held.
 * NOTE: must give up and then acquire the interrupt lock, the caller must
 * make sure that state and or flags will prevent interrupt handler from
 * scheduling work.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level, interrupt lock is held
 */
static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
{
	int bytes;
	u32 flags_under_lock;
	u16 state_under_lock;
	long rc = ADAPT_SUCCESS;

	if (!(vscsi->flags & CRQ_CLOSED)) {
		vio_disable_interrupts(vscsi->dma_dev);

		/*
		 * Snapshot state/flags before dropping the lock so any
		 * change made while unregistering can be detected below.
		 */
		state_under_lock = vscsi->new_state;
		flags_under_lock = vscsi->flags;
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		spin_unlock_bh(&vscsi->intr_lock);
		rc = ibmvscsis_unregister_command_q(vscsi);
		spin_lock_bh(&vscsi->intr_lock);

		/* record anything that changed while the lock was dropped */
		if (state_under_lock != vscsi->new_state)
			vscsi->phyp_acr_state = vscsi->new_state;

		vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);

		if (rc == ADAPT_SUCCESS) {
			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			memset(vscsi->cmd_q.base_addr, 0, bytes);
			vscsi->cmd_q.index = 0;
			vscsi->flags |= CRQ_CLOSED;

			ibmvscsis_delete_client_info(vscsi, false);
		}

		dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			vscsi->phyp_acr_state);
	}
	return rc;
}

/**
 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 * @mask:		Mask to use in case index wraps
 * @current_index:	Current index into command queue
 * @base_addr:		Pointer to start of command queue
 *
 * Returns a pointer to a valid command element or NULL, if the command
 * queue is empty
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment, interrupt lock held
 */
static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
						  uint *current_index,
						  struct viosrp_crq *base_addr)
{
	struct viosrp_crq *ptr;

	ptr = base_addr + *current_index;

	if (ptr->valid) {
		*current_index = (*current_index + 1) & mask;
		dma_rmb();
	} else {
		ptr = NULL;
	}

	return ptr;
}

/**
 * ibmvscsis_send_init_message() - send initialize message to the client
 * @vscsi:	Pointer to our adapter structure
 * @format:	Which Init Message format to send
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment interrupt lock held
 */
static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	/* build the init CRQ in a local buffer and hand it to the hypervisor */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_INIT_MSG;
	crq->format = format;
	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	return rc;
}

/**
 * ibmvscsis_check_init_msg() - Check init message valid
 * @vscsi:	Pointer to our adapter structure
 * @format:	Pointer to return format of Init Message, if any.
 *		Set to UNUSED_FORMAT if no Init Message in queue.
 *
 * Checks if an initialize message was queued by the initiator
 * after the queue was created and before the interrupt was enabled.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only, interrupt lock held
 */
static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
{
	struct viosrp_crq *crq;
	long rc = ADAPT_SUCCESS;

	crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
				      vscsi->cmd_q.base_addr);
	if (!crq) {
		*format = (uint)UNUSED_FORMAT;
	} else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
		*format = (uint)INIT_MSG;
		crq->valid = INVALIDATE_CMD_RESP_EL;
		dma_rmb();

		/*
		 * the caller has ensured no initialize message was
		 * sent after the queue was
		 * created so there should be no other message on the queue.
		 */
		crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
					      &vscsi->cmd_q.index,
					      vscsi->cmd_q.base_addr);
		if (crq) {
			/* unexpected extra element: report its format */
			*format = (uint)(crq->format);
			rc = ERROR;
			crq->valid = INVALIDATE_CMD_RESP_EL;
			dma_rmb();
		}
	} else {
		/* first element was not an init message */
		*format = (uint)(crq->format);
		rc = ERROR;
		crq->valid = INVALIDATE_CMD_RESP_EL;
		dma_rmb();
	}

	return rc;
}

/**
 * ibmvscsis_disconnect() - Helper function to disconnect
 * @work:	Pointer to work_struct, gives access to our adapter structure
 *
 * An error has occurred or the driver received a Transport event,
 * and the driver is requesting that the command queue be de-registered
 * in a safe manner. If there is no outstanding I/O then we can stop the
 * queue. If we are restarting the queue it will be reflected in the
 * state of the adapter.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment
 */
static void ibmvscsis_disconnect(struct work_struct *work)
{
	struct scsi_info *vscsi = container_of(work, struct scsi_info,
					       proc_work);
	u16 new_state;
	bool wait_idle = false;

	spin_lock_bh(&vscsi->intr_lock);
	new_state = vscsi->new_state;
	vscsi->new_state = 0;

	vscsi->flags |= DISCONNECT_SCHEDULED;
	vscsi->flags &= ~SCHEDULE_DISCONNECT;

	dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
		vscsi->flags, vscsi->state);

	/*
	 * check which state we are in and see if we
	 * should transition to the new state
	 */
	switch (vscsi->state) {
	/* Should never be called while in this state. */
	case NO_QUEUE:
	/*
	 * Can never transition from this state;
	 * ignore errors and logout.
	 */
	case UNCONFIGURING:
		break;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECT:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;

	/*
	 * Can transition from this state to unconfiguring
	 * or err disconnect.
	 */
	case ERR_DISCONNECT_RECONNECT:
		switch (new_state) {
		case UNCONFIGURING:
		case ERR_DISCONNECT:
			vscsi->state = new_state;
			break;

		case WAIT_IDLE:
			break;
		default:
			break;
		}
		break;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECTED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;

	case WAIT_ENABLED:
		switch (new_state) {
		case UNCONFIGURING:
			vscsi->state = new_state;
			vscsi->flags |= RESPONSE_Q_DOWN;
			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
					  DISCONNECT_SCHEDULED);
			dma_rmb();
			/* wake anyone sleeping in the configure path */
			if (vscsi->flags & CFG_SLEEPING) {
				vscsi->flags &= ~CFG_SLEEPING;
				complete(&vscsi->unconfig);
			}
			break;

		/* should never happen */
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
				vscsi->state);
			break;
		}
		break;

	case WAIT_IDLE:
		switch (new_state) {
		case UNCONFIGURING:
			vscsi->flags |= RESPONSE_Q_DOWN;
			vscsi->state = new_state;
			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
					  DISCONNECT_SCHEDULED);
			ibmvscsis_free_command_q(vscsi);
			break;
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
			vscsi->state = new_state;
			break;
		}
		break;

	/*
	 * Initiator has not done a successful srp login
	 * or has done a successful srp logout ( adapter was not
	 * busy). In the first case there can be responses queued
	 * waiting for space on the initiators response queue (MAD)
	 * The second case the adapter is idle. Assume the worst case,
	 * i.e. the first case.
	 */
	case WAIT_CONNECTION:
	case CONNECTED:
	case SRP_PROCESSING:
		wait_idle = true;
		vscsi->state = new_state;
		break;

	/* can transition from this state to UNCONFIGURING */
	case UNDEFINED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;
	default:
		break;
	}

	if (wait_idle) {
		dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
			(int)list_empty(&vscsi->active_q),
			(int)list_empty(&vscsi->schedule_q));
		if (!list_empty(&vscsi->active_q) ||
		    !list_empty(&vscsi->schedule_q)) {
			vscsi->flags |= WAIT_FOR_IDLE;
			dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
				vscsi->flags);
			/*
			 * wait_for_completion() cannot be called with the
			 * interrupt lock held; drop it across the wait.
			 */
			spin_unlock_bh(&vscsi->intr_lock);
			wait_for_completion(&vscsi->wait_idle);
			spin_lock_bh(&vscsi->intr_lock);
		}
		dev_dbg(&vscsi->dev, "disconnect stop wait\n");

		ibmvscsis_adapter_idle(vscsi);
	}

	spin_unlock_bh(&vscsi->intr_lock);
}

/**
 * ibmvscsis_post_disconnect() - Schedule the disconnect
 * @vscsi:	Pointer to our adapter structure
 * @new_state:	State to move to after disconnecting
 * @flag_bits:	Flags to turn on in adapter structure
 *
 * If it's already been scheduled, then see if we need to "upgrade"
 * the new state (if the one passed in is more "severe" than the
 * previous one).
 *
 * PRECONDITION:
 *	interrupt lock is held
 */
static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
				      uint flag_bits)
{
	uint state;

	/* check the validity of the new state */
	switch (new_state) {
	case UNCONFIGURING:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	case WAIT_IDLE:
		break;

	default:
		dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
			new_state);
		return;
	}

	vscsi->flags |= flag_bits;

	dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
		new_state, flag_bits, vscsi->flags, vscsi->state);

	if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
		/* nothing scheduled yet: queue the disconnect work item */
		vscsi->flags |= SCHEDULE_DISCONNECT;
		vscsi->new_state = new_state;

		INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
		(void)queue_work(vscsi->work_q, &vscsi->proc_work);
	} else {
		/* already scheduled: possibly upgrade the pending state */
		if (vscsi->new_state)
			state = vscsi->new_state;
		else
			state = vscsi->state;

		switch (state) {
		case NO_QUEUE:
		case UNCONFIGURING:
			break;

		case ERR_DISCONNECTED:
		case ERR_DISCONNECT:
		case UNDEFINED:
			if (new_state == UNCONFIGURING)
				vscsi->new_state = new_state;
			break;

		case ERR_DISCONNECT_RECONNECT:
			switch (new_state) {
			case UNCONFIGURING:
			case ERR_DISCONNECT:
				vscsi->new_state = new_state;
				break;
			default:
				break;
			}
			break;

		case WAIT_ENABLED:
		case WAIT_IDLE:
		case WAIT_CONNECTION:
		case CONNECTED:
		case SRP_PROCESSING:
			vscsi->new_state = new_state;
			break;

		default:
			break;
		}
	}

	dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
		vscsi->flags, vscsi->new_state);
}

/**
 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be
 * called with interrupt lock held.
 */
static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
	case NO_QUEUE:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	case ERR_DISCONNECTED:
	case UNCONFIGURING:
	case UNDEFINED:
		rc = ERROR;
		break;

	case WAIT_CONNECTION:
		/* only valid transition: handshake completes the connection */
		vscsi->state = CONNECTED;
		break;

	case WAIT_IDLE:
	case SRP_PROCESSING:
	case CONNECTED:
	case WAIT_ENABLED:
	default:
		rc = ERROR;
		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_handle_init_msg() - Respond to an Init Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
	case WAIT_CONNECTION:
		rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
		switch (rc) {
		case H_SUCCESS:
			vscsi->state = CONNECTED;
			break;

		case H_PARAMETER:
			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
				rc);
			ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
			break;

		case H_DROPPED:
			dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
				rc);
			rc = ERROR;
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;

		case H_CLOSED:
			/* client went away; not an adapter error */
			dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
				 rc);
			rc = 0;
			break;
		}
		break;

	case UNDEFINED:
		rc = ERROR;
		break;

	case UNCONFIGURING:
		break;

	case WAIT_ENABLED:
	case CONNECTED:
	case SRP_PROCESSING:
	case WAIT_IDLE:
	case NO_QUEUE:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	case ERR_DISCONNECTED:
	default:
		rc = ERROR;
		dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_init_msg() - Respond to an init message
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the Init Message
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);

	/* fetch the partner partition number; failure is non-fatal */
	rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
		      (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
		      0);
	if (rc == H_SUCCESS) {
		vscsi->client_data.partition_number =
			be64_to_cpu(*(u64 *)vscsi->map_buf);
		dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
			vscsi->client_data.partition_number);
	} else {
		dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
		rc = ADAPT_SUCCESS;
	}

	if (crq->format == INIT_MSG) {
		rc = ibmvscsis_handle_init_msg(vscsi);
	} else if (crq->format == INIT_COMPLETE_MSG) {
		rc = ibmvscsis_handle_init_compl_msg(vscsi);
	} else {
		rc = ERROR;
		dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
			(uint)crq->format);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}

	return rc;
}

/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;
	uint format;

	/* H_NOT_FOUND just means the firmware lacks this facility */
	rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
		      0, 0, 0, 0);
	if (rc == H_SUCCESS)
		vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
	else if (rc != H_NOT_FOUND)
		dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
			rc);

	/* reset per-connection accounting */
	vscsi->flags &= PRESERVE_FLAG_FIELDS;
	vscsi->rsp_q_timer.timer_pops = 0;
	vscsi->debit = 0;
	vscsi->credit = 0;

	rc = vio_enable_interrupts(vscsi->dma_dev);
	if (rc) {
		dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
			 rc);
		return rc;
	}

	/* pick up any init message queued before interrupts were enabled */
	rc = ibmvscsis_check_init_msg(vscsi, &format);
	if (rc) {
		dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
			rc);
		return rc;
	}

	if (format == UNUSED_FORMAT) {
		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
		switch (rc) {
		case H_SUCCESS:
		case H_DROPPED:
		case H_CLOSED:
			rc = ADAPT_SUCCESS;
			break;

		case H_PARAMETER:
		case H_HARDWARE:
			break;

		default:
			vscsi->state = UNDEFINED;
			rc = H_HARDWARE;
			break;
		}
	} else if (format == INIT_MSG) {
		rc = ibmvscsis_handle_init_msg(vscsi);
	}

	return rc;
}

/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq. A problem was discovered
 * where one partition would close and reopen his queue, which would
 * cause his partner to get a transport event, which would cause him to
 * close and reopen his queue, which would cause the original partition
 * to get a transport event, etc., etc. To prevent this, we don't
 * actually close our queue if the client initiated the reset, (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is gone)
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment, called with interrupt lock held
 */
static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);

	/* don't reset, the client did it for us */
	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
		vscsi->flags &= PRESERVE_FLAG_FIELDS;
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		vscsi->state = WAIT_CONNECTION;
		vio_enable_interrupts(vscsi->dma_dev);
	} else {
		rc = ibmvscsis_free_command_q(vscsi);
		if (rc == ADAPT_SUCCESS) {
			vscsi->state = WAIT_CONNECTION;

			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			rc = h_reg_crq(vscsi->dds.unit_id,
				       vscsi->cmd_q.crq_token, bytes);
			/* H_CLOSED means registered but client not there yet */
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = ibmvscsis_establish_new_q(vscsi);
			}

			if (rc != ADAPT_SUCCESS) {
				dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
					rc);

				vscsi->state = ERR_DISCONNECTED;
				vscsi->flags |= RESPONSE_Q_DOWN;
				ibmvscsis_free_command_q(vscsi);
			}
		} else {
			vscsi->state = ERR_DISCONNECTED;
			vscsi->flags |= RESPONSE_Q_DOWN;
		}
	}
}

/**
 * ibmvscsis_free_cmd_resources() - Free command resources
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command which is no longer in use
 *
 * Must be called with interrupt lock held.
 */
static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
					 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;

	switch (cmd->type) {
	case TASK_MANAGEMENT:
	case SCSI_CDB:
		/*
		 * When the queue goes down this value is cleared, so it
		 * cannot be cleared in this general purpose function.
		 */
		if (vscsi->debit)
			vscsi->debit -= 1;
		break;
	case ADAPTER_MAD:
		vscsi->flags &= ~PROCESSING_MAD;
		break;
	case UNSET_TYPE:
		break;
	default:
		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
			cmd->type);
		break;
	}

	/* return the command to the free pool and release its IU */
	cmd->iue = NULL;
	list_add_tail(&cmd->list, &vscsi->free_cmd);
	srp_iu_put(iue);

	/* if this was the last outstanding command, wake any idle waiter */
	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
		vscsi->flags &= ~WAIT_FOR_IDLE;
		complete(&vscsi->wait_idle);
	}
}

/**
 * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
 * @vscsi:	Pointer to our adapter structure
 * @idle:	Indicates whether we were called from adapter_idle.  This
 *		is important to know if we need to do a disconnect, since if
 *		we're called from adapter_idle, we're still processing the
 *		current disconnect, so we can't just call post_disconnect.
 *
 * This function is called when the adapter is idle when phyp has sent
 * us a Prepare for Suspend Transport Event.
 *
 * EXECUTION ENVIRONMENT:
 *	Process or interrupt environment called with interrupt lock held
 */
static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
{
	long rc = 0;
	struct viosrp_crq *crq;

	/* See if there is a Resume event in the queue */
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;

	dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
		vscsi->flags, vscsi->state, (int)crq->valid);

	if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
		/* not aborted and queue empty: tell phyp we are ready */
		rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
			      0, 0);
		if (rc) {
			dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
				rc);
			rc = 0;
		}
	} else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
		    (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
		   ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
				     (crq->format != RESUME_FROM_SUSP)))) {
		if (idle) {
			vscsi->state = ERR_DISCONNECT_RECONNECT;
			ibmvscsis_reset_queue(vscsi);
			rc = -1;
		} else if (vscsi->state == CONNECTED) {
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
		}

		vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;

		if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
				     (crq->format != RESUME_FROM_SUSP)))
			dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
	}

	vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);

	return rc;
}

/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus. This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_trans_event(struct scsi_info *vscsi,
				  struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
		(int)crq->format, vscsi->flags, vscsi->state);

	switch (crq->format) {
	case MIGRATED:
	case PARTNER_FAILED:
	case PARTNER_DEREGISTER:
		/* the partner is gone; drop its cached info */
		ibmvscsis_delete_client_info(vscsi, true);
		if (crq->format == MIGRATED)
			vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
		switch (vscsi->state) {
		case NO_QUEUE:
		case ERR_DISCONNECTED:
		case UNDEFINED:
			break;

		case UNCONFIGURING:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;

		case WAIT_ENABLED:
			break;

		case WAIT_CONNECTION:
			break;

		case CONNECTED:
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case SRP_PROCESSING:
			if ((vscsi->debit > 0) ||
			    !list_empty(&vscsi->schedule_q) ||
			    !list_empty(&vscsi->waiting_rsp) ||
			    !list_empty(&vscsi->active_q)) {
				dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
					vscsi->debit,
					(int)list_empty(&vscsi->schedule_q),
					(int)list_empty(&vscsi->waiting_rsp),
					(int)list_empty(&vscsi->active_q));
				dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
			} else {
				dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
			}

			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;
		}
		break;

	case PREPARE_FOR_SUSPEND:
		dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
			(int)crq->status);
		switch (vscsi->state) {
		case ERR_DISCONNECTED:
		case WAIT_CONNECTION:
		case CONNECTED:
			ibmvscsis_ready_for_suspend(vscsi, false);
			break;
		case SRP_PROCESSING:
			/* remember where to resume once the adapter is idle */
			vscsi->resume_state = vscsi->state;
			vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
			if (crq->status == CRQ_ENTRY_OVERWRITTEN)
				vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
			break;
		case NO_QUEUE:
		case UNDEFINED:
		case UNCONFIGURING:
		case WAIT_ENABLED:
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
				vscsi->state);
			break;
		}
		break;

	case RESUME_FROM_SUSP:
		dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
			(int)crq->status);
		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
			vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
		} else {
			if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
			    (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
							  0);
				vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
			}
		}
		break;

	default:
		rc = ERROR;
		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
			(uint)crq->format);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
					  RESPONSE_Q_DOWN);
		break;
	}

	/* caller only cares whether a disconnect got scheduled */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
		vscsi->flags, vscsi->state, rc);

	return rc;
}

/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.
 *
 * EXECUTION ENVIRONMENT:
 *	intr_lock must be held
 */
static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	/* volatile: re-read from the DMA'd queue page on every access */
	volatile u8 valid;

	dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
		vscsi->flags, vscsi->state, vscsi->cmd_q.index);

	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	/* order the valid-flag read before reading the element payload */
	dma_rmb();

	while (valid) {
poll_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, don't break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a transport event has occurred leave
				 * everything but transport events on the queue
				 */
				dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");

				/*
				 * need to decrement the queue index so we can
				 * look at the element again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		/* hand the element back to the client side of the queue */
		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
		}
		/*
		 * re-check after enabling interrupts: an element may have
		 * arrived in the window; if so, process it (goto jumps back
		 * into the loop body above)
		 */
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto poll_work;
	}

	dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}

/**
 * ibmvscsis_free_cmd_qs() - Free elements in queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Free all of the elements on all queues that are waiting for
 * whatever reason.
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
	struct ibmvscsis_cmd *cmd, *nxt;

	dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
		(int)list_empty(&vscsi->waiting_rsp),
		vscsi->rsp_q_timer.started);

	/* safe iterator: each element is unlinked and freed as we go */
	list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}
}

/**
 * ibmvscsis_get_free_cmd() - Get free command from list
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
1281 */ 1282 static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) 1283 { 1284 struct ibmvscsis_cmd *cmd = NULL; 1285 struct iu_entry *iue; 1286 1287 iue = srp_iu_get(&vscsi->target); 1288 if (iue) { 1289 cmd = list_first_entry_or_null(&vscsi->free_cmd, 1290 struct ibmvscsis_cmd, list); 1291 if (cmd) { 1292 if (cmd->abort_cmd) 1293 cmd->abort_cmd = NULL; 1294 cmd->flags &= ~(DELAY_SEND); 1295 list_del(&cmd->list); 1296 cmd->iue = iue; 1297 cmd->type = UNSET_TYPE; 1298 memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd)); 1299 } else { 1300 srp_iu_put(iue); 1301 } 1302 } 1303 1304 return cmd; 1305 } 1306 1307 /** 1308 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter 1309 * @vscsi: Pointer to our adapter structure 1310 * 1311 * This function is called when the adapter is idle when the driver 1312 * is attempting to clear an error condition. 1313 * The adapter is considered busy if any of its cmd queues 1314 * are non-empty. This function can be invoked 1315 * from the off level disconnect function. 
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment called with interrupt lock held
 */
static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
{
	int free_qs = false;
	long rc = 0;

	dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
		vscsi->flags, vscsi->state);

	/* Only need to free qs if we're disconnecting from client */
	if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
		free_qs = true;

	switch (vscsi->state) {
	case UNCONFIGURING:
		ibmvscsis_free_command_q(vscsi);
		dma_rmb();
		isync();
		/* wake the thread sleeping in the unconfigure path */
		if (vscsi->flags & CFG_SLEEPING) {
			vscsi->flags &= ~CFG_SLEEPING;
			complete(&vscsi->unconfig);
		}
		break;
	case ERR_DISCONNECT_RECONNECT:
		ibmvscsis_reset_queue(vscsi);
		dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
			vscsi->flags);
		break;

	case ERR_DISCONNECT:
		ibmvscsis_free_command_q(vscsi);
		vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
		vscsi->flags |= RESPONSE_Q_DOWN;
		if (vscsi->tport.enabled)
			vscsi->state = ERR_DISCONNECTED;
		else
			vscsi->state = WAIT_ENABLED;
		dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		break;

	case WAIT_IDLE:
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
			/* resume the state saved when suspend prep began */
			vscsi->state = vscsi->resume_state;
			vscsi->resume_state = 0;
			rc = ibmvscsis_ready_for_suspend(vscsi, true);
			vscsi->flags &= ~DISCONNECT_SCHEDULED;
			if (rc)
				break;
		} else if (vscsi->flags & TRANS_EVENT) {
			vscsi->state = WAIT_CONNECTION;
			vscsi->flags &= PRESERVE_FLAG_FIELDS;
		} else {
			vscsi->state = CONNECTED;
			vscsi->flags &= ~DISCONNECT_SCHEDULED;
		}

		dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		/* pick up any elements that arrived while we were draining */
		ibmvscsis_poll_cmd_q(vscsi);
		break;

	case ERR_DISCONNECTED:
		vscsi->flags &= ~DISCONNECT_SCHEDULED;
		dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		break;

	default:
		dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
			vscsi->state);
		break;
	}

	if (free_qs)
		ibmvscsis_free_cmd_qs(vscsi);

	/*
	 * There is a timing window where we could lose a disconnect request.
	 * The known path to this window occurs during the DISCONNECT_RECONNECT
	 * case above: reset_queue calls free_command_q, which will release the
	 * interrupt lock.  During that time, a new post_disconnect call can be
	 * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
	 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
	 * will only set the new_state.  Now free_command_q reacquires the intr
	 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
	 * FIELDS), and the disconnect is lost.  This is particularly bad when
	 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
	 * forever.
	 * Fix is that free command queue sets acr state and acr flags if there
	 * is a change under the lock
	 * note free command queue writes to this state it clears it
	 * before releasing the lock, different drivers call the free command
	 * queue different times so don't initialize above
	 */
	if (vscsi->phyp_acr_state != 0) {
		/*
		 * set any bits in flags that may have been cleared by
		 * a call to free command queue in switch statement
		 * or reset queue
		 */
		vscsi->flags |= vscsi->phyp_acr_flags;
		ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			vscsi->phyp_acr_state);
	}

	dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
		vscsi->flags, vscsi->state, vscsi->new_state);
}

/**
 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element to use to process the request
 * @crq:	Pointer to CRQ entry containing the request
 *
 * Copy the srp information unit from the hosted
 * partition using remote dma
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
				      struct ibmvscsis_cmd *cmd,
				      struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	long rc = 0;
	u16 len;

	len = be16_to_cpu(crq->IU_length);
	if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
		dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return SRP_VIOLATION;
	}

	rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);

	switch (rc) {
	case H_SUCCESS:
		cmd->init_time = mftb();
		iue->remote_token = crq->IU_data_ptr;
		iue->iu_len = len;
		dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
			be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  (RESPONSE_Q_DOWN |
						   CLIENT_FAILED));
		else
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);

		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
			rc);
		break;
	case H_DEST_PARM:
	case H_SOURCE_PARM:
	default:
		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_adapter_info - Service an Adapter Info MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Adapter Info MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt adapter lock is held
 */
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
				   struct iu_entry *iue)
{
	struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
	struct mad_adapter_info_data *info;
	uint flag_bits = 0;
	dma_addr_t token;
	long rc;

	mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);

	if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
				  GFP_ATOMIC);
	if (!info) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* Get remote info */
	rc = h_copy_rdma(be16_to_cpu(mad->common.length),
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);

	if (rc != H_SUCCESS) {
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
			 rc);
		dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
			be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		goto free_dma;
	}

	/*
	 * Copy client info, but ignore partition number, which we
	 * already got from phyp - unless we failed to get it from
	 * phyp (e.g. if we're running on a p5 system).
	 */
	if (vscsi->client_data.partition_number == 0)
		vscsi->client_data.partition_number =
			be32_to_cpu(info->partition_number);
	strncpy(vscsi->client_data.srp_version, info->srp_version,
		sizeof(vscsi->client_data.srp_version));
	strncpy(vscsi->client_data.partition_name, info->partition_name,
		sizeof(vscsi->client_data.partition_name));
	vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
	vscsi->client_data.os_type = be32_to_cpu(info->os_type);

	/* Copy our info */
	strncpy(info->srp_version, SRP_VERSION,
		sizeof(info->srp_version));
	strncpy(info->partition_name, vscsi->dds.partition_name,
		sizeof(info->partition_name));
	info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
	info->mad_version = cpu_to_be32(MAD_VERSION_1);
	info->os_type = cpu_to_be32(LINUX);
	memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
	info->port_max_txu[0] = cpu_to_be32(MAX_TXU);

	/* ensure the buffer is fully written before the remote DMA */
	dma_wmb();
	rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
			 token, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer));
	switch (rc) {
	case H_SUCCESS:
		break;

	case H_SOURCE_PARM:
	case H_DEST_PARM:
	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		/* fall through */
	default:
		dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi,
					  ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	}

free_dma:
	dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
	dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);

	return rc;
}

/**
 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Capabilities MAD request
 *
 * NOTE: if you return an error from this routine you must be
 * disconnecting or you will cause a hang
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt called with adapter lock held
 */
static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
{
	struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
	struct capabilities *cap;
	struct mad_capability_common *common;
	dma_addr_t token;
	u16 olen, len, status, min_len, cap_len;
	u32 flag;
	uint flag_bits = 0;
	long rc = 0;

	olen = be16_to_cpu(mad->common.length);
	/*
	 * struct capabilities hardcodes a couple capabilities after the
	 * header, but the capabilities can actually be in any order.
	 */
	min_len = offsetof(struct capabilities, migration);
	if ((olen < min_len) || (olen > PAGE_SIZE)) {
		dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
				 GFP_ATOMIC);
	if (!cap) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}
	rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);
	if (rc == H_SUCCESS) {
		strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
			SRP_MAX_LOC_LEN);

		len = olen - min_len;
		status = VIOSRP_MAD_SUCCESS;
		common = (struct mad_capability_common *)&cap->migration;

		/* walk the variable-length list of capability entries */
		while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
			dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
				len, be32_to_cpu(common->cap_type),
				be16_to_cpu(common->length));

			cap_len = be16_to_cpu(common->length);
			if (cap_len > len) {
				dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
				status = VIOSRP_MAD_FAILED;
				break;
			}

			if (cap_len == 0) {
				dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
				status = VIOSRP_MAD_FAILED;
				break;
			}

			/* no capability types are currently supported */
			switch (common->cap_type) {
			default:
				dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
				common->server_support = 0;
				flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
				cap->flags &= ~flag;
				break;
			}

			len = len - cap_len;
			common = (struct mad_capability_common *)
				((char *)common + cap_len);
		}

		mad->common.status = cpu_to_be16(status);

		/* buffer must be complete before the remote DMA back */
		dma_wmb();
		rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
				 vscsi->dds.window[REMOTE].liobn,
				 be64_to_cpu(mad->buffer));

		if (rc != H_SUCCESS) {
			dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
				rc);

			if (rc == H_PERMISSION) {
				if (connection_broken(vscsi))
					flag_bits = (RESPONSE_Q_DOWN |
						     CLIENT_FAILED);
			}

			dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
				 rc);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  flag_bits);
		}
	}

	dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);

	dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
		rc, vscsi->client_cap);

	return rc;
}

/**
 * ibmvscsis_process_mad() - Service a MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the MAD request
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
{
	struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
	struct viosrp_empty_iu *empty;
	long rc = ADAPT_SUCCESS;

	/* dispatch on the MAD type; status is reported in the MAD itself */
	switch (be32_to_cpu(mad->type)) {
	case VIOSRP_EMPTY_IU_TYPE:
		empty = &vio_iu(iue)->mad.empty_iu;
		vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
		vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
		mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
		break;
	case VIOSRP_ADAPTER_INFO_TYPE:
		rc = ibmvscsis_adapter_info(vscsi, iue);
		break;
	case VIOSRP_CAPABILITIES_TYPE:
		rc = ibmvscsis_cap_mad(vscsi, iue);
		break;
	case VIOSRP_ENABLE_FAST_FAIL:
		if (vscsi->state == CONNECTED) {
			vscsi->fast_fail = true;
			mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
		} else {
			dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
			mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
		}
		break;
	default:
		mad->status =
			cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
		break;
	}

	return rc;
}

/**
 * srp_snd_msg_failed() - Handle an error when sending a response
 * @vscsi:	Pointer to our adapter structure
 * @rc:		The return code from the h_send_crq command
 *
 * Must be called with interrupt lock held.
 */
static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
{
	ktime_t kt;

	if (rc != H_DROPPED) {
		/* hard failure: drop all queued responses and disconnect */
		ibmvscsis_free_cmd_qs(vscsi);

		if (rc == H_CLOSED)
			vscsi->flags |= CLIENT_FAILED;

		/* don't flag the same problem multiple times */
		if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
			vscsi->flags |= RESPONSE_Q_DOWN;
			if (!(vscsi->state & (ERR_DISCONNECT |
					      ERR_DISCONNECT_RECONNECT |
					      ERR_DISCONNECTED | UNDEFINED))) {
				dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
					vscsi->state, vscsi->flags, rc);
			}
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
		}
		return;
	}

	/*
	 * The response queue is full.
	 * If the server is processing SRP requests, i.e.
	 * the client has successfully done an
	 * SRP_LOGIN, then it will wait forever for room in
	 * the queue.  However if the system admin
	 * is attempting to unconfigure the server then one
	 * or more children will be in a state where
	 * they are being removed. So if there is even one
	 * child being removed then the driver assumes
	 * the system admin is attempting to break the
	 * connection with the client and MAX_TIMER_POPS
	 * is honored.
	 */
	if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
	    (vscsi->state == SRP_PROCESSING)) {
		dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
			vscsi->flags, (int)vscsi->rsp_q_timer.started,
			vscsi->rsp_q_timer.timer_pops);

		/*
		 * Check if the timer is running; if it
		 * is not then start it up.
		 */
		if (!vscsi->rsp_q_timer.started) {
			if (vscsi->rsp_q_timer.timer_pops <
			    MAX_TIMER_POPS) {
				kt = WAIT_NANO_SECONDS;
			} else {
				/*
				 * slide the timeslice if the maximum
				 * timer pops have already happened
				 */
				kt = ktime_set(WAIT_SECONDS, 0);
			}

			vscsi->rsp_q_timer.started = true;
			hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
				      HRTIMER_MODE_REL);
		}
	} else {
		/*
		 * TBD: Do we need to worry about this? Need to get
		 * remove working.
		 */
		/*
		 * waited a long time and it appears the system admin
		 * is bring this driver down
		 */
		vscsi->flags |= RESPONSE_Q_DOWN;
		ibmvscsis_free_cmd_qs(vscsi);
		/*
		 * if the driver is already attempting to disconnect
		 * from the client and has already logged an error
		 * trace this event but don't put it in the error log
		 */
		if (!(vscsi->state & (ERR_DISCONNECT |
				      ERR_DISCONNECT_RECONNECT |
				      ERR_DISCONNECTED | UNDEFINED))) {
			dev_err(&vscsi->dev, "client crq full too long\n");
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  0);
		}
	}
}

/**
 * ibmvscsis_send_messages() - Send a Response
 * @vscsi:	Pointer to our adapter structure
 *
 * Send a response, first checking the waiting queue. Responses are
 * sent in order they are received. If the response cannot be sent,
 * because the client queue is full, it stays on the waiting queue.
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
	u64 msg_hi = 0;
	/* note do not attempt to access the IU_data_ptr with this pointer
	 * it is not valid
	 */
	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
	struct ibmvscsis_cmd *cmd, *nxt;
	struct iu_entry *iue;
	long rc = ADAPT_SUCCESS;
	bool retry = false;

	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
		do {
			retry = false;
			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
						 list) {
				/*
				 * Check to make sure abort cmd gets processed
				 * prior to the abort tmr cmd
				 */
				if (cmd->flags & DELAY_SEND)
					continue;

				if (cmd->abort_cmd) {
					/* re-scan so the released abort cmd
					 * is considered in this pass
					 */
					retry = true;
					cmd->abort_cmd->flags &= ~(DELAY_SEND);
					cmd->abort_cmd = NULL;
				}

				/*
				 * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and
				 * the case where LIO issued a
				 * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST
				 * case then we dont send a response, since it
				 * was already done.
				 */
				if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
				    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
					list_del(&cmd->list);
					ibmvscsis_free_cmd_resources(vscsi,
								     cmd);
					/*
					 * With a successfully aborted op
					 * through LIO we want to increment the
					 * the vscsi credit so that when we dont
					 * send a rsp to the original scsi abort
					 * op (h_send_crq), but the tm rsp to
					 * the abort is sent, the credit is
					 * correctly sent with the abort tm rsp.
					 * We would need 1 for the abort tm rsp
					 * and 1 credit for the aborted scsi op.
					 * Thus we need to increment here.
					 * Also we want to increment the credit
					 * here because we want to make sure
					 * cmd is actually released first
					 * otherwise the client will think it
					 * it can send a new cmd, and we could
					 * find ourselves short of cmd elements.
					 */
					vscsi->credit += 1;
				} else {
					iue = cmd->iue;

					/* build the response CRQ element in
					 * msg_hi via the overlaid crq pointer
					 */
					crq->valid = VALID_CMD_RESP_EL;
					crq->format = cmd->rsp.format;

					if (cmd->flags & CMD_FAST_FAIL)
						crq->status = VIOSRP_ADAPTER_FAIL;

					crq->IU_length = cpu_to_be16(cmd->rsp.len);

					rc = h_send_crq(vscsi->dma_dev->unit_address,
							be64_to_cpu(msg_hi),
							be64_to_cpu(cmd->rsp.tag));

					dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
						cmd, be64_to_cpu(cmd->rsp.tag),
						rc);

					/* if all ok free up the command
					 * element resources
					 */
					if (rc == H_SUCCESS) {
						/* some movement has occurred */
						vscsi->rsp_q_timer.timer_pops = 0;
						list_del(&cmd->list);

						ibmvscsis_free_cmd_resources(vscsi,
									     cmd);
					} else {
						srp_snd_msg_failed(vscsi, rc);
						break;
					}
				}
			}
		} while (retry);

		if (!rc) {
			/*
			 * The timer could pop with the queue empty. If
			 * this happens, rc will always indicate a
			 * success; clear the pop count.
			 */
			vscsi->rsp_q_timer.timer_pops = 0;
		}
	} else {
		ibmvscsis_free_cmd_qs(vscsi);
	}
}

/* Called with intr lock held */
static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd,
				    struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
	uint flag_bits = 0;
	long rc;

	/* MAD response data must be visible before the remote DMA */
	dma_wmb();
	rc = h_copy_rdma(sizeof(struct mad_common),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr));
	if (!rc) {
		cmd->rsp.format = VIOSRP_MAD_FORMAT;
		cmd->rsp.len = sizeof(struct mad_common);
		cmd->rsp.tag = mad->tag;
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
			rc);
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
			rc);

		ibmvscsis_free_cmd_resources(vscsi, cmd);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
	}
}

/**
 * ibmvscsis_mad() - Service a MAnagement Data gram.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to the CRQ entry containing the MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with adapter lock held
 */
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct iu_entry *iue;
	struct ibmvscsis_cmd *cmd;
	struct mad_common *mad;
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
	/*
	 * We have not exchanged Init Msgs yet, so this MAD was sent
	 * before the last Transport Event; client will not be
	 * expecting a response.
	 */
	case WAIT_CONNECTION:
		dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
			vscsi->flags);
		return ADAPT_SUCCESS;

	case SRP_PROCESSING:
	case CONNECTED:
		break;

	/*
	 * We should never get here while we're in these states.
	 * Just log an error and get out.
	 */
	case UNCONFIGURING:
	case WAIT_IDLE:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	default:
		dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
			vscsi->state);
		return ADAPT_SUCCESS;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return ERROR;
	}
	iue = cmd->iue;
	cmd->type = ADAPTER_MAD;

	/* pull the MAD payload over from the client partition */
	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (!rc) {
		mad = (struct mad_common *)&vio_iu(iue)->mad;

		dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));

		rc = ibmvscsis_process_mad(vscsi, iue);

		dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
			be16_to_cpu(mad->status), rc);

		if (!rc)
			ibmvscsis_send_mad_resp(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
	return rc;
}

/**
 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
	struct format_code *fmt;
	uint flag_bits = 0;
	long rc = ADAPT_SUCCESS;

	memset(rsp, 0, sizeof(struct srp_login_rsp));

	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
	rsp->tag = cmd->rsp.tag;
	rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	fmt = (struct format_code *)&rsp->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;
	/* a new login resets the flow-control credit */
	vscsi->credit = 0;

	cmd->rsp.len = sizeof(struct srp_login_rsp);

	/* response must be fully written before the remote DMA */
	dma_wmb();
	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		break;

	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 * @reason:	The reason the SRP Login is being rejected, per SRP protocol
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd, u32 reason)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
	struct format_code *fmt;
	uint flag_bits = 0;
	long rc = ADAPT_SUCCESS;

	memset(rej, 0, sizeof(*rej));

	rej->opcode = SRP_LOGIN_REJ;
	rej->reason = cpu_to_be32(reason);
	rej->tag = cmd->rsp.tag;
	fmt = (struct format_code *)&rej->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;

	cmd->rsp.len = sizeof(*rej);

	/* rejection notice must be fully written before the remote DMA */
	dma_wmb();
	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/*
 * ibmvscsis_make_nexus() - establish the I_T nexus (LIO session) for a
 * target port; a no-op if the nexus already exists.
 */
static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
	char *name = tport->tport_name;
	struct ibmvscsis_nexus *nexus;
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
	int rc;

	if (tport->ibmv_nexus) {
		dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
		return 0;
	}

	nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
	if (!nexus) {
		dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
		return -ENOMEM;
	}

	nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
					      TARGET_PROT_NORMAL, name, nexus,
					      NULL);
	if (IS_ERR(nexus->se_sess)) {
		rc = PTR_ERR(nexus->se_sess);
		goto transport_init_fail;
	}

	tport->ibmv_nexus = nexus;

	return 0;

transport_init_fail:
	kfree(nexus);
	return rc;
}

static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
{
	struct se_session *se_sess;
	struct ibmvscsis_nexus *nexus;

	nexus = tport->ibmv_nexus;
	if (!nexus)
		return -ENODEV;

	se_sess = nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	/*
	 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
	 */
	target_remove_session(se_sess);
	tport->ibmv_nexus = NULL;
	kfree(nexus);

	return 0;
}

/**
 * ibmvscsis_srp_login() - Process an SRP Login Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the SRP Login request
 * @crq:	Pointer to CRQ entry containing the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with interrupt lock held
 */
static long ibmvscsis_srp_login(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd,
				struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
	struct port_id {
		__be64 id_extension;
		__be64 io_guid;
	} *iport, *tport;
	struct format_code *fmt;
	u32 reason = 0x0;
	long rc = ADAPT_SUCCESS;

	iport = (struct port_id *)req->initiator_port_id;
	tport = (struct port_id *)req->target_port_id;
	fmt = (struct format_code *)&req->req_buf_fmt;
	/* Validate the login parameters per the SRP specification. */
	if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
		reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
	else if (be32_to_cpu(req->req_it_iu_len) < 64)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
	else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
		 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
		reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
	else if (req->req_flags & SRP_MULTICHAN_MULTI)
		reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
	else if (fmt->buffers & (~SUPPORTED_FORMATS))
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
	else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;

	/* Only a single login (channel) at a time is supported. */
	if (vscsi->state == SRP_PROCESSING)
		reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;

	rc = ibmvscsis_make_nexus(&vscsi->tport);
	if (rc)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;

	cmd->rsp.format = VIOSRP_SRP_FORMAT;
	cmd->rsp.tag = req->tag;

	dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);

	if (reason)
		rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
	else
		rc = ibmvscsis_login_rsp(vscsi, cmd);

	if (!rc) {
		if (!reason)
			vscsi->state = SRP_PROCESSING;

		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
	return rc;
}

/**
 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the Implicit Logout request
 * @crq:	Pointer to CRQ entry containing the Implicit Logout request
 *
 * Do the logic to close the I_T nexus.  This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
				   struct ibmvscsis_cmd *cmd,
				   struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
	long rc = ADAPT_SUCCESS;

	/* Refuse the logout if work is still outstanding for this nexus. */
	if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
	    !list_empty(&vscsi->waiting_rsp)) {
		dev_err(&vscsi->dev, "i_logout: outstanding work\n");
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
	} else {
		cmd->rsp.format = SRP_FORMAT;
		cmd->rsp.tag = log_out->tag;
		cmd->rsp.len = sizeof(struct mad_common);
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);

		ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
	}

	return rc;
}

/* Called with intr lock held */
static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct ibmvscsis_cmd *cmd;
	struct iu_entry *iue;
	struct srp_cmd *srp;
	struct srp_tsk_mgmt *tsk;
	long rc;

	if (vscsi->request_limit - vscsi->debit <= 0) {
		/* Client has exceeded request limit */
		dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
			vscsi->request_limit, vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}
	iue = cmd->iue;
	srp = &vio_iu(iue)->srp.cmd;

	/* Pull the SRP IU from client memory into our local buffer. */
	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (rc) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		return;
	}

	if (vscsi->state == SRP_PROCESSING) {
		switch (srp->opcode) {
		case SRP_LOGIN_REQ:
			rc = ibmvscsis_srp_login(vscsi, cmd, crq);
			break;

		case SRP_TSK_MGMT:
			tsk = &vio_iu(iue)->srp.tsk_mgmt;
			dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
				tsk->tag, tsk->tag);
			cmd->rsp.tag = tsk->tag;
			vscsi->debit += 1;
			cmd->type = TASK_MANAGEMENT;
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_CMD:
			dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
				srp->tag, srp->tag);
			cmd->rsp.tag = srp->tag;
			vscsi->debit += 1;
			cmd->type = SCSI_CDB;
			/*
			 * We want to keep track of work waiting for
			 * the workqueue.
			 */
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_I_LOGOUT:
			rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
			break;

		case SRP_CRED_RSP:
		case SRP_AER_RSP:
		default:
			ibmvscsis_free_cmd_resources(vscsi, cmd);
			dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
				(uint)srp->opcode);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
	} else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
		rc = ibmvscsis_srp_login(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}
}

/**
 * ibmvscsis_ping_response() - Respond to a ping request
 * @vscsi:	Pointer to our adapter structure
 *
 * Let the client know that the server is alive and waiting on
 * its native I/O stack.
 * If any type of error occurs from the call to queue a ping
 * response then the client is either not accepting or receiving
 * interrupts.  Disconnect with an error.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_ping_response(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	/* Build the ping-response CRQ element in a local buffer. */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = (u8)MESSAGE_IN_CRQ;
	crq->status = PING_RESPONSE;

	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_CLOSED:
		vscsi->flags |= CLIENT_FAILED;
		/* Fall through */
	case H_DROPPED:
		vscsi->flags |= RESPONSE_Q_DOWN;
		/* Fall through */
	case H_REMOTE_PARM:
		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	default:
		dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the SRP request
 *
 * This function will return success if the command queue element is valid
 * and the srp iu or MAD request it pointed to was also valid.  That does
 * not mean that an error was not returned to the client.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, intr lock held
 */
static long ibmvscsis_parse_command(struct scsi_info *vscsi,
				    struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	switch (crq->valid) {
	case VALID_CMD_RESP_EL:
		switch (crq->format) {
		case OS400_FORMAT:
		case AIX_FORMAT:
		case LINUX_FORMAT:
		case MAD_FORMAT:
			/* Only one MAD request may be in flight at a time. */
			if (vscsi->flags & PROCESSING_MAD) {
				rc = ERROR;
				dev_err(&vscsi->dev, "parse_command: already processing mad\n");
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
							  0);
			} else {
				vscsi->flags |= PROCESSING_MAD;
				rc = ibmvscsis_mad(vscsi, crq);
			}
			break;

		case SRP_FORMAT:
			ibmvscsis_srp_cmd(vscsi, crq);
			break;

		case MESSAGE_IN_CRQ:
			if (crq->status == PING)
				ibmvscsis_ping_response(vscsi);
			break;

		default:
			dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
				(uint)crq->format);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
		break;

	case VALID_TRANS_EVENT:
		rc = ibmvscsis_trans_event(vscsi, crq);
		break;

	case VALID_INIT_MSG:
		rc = ibmvscsis_init_msg(vscsi, crq);
		break;

	default:
		dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
			(uint)crq->valid);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	/*
	 * Return only what the interrupt handler cares
	 * about. Most errors we keep right on trucking.
	 */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	return rc;
}

/*
 * read_dma_window() - Read the local and remote LIOBNs out of the
 * "ibm,my-dma-window" device-tree property.  Returns 0 on success,
 * -1 if the property is missing.
 */
static int read_dma_window(struct scsi_info *vscsi)
{
	struct vio_dev *vdev = vscsi->dma_dev;
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows.
	 */
	dma_window = (const __be32 *)vio_get_attribute(vdev,
						       "ibm,my-dma-window",
						       NULL);
	if (!dma_window) {
		dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
	dma_window++;

	/* Skip the address cells of the first window (assume 1 if absent). */
	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* Skip the size cells of the first window (assume 1 if absent). */
	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);

	return 0;
}

/*
 * ibmvscsis_lookup_port() - Find the tport whose vio device name matches
 * @name.  Returns NULL if no registered adapter matches.
 */
static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
{
	struct ibmvscsis_tport *tport = NULL;
	struct vio_dev *vdev;
	struct scsi_info *vscsi;

	spin_lock_bh(&ibmvscsis_dev_lock);
	list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
		vdev = vscsi->dma_dev;
		if (!strcmp(dev_name(&vdev->dev), name)) {
			tport = &vscsi->tport;
			break;
		}
	}
	spin_unlock_bh(&ibmvscsis_dev_lock);

	return tport;
}

/**
 * ibmvscsis_parse_cmd() - Parse SRP Command
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP command
 *
 * Parse the srp command; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the SCSI CDB.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct ibmvscsis_nexus *nexus;
	u64 data_len = 0;
	enum dma_data_direction dir;
	int attr = 0;
	int rc = 0;

	nexus = vscsi->tport.ibmv_nexus;
	/*
	 * additional length in bytes.  Note that the SRP spec says that
	 * additional length is in 4-byte words, but technically the
	 * additional length field is only the upper 6 bits of the byte.
	 * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
	 * all reserved fields should be), then interpreting the byte as
	 * an int will yield the length in bytes.
	 */
	if (srp->add_cdb_len & 0x03) {
		dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	if (srp_get_desc_table(srp, &dir, &data_len)) {
		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
			srp->tag);
		goto fail;
	}

	cmd->rsp.sol_not = srp->sol_not;

	/* Map the SRP task attribute onto the TCM equivalent. */
	switch (srp->task_attr) {
	case SRP_SIMPLE_TASK:
		attr = TCM_SIMPLE_TAG;
		break;
	case SRP_ORDERED_TASK:
		attr = TCM_ORDERED_TAG;
		break;
	case SRP_HEAD_TASK:
		attr = TCM_HEAD_TAG;
		break;
	case SRP_ACA_TASK:
		attr = TCM_ACA_TAG;
		break;
	default:
		dev_err(&vscsi->dev, "Invalid task attribute %d\n",
			srp->task_attr);
		goto fail;
	}

	cmd->se_cmd.tag = be64_to_cpu(srp->tag);

	spin_lock_bh(&vscsi->intr_lock);
	list_add_tail(&cmd->list, &vscsi->active_q);
	spin_unlock_bh(&vscsi->intr_lock);

	srp->lun.scsi_lun[0] &= 0x3f;

	rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
			       cmd->sense_buf, scsilun_to_int(&srp->lun),
			       data_len, attr, dir, 0);
	if (rc) {
		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
		spin_lock_bh(&vscsi->intr_lock);
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		goto fail;
	}
	return;

fail:
	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	spin_unlock_bh(&vscsi->intr_lock);
}

/**
 * ibmvscsis_parse_task() - Parse SRP Task Management Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP task management request
 *
 * Parse the srp task management request; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the task management
 * request.
 *
 * EXECUTION ENVIRONMENT:
 *	Processor level
 */
static void ibmvscsis_parse_task(struct scsi_info *vscsi,
				 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
	int tcm_type;
	u64 tag_to_abort = 0;
	int rc = 0;
	struct ibmvscsis_nexus *nexus;

	nexus = vscsi->tport.ibmv_nexus;

	cmd->rsp.sol_not = srp_tsk->sol_not;

	/* Map the SRP task management function onto the TCM TMR type. */
	switch (srp_tsk->tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		tcm_type = TMR_ABORT_TASK;
		tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
		break;
	case SRP_TSK_ABORT_TASK_SET:
		tcm_type = TMR_ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		tcm_type = TMR_CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		tcm_type = TMR_LUN_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		tcm_type = TMR_CLEAR_ACA;
		break;
	default:
		dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
			srp_tsk->tsk_mgmt_func);
		cmd->se_cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		rc = -1;
		break;
	}

	if (!rc) {
		cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);

		spin_lock_bh(&vscsi->intr_lock);
		list_add_tail(&cmd->list, &vscsi->active_q);
		spin_unlock_bh(&vscsi->intr_lock);

		srp_tsk->lun.scsi_lun[0] &= 0x3f;

		dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
			srp_tsk->tsk_mgmt_func);
		rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
				       scsilun_to_int(&srp_tsk->lun), srp_tsk,
				       tcm_type, GFP_KERNEL, tag_to_abort, 0);
		if (rc) {
			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
				rc);
			spin_lock_bh(&vscsi->intr_lock);
			list_del(&cmd->list);
			spin_unlock_bh(&vscsi->intr_lock);
			cmd->se_cmd.se_tmr_req->response =
				TMR_FUNCTION_REJECTED;
		}
	}

	if (rc)
		transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
}

/*
 * ibmvscsis_scheduler() - Workqueue handler.  Remove a command from the
 * schedule_q and hand it to TCM, or free it if the adapter is
 * disconnecting.
 */
static void ibmvscsis_scheduler(struct work_struct *work)
{
	struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
						 work);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);

	/* Remove from schedule_q */
	list_del(&cmd->list);

	/* Don't submit cmd if we're disconnecting */
	if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);

		/* ibmvscsis_disconnect might be waiting for us */
		if (list_empty(&vscsi->active_q) &&
		    list_empty(&vscsi->schedule_q) &&
		    (vscsi->flags & WAIT_FOR_IDLE)) {
			vscsi->flags &= ~WAIT_FOR_IDLE;
			complete(&vscsi->wait_idle);
		}

		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	switch (cmd->type) {
	case SCSI_CDB:
		ibmvscsis_parse_cmd(vscsi, cmd);
		break;
	case TASK_MANAGEMENT:
		ibmvscsis_parse_task(vscsi, cmd);
		break;
	default:
		dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
			cmd->type);
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		break;
	}
}

/*
 * ibmvscsis_alloc_cmds() - Allocate the pool of @num command elements and
 * place each one on the free_cmd list.  Returns 0 or -ENOMEM.
 */
static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
{
	struct ibmvscsis_cmd *cmd;
	int i;

	INIT_LIST_HEAD(&vscsi->free_cmd);
	vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
				  GFP_KERNEL);
	if (!vscsi->cmd_pool)
		return -ENOMEM;

	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
	     i++, cmd++) {
		cmd->abort_cmd = NULL;
		cmd->adapter = vscsi;
		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
		list_add_tail(&cmd->list, &vscsi->free_cmd);
	}

	return 0;
}

/* Free the command pool allocated by ibmvscsis_alloc_cmds(). */
static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
{
	kfree(vscsi->cmd_pool);
	vscsi->cmd_pool = NULL;
	INIT_LIST_HEAD(&vscsi->free_cmd);
}

/**
 * ibmvscsis_service_wait_q() - Service Waiting Queue
 * @timer:	Pointer to timer which has expired
 *
 * This routine is called when the timer pops to service the waiting
 * queue.  Elements on the queue have completed, their responses have been
 * copied to the client, but the client's response queue was full so
 * the queue message could not be sent.  The routine grabs the proper locks
 * and calls send messages.
 *
 * EXECUTION ENVIRONMENT:
 *	called at interrupt level
 */
static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
{
	struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
	struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
					       rsp_q_timer);

	spin_lock_bh(&vscsi->intr_lock);
	p_timer->timer_pops += 1;
	p_timer->started = false;
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);

	return HRTIMER_NORESTART;
}

/* Initialize the response-queue retry timer; always succeeds. */
static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
{
	struct timer_cb *p_timer;

	p_timer = &vscsi->rsp_q_timer;
	hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	p_timer->timer.function = ibmvscsis_service_wait_q;
	p_timer->started = false;
	p_timer->timer_pops = 0;

	return ADAPT_SUCCESS;
}

/* Cancel the response-queue retry timer and reset its bookkeeping. */
static void ibmvscsis_freetimer(struct scsi_info *vscsi)
{
	struct timer_cb *p_timer;

	p_timer = &vscsi->rsp_q_timer;

	(void)hrtimer_cancel(&p_timer->timer);

	p_timer->started = false;
	p_timer->timer_pops = 0;
}

/*
 * ibmvscsis_interrupt() - CRQ interrupt handler.  Masks further interrupts
 * and defers all processing to the tasklet.
 */
static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
{
	struct scsi_info *vscsi = data;

	vio_disable_interrupts(vscsi->dma_dev);
	tasklet_schedule(&vscsi->work_task);

	return IRQ_HANDLED;
}

/**
 * ibmvscsis_enable_change_state() - Set new state based on enabled status
 * @vscsi:	Pointer to our adapter structure
 *
 * This function determines our new state now that we are enabled.  This
 * may involve sending an Init Complete message to the client.
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	bytes = vscsi->cmd_q.size * PAGE_SIZE;
	/* H_CLOSED just means the client has not connected yet. */
	rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
	if (rc == H_CLOSED || rc == H_SUCCESS) {
		vscsi->state = WAIT_CONNECTION;
		rc = ibmvscsis_establish_new_q(vscsi);
	}

	if (rc != ADAPT_SUCCESS) {
		vscsi->state = ERR_DISCONNECTED;
		vscsi->flags |= RESPONSE_Q_DOWN;
	}

	return rc;
}

/**
 * ibmvscsis_create_command_q() - Create Command Queue
 * @vscsi:	Pointer to our adapter structure
 * @num_cmds:	Currently unused.  In the future, may be used to determine
 *		the size of the CRQ.
 *
 * Allocates memory for command queue maps remote memory into an ioba
 * initializes the command response queue
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
{
	int pages;
	struct vio_dev *vdev = vscsi->dma_dev;

	/* We might support multiple pages in the future, but just 1 for now */
	pages = 1;

	vscsi->cmd_q.size = pages;

	vscsi->cmd_q.base_addr =
		(struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
	if (!vscsi->cmd_q.base_addr)
		return -ENOMEM;

	/* mask used to wrap the CRQ index; queue size is a power of two */
	vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;

	vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
						vscsi->cmd_q.base_addr,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
		free_page((unsigned long)vscsi->cmd_q.base_addr);
		return -ENOMEM;
	}

	return 0;
}

/**
 * ibmvscsis_destroy_command_q - Destroy Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Releases memory for command queue and unmaps mapped remote memory.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
{
	dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)vscsi->cmd_q.base_addr);
	vscsi->cmd_q.base_addr = NULL;
	vscsi->state = NO_QUEUE;
}

/*
 * ibmvscsis_fast_fail() - If fast_fail is enabled and a read/write command
 * failed with a HARDWARE ERROR sense key while transferring no data,
 * report NO SENSE instead and mark the command CMD_FAST_FAIL.  Returns
 * the SCSI status byte to place in the SRP response.
 */
static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
			      struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct scsi_sense_hdr sshdr;
	u8 rc = se_cmd->scsi_status;

	if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
		if (scsi_normalize_sense(se_cmd->sense_buffer,
					 se_cmd->scsi_sense_length, &sshdr))
			if (sshdr.sense_key == HARDWARE_ERROR &&
			    (se_cmd->residual_count == 0 ||
			     se_cmd->residual_count == se_cmd->data_length)) {
				rc = NO_SENSE;
				cmd->flags |= CMD_FAST_FAIL;
			}

	return rc;
}

/**
 * srp_build_response() - Build an SRP response buffer
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command for which to send the response
 * @len_p:	Where to return the length of the IU response sent.  This
 *		is needed to construct the CRQ response.
 *
 * Build the SRP response buffer and copy it to the client's memory space.
3094 */ 3095 static long srp_build_response(struct scsi_info *vscsi, 3096 struct ibmvscsis_cmd *cmd, uint *len_p) 3097 { 3098 struct iu_entry *iue = cmd->iue; 3099 struct se_cmd *se_cmd = &cmd->se_cmd; 3100 struct srp_rsp *rsp; 3101 uint len; 3102 u32 rsp_code; 3103 char *data; 3104 u32 *tsk_status; 3105 long rc = ADAPT_SUCCESS; 3106 3107 spin_lock_bh(&vscsi->intr_lock); 3108 3109 rsp = &vio_iu(iue)->srp.rsp; 3110 len = sizeof(*rsp); 3111 memset(rsp, 0, len); 3112 data = rsp->data; 3113 3114 rsp->opcode = SRP_RSP; 3115 3116 rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); 3117 rsp->tag = cmd->rsp.tag; 3118 rsp->flags = 0; 3119 3120 if (cmd->type == SCSI_CDB) { 3121 rsp->status = ibmvscsis_fast_fail(vscsi, cmd); 3122 if (rsp->status) { 3123 dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n", 3124 cmd, (int)rsp->status); 3125 ibmvscsis_determine_resid(se_cmd, rsp); 3126 if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) { 3127 rsp->sense_data_len = 3128 cpu_to_be32(se_cmd->scsi_sense_length); 3129 rsp->flags |= SRP_RSP_FLAG_SNSVALID; 3130 len += se_cmd->scsi_sense_length; 3131 memcpy(data, se_cmd->sense_buffer, 3132 se_cmd->scsi_sense_length); 3133 } 3134 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 3135 UCSOLNT_RESP_SHIFT; 3136 } else if (cmd->flags & CMD_FAST_FAIL) { 3137 dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n", 3138 cmd); 3139 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 3140 UCSOLNT_RESP_SHIFT; 3141 } else { 3142 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> 3143 SCSOLNT_RESP_SHIFT; 3144 } 3145 } else { 3146 /* this is task management */ 3147 rsp->status = 0; 3148 rsp->resp_data_len = cpu_to_be32(4); 3149 rsp->flags |= SRP_RSP_FLAG_RSPVALID; 3150 3151 switch (se_cmd->se_tmr_req->response) { 3152 case TMR_FUNCTION_COMPLETE: 3153 case TMR_TASK_DOES_NOT_EXIST: 3154 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE; 3155 rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >> 3156 SCSOLNT_RESP_SHIFT; 3157 break; 3158 case 
TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED: 3159 case TMR_LUN_DOES_NOT_EXIST: 3160 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED; 3161 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 3162 UCSOLNT_RESP_SHIFT; 3163 break; 3164 case TMR_FUNCTION_FAILED: 3165 case TMR_FUNCTION_REJECTED: 3166 default: 3167 rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED; 3168 rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >> 3169 UCSOLNT_RESP_SHIFT; 3170 break; 3171 } 3172 3173 tsk_status = (u32 *)data; 3174 *tsk_status = cpu_to_be32(rsp_code); 3175 data = (char *)(tsk_status + 1); 3176 len += 4; 3177 } 3178 3179 dma_wmb(); 3180 rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma, 3181 vscsi->dds.window[REMOTE].liobn, 3182 be64_to_cpu(iue->remote_token)); 3183 3184 switch (rc) { 3185 case H_SUCCESS: 3186 vscsi->credit = 0; 3187 *len_p = len; 3188 break; 3189 case H_PERMISSION: 3190 if (connection_broken(vscsi)) 3191 vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED; 3192 3193 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n", 3194 rc, vscsi->flags, vscsi->state); 3195 break; 3196 case H_SOURCE_PARM: 3197 case H_DEST_PARM: 3198 default: 3199 dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n", 3200 rc); 3201 break; 3202 } 3203 3204 spin_unlock_bh(&vscsi->intr_lock); 3205 3206 return rc; 3207 } 3208 3209 static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg, 3210 int nsg, struct srp_direct_buf *md, int nmd, 3211 enum dma_data_direction dir, unsigned int bytes) 3212 { 3213 struct iu_entry *iue = cmd->iue; 3214 struct srp_target *target = iue->target; 3215 struct scsi_info *vscsi = target->ldata; 3216 struct scatterlist *sgp; 3217 dma_addr_t client_ioba, server_ioba; 3218 ulong buf_len; 3219 ulong client_len, server_len; 3220 int md_idx; 3221 long tx_len; 3222 long rc = 0; 3223 3224 if (bytes == 0) 3225 return 0; 3226 3227 sgp = sg; 3228 client_len = 0; 3229 server_len = 0; 3230 md_idx = 
0; 3231 tx_len = bytes; 3232 3233 do { 3234 if (client_len == 0) { 3235 if (md_idx >= nmd) { 3236 dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n"); 3237 rc = -EIO; 3238 break; 3239 } 3240 client_ioba = be64_to_cpu(md[md_idx].va); 3241 client_len = be32_to_cpu(md[md_idx].len); 3242 } 3243 if (server_len == 0) { 3244 if (!sgp) { 3245 dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n"); 3246 rc = -EIO; 3247 break; 3248 } 3249 server_ioba = sg_dma_address(sgp); 3250 server_len = sg_dma_len(sgp); 3251 } 3252 3253 buf_len = tx_len; 3254 3255 if (buf_len > client_len) 3256 buf_len = client_len; 3257 3258 if (buf_len > server_len) 3259 buf_len = server_len; 3260 3261 if (buf_len > max_vdma_size) 3262 buf_len = max_vdma_size; 3263 3264 if (dir == DMA_TO_DEVICE) { 3265 /* read from client */ 3266 rc = h_copy_rdma(buf_len, 3267 vscsi->dds.window[REMOTE].liobn, 3268 client_ioba, 3269 vscsi->dds.window[LOCAL].liobn, 3270 server_ioba); 3271 } else { 3272 /* The h_copy_rdma will cause phyp, running in another 3273 * partition, to read memory, so we need to make sure 3274 * the data has been written out, hence these syncs. 
3275 */ 3276 /* ensure that everything is in memory */ 3277 isync(); 3278 /* ensure that memory has been made visible */ 3279 dma_wmb(); 3280 rc = h_copy_rdma(buf_len, 3281 vscsi->dds.window[LOCAL].liobn, 3282 server_ioba, 3283 vscsi->dds.window[REMOTE].liobn, 3284 client_ioba); 3285 } 3286 switch (rc) { 3287 case H_SUCCESS: 3288 break; 3289 case H_PERMISSION: 3290 case H_SOURCE_PARM: 3291 case H_DEST_PARM: 3292 if (connection_broken(vscsi)) { 3293 spin_lock_bh(&vscsi->intr_lock); 3294 vscsi->flags |= 3295 (RESPONSE_Q_DOWN | CLIENT_FAILED); 3296 spin_unlock_bh(&vscsi->intr_lock); 3297 } 3298 dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n", 3299 rc); 3300 break; 3301 3302 default: 3303 dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n", 3304 rc); 3305 break; 3306 } 3307 3308 if (!rc) { 3309 tx_len -= buf_len; 3310 if (tx_len) { 3311 client_len -= buf_len; 3312 if (client_len == 0) 3313 md_idx++; 3314 else 3315 client_ioba += buf_len; 3316 3317 server_len -= buf_len; 3318 if (server_len == 0) 3319 sgp = sg_next(sgp); 3320 else 3321 server_ioba += buf_len; 3322 } else { 3323 break; 3324 } 3325 } 3326 } while (!rc); 3327 3328 return rc; 3329 } 3330 3331 /** 3332 * ibmvscsis_handle_crq() - Handle CRQ 3333 * @data: Pointer to our adapter structure 3334 * 3335 * Read the command elements from the command queue and copy the payloads 3336 * associated with the command elements to local memory and execute the 3337 * SRP requests. 3338 * 3339 * Note: this is an edge triggered interrupt. It can not be shared. 
 */
static void ibmvscsis_handle_crq(unsigned long data)
{
	struct scsi_info *vscsi = (struct scsi_info *)data;
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	volatile u8 valid;

	spin_lock_bh(&vscsi->intr_lock);

	dev_dbg(&vscsi->dev, "got interrupt\n");

	/*
	 * if we are in a path where we are waiting for all pending commands
	 * to complete because we received a transport event and anything in
	 * the command queue is for a new connection, do nothing
	 */
	if (TARGET_STOP(vscsi)) {
		vio_enable_interrupts(vscsi->dma_dev);

		dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	/*
	 * non-zero rc here (SCHEDULE_DISCONNECT set) suppresses normal
	 * command parsing below; only transport events get serviced then
	 */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	/* read the valid flag before the rest of the element payload */
	dma_rmb();

	while (valid) {
		/*
		 * These are edge triggered interrupts. After dropping out of
		 * the while loop, the code must check for work since an
		 * interrupt could be lost, and an element be left on the
		 * queue, hence the label.
		 */
cmd_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, don't break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a transport event has occurred leave
				 * everything but transport events on the queue
				 *
				 * need to decrement the queue index so we can
				 * look at the element again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
		}
		/*
		 * re-check for an element that arrived while interrupts were
		 * still disabled (the lost-interrupt case described above)
		 */
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto cmd_work;
	} else {
		dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
			vscsi->flags, vscsi->state, vscsi->cmd_q.index);
	}

	dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
		(int)list_empty(&vscsi->schedule_q), vscsi->flags,
		vscsi->state);

	spin_unlock_bh(&vscsi->intr_lock);
}

/*
 * ibmvscsis_probe() - VIO bus probe; allocate and initialize one adapter.
 * @vdev: VIO device being probed
 * @id:   matching entry from ibmvscsis_device_table
 *
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released via the goto ladder at the end.
 */
static int ibmvscsis_probe(struct vio_dev *vdev,
			   const struct vio_device_id *id)
{
	struct scsi_info *vscsi;
	int rc = 0;
	long hrc = 0;
	char wq_name[24];

	vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
if (!vscsi) { 3453 rc = -ENOMEM; 3454 dev_err(&vdev->dev, "probe: allocation of adapter failed\n"); 3455 return rc; 3456 } 3457 3458 vscsi->dma_dev = vdev; 3459 vscsi->dev = vdev->dev; 3460 INIT_LIST_HEAD(&vscsi->schedule_q); 3461 INIT_LIST_HEAD(&vscsi->waiting_rsp); 3462 INIT_LIST_HEAD(&vscsi->active_q); 3463 3464 snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s", 3465 dev_name(&vdev->dev)); 3466 3467 dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name); 3468 3469 rc = read_dma_window(vscsi); 3470 if (rc) 3471 goto free_adapter; 3472 dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n", 3473 vscsi->dds.window[LOCAL].liobn, 3474 vscsi->dds.window[REMOTE].liobn); 3475 3476 snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name); 3477 3478 vscsi->dds.unit_id = vdev->unit_address; 3479 strscpy(vscsi->dds.partition_name, partition_name, 3480 sizeof(vscsi->dds.partition_name)); 3481 vscsi->dds.partition_num = partition_number; 3482 3483 spin_lock_bh(&ibmvscsis_dev_lock); 3484 list_add_tail(&vscsi->list, &ibmvscsis_dev_list); 3485 spin_unlock_bh(&ibmvscsis_dev_lock); 3486 3487 /* 3488 * TBD: How do we determine # of cmds to request? Do we know how 3489 * many "children" we have? 3490 */ 3491 vscsi->request_limit = INITIAL_SRP_LIMIT; 3492 rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit, 3493 SRP_MAX_IU_LEN); 3494 if (rc) 3495 goto rem_list; 3496 3497 vscsi->target.ldata = vscsi; 3498 3499 rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit); 3500 if (rc) { 3501 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n", 3502 rc, vscsi->request_limit); 3503 goto free_target; 3504 } 3505 3506 /* 3507 * Note: the lock is used in freeing timers, so must initialize 3508 * first so that ordering in case of error is correct. 
3509 */ 3510 spin_lock_init(&vscsi->intr_lock); 3511 3512 rc = ibmvscsis_alloctimer(vscsi); 3513 if (rc) { 3514 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc); 3515 goto free_cmds; 3516 } 3517 3518 rc = ibmvscsis_create_command_q(vscsi, 256); 3519 if (rc) { 3520 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n", 3521 rc); 3522 goto free_timer; 3523 } 3524 3525 vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 3526 if (!vscsi->map_buf) { 3527 rc = -ENOMEM; 3528 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n"); 3529 goto destroy_queue; 3530 } 3531 3532 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE, 3533 DMA_BIDIRECTIONAL); 3534 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) { 3535 rc = -ENOMEM; 3536 dev_err(&vscsi->dev, "probe: error mapping command buffer\n"); 3537 goto free_buf; 3538 } 3539 3540 hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, 3541 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, 3542 0); 3543 if (hrc == H_SUCCESS) 3544 vscsi->client_data.partition_number = 3545 be64_to_cpu(*(u64 *)vscsi->map_buf); 3546 /* 3547 * We expect the VIOCTL to fail if we're configured as "any 3548 * client can connect" and the client isn't activated yet. 3549 * We'll make the call again when he sends an init msg. 
3550 */ 3551 dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n", 3552 hrc, vscsi->client_data.partition_number); 3553 3554 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq, 3555 (unsigned long)vscsi); 3556 3557 init_completion(&vscsi->wait_idle); 3558 init_completion(&vscsi->unconfig); 3559 3560 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); 3561 vscsi->work_q = create_workqueue(wq_name); 3562 if (!vscsi->work_q) { 3563 rc = -ENOMEM; 3564 dev_err(&vscsi->dev, "create_workqueue failed\n"); 3565 goto unmap_buf; 3566 } 3567 3568 rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi); 3569 if (rc) { 3570 rc = -EPERM; 3571 dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc); 3572 goto destroy_WQ; 3573 } 3574 3575 vscsi->state = WAIT_ENABLED; 3576 3577 dev_set_drvdata(&vdev->dev, vscsi); 3578 3579 return 0; 3580 3581 destroy_WQ: 3582 destroy_workqueue(vscsi->work_q); 3583 unmap_buf: 3584 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, 3585 DMA_BIDIRECTIONAL); 3586 free_buf: 3587 kfree(vscsi->map_buf); 3588 destroy_queue: 3589 tasklet_kill(&vscsi->work_task); 3590 ibmvscsis_unregister_command_q(vscsi); 3591 ibmvscsis_destroy_command_q(vscsi); 3592 free_timer: 3593 ibmvscsis_freetimer(vscsi); 3594 free_cmds: 3595 ibmvscsis_free_cmds(vscsi); 3596 free_target: 3597 srp_target_free(&vscsi->target); 3598 rem_list: 3599 spin_lock_bh(&ibmvscsis_dev_lock); 3600 list_del(&vscsi->list); 3601 spin_unlock_bh(&ibmvscsis_dev_lock); 3602 free_adapter: 3603 kfree(vscsi); 3604 3605 return rc; 3606 } 3607 3608 static int ibmvscsis_remove(struct vio_dev *vdev) 3609 { 3610 struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev); 3611 3612 dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); 3613 3614 spin_lock_bh(&vscsi->intr_lock); 3615 ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0); 3616 vscsi->flags |= CFG_SLEEPING; 3617 spin_unlock_bh(&vscsi->intr_lock); 3618 wait_for_completion(&vscsi->unconfig); 3619 
3620 vio_disable_interrupts(vdev); 3621 free_irq(vdev->irq, vscsi); 3622 destroy_workqueue(vscsi->work_q); 3623 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, 3624 DMA_BIDIRECTIONAL); 3625 kfree(vscsi->map_buf); 3626 tasklet_kill(&vscsi->work_task); 3627 ibmvscsis_destroy_command_q(vscsi); 3628 ibmvscsis_freetimer(vscsi); 3629 ibmvscsis_free_cmds(vscsi); 3630 srp_target_free(&vscsi->target); 3631 spin_lock_bh(&ibmvscsis_dev_lock); 3632 list_del(&vscsi->list); 3633 spin_unlock_bh(&ibmvscsis_dev_lock); 3634 kfree(vscsi); 3635 3636 return 0; 3637 } 3638 3639 static ssize_t system_id_show(struct device *dev, 3640 struct device_attribute *attr, char *buf) 3641 { 3642 return snprintf(buf, PAGE_SIZE, "%s\n", system_id); 3643 } 3644 3645 static ssize_t partition_number_show(struct device *dev, 3646 struct device_attribute *attr, char *buf) 3647 { 3648 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number); 3649 } 3650 3651 static ssize_t unit_address_show(struct device *dev, 3652 struct device_attribute *attr, char *buf) 3653 { 3654 struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev); 3655 3656 return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address); 3657 } 3658 3659 static int ibmvscsis_get_system_info(void) 3660 { 3661 struct device_node *rootdn, *vdevdn; 3662 const char *id, *model, *name; 3663 const uint *num; 3664 3665 rootdn = of_find_node_by_path("/"); 3666 if (!rootdn) 3667 return -ENOENT; 3668 3669 model = of_get_property(rootdn, "model", NULL); 3670 id = of_get_property(rootdn, "system-id", NULL); 3671 if (model && id) 3672 snprintf(system_id, sizeof(system_id), "%s-%s", model, id); 3673 3674 name = of_get_property(rootdn, "ibm,partition-name", NULL); 3675 if (name) 3676 strncpy(partition_name, name, sizeof(partition_name)); 3677 3678 num = of_get_property(rootdn, "ibm,partition-no", NULL); 3679 if (num) 3680 partition_number = of_read_number(num, 1); 3681 3682 of_node_put(rootdn); 3683 3684 vdevdn = 
of_find_node_by_path("/vdevice"); 3685 if (vdevdn) { 3686 const uint *mvds; 3687 3688 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size", 3689 NULL); 3690 if (mvds) 3691 max_vdma_size = *mvds; 3692 of_node_put(vdevdn); 3693 } 3694 3695 return 0; 3696 } 3697 3698 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg) 3699 { 3700 struct ibmvscsis_tport *tport = 3701 container_of(se_tpg, struct ibmvscsis_tport, se_tpg); 3702 3703 return tport->tport_name; 3704 } 3705 3706 static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg) 3707 { 3708 struct ibmvscsis_tport *tport = 3709 container_of(se_tpg, struct ibmvscsis_tport, se_tpg); 3710 3711 return tport->tport_tpgt; 3712 } 3713 3714 static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg) 3715 { 3716 return 1; 3717 } 3718 3719 static int ibmvscsis_check_true(struct se_portal_group *se_tpg) 3720 { 3721 return 1; 3722 } 3723 3724 static int ibmvscsis_check_false(struct se_portal_group *se_tpg) 3725 { 3726 return 0; 3727 } 3728 3729 static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg) 3730 { 3731 return 1; 3732 } 3733 3734 static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd) 3735 { 3736 return target_put_sess_cmd(se_cmd); 3737 } 3738 3739 static void ibmvscsis_release_cmd(struct se_cmd *se_cmd) 3740 { 3741 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3742 se_cmd); 3743 struct scsi_info *vscsi = cmd->adapter; 3744 3745 spin_lock_bh(&vscsi->intr_lock); 3746 /* Remove from active_q */ 3747 list_move_tail(&cmd->list, &vscsi->waiting_rsp); 3748 ibmvscsis_send_messages(vscsi); 3749 spin_unlock_bh(&vscsi->intr_lock); 3750 } 3751 3752 static u32 ibmvscsis_sess_get_index(struct se_session *se_sess) 3753 { 3754 return 0; 3755 } 3756 3757 static int ibmvscsis_write_pending(struct se_cmd *se_cmd) 3758 { 3759 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3760 se_cmd); 3761 struct scsi_info *vscsi = cmd->adapter; 3762 
struct iu_entry *iue = cmd->iue; 3763 int rc; 3764 3765 /* 3766 * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success 3767 * since LIO can't do anything about it, and we dont want to 3768 * attempt an srp_transfer_data. 3769 */ 3770 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { 3771 dev_err(&vscsi->dev, "write_pending failed since: %d\n", 3772 vscsi->flags); 3773 return -EIO; 3774 3775 } 3776 3777 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3778 1, 1); 3779 if (rc) { 3780 dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc); 3781 return -EIO; 3782 } 3783 /* 3784 * We now tell TCM to add this WRITE CDB directly into the TCM storage 3785 * object execution queue. 3786 */ 3787 target_execute_cmd(se_cmd); 3788 return 0; 3789 } 3790 3791 static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl) 3792 { 3793 } 3794 3795 static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd) 3796 { 3797 return 0; 3798 } 3799 3800 static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) 3801 { 3802 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3803 se_cmd); 3804 struct iu_entry *iue = cmd->iue; 3805 struct scsi_info *vscsi = cmd->adapter; 3806 char *sd; 3807 uint len = 0; 3808 int rc; 3809 3810 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1, 3811 1); 3812 if (rc) { 3813 dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc); 3814 sd = se_cmd->sense_buffer; 3815 se_cmd->scsi_sense_length = 18; 3816 memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); 3817 /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ 3818 scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR, 3819 0x08, 0x01); 3820 } 3821 3822 srp_build_response(vscsi, cmd, &len); 3823 cmd->rsp.format = SRP_FORMAT; 3824 cmd->rsp.len = len; 3825 3826 return 0; 3827 } 3828 3829 static int ibmvscsis_queue_status(struct se_cmd *se_cmd) 3830 { 3831 struct ibmvscsis_cmd *cmd = container_of(se_cmd, 
struct ibmvscsis_cmd, 3832 se_cmd); 3833 struct scsi_info *vscsi = cmd->adapter; 3834 uint len; 3835 3836 dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd); 3837 3838 srp_build_response(vscsi, cmd, &len); 3839 cmd->rsp.format = SRP_FORMAT; 3840 cmd->rsp.len = len; 3841 3842 return 0; 3843 } 3844 3845 static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) 3846 { 3847 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3848 se_cmd); 3849 struct scsi_info *vscsi = cmd->adapter; 3850 struct ibmvscsis_cmd *cmd_itr; 3851 struct iu_entry *iue = iue = cmd->iue; 3852 struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; 3853 u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag); 3854 uint len; 3855 3856 dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n", 3857 se_cmd, (int)se_cmd->se_tmr_req->response); 3858 3859 if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK && 3860 cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) { 3861 spin_lock_bh(&vscsi->intr_lock); 3862 list_for_each_entry(cmd_itr, &vscsi->active_q, list) { 3863 if (tag_to_abort == cmd_itr->se_cmd.tag) { 3864 cmd_itr->abort_cmd = cmd; 3865 cmd->flags |= DELAY_SEND; 3866 break; 3867 } 3868 } 3869 spin_unlock_bh(&vscsi->intr_lock); 3870 } 3871 3872 srp_build_response(vscsi, cmd, &len); 3873 cmd->rsp.format = SRP_FORMAT; 3874 cmd->rsp.len = len; 3875 } 3876 3877 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd) 3878 { 3879 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3880 se_cmd); 3881 struct scsi_info *vscsi = cmd->adapter; 3882 3883 dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n", 3884 se_cmd, se_cmd->tag); 3885 } 3886 3887 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf, 3888 struct config_group *group, 3889 const char *name) 3890 { 3891 struct ibmvscsis_tport *tport; 3892 struct scsi_info *vscsi; 3893 3894 tport = ibmvscsis_lookup_port(name); 3895 if (tport) { 3896 vscsi = container_of(tport, struct 
				     scsi_info, tport);
		tport->tport_proto_id = SCSI_PROTOCOL_SRP;
		dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
			name, tport, tport->tport_proto_id);
		return &tport->tport_wwn;
	}

	return ERR_PTR(-EINVAL);
}

/* configfs: drop a target port; the adapter itself stays allocated */
static void ibmvscsis_drop_tport(struct se_wwn *wwn)
{
	struct ibmvscsis_tport *tport = container_of(wwn,
						     struct ibmvscsis_tport,
						     tport_wwn);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);

	dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
		config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}

/*
 * configfs: create a target portal group.  The directory name must be
 * "tpgt_<n>" where <n> parses as a u16.
 */
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
						  const char *name)
{
	struct ibmvscsis_tport *tport =
		container_of(wwn, struct ibmvscsis_tport, tport_wwn);
	u16 tpgt;
	int rc;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	/* skip the "tpgt_" prefix (5 chars) and parse the tag */
	rc = kstrtou16(name + 5, 0, &tpgt);
	if (rc)
		return ERR_PTR(rc);
	tport->tport_tpgt = tpgt;

	tport->releasing = false;

	rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
			       tport->tport_proto_id);
	if (rc)
		return ERR_PTR(rc);

	return &tport->se_tpg;
}

/* configfs: tear down a target portal group */
static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	tport->releasing = true;
	tport->enabled = false;

	/*
	 * Release the virtual I_T Nexus for this ibmvscsis TPG
	 */
	ibmvscsis_drop_nexus(tport);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
}

/* configfs: read-only driver version attribute */
static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
					  char *page)
{
	return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
}
CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);

static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
	&ibmvscsis_wwn_attr_version,
	NULL,
};

/* configfs: show whether this TPG is enabled (1) or disabled (0) */
static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
					 char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
}

/*
 * configfs: enable (1) or disable (0) the TPG.  Enabling transitions the
 * adapter state machine; disabling simulates the server going down.
 * Only the values 0 and 1 are accepted.
 */
static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
	unsigned long tmp;
	int rc;
	long lrc;

	rc = kstrtoul(page, 0, &tmp);
	if (rc < 0) {
		dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if (tmp) {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = true;
		lrc = ibmvscsis_enable_change_state(vscsi);
		if (lrc)
			dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
				lrc, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
	} else {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = false;
		/* This simulates the server going down */
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		spin_unlock_bh(&vscsi->intr_lock);
	}

	dev_dbg(&vscsi->dev,
"tpg_enable_store, tmp %ld, state %d\n", tmp, 4024 vscsi->state); 4025 4026 return count; 4027 } 4028 CONFIGFS_ATTR(ibmvscsis_tpg_, enable); 4029 4030 static struct configfs_attribute *ibmvscsis_tpg_attrs[] = { 4031 &ibmvscsis_tpg_attr_enable, 4032 NULL, 4033 }; 4034 4035 static const struct target_core_fabric_ops ibmvscsis_ops = { 4036 .module = THIS_MODULE, 4037 .fabric_name = "ibmvscsis", 4038 .max_data_sg_nents = MAX_TXU / PAGE_SIZE, 4039 .tpg_get_wwn = ibmvscsis_get_fabric_wwn, 4040 .tpg_get_tag = ibmvscsis_get_tag, 4041 .tpg_get_default_depth = ibmvscsis_get_default_depth, 4042 .tpg_check_demo_mode = ibmvscsis_check_true, 4043 .tpg_check_demo_mode_cache = ibmvscsis_check_true, 4044 .tpg_check_demo_mode_write_protect = ibmvscsis_check_false, 4045 .tpg_check_prod_mode_write_protect = ibmvscsis_check_false, 4046 .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index, 4047 .check_stop_free = ibmvscsis_check_stop_free, 4048 .release_cmd = ibmvscsis_release_cmd, 4049 .sess_get_index = ibmvscsis_sess_get_index, 4050 .write_pending = ibmvscsis_write_pending, 4051 .set_default_node_attributes = ibmvscsis_set_default_node_attrs, 4052 .get_cmd_state = ibmvscsis_get_cmd_state, 4053 .queue_data_in = ibmvscsis_queue_data_in, 4054 .queue_status = ibmvscsis_queue_status, 4055 .queue_tm_rsp = ibmvscsis_queue_tm_rsp, 4056 .aborted_task = ibmvscsis_aborted_task, 4057 /* 4058 * Setup function pointers for logic in target_core_fabric_configfs.c 4059 */ 4060 .fabric_make_wwn = ibmvscsis_make_tport, 4061 .fabric_drop_wwn = ibmvscsis_drop_tport, 4062 .fabric_make_tpg = ibmvscsis_make_tpg, 4063 .fabric_drop_tpg = ibmvscsis_drop_tpg, 4064 4065 .tfc_wwn_attrs = ibmvscsis_wwn_attrs, 4066 .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs, 4067 }; 4068 4069 static void ibmvscsis_dev_release(struct device *dev) {}; 4070 4071 static struct device_attribute dev_attr_system_id = 4072 __ATTR(system_id, S_IRUGO, system_id_show, NULL); 4073 4074 static struct device_attribute dev_attr_partition_number = 
4075 __ATTR(partition_number, S_IRUGO, partition_number_show, NULL); 4076 4077 static struct device_attribute dev_attr_unit_address = 4078 __ATTR(unit_address, S_IRUGO, unit_address_show, NULL); 4079 4080 static struct attribute *ibmvscsis_dev_attrs[] = { 4081 &dev_attr_system_id.attr, 4082 &dev_attr_partition_number.attr, 4083 &dev_attr_unit_address.attr, 4084 }; 4085 ATTRIBUTE_GROUPS(ibmvscsis_dev); 4086 4087 static struct class ibmvscsis_class = { 4088 .name = "ibmvscsis", 4089 .dev_release = ibmvscsis_dev_release, 4090 .dev_groups = ibmvscsis_dev_groups, 4091 }; 4092 4093 static const struct vio_device_id ibmvscsis_device_table[] = { 4094 { "v-scsi-host", "IBM,v-scsi-host" }, 4095 { "", "" } 4096 }; 4097 MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table); 4098 4099 static struct vio_driver ibmvscsis_driver = { 4100 .name = "ibmvscsis", 4101 .id_table = ibmvscsis_device_table, 4102 .probe = ibmvscsis_probe, 4103 .remove = ibmvscsis_remove, 4104 }; 4105 4106 /* 4107 * ibmvscsis_init() - Kernel Module initialization 4108 * 4109 * Note: vio_register_driver() registers callback functions, and at least one 4110 * of those callback functions calls TCM - Linux IO Target Subsystem, thus 4111 * the SCSI Target template must be registered before vio_register_driver() 4112 * is called. 
 */
static int __init ibmvscsis_init(void)
{
	int rc = 0;

	rc = ibmvscsis_get_system_info();
	if (rc) {
		pr_err("rc %d from get_system_info\n", rc);
		goto out;
	}

	rc = class_register(&ibmvscsis_class);
	if (rc) {
		pr_err("failed class register\n");
		goto out;
	}

	/* must precede vio_register_driver() - see note above */
	rc = target_register_template(&ibmvscsis_ops);
	if (rc) {
		pr_err("rc %d from target_register_template\n", rc);
		goto unregister_class;
	}

	rc = vio_register_driver(&ibmvscsis_driver);
	if (rc) {
		pr_err("rc %d from vio_register_driver\n", rc);
		goto unregister_target;
	}

	return 0;

	/* unwind in reverse order of registration */
unregister_target:
	target_unregister_template(&ibmvscsis_ops);
unregister_class:
	class_unregister(&ibmvscsis_class);
out:
	return rc;
}

/* Module exit: unregister in reverse order of ibmvscsis_init() */
static void __exit ibmvscsis_exit(void)
{
	pr_info("Unregister IBM virtual SCSI host driver\n");
	vio_unregister_driver(&ibmvscsis_driver);
	target_unregister_template(&ibmvscsis_ops);
	class_unregister(&ibmvscsis_class);
}

MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSIS_VERSION);
module_init(ibmvscsis_init);
module_exit(ibmvscsis_exit);