1 /******************************************************************************* 2 * IBM Virtual SCSI Target Driver 3 * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp. 4 * Santiago Leon (santil@us.ibm.com) IBM Corp. 5 * Linda Xie (lxie@us.ibm.com) IBM Corp. 6 * 7 * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org> 8 * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org> 9 * 10 * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com> 11 * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com> 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 
 *
 ****************************************************************************/

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/string.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hvcall.h>
#include <asm/vio.h>

#include <scsi/viosrp.h>

#include "ibmvscsi_tgt.h"

#define IBMVSCSIS_VERSION	"v0.2"

#define INITIAL_SRP_LIMIT	800
#define DEFAULT_MAX_SECTORS	256

/* Largest payload moved per H_COPY_RDMA hcall; tunable default */
static uint max_vdma_size = MAX_H_COPY_RDMA;

static char system_id[SYS_ID_NAME_LEN] = "";
static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
static uint partition_number = -1;

/* Adapter list and lock to control it */
static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
static LIST_HEAD(ibmvscsis_dev_list);

static long ibmvscsis_parse_command(struct scsi_info *vscsi,
				    struct viosrp_crq *crq);

static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);

/**
 * ibmvscsis_determine_resid() - Fill in SRP residual count fields
 * @se_cmd:	Pointer to the completed target core command
 * @rsp:	Pointer to the SRP response being built
 *
 * Copies the target core residual count into the SRP response,
 * setting the under/overflow flag that matches the data direction.
 * Leaves @rsp untouched when there is no residual.
 */
static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
				      struct srp_rsp *rsp)
{
	u32 residual_count = se_cmd->residual_count;

	if (!residual_count)
		return;

	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an underflow write */
			rsp->flags = SRP_RSP_FLAG_DOUNDER;
			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an underflow read */
			rsp->flags = SRP_RSP_FLAG_DIUNDER;
			rsp->data_in_res_cnt = cpu_to_be32(residual_count);
		}
	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			/* residual data from an overflow write */
			rsp->flags = SRP_RSP_FLAG_DOOVER;
			rsp->data_out_res_cnt = cpu_to_be32(residual_count);
		} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
			/* residual data from an overflow read */
			rsp->flags = SRP_RSP_FLAG_DIOVER;
			rsp->data_in_res_cnt = cpu_to_be32(residual_count);
		}
	}
}

/**
 * connection_broken() - Determine if the connection to the client is good
 * @vscsi:	Pointer to our adapter structure
 *
 * This function attempts to send a ping MAD to the client. If the call to
 * queue the request returns H_CLOSED then the connection has been broken
 * and the function returns TRUE.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process environment
 */
static bool connection_broken(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long h_return_code;
	bool rc = false;

	/* create a PING crq */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = MESSAGE_IN_CRQ;
	crq->status = PING;

	h_return_code = h_send_crq(vscsi->dds.unit_id,
				   cpu_to_be64(buffer[MSG_HI]),
				   cpu_to_be64(buffer[MSG_LOW]));

	pr_debug("connection_broken: rc %ld\n", h_return_code);

	if (h_return_code == H_CLOSED)
		rc = true;

	return rc;
}

/**
 * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_q then frees the interrupt bit etc.
 * It must release the lock before doing so because of the time it can take
 * for h_free_crq in PHYP
 * NOTE: the caller must make sure that state and or flags will prevent
 * interrupt handler from scheduling work.
 * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
 *	 we can't do it here, because we don't have the lock
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
{
	long qrc;
	long rc = ADAPT_SUCCESS;
	int ticks = 0;

	/* Retry h_free_crq until PHYP stops reporting busy, backing off
	 * by the amount PHYP requested via the H_LONG_BUSY_* codes.
	 */
	do {
		qrc = h_free_crq(vscsi->dds.unit_id);
		switch (qrc) {
		case H_SUCCESS:
			break;

		case H_HARDWARE:
		case H_PARAMETER:
			dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
				qrc);
			rc = ERROR;
			break;

		case H_BUSY:
		case H_LONG_BUSY_ORDER_1_MSEC:
			/* msleep not good for small values */
			usleep_range(1000, 2000);
			ticks += 1;
			break;
		case H_LONG_BUSY_ORDER_10_MSEC:
			usleep_range(10000, 20000);
			ticks += 10;
			break;
		case H_LONG_BUSY_ORDER_100_MSEC:
			msleep(100);
			ticks += 100;
			break;
		case H_LONG_BUSY_ORDER_1_SEC:
			ssleep(1);
			ticks += 1000;
			break;
		case H_LONG_BUSY_ORDER_10_SEC:
			ssleep(10);
			ticks += 10000;
			break;
		case H_LONG_BUSY_ORDER_100_SEC:
			ssleep(100);
			ticks += 100000;
			break;
		default:
			dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
				qrc);
			rc = ERROR;
			break;
		}

		/*
		 * don't wait more than 300 seconds;
		 * ticks are in milliseconds more or less
		 */
		if (ticks > 300000 && qrc != H_SUCCESS) {
			rc = ERROR;
			dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
		}
	} while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);

	pr_debug("Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);

	return rc;
}

/**
 * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
 * @vscsi:	Pointer to our adapter structure
 * @client_closed:	True if client closed its queue
 *
 * Deletes information specific to the client when the client goes away
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt or Process
 */
static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
					 bool client_closed)
{
	vscsi->client_cap = 0;

	/*
	 * Some things we don't want to clear if we're closing the queue,
	 * because some clients don't resend the host handshake when they
	 * get a transport event.
	 */
	if (client_closed)
		vscsi->client_data.os_type = 0;
}

/**
 * ibmvscsis_free_command_q() - Free Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls unregister_command_q, then clears interrupts and
 * any pending interrupt acknowledgments associated with the command q.
 * It also clears memory if there is no error.
 *
 * PHYP did not meet the PAPR architecture so that we must give up the
 * lock. This causes a timing hole regarding state change. To close the
 * hole this routine does accounting on any change that occurred during
 * the time the lock is not held.
 * NOTE: must give up and then acquire the interrupt lock, the caller must
 *	 make sure that state and or flags will prevent interrupt handler from
 *	 scheduling work.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level, interrupt lock is held
 */
static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
{
	int bytes;
	u32 flags_under_lock;
	u16 state_under_lock;
	long rc = ADAPT_SUCCESS;

	if (!(vscsi->flags & CRQ_CLOSED)) {
		vio_disable_interrupts(vscsi->dma_dev);

		/* snapshot state/flags so changes made while the lock is
		 * dropped below can be detected and preserved in the
		 * phyp_acr_* fields
		 */
		state_under_lock = vscsi->new_state;
		flags_under_lock = vscsi->flags;
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		spin_unlock_bh(&vscsi->intr_lock);
		rc = ibmvscsis_unregister_command_q(vscsi);
		spin_lock_bh(&vscsi->intr_lock);

		if (state_under_lock != vscsi->new_state)
			vscsi->phyp_acr_state = vscsi->new_state;

		/* record any flag bits that were newly set while unlocked */
		vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);

		if (rc == ADAPT_SUCCESS) {
			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			memset(vscsi->cmd_q.base_addr, 0, bytes);
			vscsi->cmd_q.index = 0;
			vscsi->flags |= CRQ_CLOSED;

			ibmvscsis_delete_client_info(vscsi, false);
		}

		pr_debug("free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			 vscsi->phyp_acr_state);
	}
	return rc;
}

/**
 * ibmvscsis_cmd_q_dequeue() - Get valid Command element
 * @mask:	Mask to use in case index wraps
 * @current_index:	Current index into command queue
 * @base_addr:	Pointer to start of command queue
 *
 * Returns a pointer to a valid command element or NULL, if the command
 * queue is empty
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment, interrupt lock held
 */
static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
						  uint *current_index,
						  struct viosrp_crq *base_addr)
{
	struct viosrp_crq *ptr;

	ptr = base_addr + *current_index;

	if (ptr->valid) {
		*current_index = (*current_index + 1) & mask;
		/*
		 * NOTE(review): barrier is issued after testing 'valid';
		 * presumably it orders reads of the rest of the element
		 * after the valid byte — confirm against memory-barriers.txt
		 */
		dma_rmb();
	} else {
		ptr = NULL;
	}

	return ptr;
}
/**
 * ibmvscsis_send_init_message() - send initialize message to the client
 * @vscsi:	Pointer to our adapter structure
 * @format:	Which Init Message format to send
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt environment interrupt lock held
 */
static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_INIT_MSG;
	crq->format = format;
	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	return rc;
}

/**
 * ibmvscsis_check_init_msg() - Check init message valid
 * @vscsi:	Pointer to our adapter structure
 * @format:	Pointer to return format of Init Message, if any.
 *		Set to UNUSED_FORMAT if no Init Message in queue.
 *
 * Checks if an initialize message was queued by the initiator
 * after the queue was created and before the interrupt was enabled.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only, interrupt lock held
 */
static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
{
	struct viosrp_crq *crq;
	long rc = ADAPT_SUCCESS;

	crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
				      vscsi->cmd_q.base_addr);
	if (!crq) {
		*format = (uint)UNUSED_FORMAT;
	} else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
		*format = (uint)INIT_MSG;
		crq->valid = INVALIDATE_CMD_RESP_EL;
		dma_rmb();

		/*
		 * the caller has ensured no initialize message was
		 * sent after the queue was
		 * created so there should be no other message on the queue.
		 */
		crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
					      &vscsi->cmd_q.index,
					      vscsi->cmd_q.base_addr);
		if (crq) {
			*format = (uint)(crq->format);
			rc = ERROR;
			crq->valid = INVALIDATE_CMD_RESP_EL;
			dma_rmb();
		}
	} else {
		/* any other element is a protocol violation */
		*format = (uint)(crq->format);
		rc = ERROR;
		crq->valid = INVALIDATE_CMD_RESP_EL;
		dma_rmb();
	}

	return rc;
}

/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi:	Pointer to our adapter structure
 * @new_state:	New state being established after resetting the queue
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_establish_new_q(struct scsi_info *vscsi, uint new_state)
{
	long rc = ADAPT_SUCCESS;
	uint format;

	vscsi->flags &= PRESERVE_FLAG_FIELDS;
	vscsi->rsp_q_timer.timer_pops = 0;
	vscsi->debit = 0;
	vscsi->credit = 0;

	rc = vio_enable_interrupts(vscsi->dma_dev);
	if (rc) {
		/* NOTE(review): log prefix says "reset_queue" although this
		 * is establish_new_q — kept for log continuity with callers
		 */
		pr_warn("reset_queue: failed to enable interrupts, rc %ld\n",
			rc);
		return rc;
	}

	rc = ibmvscsis_check_init_msg(vscsi, &format);
	if (rc) {
		dev_err(&vscsi->dev, "reset_queue: check_init_msg failed, rc %ld\n",
			rc);
		return rc;
	}

	if (format == UNUSED_FORMAT && new_state == WAIT_CONNECTION) {
		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
		switch (rc) {
		case H_SUCCESS:
		case H_DROPPED:
		case H_CLOSED:
			rc = ADAPT_SUCCESS;
			break;

		case H_PARAMETER:
		case H_HARDWARE:
			break;

		default:
			vscsi->state = UNDEFINED;
			rc = H_HARDWARE;
			break;
		}
	}

	return rc;
}

/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi:	Pointer to our adapter structure
 * @new_state:	New state to establish after resetting the queue
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq. A problem was discovered
 * where one partition would close and reopen his queue, which would
 * cause his partner to get a transport event, which would cause him to
 * close and reopen his queue, which would cause the original partition
 * to get a transport event, etc., etc. To prevent this, we don't
 * actually close our queue if the client initiated the reset, (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is gone)
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment, called with interrupt lock held
 */
static void ibmvscsis_reset_queue(struct scsi_info *vscsi, uint new_state)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	pr_debug("reset_queue: flags 0x%x\n", vscsi->flags);

	/* don't reset, the client did it for us */
	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
		vscsi->flags &= PRESERVE_FLAG_FIELDS;
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		vscsi->state = new_state;
		vio_enable_interrupts(vscsi->dma_dev);
	} else {
		rc = ibmvscsis_free_command_q(vscsi);
		if (rc == ADAPT_SUCCESS) {
			vscsi->state = new_state;

			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			rc = h_reg_crq(vscsi->dds.unit_id,
				       vscsi->cmd_q.crq_token, bytes);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = ibmvscsis_establish_new_q(vscsi,
							       new_state);
			}

			if (rc != ADAPT_SUCCESS) {
				pr_debug("reset_queue: reg_crq rc %ld\n", rc);

				vscsi->state = ERR_DISCONNECTED;
				vscsi->flags |= RESPONSE_Q_DOWN;
				ibmvscsis_free_command_q(vscsi);
			}
		} else {
			vscsi->state = ERR_DISCONNECTED;
			vscsi->flags |= RESPONSE_Q_DOWN;
		}
	}
}

/**
 * ibmvscsis_free_cmd_resources() - Free command resources
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command which is no longer in use
 *
 * Must be called with interrupt lock held.
 */
static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
					 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;

	switch (cmd->type) {
	case TASK_MANAGEMENT:
	case SCSI_CDB:
		/*
		 * When the queue goes down this value is cleared, so it
		 * cannot be cleared in this general purpose function.
		 */
		if (vscsi->debit)
			vscsi->debit -= 1;
		break;
	case ADAPTER_MAD:
		vscsi->flags &= ~PROCESSING_MAD;
		break;
	case UNSET_TYPE:
		break;
	default:
		dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
			cmd->type);
		break;
	}

	cmd->iue = NULL;
	list_add_tail(&cmd->list, &vscsi->free_cmd);
	srp_iu_put(iue);

	/* wake up anyone waiting for the adapter to drain */
	if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
	    list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
		vscsi->flags &= ~WAIT_FOR_IDLE;
		complete(&vscsi->wait_idle);
	}
}

/**
 * ibmvscsis_disconnect() - Helper function to disconnect
 * @work:	Pointer to work_struct, gives access to our adapter structure
 *
 * An error has occurred or the driver received a Transport event,
 * and the driver is requesting that the command queue be de-registered
 * in a safe manner. If there is no outstanding I/O then we can stop the
 * queue. If we are restarting the queue it will be reflected in the
 * state of the adapter.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment
 */
static void ibmvscsis_disconnect(struct work_struct *work)
{
	struct scsi_info *vscsi = container_of(work, struct scsi_info,
					       proc_work);
	u16 new_state;
	bool wait_idle = false;
	long rc = ADAPT_SUCCESS;

	spin_lock_bh(&vscsi->intr_lock);
	new_state = vscsi->new_state;
	vscsi->new_state = 0;

	pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags,
		 vscsi->state);

	/*
	 * check which state we are in and see if we
	 * should transition to the new state
	 */
	switch (vscsi->state) {
	/* Should never be called while in this state. */
	case NO_QUEUE:
	/*
	 * Can never transition from this state;
	 * ignore errors and logout.
	 */
	case UNCONFIGURING:
		break;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECT:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;

	/*
	 * Can transition from this state to unconfiguring
	 * or err disconnect.
	 */
	case ERR_DISCONNECT_RECONNECT:
		switch (new_state) {
		case UNCONFIGURING:
		case ERR_DISCONNECT:
			vscsi->state = new_state;
			break;

		case WAIT_IDLE:
			break;
		default:
			break;
		}
		break;

	/* can transition from this state to UNCONFIGURING */
	case ERR_DISCONNECTED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;

	/*
	 * If this is a transition into an error state,
	 * a client is attempting to establish a connection
	 * and has violated the RPA protocol.
	 * There can be nothing pending on the adapter although
	 * there can be requests in the command queue.
	 */
	case WAIT_ENABLED:
	case PART_UP_WAIT_ENAB:
		switch (new_state) {
		case ERR_DISCONNECT:
			vscsi->flags |= RESPONSE_Q_DOWN;
			vscsi->state = new_state;
			vscsi->flags &= ~(SCHEDULE_DISCONNECT |
					  DISCONNECT_SCHEDULED);
			ibmvscsis_free_command_q(vscsi);
			break;
		case ERR_DISCONNECT_RECONNECT:
			ibmvscsis_reset_queue(vscsi, WAIT_ENABLED);
			break;

		/* should never happen */
		case WAIT_IDLE:
			rc = ERROR;
			dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
				vscsi->state);
			break;
		}
		break;

	case WAIT_IDLE:
		switch (new_state) {
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
			vscsi->state = new_state;
			break;
		}
		break;

	/*
	 * Initiator has not done a successful srp login
	 * or has done a successful srp logout ( adapter was not
	 * busy). In the first case there can be responses queued
	 * waiting for space on the initiators response queue (MAD)
	 * The second case the adapter is idle. Assume the worst case,
	 * i.e. the first case.
	 */
	case WAIT_CONNECTION:
	case CONNECTED:
	case SRP_PROCESSING:
		wait_idle = true;
		vscsi->state = new_state;
		break;

	/* can transition from this state to UNCONFIGURING */
	case UNDEFINED:
		if (new_state == UNCONFIGURING)
			vscsi->state = new_state;
		break;
	default:
		break;
	}

	if (wait_idle) {
		pr_debug("disconnect start wait, active %d, sched %d\n",
			 (int)list_empty(&vscsi->active_q),
			 (int)list_empty(&vscsi->schedule_q));
		if (!list_empty(&vscsi->active_q) ||
		    !list_empty(&vscsi->schedule_q)) {
			vscsi->flags |= WAIT_FOR_IDLE;
			pr_debug("disconnect flags 0x%x\n", vscsi->flags);
			/*
			 * This routine cannot be called with the interrupt
			 * lock held.
			 */
			spin_unlock_bh(&vscsi->intr_lock);
			wait_for_completion(&vscsi->wait_idle);
			spin_lock_bh(&vscsi->intr_lock);
		}
		pr_debug("disconnect stop wait\n");

		ibmvscsis_adapter_idle(vscsi);
	}

	spin_unlock_bh(&vscsi->intr_lock);
}

/**
 * ibmvscsis_post_disconnect() - Schedule the disconnect
 * @vscsi:	Pointer to our adapter structure
 * @new_state:	State to move to after disconnecting
 * @flag_bits:	Flags to turn on in adapter structure
 *
 * If it's already been scheduled, then see if we need to "upgrade"
 * the new state (if the one passed in is more "severe" than the
 * previous one).
 *
 * PRECONDITION:
 *	interrupt lock is held
 */
static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
				      uint flag_bits)
{
	uint state;

	/* check the validity of the new state */
	switch (new_state) {
	case UNCONFIGURING:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	case WAIT_IDLE:
		break;

	default:
		dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
			new_state);
		return;
	}

	vscsi->flags |= flag_bits;

	pr_debug("post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
		 new_state, flag_bits, vscsi->flags, vscsi->state);

	if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
		vscsi->flags |= SCHEDULE_DISCONNECT;
		vscsi->new_state = new_state;

		INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
		(void)queue_work(vscsi->work_q, &vscsi->proc_work);
	} else {
		/* disconnect already scheduled; only upgrade severity */
		if (vscsi->new_state)
			state = vscsi->new_state;
		else
			state = vscsi->state;

		switch (state) {
		case NO_QUEUE:
		case UNCONFIGURING:
			break;

		case ERR_DISCONNECTED:
		case ERR_DISCONNECT:
		case UNDEFINED:
			if (new_state == UNCONFIGURING)
				vscsi->new_state = new_state;
			break;

		case ERR_DISCONNECT_RECONNECT:
			switch (new_state) {
			case UNCONFIGURING:
			case ERR_DISCONNECT:
				vscsi->new_state = new_state;
				break;
			default:
				break;
			}
			break;

		case WAIT_ENABLED:
		case PART_UP_WAIT_ENAB:
		case WAIT_IDLE:
		case WAIT_CONNECTION:
		case CONNECTED:
		case SRP_PROCESSING:
			vscsi->new_state = new_state;
			break;

		default:
			break;
		}
	}

	pr_debug("Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
		 vscsi->flags, vscsi->new_state);
}

/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus. This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_trans_event(struct scsi_info *vscsi,
				  struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	pr_debug("trans_event: format %d, flags 0x%x, state 0x%hx\n",
		 (int)crq->format, vscsi->flags, vscsi->state);

	switch (crq->format) {
	case MIGRATED:
	case PARTNER_FAILED:
	case PARTNER_DEREGISTER:
		ibmvscsis_delete_client_info(vscsi, true);
		break;

	default:
		rc = ERROR;
		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
			(uint)crq->format);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
					  RESPONSE_Q_DOWN);
		break;
	}

	if (rc == ADAPT_SUCCESS) {
		switch (vscsi->state) {
		case NO_QUEUE:
		case ERR_DISCONNECTED:
		case UNDEFINED:
			break;

		case UNCONFIGURING:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;

		case WAIT_ENABLED:
			break;

		case WAIT_CONNECTION:
			break;

		case CONNECTED:
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case PART_UP_WAIT_ENAB:
			vscsi->state = WAIT_ENABLED;
			break;

		case SRP_PROCESSING:
			if ((vscsi->debit > 0) ||
			    !list_empty(&vscsi->schedule_q) ||
			    !list_empty(&vscsi->waiting_rsp) ||
			    !list_empty(&vscsi->active_q)) {
				pr_debug("debit %d, sched %d, wait %d, active %d\n",
					 vscsi->debit,
					 (int)list_empty(&vscsi->schedule_q),
					 (int)list_empty(&vscsi->waiting_rsp),
					 (int)list_empty(&vscsi->active_q));
				pr_warn("connection lost with outstanding work\n");
			} else {
				pr_debug("trans_event: SRP Processing, but no outstanding work\n");
			}

			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;
		}
	}

	/* non-zero when a disconnect is already pending */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	pr_debug("Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
		 vscsi->flags, vscsi->state, rc);

	return rc;
}

/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.
 *
 * EXECUTION ENVIRONMENT:
 *	intr_lock must be held
 */
static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	volatile u8 valid;

	pr_debug("poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
		 vscsi->flags, vscsi->state, vscsi->cmd_q.index);

	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	dma_rmb();

	while (valid) {
poll_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, don't break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a transport event has occurred leave
				 * everything but transport events on the queue
				 */
				pr_debug("poll_cmd_q, ignoring\n");

				/*
				 * need to decrement the queue index so we can
				 * look at the element again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			pr_debug("poll_cmd_q, reenabling interrupts\n");
		}
		/* re-check: an element may have arrived before interrupts
		 * were re-enabled; if so go round again
		 */
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto poll_work;
	}

	pr_debug("Leaving poll_cmd_q: rc %ld\n", rc);
}

/**
 * ibmvscsis_free_cmd_qs() - Free elements in queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Free all of the elements on all queues that are waiting for
 * whatever reason.
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
{
	struct ibmvscsis_cmd *cmd, *nxt;

	pr_debug("free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
		 (int)list_empty(&vscsi->waiting_rsp),
		 vscsi->rsp_q_timer.started);

	list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}
}

/**
 * ibmvscsis_get_free_cmd() - Get free command from list
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
 */
static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
{
	struct ibmvscsis_cmd *cmd = NULL;
	struct iu_entry *iue;

	iue = srp_iu_get(&vscsi->target);
	if (iue) {
		cmd = list_first_entry_or_null(&vscsi->free_cmd,
					       struct ibmvscsis_cmd, list);
		if (cmd) {
			list_del(&cmd->list);
			cmd->iue = iue;
			cmd->type = UNSET_TYPE;
			memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
		} else {
			/* no free command element; give the iu back */
			srp_iu_put(iue);
		}
	}

	return cmd;
}

/**
 * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
 * @vscsi:	Pointer to our adapter structure
 *
 * This function is called when the adapter is idle when the driver
 * is attempting to clear an error condition.
 * The adapter is considered busy if any of its cmd queues
 * are non-empty. This function can be invoked
 * from the off level disconnect function.
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment called with interrupt lock held
 */
static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
{
	int free_qs = false;

	pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags,
		 vscsi->state);

	/* Only need to free qs if we're disconnecting from client */
	if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
		free_qs = true;

	switch (vscsi->state) {
	case ERR_DISCONNECT_RECONNECT:
		ibmvscsis_reset_queue(vscsi, WAIT_CONNECTION);
		pr_debug("adapter_idle, disc_rec: flags 0x%x\n", vscsi->flags);
		break;

	case ERR_DISCONNECT:
		ibmvscsis_free_command_q(vscsi);
		vscsi->flags &= ~DISCONNECT_SCHEDULED;
		vscsi->flags |= RESPONSE_Q_DOWN;
		vscsi->state = ERR_DISCONNECTED;
		pr_debug("adapter_idle, disc: flags 0x%x, state 0x%hx\n",
			 vscsi->flags, vscsi->state);
		break;

	case WAIT_IDLE:
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		if (vscsi->flags & TRANS_EVENT) {
			vscsi->state = WAIT_CONNECTION;
			vscsi->flags &= PRESERVE_FLAG_FIELDS;
		} else {
			vscsi->state = CONNECTED;
			vscsi->flags &= ~DISCONNECT_SCHEDULED;
		}

		pr_debug("adapter_idle, wait: flags 0x%x, state 0x%hx\n",
			 vscsi->flags, vscsi->state);
		ibmvscsis_poll_cmd_q(vscsi);
		break;

	case ERR_DISCONNECTED:
		vscsi->flags &= ~DISCONNECT_SCHEDULED;
		pr_debug("adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
			 vscsi->flags, vscsi->state);
		break;

	default:
		dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
			vscsi->state);
		break;
	}

	if (free_qs)
		ibmvscsis_free_cmd_qs(vscsi);

	/*
	 * There is a timing window where we could lose a disconnect request.
	 * The known path to this window occurs during the DISCONNECT_RECONNECT
	 * case above: reset_queue calls free_command_q, which will release the
	 * interrupt lock. During that time, a new post_disconnect call can be
	 * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
	 * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
	 * will only set the new_state. Now free_command_q reacquires the intr
	 * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
	 * FIELDS), and the disconnect is lost. This is particularly bad when
	 * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
	 * forever.
	 * Fix is that free command queue sets acr state and acr flags if there
	 * is a change under the lock
	 * note free command queue writes to this state it clears it
	 * before releasing the lock, different drivers call the free command
	 * queue different times so don't initialize above
	 */
	if (vscsi->phyp_acr_state != 0) {
		/*
		 * set any bits in flags that may have been cleared by
		 * a call to free command queue in switch statement
		 * or reset queue
		 */
		vscsi->flags |= vscsi->phyp_acr_flags;
		ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
		vscsi->phyp_acr_state = 0;
		vscsi->phyp_acr_flags = 0;

		pr_debug("adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
			 vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
			 vscsi->phyp_acr_state);
	}

	pr_debug("Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
		 vscsi->flags, vscsi->state, vscsi->new_state);
}

/**
 * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element to use to process the request
 * @crq:	Pointer to CRQ entry containing the request
 *
 * Copy the srp information unit from the hosted
 * partition using remote dma
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
				      struct ibmvscsis_cmd *cmd,
				      struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	long rc = 0;
	u16 len;

	len = be16_to_cpu(crq->IU_length);
	if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
		dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return SRP_VIOLATION;
	}

	rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);

	switch (rc) {
	case H_SUCCESS:
		cmd->init_time = mftb();
		iue->remote_token = crq->IU_data_ptr;
		iue->iu_len = len;
		pr_debug("copy_crq: ioba 0x%llx, init_time 0x%llx\n",
			 be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  (RESPONSE_Q_DOWN |
						   CLIENT_FAILED));
		else
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);

		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
			rc);
		break;
	case H_DEST_PARM:
	case H_SOURCE_PARM:
	default:
		dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_adapter_info - Service an Adapter Info Management Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Adapter Info MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt adapter lock is held
 */
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
				   struct iu_entry *iue)
{
	struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
	struct mad_adapter_info_data *info;
	uint flag_bits = 0;
	dma_addr_t token;
	long rc;

	mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);

	if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
				  GFP_KERNEL);
	if (!info) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* Get remote
info */ 1251 rc = h_copy_rdma(be16_to_cpu(mad->common.length), 1252 vscsi->dds.window[REMOTE].liobn, 1253 be64_to_cpu(mad->buffer), 1254 vscsi->dds.window[LOCAL].liobn, token); 1255 1256 if (rc != H_SUCCESS) { 1257 if (rc == H_PERMISSION) { 1258 if (connection_broken(vscsi)) 1259 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 1260 } 1261 pr_warn("adapter_info: h_copy_rdma from client failed, rc %ld\n", 1262 rc); 1263 pr_debug("adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n", 1264 be64_to_cpu(mad->buffer), vscsi->flags, flag_bits); 1265 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 1266 flag_bits); 1267 goto free_dma; 1268 } 1269 1270 /* 1271 * Copy client info, but ignore partition number, which we 1272 * already got from phyp - unless we failed to get it from 1273 * phyp (e.g. if we're running on a p5 system). 1274 */ 1275 if (vscsi->client_data.partition_number == 0) 1276 vscsi->client_data.partition_number = 1277 be32_to_cpu(info->partition_number); 1278 strncpy(vscsi->client_data.srp_version, info->srp_version, 1279 sizeof(vscsi->client_data.srp_version)); 1280 strncpy(vscsi->client_data.partition_name, info->partition_name, 1281 sizeof(vscsi->client_data.partition_name)); 1282 vscsi->client_data.mad_version = be32_to_cpu(info->mad_version); 1283 vscsi->client_data.os_type = be32_to_cpu(info->os_type); 1284 1285 /* Copy our info */ 1286 strncpy(info->srp_version, SRP_VERSION, 1287 sizeof(info->srp_version)); 1288 strncpy(info->partition_name, vscsi->dds.partition_name, 1289 sizeof(info->partition_name)); 1290 info->partition_number = cpu_to_be32(vscsi->dds.partition_num); 1291 info->mad_version = cpu_to_be32(MAD_VERSION_1); 1292 info->os_type = cpu_to_be32(LINUX); 1293 memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu)); 1294 info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE); 1295 1296 dma_wmb(); 1297 rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn, 1298 token, vscsi->dds.window[REMOTE].liobn, 1299 
be64_to_cpu(mad->buffer)); 1300 switch (rc) { 1301 case H_SUCCESS: 1302 break; 1303 1304 case H_SOURCE_PARM: 1305 case H_DEST_PARM: 1306 case H_PERMISSION: 1307 if (connection_broken(vscsi)) 1308 flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); 1309 default: 1310 dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", 1311 rc); 1312 ibmvscsis_post_disconnect(vscsi, 1313 ERR_DISCONNECT_RECONNECT, 1314 flag_bits); 1315 break; 1316 } 1317 1318 free_dma: 1319 dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token); 1320 pr_debug("Leaving adapter_info, rc %ld\n", rc); 1321 1322 return rc; 1323 } 1324 1325 /** 1326 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram 1327 * @vscsi: Pointer to our adapter structure 1328 * @iue: Information Unit containing the Capabilities MAD request 1329 * 1330 * NOTE: if you return an error from this routine you must be 1331 * disconnecting or you will cause a hang 1332 * 1333 * EXECUTION ENVIRONMENT: 1334 * Interrupt called with adapter lock held 1335 */ 1336 static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue) 1337 { 1338 struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities; 1339 struct capabilities *cap; 1340 struct mad_capability_common *common; 1341 dma_addr_t token; 1342 u16 olen, len, status, min_len, cap_len; 1343 u32 flag; 1344 uint flag_bits = 0; 1345 long rc = 0; 1346 1347 olen = be16_to_cpu(mad->common.length); 1348 /* 1349 * struct capabilities hardcodes a couple capabilities after the 1350 * header, but the capabilities can actually be in any order. 
1351 */ 1352 min_len = offsetof(struct capabilities, migration); 1353 if ((olen < min_len) || (olen > PAGE_SIZE)) { 1354 pr_warn("cap_mad: invalid len %d\n", olen); 1355 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1356 return 0; 1357 } 1358 1359 cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token, 1360 GFP_KERNEL); 1361 if (!cap) { 1362 dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n", 1363 iue->target); 1364 mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED); 1365 return 0; 1366 } 1367 rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn, 1368 be64_to_cpu(mad->buffer), 1369 vscsi->dds.window[LOCAL].liobn, token); 1370 if (rc == H_SUCCESS) { 1371 strncpy(cap->name, dev_name(&vscsi->dma_dev->dev), 1372 SRP_MAX_LOC_LEN); 1373 1374 len = olen - min_len; 1375 status = VIOSRP_MAD_SUCCESS; 1376 common = (struct mad_capability_common *)&cap->migration; 1377 1378 while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) { 1379 pr_debug("cap_mad: len left %hd, cap type %d, cap len %hd\n", 1380 len, be32_to_cpu(common->cap_type), 1381 be16_to_cpu(common->length)); 1382 1383 cap_len = be16_to_cpu(common->length); 1384 if (cap_len > len) { 1385 dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n"); 1386 status = VIOSRP_MAD_FAILED; 1387 break; 1388 } 1389 1390 if (cap_len == 0) { 1391 dev_err(&vscsi->dev, "cap_mad: cap len is 0\n"); 1392 status = VIOSRP_MAD_FAILED; 1393 break; 1394 } 1395 1396 switch (common->cap_type) { 1397 default: 1398 pr_debug("cap_mad: unsupported capability\n"); 1399 common->server_support = 0; 1400 flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED); 1401 cap->flags &= ~flag; 1402 break; 1403 } 1404 1405 len = len - cap_len; 1406 common = (struct mad_capability_common *) 1407 ((char *)common + cap_len); 1408 } 1409 1410 mad->common.status = cpu_to_be16(status); 1411 1412 dma_wmb(); 1413 rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token, 1414 vscsi->dds.window[REMOTE].liobn, 1415 be64_to_cpu(mad->buffer)); 1416 1417 
if (rc != H_SUCCESS) { 1418 pr_debug("cap_mad: failed to copy to client, rc %ld\n", 1419 rc); 1420 1421 if (rc == H_PERMISSION) { 1422 if (connection_broken(vscsi)) 1423 flag_bits = (RESPONSE_Q_DOWN | 1424 CLIENT_FAILED); 1425 } 1426 1427 pr_warn("cap_mad: error copying data to client, rc %ld\n", 1428 rc); 1429 ibmvscsis_post_disconnect(vscsi, 1430 ERR_DISCONNECT_RECONNECT, 1431 flag_bits); 1432 } 1433 } 1434 1435 dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token); 1436 1437 pr_debug("Leaving cap_mad, rc %ld, client_cap 0x%x\n", 1438 rc, vscsi->client_cap); 1439 1440 return rc; 1441 } 1442 1443 /** 1444 * ibmvscsis_process_mad() - Service a MAnagement Data gram 1445 * @vscsi: Pointer to our adapter structure 1446 * @iue: Information Unit containing the MAD request 1447 * 1448 * Must be called with interrupt lock held. 1449 */ 1450 static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue) 1451 { 1452 struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad; 1453 struct viosrp_empty_iu *empty; 1454 long rc = ADAPT_SUCCESS; 1455 1456 switch (be32_to_cpu(mad->type)) { 1457 case VIOSRP_EMPTY_IU_TYPE: 1458 empty = &vio_iu(iue)->mad.empty_iu; 1459 vscsi->empty_iu_id = be64_to_cpu(empty->buffer); 1460 vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag); 1461 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); 1462 break; 1463 case VIOSRP_ADAPTER_INFO_TYPE: 1464 rc = ibmvscsis_adapter_info(vscsi, iue); 1465 break; 1466 case VIOSRP_CAPABILITIES_TYPE: 1467 rc = ibmvscsis_cap_mad(vscsi, iue); 1468 break; 1469 case VIOSRP_ENABLE_FAST_FAIL: 1470 if (vscsi->state == CONNECTED) { 1471 vscsi->fast_fail = true; 1472 mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS); 1473 } else { 1474 pr_warn("fast fail mad sent after login\n"); 1475 mad->status = cpu_to_be16(VIOSRP_MAD_FAILED); 1476 } 1477 break; 1478 default: 1479 mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED); 1480 break; 1481 } 1482 1483 return rc; 1484 } 1485 1486 /** 1487 * 
srp_snd_msg_failed() - Handle an error when sending a response
 * @vscsi:	Pointer to our adapter structure
 * @rc:		The return code from the h_send_crq command
 *
 * Must be called with interrupt lock held.
 */
static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
{
	ktime_t kt;

	if (rc != H_DROPPED) {
		/* hard failure (not a full queue): tear down and disconnect */
		ibmvscsis_free_cmd_qs(vscsi);

		if (rc == H_CLOSED)
			vscsi->flags |= CLIENT_FAILED;

		/* don't flag the same problem multiple times */
		if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
			vscsi->flags |= RESPONSE_Q_DOWN;
			if (!(vscsi->state & (ERR_DISCONNECT |
					      ERR_DISCONNECT_RECONNECT |
					      ERR_DISCONNECTED | UNDEFINED))) {
				dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
					vscsi->state, vscsi->flags, rc);
			}
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
		}
		return;
	}

	/*
	 * The response queue is full.
	 * If the server is processing SRP requests, i.e.
	 * the client has successfully done an
	 * SRP_LOGIN, then it will wait forever for room in
	 * the queue.  However if the system admin
	 * is attempting to unconfigure the server then one
	 * or more children will be in a state where
	 * they are being removed. So if there is even one
	 * child being removed then the driver assumes
	 * the system admin is attempting to break the
	 * connection with the client and MAX_TIMER_POPS
	 * is honored.
	 */
	if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
	    (vscsi->state == SRP_PROCESSING)) {
		pr_debug("snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
			 vscsi->flags, (int)vscsi->rsp_q_timer.started,
			 vscsi->rsp_q_timer.timer_pops);

		/*
		 * Check if the timer is running; if it
		 * is not then start it up.
		 */
		if (!vscsi->rsp_q_timer.started) {
			if (vscsi->rsp_q_timer.timer_pops <
			    MAX_TIMER_POPS) {
				/* short retry interval while under budget */
				kt = ktime_set(0, WAIT_NANO_SECONDS);
			} else {
				/*
				 * slide the timeslice if the maximum
				 * timer pops have already happened
				 */
				kt = ktime_set(WAIT_SECONDS, 0);
			}

			vscsi->rsp_q_timer.started = true;
			hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
				      HRTIMER_MODE_REL);
		}
	} else {
		/*
		 * TBD: Do we need to worry about this? Need to get
		 * remove working.
		 */
		/*
		 * waited a long time and it appears the system admin
		 * is bring this driver down
		 */
		vscsi->flags |= RESPONSE_Q_DOWN;
		ibmvscsis_free_cmd_qs(vscsi);
		/*
		 * if the driver is already attempting to disconnect
		 * from the client and has already logged an error
		 * trace this event but don't put it in the error log
		 */
		if (!(vscsi->state & (ERR_DISCONNECT |
				      ERR_DISCONNECT_RECONNECT |
				      ERR_DISCONNECTED | UNDEFINED))) {
			dev_err(&vscsi->dev, "client crq full too long\n");
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  0);
		}
	}
}

/**
 * ibmvscsis_send_messages() - Send a Response
 * @vscsi:	Pointer to our adapter structure
 *
 * Send a response, first checking the waiting queue. Responses are
 * sent in order they are received. If the response cannot be sent,
 * because the client queue is full, it stays on the waiting queue.
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
	u64 msg_hi = 0;
	/* note do not attempt to access the IU_data_ptr with this pointer
	 * it is not valid
	 */
	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
	struct ibmvscsis_cmd *cmd, *nxt;
	struct iu_entry *iue;
	long rc = ADAPT_SUCCESS;

	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
			pr_debug("send_messages cmd %p\n", cmd);

			/* NOTE(review): iue is assigned but not read below */
			iue = cmd->iue;

			crq->valid = VALID_CMD_RESP_EL;
			crq->format = cmd->rsp.format;

			if (cmd->flags & CMD_FAST_FAIL)
				crq->status = VIOSRP_ADAPTER_FAIL;

			crq->IU_length = cpu_to_be16(cmd->rsp.len);

			rc = h_send_crq(vscsi->dma_dev->unit_address,
					be64_to_cpu(msg_hi),
					be64_to_cpu(cmd->rsp.tag));

			pr_debug("send_messages: tag 0x%llx, rc %ld\n",
				 be64_to_cpu(cmd->rsp.tag), rc);

			/* if all ok free up the command element resources */
			if (rc == H_SUCCESS) {
				/* some movement has occurred */
				vscsi->rsp_q_timer.timer_pops = 0;
				list_del(&cmd->list);

				ibmvscsis_free_cmd_resources(vscsi, cmd);
			} else {
				srp_snd_msg_failed(vscsi, rc);
				break;
			}
		}

		if (!rc) {
			/*
			 * The timer could pop with the queue empty. If
			 * this happens, rc will always indicate a
			 * success; clear the pop count.
			 */
			vscsi->rsp_q_timer.timer_pops = 0;
		}
	} else {
		ibmvscsis_free_cmd_qs(vscsi);
	}
}

/* Called with intr lock held */
static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd,
				    struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
	uint flag_bits = 0;
	long rc;

	/* order local buffer writes before the RDMA copy to the client */
	dma_wmb();
	rc = h_copy_rdma(sizeof(struct mad_common),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr));
	if (!rc) {
		cmd->rsp.format = VIOSRP_MAD_FORMAT;
		cmd->rsp.len = sizeof(struct mad_common);
		cmd->rsp.tag = mad->tag;
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		pr_debug("Error sending mad response, rc %ld\n", rc);
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
			rc);

		ibmvscsis_free_cmd_resources(vscsi, cmd);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
	}
}

/**
 * ibmvscsis_mad() - Service a MAnagement Data gram.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to the CRQ entry containing the MAD request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt called with adapter lock held
 */
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct iu_entry *iue;
	struct ibmvscsis_cmd *cmd;
	struct mad_common *mad;
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
		/*
		 * We have not exchanged Init Msgs yet, so this MAD was sent
		 * before the last Transport Event; client will not be
		 * expecting a response.
		 */
	case WAIT_CONNECTION:
		pr_debug("mad: in Wait Connection state, ignoring MAD, flags %d\n",
			 vscsi->flags);
		return ADAPT_SUCCESS;

	case SRP_PROCESSING:
	case CONNECTED:
		break;

		/*
		 * We should never get here while we're in these states.
		 * Just log an error and get out.
		 */
	case UNCONFIGURING:
	case WAIT_IDLE:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	default:
		dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
			vscsi->state);
		return ADAPT_SUCCESS;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return ERROR;
	}
	iue = cmd->iue;
	cmd->type = ADAPTER_MAD;

	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (!rc) {
		mad = (struct mad_common *)&vio_iu(iue)->mad;

		pr_debug("mad: type %d\n", be32_to_cpu(mad->type));

		/*
		 * NOTE(review): be16_to_cpu() returns an unsigned u16, so
		 * this "< 0" comparison can never be true and the
		 * SRP_VIOLATION branch is dead code - confirm the intended
		 * length validation (e.g. a minimum-length check).
		 */
		if (be16_to_cpu(mad->length) < 0) {
			dev_err(&vscsi->dev, "mad: length is < 0\n");
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			rc = SRP_VIOLATION;
		} else {
			rc = ibmvscsis_process_mad(vscsi, iue);
		}

		pr_debug("mad: status %hd, rc %ld\n", be16_to_cpu(mad->status),
			 rc);

		if (!rc)
			ibmvscsis_send_mad_resp(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	pr_debug("Leaving mad, rc %ld\n", rc);
	return rc;
}

/**
 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
	struct format_code *fmt;
	uint flag_bits = 0;
	long rc = ADAPT_SUCCESS;

	memset(rsp, 0, sizeof(struct srp_login_rsp));

	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
	rsp->tag = cmd->rsp.tag;
	rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	fmt = (struct format_code *)&rsp->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;
	vscsi->credit = 0;

	cmd->rsp.len = sizeof(struct srp_login_rsp);

	/* order response buffer writes before the RDMA copy to the client */
	dma_wmb();
	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		break;

	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 * @reason:	The reason the SRP Login is being rejected, per SRP protocol
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd, u32 reason)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
	struct format_code *fmt;
	uint flag_bits = 0;
	long rc = ADAPT_SUCCESS;

	memset(rej, 0, sizeof(*rej));

	rej->opcode = SRP_LOGIN_REJ;
	rej->reason = cpu_to_be32(reason);
	rej->tag = cmd->rsp.tag;
	fmt = (struct format_code *)&rej->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;

	cmd->rsp.len = sizeof(*rej);

	/* order rejection buffer writes before the RDMA copy to the client */
	dma_wmb();
	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}

/* Allocate the I_T nexus (target session) for this tport, if not present */
static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
{
	char *name = tport->tport_name;
	struct ibmvscsis_nexus *nexus;
	int rc;

	if (tport->ibmv_nexus) {
		pr_debug("tport->ibmv_nexus already exists\n");
		return 0;
	}

	nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
	if (!nexus) {
		pr_err("Unable to allocate struct ibmvscsis_nexus\n");
		return -ENOMEM;
	}

	nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
					      TARGET_PROT_NORMAL, name, nexus,
					      NULL);
	if (IS_ERR(nexus->se_sess)) {
		rc = PTR_ERR(nexus->se_sess);
		goto transport_init_fail;
	}

	tport->ibmv_nexus = nexus;

	return 0;

transport_init_fail:
	kfree(nexus);
	return rc;
}

/* Tear down the I_T nexus; returns -ENODEV if none exists */
static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
{
	struct se_session *se_sess;
	struct ibmvscsis_nexus *nexus;

	nexus = tport->ibmv_nexus;
	if (!nexus)
		return -ENODEV;

	se_sess = nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	/*
	 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
	 */
	transport_deregister_session(se_sess);
	tport->ibmv_nexus = NULL;
	kfree(nexus);

	return 0;
}

/**
 * ibmvscsis_srp_login() - Process an SRP Login Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the SRP Login request
 * @crq:	Pointer to CRQ entry containing the SRP Login request
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with interrupt lock held
 */
static long ibmvscsis_srp_login(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd,
				struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
	struct port_id {
		__be64 id_extension;
		__be64 io_guid;
	} *iport, *tport;
	struct format_code *fmt;
	u32 reason = 0x0;
	long rc = ADAPT_SUCCESS;

	iport = (struct port_id *)req->initiator_port_id;
	tport = (struct port_id *)req->target_port_id;
	fmt = (struct format_code *)&req->req_buf_fmt;
	/* validate the login request per SRP; first failure wins */
	if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
		reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
	else if (be32_to_cpu(req->req_it_iu_len) < 64)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
	else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
		 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
		reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
	else if (req->req_flags & SRP_MULTICHAN_MULTI)
		reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
	else if (fmt->buffers & (~SUPPORTED_FORMATS))
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
	/*
	 * NOTE(review): with "|" this condition is always false whenever
	 * SUPPORTED_FORMATS is non-zero; it looks like "&" (no supported
	 * format requested) was intended - TODO confirm against the SRP
	 * descriptor-format negotiation rules before changing.
	 */
	else if ((fmt->buffers | SUPPORTED_FORMATS) == 0)
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;

	if (vscsi->state == SRP_PROCESSING)
		reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;

	rc = ibmvscsis_make_nexus(&vscsi->tport);
	if (rc)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;

	cmd->rsp.format = VIOSRP_SRP_FORMAT;
	cmd->rsp.tag = req->tag;

	pr_debug("srp_login: reason 0x%x\n", reason);

	if (reason)
		rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
	else
		rc = ibmvscsis_login_rsp(vscsi, cmd);

	if (!rc) {
		if (!reason)
			vscsi->state = SRP_PROCESSING;

		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	pr_debug("Leaving srp_login, rc %ld\n", rc);
	return rc;
}

/**
 * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the Implicit Logout request
 * @crq:	Pointer to CRQ entry containing the Implicit Logout request
 *
 * Do the logic to close the I_T nexus.  This function may not
 * behave to specification.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
				   struct ibmvscsis_cmd *cmd,
				   struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
	long rc = ADAPT_SUCCESS;

	/* refuse to log out while commands or responses are outstanding */
	if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
	    !list_empty(&vscsi->waiting_rsp)) {
		dev_err(&vscsi->dev, "i_logout: outstanding work\n");
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
	} else {
		cmd->rsp.format = SRP_FORMAT;
		cmd->rsp.tag = log_out->tag;
		cmd->rsp.len = sizeof(struct mad_common);
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);

		ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
	}

	return rc;
}

/* Called with intr lock held */
static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct ibmvscsis_cmd *cmd;
	struct iu_entry *iue;
	struct srp_cmd *srp;
	struct srp_tsk_mgmt *tsk;
	long rc;

	if (vscsi->request_limit - vscsi->debit <= 0) {
		/* Client has exceeded request limit */
		dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
			vscsi->request_limit, vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}
	iue = cmd->iue;
	srp = &vio_iu(iue)->srp.cmd;

	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (rc) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		return;
	}

	if (vscsi->state == SRP_PROCESSING) {
		switch (srp->opcode) {
		case SRP_LOGIN_REQ:
			rc = ibmvscsis_srp_login(vscsi, cmd, crq);
			break;

		case SRP_TSK_MGMT:
			tsk = &vio_iu(iue)->srp.tsk_mgmt;
			pr_debug("tsk_mgmt tag: %llu (0x%llx)\n", tsk->tag,
				 tsk->tag);
			cmd->rsp.tag = tsk->tag;
			vscsi->debit += 1;
			cmd->type = TASK_MANAGEMENT;
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_CMD:
			pr_debug("srp_cmd tag: %llu (0x%llx)\n", srp->tag,
				 srp->tag);
			cmd->rsp.tag = srp->tag;
			vscsi->debit += 1;
			cmd->type = SCSI_CDB;
			/*
			 * We want to keep track of work waiting for
			 * the workqueue.
			 */
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_I_LOGOUT:
			rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
			break;

		case SRP_CRED_RSP:
		case SRP_AER_RSP:
		default:
			ibmvscsis_free_cmd_resources(vscsi, cmd);
			dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
				(uint)srp->opcode);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
	} else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
		/* only a login request is legal before SRP_PROCESSING */
		rc = ibmvscsis_srp_login(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}
}

/**
 * ibmvscsis_ping_response() - Respond to a ping request
 * @vscsi:	Pointer to our adapter structure
 *
 * Let the client know that the server is alive and waiting on
 * its native I/O stack.
 * If any type of error occurs from the call to queue a ping
 * response then the client is either not accepting or receiving
 * interrupts. Disconnect with an error.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_ping_response(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	/* build the 16-byte CRQ element in a local buffer */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = (u8)MESSAGE_IN_CRQ;
	crq->status = PING_RESPONSE;

	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_CLOSED:
		vscsi->flags |= CLIENT_FAILED;
		/* fall through - a closed queue is also down */
	case H_DROPPED:
		vscsi->flags |= RESPONSE_Q_DOWN;
		/* fall through - all three cases disconnect/reconnect */
	case H_REMOTE_PARM:
		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	default:
		dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		break;
	}

	return rc;
}

/**
 * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
 * @vscsi:	Pointer to our adapter structure
 *
 * Must be called with interrupt lock held.
2194 */ 2195 static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi) 2196 { 2197 long rc = ADAPT_SUCCESS; 2198 2199 switch (vscsi->state) { 2200 case NO_QUEUE: 2201 case ERR_DISCONNECT: 2202 case ERR_DISCONNECT_RECONNECT: 2203 case ERR_DISCONNECTED: 2204 case UNCONFIGURING: 2205 case UNDEFINED: 2206 rc = ERROR; 2207 break; 2208 2209 case WAIT_CONNECTION: 2210 vscsi->state = CONNECTED; 2211 break; 2212 2213 case WAIT_IDLE: 2214 case SRP_PROCESSING: 2215 case CONNECTED: 2216 case WAIT_ENABLED: 2217 case PART_UP_WAIT_ENAB: 2218 default: 2219 rc = ERROR; 2220 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n", 2221 vscsi->state); 2222 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2223 break; 2224 } 2225 2226 return rc; 2227 } 2228 2229 /** 2230 * ibmvscsis_handle_init_msg() - Respond to an Init Message 2231 * @vscsi: Pointer to our adapter structure 2232 * 2233 * Must be called with interrupt lock held. 2234 */ 2235 static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi) 2236 { 2237 long rc = ADAPT_SUCCESS; 2238 2239 switch (vscsi->state) { 2240 case WAIT_ENABLED: 2241 vscsi->state = PART_UP_WAIT_ENAB; 2242 break; 2243 2244 case WAIT_CONNECTION: 2245 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); 2246 switch (rc) { 2247 case H_SUCCESS: 2248 vscsi->state = CONNECTED; 2249 break; 2250 2251 case H_PARAMETER: 2252 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", 2253 rc); 2254 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0); 2255 break; 2256 2257 case H_DROPPED: 2258 dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n", 2259 rc); 2260 rc = ERROR; 2261 ibmvscsis_post_disconnect(vscsi, 2262 ERR_DISCONNECT_RECONNECT, 0); 2263 break; 2264 2265 case H_CLOSED: 2266 pr_warn("init_msg: failed to send, rc %ld\n", rc); 2267 rc = 0; 2268 break; 2269 } 2270 break; 2271 2272 case UNDEFINED: 2273 rc = ERROR; 2274 break; 2275 2276 case UNCONFIGURING: 2277 break; 2278 2279 case PART_UP_WAIT_ENAB: 
2280 case CONNECTED: 2281 case SRP_PROCESSING: 2282 case WAIT_IDLE: 2283 case NO_QUEUE: 2284 case ERR_DISCONNECT: 2285 case ERR_DISCONNECT_RECONNECT: 2286 case ERR_DISCONNECTED: 2287 default: 2288 rc = ERROR; 2289 dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n", 2290 vscsi->state); 2291 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2292 break; 2293 } 2294 2295 return rc; 2296 } 2297 2298 /** 2299 * ibmvscsis_init_msg() - Respond to an init message 2300 * @vscsi: Pointer to our adapter structure 2301 * @crq: Pointer to CRQ element containing the Init Message 2302 * 2303 * EXECUTION ENVIRONMENT: 2304 * Interrupt, interrupt lock held 2305 */ 2306 static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq) 2307 { 2308 long rc = ADAPT_SUCCESS; 2309 2310 pr_debug("init_msg: state 0x%hx\n", vscsi->state); 2311 2312 rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, 2313 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, 2314 0); 2315 if (rc == H_SUCCESS) { 2316 vscsi->client_data.partition_number = 2317 be64_to_cpu(*(u64 *)vscsi->map_buf); 2318 pr_debug("init_msg, part num %d\n", 2319 vscsi->client_data.partition_number); 2320 } else { 2321 pr_debug("init_msg h_vioctl rc %ld\n", rc); 2322 rc = ADAPT_SUCCESS; 2323 } 2324 2325 if (crq->format == INIT_MSG) { 2326 rc = ibmvscsis_handle_init_msg(vscsi); 2327 } else if (crq->format == INIT_COMPLETE_MSG) { 2328 rc = ibmvscsis_handle_init_compl_msg(vscsi); 2329 } else { 2330 rc = ERROR; 2331 dev_err(&vscsi->dev, "init_msg: invalid format %d\n", 2332 (uint)crq->format); 2333 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2334 } 2335 2336 return rc; 2337 } 2338 2339 /** 2340 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue. 
2341 * @vscsi: Pointer to our adapter structure 2342 * @crq: Pointer to CRQ element containing the SRP request 2343 * 2344 * This function will return success if the command queue element is valid 2345 * and the srp iu or MAD request it pointed to was also valid. That does 2346 * not mean that an error was not returned to the client. 2347 * 2348 * EXECUTION ENVIRONMENT: 2349 * Interrupt, intr lock held 2350 */ 2351 static long ibmvscsis_parse_command(struct scsi_info *vscsi, 2352 struct viosrp_crq *crq) 2353 { 2354 long rc = ADAPT_SUCCESS; 2355 2356 switch (crq->valid) { 2357 case VALID_CMD_RESP_EL: 2358 switch (crq->format) { 2359 case OS400_FORMAT: 2360 case AIX_FORMAT: 2361 case LINUX_FORMAT: 2362 case MAD_FORMAT: 2363 if (vscsi->flags & PROCESSING_MAD) { 2364 rc = ERROR; 2365 dev_err(&vscsi->dev, "parse_command: already processing mad\n"); 2366 ibmvscsis_post_disconnect(vscsi, 2367 ERR_DISCONNECT_RECONNECT, 2368 0); 2369 } else { 2370 vscsi->flags |= PROCESSING_MAD; 2371 rc = ibmvscsis_mad(vscsi, crq); 2372 } 2373 break; 2374 2375 case SRP_FORMAT: 2376 ibmvscsis_srp_cmd(vscsi, crq); 2377 break; 2378 2379 case MESSAGE_IN_CRQ: 2380 if (crq->status == PING) 2381 ibmvscsis_ping_response(vscsi); 2382 break; 2383 2384 default: 2385 dev_err(&vscsi->dev, "parse_command: invalid format %d\n", 2386 (uint)crq->format); 2387 ibmvscsis_post_disconnect(vscsi, 2388 ERR_DISCONNECT_RECONNECT, 0); 2389 break; 2390 } 2391 break; 2392 2393 case VALID_TRANS_EVENT: 2394 rc = ibmvscsis_trans_event(vscsi, crq); 2395 break; 2396 2397 case VALID_INIT_MSG: 2398 rc = ibmvscsis_init_msg(vscsi, crq); 2399 break; 2400 2401 default: 2402 dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n", 2403 (uint)crq->valid); 2404 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2405 break; 2406 } 2407 2408 /* 2409 * Return only what the interrupt handler cares 2410 * about. Most errors we keep right on trucking. 
2411 */ 2412 rc = vscsi->flags & SCHEDULE_DISCONNECT; 2413 2414 return rc; 2415 } 2416 2417 static int read_dma_window(struct scsi_info *vscsi) 2418 { 2419 struct vio_dev *vdev = vscsi->dma_dev; 2420 const __be32 *dma_window; 2421 const __be32 *prop; 2422 2423 /* TODO Using of_parse_dma_window would be better, but it doesn't give 2424 * a way to read multiple windows without already knowing the size of 2425 * a window or the number of windows. 2426 */ 2427 dma_window = (const __be32 *)vio_get_attribute(vdev, 2428 "ibm,my-dma-window", 2429 NULL); 2430 if (!dma_window) { 2431 pr_err("Couldn't find ibm,my-dma-window property\n"); 2432 return -1; 2433 } 2434 2435 vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window); 2436 dma_window++; 2437 2438 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells", 2439 NULL); 2440 if (!prop) { 2441 pr_warn("Couldn't find ibm,#dma-address-cells property\n"); 2442 dma_window++; 2443 } else { 2444 dma_window += be32_to_cpu(*prop); 2445 } 2446 2447 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells", 2448 NULL); 2449 if (!prop) { 2450 pr_warn("Couldn't find ibm,#dma-size-cells property\n"); 2451 dma_window++; 2452 } else { 2453 dma_window += be32_to_cpu(*prop); 2454 } 2455 2456 /* dma_window should point to the second window now */ 2457 vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window); 2458 2459 return 0; 2460 } 2461 2462 static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name) 2463 { 2464 struct ibmvscsis_tport *tport = NULL; 2465 struct vio_dev *vdev; 2466 struct scsi_info *vscsi; 2467 2468 spin_lock_bh(&ibmvscsis_dev_lock); 2469 list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) { 2470 vdev = vscsi->dma_dev; 2471 if (!strcmp(dev_name(&vdev->dev), name)) { 2472 tport = &vscsi->tport; 2473 break; 2474 } 2475 } 2476 spin_unlock_bh(&ibmvscsis_dev_lock); 2477 2478 return tport; 2479 } 2480 2481 /** 2482 * ibmvscsis_parse_cmd() - Parse SRP Command 2483 * @vscsi: Pointer to 
our adapter structure 2484 * @cmd: Pointer to command element with SRP command 2485 * 2486 * Parse the srp command; if it is valid then submit it to tcm. 2487 * Note: The return code does not reflect the status of the SCSI CDB. 2488 * 2489 * EXECUTION ENVIRONMENT: 2490 * Process level 2491 */ 2492 static void ibmvscsis_parse_cmd(struct scsi_info *vscsi, 2493 struct ibmvscsis_cmd *cmd) 2494 { 2495 struct iu_entry *iue = cmd->iue; 2496 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; 2497 struct ibmvscsis_nexus *nexus; 2498 u64 data_len = 0; 2499 enum dma_data_direction dir; 2500 int attr = 0; 2501 int rc = 0; 2502 2503 nexus = vscsi->tport.ibmv_nexus; 2504 /* 2505 * additional length in bytes. Note that the SRP spec says that 2506 * additional length is in 4-byte words, but technically the 2507 * additional length field is only the upper 6 bits of the byte. 2508 * The lower 2 bits are reserved. If the lower 2 bits are 0 (as 2509 * all reserved fields should be), then interpreting the byte as 2510 * an int will yield the length in bytes. 
2511 */ 2512 if (srp->add_cdb_len & 0x03) { 2513 dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n"); 2514 spin_lock_bh(&vscsi->intr_lock); 2515 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2516 ibmvscsis_free_cmd_resources(vscsi, cmd); 2517 spin_unlock_bh(&vscsi->intr_lock); 2518 return; 2519 } 2520 2521 if (srp_get_desc_table(srp, &dir, &data_len)) { 2522 dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n", 2523 srp->tag); 2524 goto fail; 2525 return; 2526 } 2527 2528 cmd->rsp.sol_not = srp->sol_not; 2529 2530 switch (srp->task_attr) { 2531 case SRP_SIMPLE_TASK: 2532 attr = TCM_SIMPLE_TAG; 2533 break; 2534 case SRP_ORDERED_TASK: 2535 attr = TCM_ORDERED_TAG; 2536 break; 2537 case SRP_HEAD_TASK: 2538 attr = TCM_HEAD_TAG; 2539 break; 2540 case SRP_ACA_TASK: 2541 attr = TCM_ACA_TAG; 2542 break; 2543 default: 2544 dev_err(&vscsi->dev, "Invalid task attribute %d\n", 2545 srp->task_attr); 2546 goto fail; 2547 } 2548 2549 cmd->se_cmd.tag = be64_to_cpu(srp->tag); 2550 2551 spin_lock_bh(&vscsi->intr_lock); 2552 list_add_tail(&cmd->list, &vscsi->active_q); 2553 spin_unlock_bh(&vscsi->intr_lock); 2554 2555 srp->lun.scsi_lun[0] &= 0x3f; 2556 2557 pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n", 2558 &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0], 2559 attr); 2560 2561 rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb, 2562 cmd->sense_buf, scsilun_to_int(&srp->lun), 2563 data_len, attr, dir, 0); 2564 if (rc) { 2565 dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc); 2566 goto fail; 2567 } 2568 return; 2569 2570 fail: 2571 spin_lock_bh(&vscsi->intr_lock); 2572 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2573 spin_unlock_bh(&vscsi->intr_lock); 2574 } 2575 2576 /** 2577 * ibmvscsis_parse_task() - Parse SRP Task Management Request 2578 * @vscsi: Pointer to our adapter structure 2579 * @cmd: Pointer to command element with SRP task management request 2580 * 
2581 * Parse the srp task management request; if it is valid then submit it to tcm. 2582 * Note: The return code does not reflect the status of the task management 2583 * request. 2584 * 2585 * EXECUTION ENVIRONMENT: 2586 * Processor level 2587 */ 2588 static void ibmvscsis_parse_task(struct scsi_info *vscsi, 2589 struct ibmvscsis_cmd *cmd) 2590 { 2591 struct iu_entry *iue = cmd->iue; 2592 struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; 2593 int tcm_type; 2594 u64 tag_to_abort = 0; 2595 int rc = 0; 2596 struct ibmvscsis_nexus *nexus; 2597 2598 nexus = vscsi->tport.ibmv_nexus; 2599 2600 cmd->rsp.sol_not = srp_tsk->sol_not; 2601 2602 switch (srp_tsk->tsk_mgmt_func) { 2603 case SRP_TSK_ABORT_TASK: 2604 tcm_type = TMR_ABORT_TASK; 2605 tag_to_abort = be64_to_cpu(srp_tsk->task_tag); 2606 break; 2607 case SRP_TSK_ABORT_TASK_SET: 2608 tcm_type = TMR_ABORT_TASK_SET; 2609 break; 2610 case SRP_TSK_CLEAR_TASK_SET: 2611 tcm_type = TMR_CLEAR_TASK_SET; 2612 break; 2613 case SRP_TSK_LUN_RESET: 2614 tcm_type = TMR_LUN_RESET; 2615 break; 2616 case SRP_TSK_CLEAR_ACA: 2617 tcm_type = TMR_CLEAR_ACA; 2618 break; 2619 default: 2620 dev_err(&vscsi->dev, "unknown task mgmt func %d\n", 2621 srp_tsk->tsk_mgmt_func); 2622 cmd->se_cmd.se_tmr_req->response = 2623 TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; 2624 rc = -1; 2625 break; 2626 } 2627 2628 if (!rc) { 2629 cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag); 2630 2631 spin_lock_bh(&vscsi->intr_lock); 2632 list_add_tail(&cmd->list, &vscsi->active_q); 2633 spin_unlock_bh(&vscsi->intr_lock); 2634 2635 srp_tsk->lun.scsi_lun[0] &= 0x3f; 2636 2637 pr_debug("calling submit_tmr, func %d\n", 2638 srp_tsk->tsk_mgmt_func); 2639 rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL, 2640 scsilun_to_int(&srp_tsk->lun), srp_tsk, 2641 tcm_type, GFP_KERNEL, tag_to_abort, 0); 2642 if (rc) { 2643 dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n", 2644 rc); 2645 cmd->se_cmd.se_tmr_req->response = 2646 TMR_FUNCTION_REJECTED; 2647 } 2648 } 2649 2650 
if (rc) 2651 transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0); 2652 } 2653 2654 static void ibmvscsis_scheduler(struct work_struct *work) 2655 { 2656 struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd, 2657 work); 2658 struct scsi_info *vscsi = cmd->adapter; 2659 2660 spin_lock_bh(&vscsi->intr_lock); 2661 2662 /* Remove from schedule_q */ 2663 list_del(&cmd->list); 2664 2665 /* Don't submit cmd if we're disconnecting */ 2666 if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) { 2667 ibmvscsis_free_cmd_resources(vscsi, cmd); 2668 2669 /* ibmvscsis_disconnect might be waiting for us */ 2670 if (list_empty(&vscsi->active_q) && 2671 list_empty(&vscsi->schedule_q) && 2672 (vscsi->flags & WAIT_FOR_IDLE)) { 2673 vscsi->flags &= ~WAIT_FOR_IDLE; 2674 complete(&vscsi->wait_idle); 2675 } 2676 2677 spin_unlock_bh(&vscsi->intr_lock); 2678 return; 2679 } 2680 2681 spin_unlock_bh(&vscsi->intr_lock); 2682 2683 switch (cmd->type) { 2684 case SCSI_CDB: 2685 ibmvscsis_parse_cmd(vscsi, cmd); 2686 break; 2687 case TASK_MANAGEMENT: 2688 ibmvscsis_parse_task(vscsi, cmd); 2689 break; 2690 default: 2691 dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n", 2692 cmd->type); 2693 spin_lock_bh(&vscsi->intr_lock); 2694 ibmvscsis_free_cmd_resources(vscsi, cmd); 2695 spin_unlock_bh(&vscsi->intr_lock); 2696 break; 2697 } 2698 } 2699 2700 static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num) 2701 { 2702 struct ibmvscsis_cmd *cmd; 2703 int i; 2704 2705 INIT_LIST_HEAD(&vscsi->free_cmd); 2706 vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd), 2707 GFP_KERNEL); 2708 if (!vscsi->cmd_pool) 2709 return -ENOMEM; 2710 2711 for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num; 2712 i++, cmd++) { 2713 cmd->adapter = vscsi; 2714 INIT_WORK(&cmd->work, ibmvscsis_scheduler); 2715 list_add_tail(&cmd->list, &vscsi->free_cmd); 2716 } 2717 2718 return 0; 2719 } 2720 2721 static void ibmvscsis_free_cmds(struct scsi_info *vscsi) 2722 { 
2723 kfree(vscsi->cmd_pool); 2724 vscsi->cmd_pool = NULL; 2725 INIT_LIST_HEAD(&vscsi->free_cmd); 2726 } 2727 2728 /** 2729 * ibmvscsis_service_wait_q() - Service Waiting Queue 2730 * @timer: Pointer to timer which has expired 2731 * 2732 * This routine is called when the timer pops to service the waiting 2733 * queue. Elements on the queue have completed, their responses have been 2734 * copied to the client, but the client's response queue was full so 2735 * the queue message could not be sent. The routine grabs the proper locks 2736 * and calls send messages. 2737 * 2738 * EXECUTION ENVIRONMENT: 2739 * called at interrupt level 2740 */ 2741 static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer) 2742 { 2743 struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer); 2744 struct scsi_info *vscsi = container_of(p_timer, struct scsi_info, 2745 rsp_q_timer); 2746 2747 spin_lock_bh(&vscsi->intr_lock); 2748 p_timer->timer_pops += 1; 2749 p_timer->started = false; 2750 ibmvscsis_send_messages(vscsi); 2751 spin_unlock_bh(&vscsi->intr_lock); 2752 2753 return HRTIMER_NORESTART; 2754 } 2755 2756 static long ibmvscsis_alloctimer(struct scsi_info *vscsi) 2757 { 2758 struct timer_cb *p_timer; 2759 2760 p_timer = &vscsi->rsp_q_timer; 2761 hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 2762 2763 p_timer->timer.function = ibmvscsis_service_wait_q; 2764 p_timer->started = false; 2765 p_timer->timer_pops = 0; 2766 2767 return ADAPT_SUCCESS; 2768 } 2769 2770 static void ibmvscsis_freetimer(struct scsi_info *vscsi) 2771 { 2772 struct timer_cb *p_timer; 2773 2774 p_timer = &vscsi->rsp_q_timer; 2775 2776 (void)hrtimer_cancel(&p_timer->timer); 2777 2778 p_timer->started = false; 2779 p_timer->timer_pops = 0; 2780 } 2781 2782 static irqreturn_t ibmvscsis_interrupt(int dummy, void *data) 2783 { 2784 struct scsi_info *vscsi = data; 2785 2786 vio_disable_interrupts(vscsi->dma_dev); 2787 tasklet_schedule(&vscsi->work_task); 2788 2789 return 
IRQ_HANDLED; 2790 } 2791 2792 /** 2793 * ibmvscsis_check_q() - Helper function to Check Init Message Valid 2794 * @vscsi: Pointer to our adapter structure 2795 * 2796 * Checks if a initialize message was queued by the initiatior 2797 * while the timing window was open. This function is called from 2798 * probe after the CRQ is created and interrupts are enabled. 2799 * It would only be used by adapters who wait for some event before 2800 * completing the init handshake with the client. For ibmvscsi, this 2801 * event is waiting for the port to be enabled. 2802 * 2803 * EXECUTION ENVIRONMENT: 2804 * Process level only, interrupt lock held 2805 */ 2806 static long ibmvscsis_check_q(struct scsi_info *vscsi) 2807 { 2808 uint format; 2809 long rc; 2810 2811 rc = ibmvscsis_check_init_msg(vscsi, &format); 2812 if (rc) 2813 ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0); 2814 else if (format == UNUSED_FORMAT) 2815 vscsi->state = WAIT_ENABLED; 2816 else 2817 vscsi->state = PART_UP_WAIT_ENAB; 2818 2819 return rc; 2820 } 2821 2822 /** 2823 * ibmvscsis_enable_change_state() - Set new state based on enabled status 2824 * @vscsi: Pointer to our adapter structure 2825 * 2826 * This function determines our new state now that we are enabled. This 2827 * may involve sending an Init Complete message to the client. 2828 * 2829 * Must be called with interrupt lock held. 
2830 */ 2831 static long ibmvscsis_enable_change_state(struct scsi_info *vscsi) 2832 { 2833 long rc = ADAPT_SUCCESS; 2834 2835 handle_state_change: 2836 switch (vscsi->state) { 2837 case WAIT_ENABLED: 2838 rc = ibmvscsis_send_init_message(vscsi, INIT_MSG); 2839 switch (rc) { 2840 case H_SUCCESS: 2841 case H_DROPPED: 2842 case H_CLOSED: 2843 vscsi->state = WAIT_CONNECTION; 2844 rc = ADAPT_SUCCESS; 2845 break; 2846 2847 case H_PARAMETER: 2848 break; 2849 2850 case H_HARDWARE: 2851 break; 2852 2853 default: 2854 vscsi->state = UNDEFINED; 2855 rc = H_HARDWARE; 2856 break; 2857 } 2858 break; 2859 case PART_UP_WAIT_ENAB: 2860 rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG); 2861 switch (rc) { 2862 case H_SUCCESS: 2863 vscsi->state = CONNECTED; 2864 rc = ADAPT_SUCCESS; 2865 break; 2866 2867 case H_DROPPED: 2868 case H_CLOSED: 2869 vscsi->state = WAIT_ENABLED; 2870 goto handle_state_change; 2871 2872 case H_PARAMETER: 2873 break; 2874 2875 case H_HARDWARE: 2876 break; 2877 2878 default: 2879 rc = H_HARDWARE; 2880 break; 2881 } 2882 break; 2883 2884 case WAIT_CONNECTION: 2885 case WAIT_IDLE: 2886 case SRP_PROCESSING: 2887 case CONNECTED: 2888 rc = ADAPT_SUCCESS; 2889 break; 2890 /* should not be able to get here */ 2891 case UNCONFIGURING: 2892 rc = ERROR; 2893 vscsi->state = UNDEFINED; 2894 break; 2895 2896 /* driver should never allow this to happen */ 2897 case ERR_DISCONNECT: 2898 case ERR_DISCONNECT_RECONNECT: 2899 default: 2900 dev_err(&vscsi->dev, "in invalid state %d during enable_change_state\n", 2901 vscsi->state); 2902 rc = ADAPT_SUCCESS; 2903 break; 2904 } 2905 2906 return rc; 2907 } 2908 2909 /** 2910 * ibmvscsis_create_command_q() - Create Command Queue 2911 * @vscsi: Pointer to our adapter structure 2912 * @num_cmds: Currently unused. In the future, may be used to determine 2913 * the size of the CRQ. 
2914 * 2915 * Allocates memory for command queue maps remote memory into an ioba 2916 * initializes the command response queue 2917 * 2918 * EXECUTION ENVIRONMENT: 2919 * Process level only 2920 */ 2921 static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds) 2922 { 2923 long rc = 0; 2924 int pages; 2925 struct vio_dev *vdev = vscsi->dma_dev; 2926 2927 /* We might support multiple pages in the future, but just 1 for now */ 2928 pages = 1; 2929 2930 vscsi->cmd_q.size = pages; 2931 2932 vscsi->cmd_q.base_addr = 2933 (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL); 2934 if (!vscsi->cmd_q.base_addr) 2935 return -ENOMEM; 2936 2937 vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1; 2938 2939 vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev, 2940 vscsi->cmd_q.base_addr, 2941 PAGE_SIZE, DMA_BIDIRECTIONAL); 2942 if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) { 2943 free_page((unsigned long)vscsi->cmd_q.base_addr); 2944 return -ENOMEM; 2945 } 2946 2947 rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, PAGE_SIZE); 2948 if (rc) { 2949 if (rc == H_CLOSED) { 2950 vscsi->state = WAIT_ENABLED; 2951 rc = 0; 2952 } else { 2953 dma_unmap_single(&vdev->dev, vscsi->cmd_q.crq_token, 2954 PAGE_SIZE, DMA_BIDIRECTIONAL); 2955 free_page((unsigned long)vscsi->cmd_q.base_addr); 2956 rc = -ENODEV; 2957 } 2958 } else { 2959 vscsi->state = WAIT_ENABLED; 2960 } 2961 2962 return rc; 2963 } 2964 2965 /** 2966 * ibmvscsis_destroy_command_q - Destroy Command Queue 2967 * @vscsi: Pointer to our adapter structure 2968 * 2969 * Releases memory for command queue and unmaps mapped remote memory. 
2970 * 2971 * EXECUTION ENVIRONMENT: 2972 * Process level only 2973 */ 2974 static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi) 2975 { 2976 dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token, 2977 PAGE_SIZE, DMA_BIDIRECTIONAL); 2978 free_page((unsigned long)vscsi->cmd_q.base_addr); 2979 vscsi->cmd_q.base_addr = NULL; 2980 vscsi->state = NO_QUEUE; 2981 } 2982 2983 static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi, 2984 struct ibmvscsis_cmd *cmd) 2985 { 2986 struct iu_entry *iue = cmd->iue; 2987 struct se_cmd *se_cmd = &cmd->se_cmd; 2988 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf; 2989 struct scsi_sense_hdr sshdr; 2990 u8 rc = se_cmd->scsi_status; 2991 2992 if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb))) 2993 if (scsi_normalize_sense(se_cmd->sense_buffer, 2994 se_cmd->scsi_sense_length, &sshdr)) 2995 if (sshdr.sense_key == HARDWARE_ERROR && 2996 (se_cmd->residual_count == 0 || 2997 se_cmd->residual_count == se_cmd->data_length)) { 2998 rc = NO_SENSE; 2999 cmd->flags |= CMD_FAST_FAIL; 3000 } 3001 3002 return rc; 3003 } 3004 3005 /** 3006 * srp_build_response() - Build an SRP response buffer 3007 * @vscsi: Pointer to our adapter structure 3008 * @cmd: Pointer to command for which to send the response 3009 * @len_p: Where to return the length of the IU response sent. This 3010 * is needed to construct the CRQ response. 3011 * 3012 * Build the SRP response buffer and copy it to the client's memory space. 
 */
static long srp_build_response(struct scsi_info *vscsi,
			       struct ibmvscsis_cmd *cmd, uint *len_p)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_rsp *rsp;
	uint len;
	u32 rsp_code;
	char *data;
	u32 *tsk_status;
	long rc = ADAPT_SUCCESS;

	spin_lock_bh(&vscsi->intr_lock);

	/* The response IU is built in place in the request IU buffer. */
	rsp = &vio_iu(iue)->srp.rsp;
	len = sizeof(*rsp);
	memset(rsp, 0, len);
	data = rsp->data;

	rsp->opcode = SRP_RSP;

	/* Return the accumulated request-limit credit to the client. */
	if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING)
		rsp->req_lim_delta = cpu_to_be32(vscsi->credit);
	else
		rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
	rsp->tag = cmd->rsp.tag;
	rsp->flags = 0;

	if (cmd->type == SCSI_CDB) {
		/* SCSI command response: status, residuals, sense data. */
		rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
		if (rsp->status) {
			pr_debug("build_resp: cmd %p, scsi status %d\n", cmd,
				 (int)rsp->status);
			ibmvscsis_determine_resid(se_cmd, rsp);
			/* append sense data after the fixed response */
			if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
				rsp->sense_data_len =
					cpu_to_be32(se_cmd->scsi_sense_length);
				rsp->flags |= SRP_RSP_FLAG_SNSVALID;
				len += se_cmd->scsi_sense_length;
				memcpy(data, se_cmd->sense_buffer,
				       se_cmd->scsi_sense_length);
			}
			/* error: use the "unsuccessful" solicited
			 * notification bit from the request */
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else if (cmd->flags & CMD_FAST_FAIL) {
			pr_debug("build_resp: cmd %p, fast fail\n", cmd);
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else {
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
		}
	} else {
		/* this is task management */
		rsp->status = 0;
		rsp->resp_data_len = cpu_to_be32(4);
		rsp->flags |= SRP_RSP_FLAG_RSPVALID;

		/* Map the TCM TMR response onto the SRP response code. */
		switch (se_cmd->se_tmr_req->response) {
		case TMR_FUNCTION_COMPLETE:
		case TMR_TASK_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
			break;
		case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
		case TMR_LUN_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		case TMR_FUNCTION_FAILED:
		case TMR_FUNCTION_REJECTED:
		default:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		}

		/* 4-byte response code follows the fixed part of the rsp */
		tsk_status = (u32 *)data;
		*tsk_status = cpu_to_be32(rsp_code);
		data = (char *)(tsk_status + 1);
		len += 4;
	}

	/* make the IU writes visible before the hypervisor copies them out */
	dma_wmb();
	rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		/* credit has been returned in req_lim_delta; reset it */
		vscsi->credit = 0;
		*len_p = len;
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;

		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
			rc, vscsi->flags, vscsi->state);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
			rc);
		break;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	return rc;
}

/*
 * ibmvscsis_rdma() - copy @bytes between the local scatter/gather list
 * and the client's memory descriptors via h_copy_rdma, walking both
 * lists in parallel and limiting each copy to max_vdma_size.
 * Returns 0 on success or a negative/hypervisor error code.
 */
static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
			  int nsg, struct srp_direct_buf *md, int nmd,
			  enum dma_data_direction dir, unsigned int bytes)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_target *target = iue->target;
	struct scsi_info *vscsi = target->ldata;
	struct scatterlist *sgp;
	dma_addr_t client_ioba, server_ioba;
	ulong buf_len;
	ulong client_len, server_len;
	int md_idx;
	long tx_len;
	long rc = 0;

	pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);

	if (bytes == 0)
		return 0;

	sgp = sg;
	client_len = 0;
	server_len = 0;
	md_idx = 0;
	tx_len = bytes;

	do {
		/* advance to the next client descriptor when exhausted */
		if (client_len == 0) {
			if (md_idx >= nmd) {
				dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
				rc = -EIO;
				break;
			}
			client_ioba = be64_to_cpu(md[md_idx].va);
			client_len = be32_to_cpu(md[md_idx].len);
		}
		/* advance to the next s/g element when exhausted */
		if (server_len == 0) {
			if (!sgp) {
				dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
				rc = -EIO;
				break;
			}
			server_ioba = sg_dma_address(sgp);
			server_len = sg_dma_len(sgp);
		}

		/* copy no more than the smallest remaining extent */
		buf_len = tx_len;

		if (buf_len > client_len)
			buf_len = client_len;

		if (buf_len > server_len)
			buf_len = server_len;

		if (buf_len > max_vdma_size)
			buf_len = max_vdma_size;

		if (dir == DMA_TO_DEVICE) {
			/* read from client */
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba);
		} else {
			/* write to client */
			struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;

			/* NOTE(review): dumps outbound data for non-READ
			 * CDBs only — presumably debug aid; confirm intent */
			if (!READ_CMD(srp->cdb))
				print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
						     sg_virt(sgp), buf_len);
			/* The h_copy_rdma will cause phyp, running in another
			 * partition, to read memory, so we need to make sure
			 * the data has been written out, hence these syncs.
			 */
			/* ensure that everything is in memory */
			isync();
			/* ensure that memory has been made visible */
			dma_wmb();
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba);
		}
		switch (rc) {
		case H_SUCCESS:
			break;
		case H_PERMISSION:
		case H_SOURCE_PARM:
		case H_DEST_PARM:
			if (connection_broken(vscsi)) {
				spin_lock_bh(&vscsi->intr_lock);
				vscsi->flags |=
					(RESPONSE_Q_DOWN | CLIENT_FAILED);
				spin_unlock_bh(&vscsi->intr_lock);
			}
			dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
				rc);
			break;

		default:
			dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
				rc);
			break;
		}

		/* on success, consume the copied extent from both sides */
		if (!rc) {
			tx_len -= buf_len;
			if (tx_len) {
				client_len -= buf_len;
				if (client_len == 0)
					md_idx++;
				else
					client_ioba += buf_len;

				server_len -= buf_len;
				if (server_len == 0)
					sgp = sg_next(sgp);
				else
					server_ioba += buf_len;
			} else {
				break;
			}
		}
	} while (!rc);

	return rc;
}

/**
 * ibmvscsis_handle_crq() - Handle CRQ
 * @data:	Pointer to our adapter structure
 *
 * Read the command elements from the command queue and copy the payloads
 * associated with the command elements to local memory and execute the
 * SRP requests.
 *
 * Note: this is an edge triggered interrupt. It can not be shared.
3269 */ 3270 static void ibmvscsis_handle_crq(unsigned long data) 3271 { 3272 struct scsi_info *vscsi = (struct scsi_info *)data; 3273 struct viosrp_crq *crq; 3274 long rc; 3275 bool ack = true; 3276 volatile u8 valid; 3277 3278 spin_lock_bh(&vscsi->intr_lock); 3279 3280 pr_debug("got interrupt\n"); 3281 3282 /* 3283 * if we are in a path where we are waiting for all pending commands 3284 * to complete because we received a transport event and anything in 3285 * the command queue is for a new connection, do nothing 3286 */ 3287 if (TARGET_STOP(vscsi)) { 3288 vio_enable_interrupts(vscsi->dma_dev); 3289 3290 pr_debug("handle_crq, don't process: flags 0x%x, state 0x%hx\n", 3291 vscsi->flags, vscsi->state); 3292 spin_unlock_bh(&vscsi->intr_lock); 3293 return; 3294 } 3295 3296 rc = vscsi->flags & SCHEDULE_DISCONNECT; 3297 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 3298 valid = crq->valid; 3299 dma_rmb(); 3300 3301 while (valid) { 3302 /* 3303 * These are edege triggered interrupts. After dropping out of 3304 * the while loop, the code must check for work since an 3305 * interrupt could be lost, and an elment be left on the queue, 3306 * hence the label. 
3307 */ 3308 cmd_work: 3309 vscsi->cmd_q.index = 3310 (vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask; 3311 3312 if (!rc) { 3313 rc = ibmvscsis_parse_command(vscsi, crq); 3314 } else { 3315 if ((uint)crq->valid == VALID_TRANS_EVENT) { 3316 /* 3317 * must service the transport layer events even 3318 * in an error state, dont break out until all 3319 * the consecutive transport events have been 3320 * processed 3321 */ 3322 rc = ibmvscsis_trans_event(vscsi, crq); 3323 } else if (vscsi->flags & TRANS_EVENT) { 3324 /* 3325 * if a tranport event has occurred leave 3326 * everything but transport events on the queue 3327 */ 3328 pr_debug("handle_crq, ignoring\n"); 3329 3330 /* 3331 * need to decrement the queue index so we can 3332 * look at the elment again 3333 */ 3334 if (vscsi->cmd_q.index) 3335 vscsi->cmd_q.index -= 1; 3336 else 3337 /* 3338 * index is at 0 it just wrapped. 3339 * have it index last element in q 3340 */ 3341 vscsi->cmd_q.index = vscsi->cmd_q.mask; 3342 break; 3343 } 3344 } 3345 3346 crq->valid = INVALIDATE_CMD_RESP_EL; 3347 3348 crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; 3349 valid = crq->valid; 3350 dma_rmb(); 3351 } 3352 3353 if (!rc) { 3354 if (ack) { 3355 vio_enable_interrupts(vscsi->dma_dev); 3356 ack = false; 3357 pr_debug("handle_crq, reenabling interrupts\n"); 3358 } 3359 valid = crq->valid; 3360 dma_rmb(); 3361 if (valid) 3362 goto cmd_work; 3363 } else { 3364 pr_debug("handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n", 3365 vscsi->flags, vscsi->state, vscsi->cmd_q.index); 3366 } 3367 3368 pr_debug("Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n", 3369 (int)list_empty(&vscsi->schedule_q), vscsi->flags, 3370 vscsi->state); 3371 3372 spin_unlock_bh(&vscsi->intr_lock); 3373 } 3374 3375 static int ibmvscsis_probe(struct vio_dev *vdev, 3376 const struct vio_device_id *id) 3377 { 3378 struct scsi_info *vscsi; 3379 int rc = 0; 3380 long hrc = 0; 3381 char wq_name[24]; 3382 3383 vscsi = kzalloc(sizeof(*vscsi), 
GFP_KERNEL); 3384 if (!vscsi) { 3385 rc = -ENOMEM; 3386 pr_err("probe: allocation of adapter failed\n"); 3387 return rc; 3388 } 3389 3390 vscsi->dma_dev = vdev; 3391 vscsi->dev = vdev->dev; 3392 INIT_LIST_HEAD(&vscsi->schedule_q); 3393 INIT_LIST_HEAD(&vscsi->waiting_rsp); 3394 INIT_LIST_HEAD(&vscsi->active_q); 3395 3396 snprintf(vscsi->tport.tport_name, 256, "%s", dev_name(&vdev->dev)); 3397 3398 pr_debug("probe tport_name: %s\n", vscsi->tport.tport_name); 3399 3400 rc = read_dma_window(vscsi); 3401 if (rc) 3402 goto free_adapter; 3403 pr_debug("Probe: liobn 0x%x, riobn 0x%x\n", 3404 vscsi->dds.window[LOCAL].liobn, 3405 vscsi->dds.window[REMOTE].liobn); 3406 3407 strcpy(vscsi->eye, "VSCSI "); 3408 strncat(vscsi->eye, vdev->name, MAX_EYE); 3409 3410 vscsi->dds.unit_id = vdev->unit_address; 3411 3412 spin_lock_bh(&ibmvscsis_dev_lock); 3413 list_add_tail(&vscsi->list, &ibmvscsis_dev_list); 3414 spin_unlock_bh(&ibmvscsis_dev_lock); 3415 3416 /* 3417 * TBD: How do we determine # of cmds to request? Do we know how 3418 * many "children" we have? 3419 */ 3420 vscsi->request_limit = INITIAL_SRP_LIMIT; 3421 rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit, 3422 SRP_MAX_IU_LEN); 3423 if (rc) 3424 goto rem_list; 3425 3426 vscsi->target.ldata = vscsi; 3427 3428 rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit); 3429 if (rc) { 3430 dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n", 3431 rc, vscsi->request_limit); 3432 goto free_target; 3433 } 3434 3435 /* 3436 * Note: the lock is used in freeing timers, so must initialize 3437 * first so that ordering in case of error is correct. 
3438 */ 3439 spin_lock_init(&vscsi->intr_lock); 3440 3441 rc = ibmvscsis_alloctimer(vscsi); 3442 if (rc) { 3443 dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc); 3444 goto free_cmds; 3445 } 3446 3447 rc = ibmvscsis_create_command_q(vscsi, 256); 3448 if (rc) { 3449 dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n", 3450 rc); 3451 goto free_timer; 3452 } 3453 3454 vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); 3455 if (!vscsi->map_buf) { 3456 rc = -ENOMEM; 3457 dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n"); 3458 goto destroy_queue; 3459 } 3460 3461 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE, 3462 DMA_BIDIRECTIONAL); 3463 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) { 3464 dev_err(&vscsi->dev, "probe: error mapping command buffer\n"); 3465 goto free_buf; 3466 } 3467 3468 hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO, 3469 (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0, 3470 0); 3471 if (hrc == H_SUCCESS) 3472 vscsi->client_data.partition_number = 3473 be64_to_cpu(*(u64 *)vscsi->map_buf); 3474 /* 3475 * We expect the VIOCTL to fail if we're configured as "any 3476 * client can connect" and the client isn't activated yet. 3477 * We'll make the call again when he sends an init msg. 
3478 */ 3479 pr_debug("probe hrc %ld, client partition num %d\n", 3480 hrc, vscsi->client_data.partition_number); 3481 3482 tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq, 3483 (unsigned long)vscsi); 3484 3485 init_completion(&vscsi->wait_idle); 3486 3487 snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev)); 3488 vscsi->work_q = create_workqueue(wq_name); 3489 if (!vscsi->work_q) { 3490 rc = -ENOMEM; 3491 dev_err(&vscsi->dev, "create_workqueue failed\n"); 3492 goto unmap_buf; 3493 } 3494 3495 rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi); 3496 if (rc) { 3497 rc = -EPERM; 3498 dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc); 3499 goto destroy_WQ; 3500 } 3501 3502 spin_lock_bh(&vscsi->intr_lock); 3503 vio_enable_interrupts(vdev); 3504 if (rc) { 3505 dev_err(&vscsi->dev, "enabling interrupts failed, rc %d\n", rc); 3506 rc = -ENODEV; 3507 spin_unlock_bh(&vscsi->intr_lock); 3508 goto free_irq; 3509 } 3510 3511 if (ibmvscsis_check_q(vscsi)) { 3512 rc = ERROR; 3513 dev_err(&vscsi->dev, "probe: check_q failed, rc %d\n", rc); 3514 spin_unlock_bh(&vscsi->intr_lock); 3515 goto disable_interrupt; 3516 } 3517 spin_unlock_bh(&vscsi->intr_lock); 3518 3519 dev_set_drvdata(&vdev->dev, vscsi); 3520 3521 return 0; 3522 3523 disable_interrupt: 3524 vio_disable_interrupts(vdev); 3525 free_irq: 3526 free_irq(vdev->irq, vscsi); 3527 destroy_WQ: 3528 destroy_workqueue(vscsi->work_q); 3529 unmap_buf: 3530 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, 3531 DMA_BIDIRECTIONAL); 3532 free_buf: 3533 kfree(vscsi->map_buf); 3534 destroy_queue: 3535 tasklet_kill(&vscsi->work_task); 3536 ibmvscsis_unregister_command_q(vscsi); 3537 ibmvscsis_destroy_command_q(vscsi); 3538 free_timer: 3539 ibmvscsis_freetimer(vscsi); 3540 free_cmds: 3541 ibmvscsis_free_cmds(vscsi); 3542 free_target: 3543 srp_target_free(&vscsi->target); 3544 rem_list: 3545 spin_lock_bh(&ibmvscsis_dev_lock); 3546 list_del(&vscsi->list); 3547 
spin_unlock_bh(&ibmvscsis_dev_lock); 3548 free_adapter: 3549 kfree(vscsi); 3550 3551 return rc; 3552 } 3553 3554 static int ibmvscsis_remove(struct vio_dev *vdev) 3555 { 3556 struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev); 3557 3558 pr_debug("remove (%s)\n", dev_name(&vscsi->dma_dev->dev)); 3559 3560 /* 3561 * TBD: Need to handle if there are commands on the waiting_rsp q 3562 * Actually, can there still be cmds outstanding to tcm? 3563 */ 3564 3565 vio_disable_interrupts(vdev); 3566 free_irq(vdev->irq, vscsi); 3567 destroy_workqueue(vscsi->work_q); 3568 dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE, 3569 DMA_BIDIRECTIONAL); 3570 kfree(vscsi->map_buf); 3571 tasklet_kill(&vscsi->work_task); 3572 ibmvscsis_unregister_command_q(vscsi); 3573 ibmvscsis_destroy_command_q(vscsi); 3574 ibmvscsis_freetimer(vscsi); 3575 ibmvscsis_free_cmds(vscsi); 3576 srp_target_free(&vscsi->target); 3577 spin_lock_bh(&ibmvscsis_dev_lock); 3578 list_del(&vscsi->list); 3579 spin_unlock_bh(&ibmvscsis_dev_lock); 3580 kfree(vscsi); 3581 3582 return 0; 3583 } 3584 3585 static ssize_t system_id_show(struct device *dev, 3586 struct device_attribute *attr, char *buf) 3587 { 3588 return snprintf(buf, PAGE_SIZE, "%s\n", system_id); 3589 } 3590 3591 static ssize_t partition_number_show(struct device *dev, 3592 struct device_attribute *attr, char *buf) 3593 { 3594 return snprintf(buf, PAGE_SIZE, "%x\n", partition_number); 3595 } 3596 3597 static ssize_t unit_address_show(struct device *dev, 3598 struct device_attribute *attr, char *buf) 3599 { 3600 struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev); 3601 3602 return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address); 3603 } 3604 3605 static int ibmvscsis_get_system_info(void) 3606 { 3607 struct device_node *rootdn, *vdevdn; 3608 const char *id, *model, *name; 3609 const uint *num; 3610 3611 rootdn = of_find_node_by_path("/"); 3612 if (!rootdn) 3613 return -ENOENT; 3614 3615 model = of_get_property(rootdn, 
"model", NULL); 3616 id = of_get_property(rootdn, "system-id", NULL); 3617 if (model && id) 3618 snprintf(system_id, sizeof(system_id), "%s-%s", model, id); 3619 3620 name = of_get_property(rootdn, "ibm,partition-name", NULL); 3621 if (name) 3622 strncpy(partition_name, name, sizeof(partition_name)); 3623 3624 num = of_get_property(rootdn, "ibm,partition-no", NULL); 3625 if (num) 3626 partition_number = *num; 3627 3628 of_node_put(rootdn); 3629 3630 vdevdn = of_find_node_by_path("/vdevice"); 3631 if (vdevdn) { 3632 const uint *mvds; 3633 3634 mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size", 3635 NULL); 3636 if (mvds) 3637 max_vdma_size = *mvds; 3638 of_node_put(vdevdn); 3639 } 3640 3641 return 0; 3642 } 3643 3644 static char *ibmvscsis_get_fabric_name(void) 3645 { 3646 return "ibmvscsis"; 3647 } 3648 3649 static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg) 3650 { 3651 struct ibmvscsis_tport *tport = 3652 container_of(se_tpg, struct ibmvscsis_tport, se_tpg); 3653 3654 return tport->tport_name; 3655 } 3656 3657 static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg) 3658 { 3659 struct ibmvscsis_tport *tport = 3660 container_of(se_tpg, struct ibmvscsis_tport, se_tpg); 3661 3662 return tport->tport_tpgt; 3663 } 3664 3665 static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg) 3666 { 3667 return 1; 3668 } 3669 3670 static int ibmvscsis_check_true(struct se_portal_group *se_tpg) 3671 { 3672 return 1; 3673 } 3674 3675 static int ibmvscsis_check_false(struct se_portal_group *se_tpg) 3676 { 3677 return 0; 3678 } 3679 3680 static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg) 3681 { 3682 return 1; 3683 } 3684 3685 static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd) 3686 { 3687 return target_put_sess_cmd(se_cmd); 3688 } 3689 3690 static void ibmvscsis_release_cmd(struct se_cmd *se_cmd) 3691 { 3692 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3693 se_cmd); 3694 struct scsi_info 
*vscsi = cmd->adapter; 3695 3696 pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags); 3697 3698 spin_lock_bh(&vscsi->intr_lock); 3699 /* Remove from active_q */ 3700 list_del(&cmd->list); 3701 list_add_tail(&cmd->list, &vscsi->waiting_rsp); 3702 ibmvscsis_send_messages(vscsi); 3703 spin_unlock_bh(&vscsi->intr_lock); 3704 } 3705 3706 static u32 ibmvscsis_sess_get_index(struct se_session *se_sess) 3707 { 3708 return 0; 3709 } 3710 3711 static int ibmvscsis_write_pending(struct se_cmd *se_cmd) 3712 { 3713 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3714 se_cmd); 3715 struct iu_entry *iue = cmd->iue; 3716 int rc; 3717 3718 pr_debug("write_pending, se_cmd %p, length 0x%x\n", 3719 se_cmd, se_cmd->data_length); 3720 3721 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3722 1, 1); 3723 if (rc) { 3724 pr_err("srp_transfer_data() failed: %d\n", rc); 3725 return -EAGAIN; 3726 } 3727 /* 3728 * We now tell TCM to add this WRITE CDB directly into the TCM storage 3729 * object execution queue. 
3730 */ 3731 target_execute_cmd(se_cmd); 3732 return 0; 3733 } 3734 3735 static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd) 3736 { 3737 return 0; 3738 } 3739 3740 static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl) 3741 { 3742 } 3743 3744 static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd) 3745 { 3746 return 0; 3747 } 3748 3749 static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) 3750 { 3751 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3752 se_cmd); 3753 struct iu_entry *iue = cmd->iue; 3754 struct scsi_info *vscsi = cmd->adapter; 3755 char *sd; 3756 uint len = 0; 3757 int rc; 3758 3759 pr_debug("queue_data_in, se_cmd %p, length 0x%x\n", 3760 se_cmd, se_cmd->data_length); 3761 3762 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1, 3763 1); 3764 if (rc) { 3765 pr_err("srp_transfer_data failed: %d\n", rc); 3766 sd = se_cmd->sense_buffer; 3767 se_cmd->scsi_sense_length = 18; 3768 memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); 3769 /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ 3770 scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR, 3771 0x08, 0x01); 3772 } 3773 3774 srp_build_response(vscsi, cmd, &len); 3775 cmd->rsp.format = SRP_FORMAT; 3776 cmd->rsp.len = len; 3777 3778 return 0; 3779 } 3780 3781 static int ibmvscsis_queue_status(struct se_cmd *se_cmd) 3782 { 3783 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3784 se_cmd); 3785 struct scsi_info *vscsi = cmd->adapter; 3786 uint len; 3787 3788 pr_debug("queue_status %p\n", se_cmd); 3789 3790 srp_build_response(vscsi, cmd, &len); 3791 cmd->rsp.format = SRP_FORMAT; 3792 cmd->rsp.len = len; 3793 3794 return 0; 3795 } 3796 3797 static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) 3798 { 3799 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, 3800 se_cmd); 3801 struct scsi_info *vscsi = cmd->adapter; 3802 uint len; 3803 3804 pr_debug("queue_tm_rsp 
%p, status %d\n", 3805 se_cmd, (int)se_cmd->se_tmr_req->response); 3806 3807 srp_build_response(vscsi, cmd, &len); 3808 cmd->rsp.format = SRP_FORMAT; 3809 cmd->rsp.len = len; 3810 } 3811 3812 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd) 3813 { 3814 /* TBD: What (if anything) should we do here? */ 3815 pr_debug("ibmvscsis_aborted_task %p\n", se_cmd); 3816 } 3817 3818 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf, 3819 struct config_group *group, 3820 const char *name) 3821 { 3822 struct ibmvscsis_tport *tport; 3823 3824 tport = ibmvscsis_lookup_port(name); 3825 if (tport) { 3826 tport->tport_proto_id = SCSI_PROTOCOL_SRP; 3827 pr_debug("make_tport(%s), pointer:%p, tport_id:%x\n", 3828 name, tport, tport->tport_proto_id); 3829 return &tport->tport_wwn; 3830 } 3831 3832 return ERR_PTR(-EINVAL); 3833 } 3834 3835 static void ibmvscsis_drop_tport(struct se_wwn *wwn) 3836 { 3837 struct ibmvscsis_tport *tport = container_of(wwn, 3838 struct ibmvscsis_tport, 3839 tport_wwn); 3840 3841 pr_debug("drop_tport(%s)\n", 3842 config_item_name(&tport->tport_wwn.wwn_group.cg_item)); 3843 } 3844 3845 static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, 3846 struct config_group *group, 3847 const char *name) 3848 { 3849 struct ibmvscsis_tport *tport = 3850 container_of(wwn, struct ibmvscsis_tport, tport_wwn); 3851 int rc; 3852 3853 tport->releasing = false; 3854 3855 rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg, 3856 tport->tport_proto_id); 3857 if (rc) 3858 return ERR_PTR(rc); 3859 3860 return &tport->se_tpg; 3861 } 3862 3863 static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg) 3864 { 3865 struct ibmvscsis_tport *tport = container_of(se_tpg, 3866 struct ibmvscsis_tport, 3867 se_tpg); 3868 3869 tport->releasing = true; 3870 tport->enabled = false; 3871 3872 /* 3873 * Release the virtual I_T Nexus for this ibmvscsis TPG 3874 */ 3875 ibmvscsis_drop_nexus(tport); 3876 /* 3877 * Deregister the se_tpg from 
TCM.. 3878 */ 3879 core_tpg_deregister(se_tpg); 3880 } 3881 3882 static ssize_t ibmvscsis_wwn_version_show(struct config_item *item, 3883 char *page) 3884 { 3885 return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION); 3886 } 3887 CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version); 3888 3889 static struct configfs_attribute *ibmvscsis_wwn_attrs[] = { 3890 &ibmvscsis_wwn_attr_version, 3891 NULL, 3892 }; 3893 3894 static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item, 3895 char *page) 3896 { 3897 struct se_portal_group *se_tpg = to_tpg(item); 3898 struct ibmvscsis_tport *tport = container_of(se_tpg, 3899 struct ibmvscsis_tport, 3900 se_tpg); 3901 3902 return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0); 3903 } 3904 3905 static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item, 3906 const char *page, size_t count) 3907 { 3908 struct se_portal_group *se_tpg = to_tpg(item); 3909 struct ibmvscsis_tport *tport = container_of(se_tpg, 3910 struct ibmvscsis_tport, 3911 se_tpg); 3912 struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport); 3913 unsigned long tmp; 3914 int rc; 3915 long lrc; 3916 3917 rc = kstrtoul(page, 0, &tmp); 3918 if (rc < 0) { 3919 pr_err("Unable to extract srpt_tpg_store_enable\n"); 3920 return -EINVAL; 3921 } 3922 3923 if ((tmp != 0) && (tmp != 1)) { 3924 pr_err("Illegal value for srpt_tpg_store_enable\n"); 3925 return -EINVAL; 3926 } 3927 3928 if (tmp) { 3929 tport->enabled = true; 3930 spin_lock_bh(&vscsi->intr_lock); 3931 lrc = ibmvscsis_enable_change_state(vscsi); 3932 if (lrc) 3933 pr_err("enable_change_state failed, rc %ld state %d\n", 3934 lrc, vscsi->state); 3935 spin_unlock_bh(&vscsi->intr_lock); 3936 } else { 3937 tport->enabled = false; 3938 } 3939 3940 pr_debug("tpg_enable_store, state %d\n", vscsi->state); 3941 3942 return count; 3943 } 3944 CONFIGFS_ATTR(ibmvscsis_tpg_, enable); 3945 3946 static struct configfs_attribute *ibmvscsis_tpg_attrs[] = { 3947 &ibmvscsis_tpg_attr_enable, 3948 NULL, 
3949 }; 3950 3951 static const struct target_core_fabric_ops ibmvscsis_ops = { 3952 .module = THIS_MODULE, 3953 .name = "ibmvscsis", 3954 .get_fabric_name = ibmvscsis_get_fabric_name, 3955 .tpg_get_wwn = ibmvscsis_get_fabric_wwn, 3956 .tpg_get_tag = ibmvscsis_get_tag, 3957 .tpg_get_default_depth = ibmvscsis_get_default_depth, 3958 .tpg_check_demo_mode = ibmvscsis_check_true, 3959 .tpg_check_demo_mode_cache = ibmvscsis_check_true, 3960 .tpg_check_demo_mode_write_protect = ibmvscsis_check_false, 3961 .tpg_check_prod_mode_write_protect = ibmvscsis_check_false, 3962 .tpg_get_inst_index = ibmvscsis_tpg_get_inst_index, 3963 .check_stop_free = ibmvscsis_check_stop_free, 3964 .release_cmd = ibmvscsis_release_cmd, 3965 .sess_get_index = ibmvscsis_sess_get_index, 3966 .write_pending = ibmvscsis_write_pending, 3967 .write_pending_status = ibmvscsis_write_pending_status, 3968 .set_default_node_attributes = ibmvscsis_set_default_node_attrs, 3969 .get_cmd_state = ibmvscsis_get_cmd_state, 3970 .queue_data_in = ibmvscsis_queue_data_in, 3971 .queue_status = ibmvscsis_queue_status, 3972 .queue_tm_rsp = ibmvscsis_queue_tm_rsp, 3973 .aborted_task = ibmvscsis_aborted_task, 3974 /* 3975 * Setup function pointers for logic in target_core_fabric_configfs.c 3976 */ 3977 .fabric_make_wwn = ibmvscsis_make_tport, 3978 .fabric_drop_wwn = ibmvscsis_drop_tport, 3979 .fabric_make_tpg = ibmvscsis_make_tpg, 3980 .fabric_drop_tpg = ibmvscsis_drop_tpg, 3981 3982 .tfc_wwn_attrs = ibmvscsis_wwn_attrs, 3983 .tfc_tpg_base_attrs = ibmvscsis_tpg_attrs, 3984 }; 3985 3986 static void ibmvscsis_dev_release(struct device *dev) {}; 3987 3988 static struct class_attribute ibmvscsis_class_attrs[] = { 3989 __ATTR_NULL, 3990 }; 3991 3992 static struct device_attribute dev_attr_system_id = 3993 __ATTR(system_id, S_IRUGO, system_id_show, NULL); 3994 3995 static struct device_attribute dev_attr_partition_number = 3996 __ATTR(partition_number, S_IRUGO, partition_number_show, NULL); 3997 3998 static struct 
device_attribute dev_attr_unit_address = 3999 __ATTR(unit_address, S_IRUGO, unit_address_show, NULL); 4000 4001 static struct attribute *ibmvscsis_dev_attrs[] = { 4002 &dev_attr_system_id.attr, 4003 &dev_attr_partition_number.attr, 4004 &dev_attr_unit_address.attr, 4005 }; 4006 ATTRIBUTE_GROUPS(ibmvscsis_dev); 4007 4008 static struct class ibmvscsis_class = { 4009 .name = "ibmvscsis", 4010 .dev_release = ibmvscsis_dev_release, 4011 .class_attrs = ibmvscsis_class_attrs, 4012 .dev_groups = ibmvscsis_dev_groups, 4013 }; 4014 4015 static struct vio_device_id ibmvscsis_device_table[] = { 4016 { "v-scsi-host", "IBM,v-scsi-host" }, 4017 { "", "" } 4018 }; 4019 MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table); 4020 4021 static struct vio_driver ibmvscsis_driver = { 4022 .name = "ibmvscsis", 4023 .id_table = ibmvscsis_device_table, 4024 .probe = ibmvscsis_probe, 4025 .remove = ibmvscsis_remove, 4026 }; 4027 4028 /* 4029 * ibmvscsis_init() - Kernel Module initialization 4030 * 4031 * Note: vio_register_driver() registers callback functions, and at least one 4032 * of those callback functions calls TCM - Linux IO Target Subsystem, thus 4033 * the SCSI Target template must be registered before vio_register_driver() 4034 * is called. 
4035 */ 4036 static int __init ibmvscsis_init(void) 4037 { 4038 int rc = 0; 4039 4040 rc = ibmvscsis_get_system_info(); 4041 if (rc) { 4042 pr_err("rc %d from get_system_info\n", rc); 4043 goto out; 4044 } 4045 4046 rc = class_register(&ibmvscsis_class); 4047 if (rc) { 4048 pr_err("failed class register\n"); 4049 goto out; 4050 } 4051 4052 rc = target_register_template(&ibmvscsis_ops); 4053 if (rc) { 4054 pr_err("rc %d from target_register_template\n", rc); 4055 goto unregister_class; 4056 } 4057 4058 rc = vio_register_driver(&ibmvscsis_driver); 4059 if (rc) { 4060 pr_err("rc %d from vio_register_driver\n", rc); 4061 goto unregister_target; 4062 } 4063 4064 return 0; 4065 4066 unregister_target: 4067 target_unregister_template(&ibmvscsis_ops); 4068 unregister_class: 4069 class_unregister(&ibmvscsis_class); 4070 out: 4071 return rc; 4072 } 4073 4074 static void __exit ibmvscsis_exit(void) 4075 { 4076 pr_info("Unregister IBM virtual SCSI host driver\n"); 4077 vio_unregister_driver(&ibmvscsis_driver); 4078 target_unregister_template(&ibmvscsis_ops); 4079 class_unregister(&ibmvscsis_class); 4080 } 4081 4082 MODULE_DESCRIPTION("IBMVSCSIS fabric driver"); 4083 MODULE_AUTHOR("Bryant G. Ly and Michael Cyr"); 4084 MODULE_LICENSE("GPL"); 4085 MODULE_VERSION(IBMVSCSIS_VERSION); 4086 module_init(ibmvscsis_init); 4087 module_exit(ibmvscsis_exit); 4088