/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/*
 * bfad_im.c Linux driver IM module.
 */

#include <linux/export.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"

BFA_TRC_FILE(LDRV, IM);

DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
static int bfad_im_slave_alloc(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
				struct bfad_itnim_s *itnim);

void
bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
		enum bfi_ioim_status io_status, u8 scsi_status,
		int sns_len, u8 *sns_info, s32 residue)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s *bfad = drv;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;
	u8 host_status = DID_OK;

	switch (io_status) {
	case BFI_IOIM_STS_OK:
		bfa_trc(bfad, scsi_status);
		scsi_set_resid(cmnd, 0);

		if (sns_len > 0) {
			bfa_trc(bfad, sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(cmnd->sense_buffer, sns_info, sns_len);
		}

		if (residue > 0) {
			bfa_trc(bfad, residue);
			scsi_set_resid(cmnd, residue);
			if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
				(scsi_bufflen(cmnd) - residue) <
					cmnd->underflow) {
				bfa_trc(bfad, 0);
				host_status = DID_ERROR;
			}
		}
		cmnd->result = host_status << 16 | scsi_status;

		break;

	case BFI_IOIM_STS_TIMEDOUT:
		cmnd->result = DID_TIME_OUT << 16;
		break;
	case BFI_IOIM_STS_PATHTOV:
		cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;
	default:
		cmnd->result = DID_ERROR << 16;
	}

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;
	bfa_trc(bfad, cmnd->result);

	itnim_data = cmnd->device->hostdata;
	if (itnim_data) {
		itnim = itnim_data->itnim;
		if (!cmnd->result && itnim &&
			(bfa_lun_queue_depth > cmnd->device->queue_depth)) {
			/* Queue depth adjustment for good status completion */
			bfad_ramp_up_qdepth(itnim, cmnd->device);
		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
			/* qfull handling */
			bfad_handle_qfull(itnim, cmnd->device);
		}
	}

	cmnd->scsi_done(cmnd);
}
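/*
 * Fast-path completion callback below. As a reference for the result
 * encoding used throughout this file (the standard SCSI midlayer
 * convention), the host byte occupies bits 16-23 of cmnd->result and
 * the SCSI status byte occupies bits 0-7, as in bfa_cb_ioim_done():
 *
 *	cmnd->result = host_status << 16 | scsi_status;
 *
 * bfa_cb_ioim_good_comp() can skip the sense/residue handling done in
 * bfa_cb_ioim_done() because the firmware has already reported a clean
 * completion.
 */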
void
bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;

	cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;

	/* Queue depth adjustment */
	if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
		itnim_data = cmnd->device->hostdata;
		if (itnim_data) {
			itnim = itnim_data->itnim;
			if (itnim)
				bfad_ramp_up_qdepth(itnim, cmnd->device);
		}
	}

	cmnd->scsi_done(cmnd);
}

void
bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s *bfad = drv;

	cmnd->result = DID_ERROR << 16;

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	bfa_trc(bfad, cmnd->result);
	cmnd->host_scribble = NULL;
}

void
bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
		   enum bfi_tskim_status tsk_status)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
	wait_queue_head_t *wq;

	cmnd->SCp.Status |= tsk_status << 1;
	set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
	wq = (wait_queue_head_t *) cmnd->SCp.ptr;
	cmnd->SCp.ptr = NULL;

	if (wq)
		wake_up(wq);
}

/*
 *  Scsi_Host_template SCSI host template
 */
/*
 * Scsi_Host template entry, returns BFAD PCI info.
 */
static const char *
bfad_im_info(struct Scsi_Host *shost)
{
	static char bfa_buf[256];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;

	memset(bfa_buf, 0, sizeof(bfa_buf));
	snprintf(bfa_buf, sizeof(bfa_buf),
		"QLogic BR-series FC/FCOE Adapter, hwpath: %s driver: %s",
		bfad->pci_name, BFAD_DRIVER_VERSION);

	return bfa_buf;
}
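/*
 * Error-handling (eh_*) entry points follow. The common pattern, as
 * implemented below: look up the HAL object under bfad_lock, issue the
 * abort or task-management request, drop the lock, then sleep until
 * the completion callback clears the bookkeeping (host_scribble for
 * aborts, IO_DONE_BIT in cmnd->SCp.Status for task management).
 */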
/*
 * Scsi_Host template entry, aborts the specified SCSI command.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	u32 timeout;
	int rc = FAILED;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
	if (!hal_io) {
		/* IO has been completed, return success */
		rc = SUCCESS;
		goto out;
	}
	if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
		rc = FAILED;
		goto out;
	}

	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: abort cmnd %p iotag %x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	(void) bfa_ioim_abort(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Need to wait until the command gets aborted */
	timeout = 10;
	while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		if (timeout < 4 * HZ)
			timeout *= 2;
	}

	cmnd->scsi_done(cmnd);
	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: complete abort 0x%p iotag 0x%x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	return SUCCESS;
out:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return rc;
}

static bfa_status_t
bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
		     struct bfad_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_itnim_s *bfa_itnim;
	bfa_status_t rc = BFA_STATUS_OK;
	struct scsi_lun scsilun;

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, fail to allocate tskim\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL to avoid aborting a task command
	 * if an abort happens.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	/*
	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
	 * and fcs layers have cleaned up their nexus with the targets and
	 * the same has not been cleaned up by the shim
	 */
	if (bfa_itnim == NULL) {
		bfa_tskim_free(tskim);
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, bfa_itnim is NULL\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	memset(&scsilun, 0, sizeof(scsilun));
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
out:
	return rc;
}
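/*
 * Note on the SCp.Status encoding shared by the reset handlers below:
 * bfa_cb_tskim_done() stores the bfi_tskim_status shifted left by one
 * and uses bit 0 as IO_DONE_BIT, so after the IO_DONE_BIT wakeup the
 * waiters recover the status with
 *
 *	task_status = cmnd->SCp.Status >> 1;
 */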
/*
 * Scsi_Host template entry, resets a LUN and aborts all of its commands.
 *
 * Returns: SUCCESS or FAILED.
 *
 */
static int
bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_tskim_s *tskim;
	struct bfad_itnim_s *itnim;
	struct bfa_itnim_s *bfa_itnim;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	int rc = SUCCESS;
	unsigned long flags;
	enum bfi_tskim_status task_status;
	struct scsi_lun scsilun;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = itnim_data->itnim;
	if (!itnim) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"LUN reset, fail to allocate tskim");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL to avoid aborting a task command
	 * if an abort happens.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.ptr = (char *)&wq;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	/*
	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
	 * and fcs layers have cleaned up their nexus with the targets and
	 * the same has not been cleaned up by the shim
	 */
	if (bfa_itnim == NULL) {
		bfa_tskim_free(tskim);
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"lun reset, bfa_itnim is NULL\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}
	int_to_scsilun(cmnd->device->lun, &scsilun);
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_event(wq, test_bit(IO_DONE_BIT,
			(unsigned long *)&cmnd->SCp.Status));

	task_status = cmnd->SCp.Status >> 1;
	if (task_status != BFI_TSKIM_STS_OK) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"LUN reset failure, status: %d\n", task_status);
		rc = FAILED;
	}

out:
	return rc;
}
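/*
 * The target reset handler below reuses the same wait mechanism as the
 * LUN reset: the on-stack wait queue travels through cmnd->SCp.ptr,
 * and bfad_im_target_reset_send() issues FCP_TM_TARGET_RESET with a
 * zeroed LUN.
 */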
/*
 * Scsi_Host template entry, resets the target and aborts all of its commands.
 */
static int
bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct scsi_target *starget = scsi_target(cmnd->device);
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_s *itnim;
	unsigned long flags;
	u32 rc, rtn = FAILED;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = bfad_get_itnim(im_port, starget->id);
	if (itnim) {
		cmnd->SCp.ptr = (char *)&wq;
		rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
		if (rc == BFA_STATUS_OK) {
			/* wait for the target reset to complete */
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			wait_event(wq, test_bit(IO_DONE_BIT,
					(unsigned long *)&cmnd->SCp.Status));
			spin_lock_irqsave(&bfad->bfad_lock, flags);

			task_status = cmnd->SCp.Status >> 1;
			if (task_status != BFI_TSKIM_STS_OK)
				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
					"target reset failure,"
					" status: %d\n", task_status);
			else
				rtn = SUCCESS;
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return rtn;
}

/*
 * Scsi_Host template entry slave_destroy.
 */
static void
bfad_im_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

/*
 *  BFA FCS itnim callbacks
 */

/*
 * BFA FCS itnim alloc callback, after successful PRLI
 * Context: Interrupt
 */
int
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
		    struct bfad_itnim_s **itnim_drv)
{
	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
	if (*itnim_drv == NULL)
		return -ENOMEM;

	(*itnim_drv)->im = bfad->im;
	*itnim = &(*itnim_drv)->fcs_itnim;
	(*itnim_drv)->state = ITNIM_STATE_NONE;

	/*
	 * Initialize the itnim_work
	 */
	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
	return 0;
}

/*
 * BFA FCS itnim free callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];
	struct bfad_im_s *im = itnim_drv->im;

	/* online to free state transition should not happen */
	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);

	itnim_drv->queue_work = 1;
	/* offline request is not yet done, use the same request to free */
	if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
		itnim_drv->queue_work = 0;

	itnim_drv->state = ITNIM_STATE_FREE;
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->im_port = port->im_port;
	wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
	wwn2str(wwpn_str, wwpn);
	fcid2str(fcid_str, fcid);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
		port->im_port->shost->host_no,
		fcid_str, wwpn_str);

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
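/*
 * itnim state transitions driven by these callbacks, as implemented
 * here: NONE -> ONLINE after PRLI, ONLINE -> OFFLINE_PENDING on rport
 * offline, and OFFLINE_PENDING/OFFLINE -> FREE on delete. The actual
 * fc_remote_port_add()/fc_remote_port_delete() calls run later from
 * bfad_im_itnim_work_handler() on drv_workq, since these callbacks
 * run in interrupt context.
 */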
/*
 * BFA FCS itnim online callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	struct bfad_im_s *im = itnim_drv->im;

	itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->state = ITNIM_STATE_ONLINE;
	itnim_drv->queue_work = 1;
	itnim_drv->im_port = port->im_port;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * BFA FCS itnim offline callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	struct bfad_s *bfad;
	struct bfad_im_s *im = itnim_drv->im;

	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	bfad = port->bfad;
	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
		(port->flags & BFAD_PORT_DELETE)) {
		itnim_drv->state = ITNIM_STATE_OFFLINE;
		return;
	}
	itnim_drv->im_port = port->im_port;
	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
	itnim_drv->queue_work = 1;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * Allocate a Scsi_Host for a port.
 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
			struct device *dev)
{
	struct bfad_im_port_pointer *im_portp;
	int error = 1;

	mutex_lock(&bfad_mutex);
	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
	if (error < 0) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_alloc failure\n");
		goto out;
	}
	im_port->idr_id = error;
	mutex_unlock(&bfad_mutex);

	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost) {
		error = 1;
		goto out_free_idr;
	}

	im_portp = shost_priv(im_port->shost);
	im_portp->p = im_port;
	im_port->shost->unique_id = im_port->idr_id;
	im_port->shost->this_id = -1;
	im_port->shost->max_id = MAX_FCP_TARGET;
	im_port->shost->max_lun = MAX_FCP_LUN;
	im_port->shost->max_cmd_len = 16;
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		im_port->shost->transportt = bfad_im_scsi_transport_template;
	else
		im_port->shost->transportt =
				bfad_im_scsi_vport_transport_template;

	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error) {
		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
		goto out_fc_rel;
	}

	return 0;

out_fc_rel:
	scsi_host_put(im_port->shost);
	im_port->shost = NULL;
out_free_idr:
	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
out:
	return error;
}

void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	bfa_trc(bfad, bfad->inst_no);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
			im_port->shost->host_no);

	fc_remove_host(im_port->shost);

	scsi_remove_host(im_port->shost);
	scsi_host_put(im_port->shost);

	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
}
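/*
 * Vport deletion is deferred to the work item below, presumably
 * because fc_vport_terminate() may sleep and therefore cannot be
 * called from the (possibly atomic) context that requests the delete.
 */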
static void
bfad_im_port_delete_handler(struct work_struct *work)
{
	struct bfad_im_port_s *im_port =
		container_of(work, struct bfad_im_port_s, port_delete_work);

	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
		im_port->flags |= BFAD_PORT_DELETE;
		fc_vport_terminate(im_port->fc_vport);
	}
}

bfa_status_t
bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
{
	int rc = BFA_STATUS_OK;
	struct bfad_im_port_s *im_port;

	im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
	if (im_port == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}
	port->im_port = im_port;
	im_port->port = port;
	im_port->bfad = bfad;

	INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
	INIT_LIST_HEAD(&im_port->itnim_mapped_list);
	INIT_LIST_HEAD(&im_port->binding_list);

ext:
	return rc;
}

void
bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;

	queue_work(bfad->im->drv_workq,
				&im_port->port_delete_work);
}

void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
	struct bfad_fcp_binding *bp, *bp_new;
	unsigned long flags;
	struct bfad_s *bfad = im_port->bfad;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
					list_entry) {
		list_del(&bp->list_entry);
		kfree(bp);
	}

	/* the itnim_mapped_list must be empty at this time */
	WARN_ON(!list_empty(&im_port->itnim_mapped_list));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

static void bfad_aen_im_notify_handler(struct work_struct *work)
{
	struct bfad_im_s *im =
		container_of(work, struct bfad_im_s, aen_im_notify_work);
	struct bfa_aen_entry_s *aen_entry;
	struct bfad_s *bfad = im->bfad;
	struct Scsi_Host *shost = bfad->pport.im_port->shost;
	void *event_data;
	unsigned long flags;

	while (!list_empty(&bfad->active_aen_q)) {
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		bfa_q_deq(&bfad->active_aen_q, &aen_entry);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
		event_data = (char *)aen_entry + sizeof(struct list_head);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(struct bfa_aen_entry_s) -
					  sizeof(struct list_head),
					  (char *)event_data,
					  BFAD_NL_VENDOR_ID);
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
	}
}

bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
	struct bfad_im_s *im;

	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
	if (im == NULL)
		return BFA_STATUS_ENOMEM;

	bfad->im = im;
	im->bfad = bfad;

	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
		kfree(im);
		return BFA_STATUS_FAILED;
	}

	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
	return BFA_STATUS_OK;
}

void
bfad_im_probe_undo(struct bfad_s *bfad)
{
	if (bfad->im) {
		bfad_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}
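/*
 * Unit note for the max_sectors adjustment below (inferred from the
 * arithmetic): the max_xfer_size module parameter is given in
 * kilobytes while max_sectors counts 512-byte sectors, hence the
 * << 1 / >> 1 conversions (1 KB == 2 sectors).
 */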
struct Scsi_Host *
bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct scsi_host_template *sht;

	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		sht = &bfad_im_scsi_host_template;
	else
		sht = &bfad_im_vport_template;

	if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
		sht->max_sectors = max_xfer_size << 1;

	sht->sg_tablesize = bfad->cfg_data.io_max_sge;

	return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
}

void
bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	if (!(im_port->flags & BFAD_PORT_DELETE))
		flush_workqueue(bfad->im->drv_workq);
	bfad_im_scsi_host_free(im_port->bfad, im_port);
	bfad_im_port_clean(im_port);
	kfree(im_port);
}

void
bfad_destroy_workq(struct bfad_im_s *im)
{
	if (im && im->drv_workq) {
		flush_workqueue(im->drv_workq);
		destroy_workqueue(im->drv_workq);
		im->drv_workq = NULL;
	}
}

bfa_status_t
bfad_thread_workq(struct bfad_s *bfad)
{
	struct bfad_im_s *im = bfad->im;

	bfa_trc(bfad, 0);
	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
		 bfad->inst_no);
	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
	if (!im->drv_workq)
		return BFA_STATUS_FAILED;

	return BFA_STATUS_OK;
}

/*
 * Scsi_Host template entry.
 *
 * Description:
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan.
 * Returns non-zero on failure.
 */
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
	scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
	return 0;
}

struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_target_reset_handler = bfad_im_reset_target_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_host_attrs,
	.max_sectors = BFAD_MAX_SECTORS,
	.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
};

struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_target_reset_handler = bfad_im_reset_target_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_vport_attrs,
	.max_sectors = BFAD_MAX_SECTORS,
};

bfa_status_t
bfad_im_module_init(void)
{
	bfad_im_scsi_transport_template =
		fc_attach_transport(&bfad_im_fc_function_template);
	if (!bfad_im_scsi_transport_template)
		return BFA_STATUS_ENOMEM;

	bfad_im_scsi_vport_transport_template =
		fc_attach_transport(&bfad_im_vport_fc_function_template);
	if (!bfad_im_scsi_vport_transport_template) {
		fc_release_transport(bfad_im_scsi_transport_template);
		return BFA_STATUS_ENOMEM;
	}

	return BFA_STATUS_OK;
}
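/*
 * Module teardown below tolerates a partially completed init: each
 * transport template is released only if it was actually attached,
 * and the port-index IDR is destroyed last.
 */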
void
bfad_im_module_exit(void)
{
	if (bfad_im_scsi_transport_template)
		fc_release_transport(bfad_im_scsi_transport_template);

	if (bfad_im_scsi_vport_transport_template)
		fc_release_transport(bfad_im_scsi_vport_transport_template);

	idr_destroy(&bfad_im_port_index);
}

void
bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	if (((jiffies - itnim->last_ramp_up_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
		((jiffies - itnim->last_queue_full_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				scsi_change_queue_depth(tmp_sdev,
					tmp_sdev->queue_depth + 1);

				itnim->last_ramp_up_time = jiffies;
			}
		}
	}
}

void
bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	itnim->last_queue_full_time = jiffies;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->id != sdev->id)
			continue;
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}

struct bfad_itnim_s *
bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
{
	struct bfad_itnim_s *itnim = NULL;

	/* Search the mapped list for this target ID */
	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
		if (id == itnim->scsi_tgt_id)
			return itnim;
	}

	return NULL;
}

/*
 * Function is invoked from the SCSI Host Template slave_alloc() entry point.
 * Has the logic to query the LUN Mask database to check if this LUN needs to
 * be made visible to the SCSI mid-layer or not.
 *
 * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
 * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
 */
static int
bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
				  struct fc_rport *rport)
{
	struct bfad_itnim_data_s *itnim_data =
				(struct bfad_itnim_data_s *) rport->dd_data;
	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
	struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
	int i = 0, ret = -ENXIO;

	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
		    scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
		    lun_list[i].rp_tag == bfa_rport->rport_tag &&
		    lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
			ret = BFA_STATUS_OK;
			break;
		}
	}
	return ret;
}
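/*
 * LUN masking note for slave_alloc below: LUN 0 is never masked.
 * Hiding it would leave the midlayer with no LUN 0 to probe, so the
 * target would not be scanned at all; instead LUN 0 is exposed with
 * sparse-LUN scanning flags and only higher LUNs are filtered through
 * the mask database.
 */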
/*
 * Scsi_Host template entry slave_alloc
 */
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct bfad_itnim_data_s *itnim_data;
	struct bfa_s *bfa;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	itnim_data = (struct bfad_itnim_data_s *) rport->dd_data;
	bfa = itnim_data->itnim->bfa_itnim->bfa;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
		/*
		 * We should not mask LUN 0 - since this will translate
		 * to no LUN / TARGET for SCSI ml resulting in no scan.
		 */
		if (sdev->lun == 0) {
			sdev->sdev_bflags |= BLIST_NOREPORTLUN |
					     BLIST_SPARSELUN;
			goto done;
		}

		/*
		 * Query LUN Mask configuration - to expose this LUN
		 * to the SCSI mid-layer or to mask it.
		 */
		if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
							BFA_STATUS_OK)
			return -ENXIO;
	}
done:
	sdev->hostdata = rport->dd_data;

	return 0;
}

u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *ioc_attr;
	u32 supported_speed = 0;

	ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
	if (!ioc_attr)
		return 0;

	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
		supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
	else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
		if (ioc_attr->adapter_attr.is_mezz) {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
		} else {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT;
		}
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
		supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
				FC_PORTSPEED_1GBIT;
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
		supported_speed |= FC_PORTSPEED_10GBIT;
	}
	kfree(ioc_attr);
	return supported_speed;
}

void
bfad_fc_host_init(struct bfad_im_port_s *im_port)
{
	struct Scsi_Host *host = im_port->shost;
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_port_s *port = im_port->port;
	char symname[BFA_SYMNAME_MAXLEN];
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	fc_host_node_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
	fc_host_port_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);

	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
		/* For FCP type 0x08 */
		fc_host_supported_fc4s(host)[2] = 1;
	/* For fibre channel services type 0x20 */
	fc_host_supported_fc4s(host)[7] = 1;

	strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
		BFA_SYMNAME_MAXLEN);
	sprintf(fc_host_symbolic_name(host), "%s", symname);

	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
	fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}
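/*
 * rport creation sketch, as implemented below: the remote port is
 * first added with role UNKNOWN, then promoted to FCP_TARGET via
 * fc_remote_port_rolechg(), after which the FC transport assigns
 * scsi_target_id and the itnim records it for later lookups by
 * bfad_get_itnim().
 */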
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	rport_ids.node_name =
		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	itnim->channel = fc_rport->channel;

	return;
}

/*
 * Work queue handler using FC transport service
 * Context: kernel
 */
static void
bfad_im_itnim_work_handler(struct work_struct *work)
{
	struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
							itnim_work);
	struct bfad_im_s *im = itnim->im;
	struct bfad_s *bfad = im->bfad;
	struct bfad_im_port_s *im_port;
	unsigned long flags;
	struct fc_rport *fc_rport;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	im_port = itnim->im_port;
	bfa_trc(bfad, itnim->state);
	switch (itnim->state) {
	case ITNIM_STATE_ONLINE:
		if (!itnim->fc_rport) {
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			bfad_im_fc_rport_add(im_port, itnim);
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_add_tail(&itnim->list_entry,
				&im_port->itnim_mapped_list);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM ONLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		} else {
			printk(KERN_WARNING
				"%s: itnim %llx is already in online state\n",
				__func__,
				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
		}

		break;
	case ITNIM_STATE_OFFLINE_PENDING:
		itnim->state = ITNIM_STATE_OFFLINE;
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_del(&itnim->list_entry);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM OFFLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		}
		break;
	case ITNIM_STATE_FREE:
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			list_del(&itnim->list_entry);
		}

		kfree(itnim);
		break;
	default:
		WARN_ON(1);
		break;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
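/*
 * bfad_im_queuecommand(), referenced by the forward declaration at the
 * top of this file and by both host templates, is generated by the
 * DEF_SCSI_QCMD() macro after the _lck variant below; the generated
 * wrapper takes the Scsi_Host lock around the locked variant.
 */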
/*
 * Scsi_Host template entry, queue a SCSI command to the BFAD.
 */
static int
bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd,
			 void (*done) (struct scsi_cmnd *))
{
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_itnim_s *itnim;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	int rc;
	int sg_cnt = 0;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	if (bfad->bfad_flags & BFAD_EEH_BUSY) {
		if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
			cmnd->result = DID_NO_CONNECT << 16;
		else
			cmnd->result = DID_REQUEUE << 16;
		done(cmnd);
		return 0;
	}

	sg_cnt = scsi_dma_map(cmnd);
	if (sg_cnt < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmnd->scsi_done = done;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		printk(KERN_WARNING
			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
		cmnd->result = DID_NO_CONNECT << 16;
		goto out_fail_cmd;
	}

	itnim = itnim_data->itnim;
	if (!itnim) {
		cmnd->result = DID_IMM_RETRY << 16;
		goto out_fail_cmd;
	}

	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				    itnim->bfa_itnim, sg_cnt);
	if (!hal_io) {
		printk(KERN_WARNING "hal_io failure\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		scsi_dma_unmap(cmnd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmnd->host_scribble = (char *)hal_io;
	bfa_ioim_start(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;

out_fail_cmd:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	scsi_dma_unmap(cmnd);
	if (done)
		done(cmnd);

	return 0;
}

static DEF_SCSI_QCMD(bfad_im_queuecommand)

void
bfad_rport_online_wait(struct bfad_s *bfad)
{
	int i;
	int rport_delay = 10;

	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
		&& i < bfa_linkup_delay; i++) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
		rport_delay = rport_delay < bfa_linkup_delay ?
			rport_delay : bfa_linkup_delay;
		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
			&& i < rport_delay; i++) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}

		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(rport_delay * HZ);
		}
	}
}
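/*
 * Example of the boot-delay logic below: on a boot-over-SAN host the
 * flash holds at least one boot WWN, so bfad_get_linkup_delay()
 * returns 30 (seconds), which the caller is expected to feed into
 * bfa_linkup_delay; bfad_rport_online_wait() above then polls up to
 * that long for the port, and subsequently the rports, to come online
 * before SCSI scanning proceeds. A local-boot host gets no delay.
 */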
int
bfad_get_linkup_delay(struct bfad_s *bfad)
{
	u8 nwwns = 0;
	wwn_t wwns[BFA_PREBOOT_BOOTLUN_MAX];
	int linkup_delay;

	/*
	 * Querying for the boot target port wwns
	 * -- read from boot information in flash.
	 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
	 * else => local boot machine set linkup_delay = 0
	 */

	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

	if (nwwns > 0)
		/* If Boot over SAN set linkup_delay = 30sec */
		linkup_delay = 30;
	else
		/* If local boot; no linkup_delay */
		linkup_delay = 0;

	return linkup_delay;
}