// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

/*
 *  bfad_im.c Linux driver IM module.
 */

#include <linux/export.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"

BFA_TRC_FILE(LDRV, IM);

DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;
struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd);
static int bfad_im_slave_alloc(struct scsi_device *sdev);
static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port,
				struct bfad_itnim_s *itnim);

void
bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
		enum bfi_ioim_status io_status, u8 scsi_status,
		int sns_len, u8 *sns_info, s32 residue)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s *bfad = drv;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;
	u8 host_status = DID_OK;

	switch (io_status) {
	case BFI_IOIM_STS_OK:
		bfa_trc(bfad, scsi_status);
		scsi_set_resid(cmnd, 0);

		if (sns_len > 0) {
			bfa_trc(bfad, sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(cmnd->sense_buffer, sns_info, sns_len);
		}

		if (residue > 0) {
			bfa_trc(bfad, residue);
			scsi_set_resid(cmnd, residue);
			if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
				(scsi_bufflen(cmnd) - residue) <
					cmnd->underflow) {
				bfa_trc(bfad, 0);
				host_status = DID_ERROR;
			}
		}
		cmnd->result = host_status << 16 | scsi_status;

		break;

	case BFI_IOIM_STS_TIMEDOUT:
		cmnd->result = DID_TIME_OUT << 16;
		break;
	case BFI_IOIM_STS_PATHTOV:
		cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;
	default:
		cmnd->result = DID_ERROR << 16;
	}

	/* Unmap DMA; if host is NULL, this is a SCSI passthrough cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;
	bfa_trc(bfad, cmnd->result);

	itnim_data = cmnd->device->hostdata;
	if (itnim_data) {
		itnim = itnim_data->itnim;
		if (!cmnd->result && itnim &&
			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
			/* Queue depth adjustment for good status completion */
			bfad_ramp_up_qdepth(itnim, cmnd->device);
		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
			/* qfull handling */
			bfad_handle_qfull(itnim, cmnd->device);
		}
	}

	scsi_done(cmnd);
}

void
bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;

	cmnd->result = DID_OK << 16 | SAM_STAT_GOOD;

	/* Unmap DMA; if host is NULL, this is a SCSI passthrough cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;

	/* Queue depth adjustment */
	if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
		itnim_data = cmnd->device->hostdata;
		if (itnim_data) {
			itnim = itnim_data->itnim;
			if (itnim)
				bfad_ramp_up_qdepth(itnim, cmnd->device);
		}
	}

	scsi_done(cmnd);
}
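
/*
 * Note that bfa_cb_ioim_abort() below does not complete the command:
 * bfad_im_abort_handler() polls cmnd->host_scribble and calls
 * scsi_done() itself once the firmware has aborted the I/O.
 */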
void
bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s *bfad = drv;

	cmnd->result = DID_ERROR << 16;

	/* Unmap DMA; if host is NULL, this is a SCSI passthrough cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	bfa_trc(bfad, cmnd->result);
	cmnd->host_scribble = NULL;
}

void
bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
		   enum bfi_tskim_status tsk_status)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
	wait_queue_head_t *wq;

	bfad_priv(cmnd)->status |= tsk_status << 1;
	set_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status);
	wq = bfad_priv(cmnd)->wq;
	bfad_priv(cmnd)->wq = NULL;

	if (wq)
		wake_up(wq);
}

/*
 *  Scsi_Host_template SCSI host template
 */
/*
 * Scsi_Host template entry, returns BFAD PCI info.
 */
static const char *
bfad_im_info(struct Scsi_Host *shost)
{
	static char bfa_buf[256];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;

	memset(bfa_buf, 0, sizeof(bfa_buf));
	snprintf(bfa_buf, sizeof(bfa_buf),
		"QLogic BR-series FC/FCOE Adapter, hwpath: %s driver: %s",
		bfad->pci_name, BFAD_DRIVER_VERSION);

	return bfa_buf;
}

/*
 * Scsi_Host template entry, aborts the specified SCSI command.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	u32 timeout;
	int rc = FAILED;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
	if (!hal_io) {
		/* IO has been completed, return success */
		rc = SUCCESS;
		goto out;
	}
	if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
		rc = FAILED;
		goto out;
	}

	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: abort cmnd %p iotag %x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	(void) bfa_ioim_abort(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Need to wait until the command gets aborted */
	timeout = 10;
	while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		if (timeout < 4 * HZ)
			timeout *= 2;
	}

	scsi_done(cmnd);
	bfa_trc(bfad, hal_io->iotag);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"scsi%d: complete abort 0x%p iotag 0x%x\n",
		im_port->shost->host_no, cmnd, hal_io->iotag);
	return SUCCESS;
out:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return rc;
}
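
/*
 * Task management completions pack their status into
 * bfad_priv(cmnd)->status: bit 0 is IO_DONE_BIT and the
 * bfi_tskim_status is stored in the bits above it, hence the "<< 1"
 * in bfa_cb_tskim_done() and the ">> 1" in the reset handlers below.
 */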
static bfa_status_t
bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
		     struct bfad_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_itnim_s *bfa_itnim;
	bfa_status_t rc = BFA_STATUS_OK;
	struct scsi_lun scsilun;

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, failed to allocate tskim\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL so that a concurrent abort does not
	 * mistake this task management command for a regular I/O.
	 */
	cmnd->host_scribble = NULL;
	bfad_priv(cmnd)->status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	/*
	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
	 * and fcs layers have cleaned up their nexus with the targets and
	 * the same has not been cleaned up by the shim
	 */
	if (bfa_itnim == NULL) {
		bfa_tskim_free(tskim);
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"target reset, bfa_itnim is NULL\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	memset(&scsilun, 0, sizeof(scsilun));
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			    FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
out:
	return rc;
}

/*
 * Scsi_Host template entry, resets a LUN and aborts all its commands.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_tskim_s *tskim;
	struct bfad_itnim_s *itnim;
	struct bfa_itnim_s *bfa_itnim;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	int rc = SUCCESS;
	unsigned long flags;
	enum bfi_tskim_status task_status;
	struct scsi_lun scsilun;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = itnim_data->itnim;
	if (!itnim) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"LUN reset, failed to allocate tskim");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL so that a concurrent abort does not
	 * mistake this task management command for a regular I/O.
	 */
	cmnd->host_scribble = NULL;
	bfad_priv(cmnd)->wq = &wq;
	bfad_priv(cmnd)->status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	/*
	 * bfa_itnim can be NULL if the port gets disconnected and the bfa
	 * and fcs layers have cleaned up their nexus with the targets and
	 * the same has not been cleaned up by the shim
	 */
	if (bfa_itnim == NULL) {
		bfa_tskim_free(tskim);
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"lun reset, bfa_itnim is NULL\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}
	int_to_scsilun(cmnd->device->lun, &scsilun);
	bfa_tskim_start(tskim, bfa_itnim, scsilun,
			    FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_event(wq, test_bit(IO_DONE_BIT, &bfad_priv(cmnd)->status));

	task_status = bfad_priv(cmnd)->status >> 1;
	if (task_status != BFI_TSKIM_STS_OK) {
		BFA_LOG(KERN_ERR, bfad, bfa_log_level,
			"LUN reset failure, status: %d\n", task_status);
		rc = FAILED;
	}

out:
	return rc;
}
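
/*
 * Both reset handlers block on an on-stack wait queue: the queue head
 * is published through bfad_priv(cmnd)->wq under bfad_lock and woken
 * from bfa_cb_tskim_done(), so the handler's stack frame must stay
 * alive until IO_DONE_BIT is observed set.
 */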
/*
 * Scsi_Host template entry, resets the target and aborts all its commands.
 */
static int
bfad_im_reset_target_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct scsi_target *starget = scsi_target(cmnd->device);
	struct bfad_im_port_s *im_port =
				(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_s *itnim;
	unsigned long flags;
	u32 rc, rtn = FAILED;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = bfad_get_itnim(im_port, starget->id);
	if (itnim) {
		bfad_priv(cmnd)->wq = &wq;
		rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
		if (rc == BFA_STATUS_OK) {
			/* wait for the target reset to complete */
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			wait_event(wq, test_bit(IO_DONE_BIT,
					&bfad_priv(cmnd)->status));
			spin_lock_irqsave(&bfad->bfad_lock, flags);

			task_status = bfad_priv(cmnd)->status >> 1;
			if (task_status != BFI_TSKIM_STS_OK)
				BFA_LOG(KERN_ERR, bfad, bfa_log_level,
					"target reset failure,"
					" status: %d\n", task_status);
			else
				rtn = SUCCESS;
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return rtn;
}

/*
 * Scsi_Host template entry slave_destroy.
 */
static void
bfad_im_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
}

/*
 *  BFA FCS itnim callbacks
 */

/*
 * BFA FCS itnim alloc callback, after successful PRLI
 * Context: Interrupt
 */
int
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
		    struct bfad_itnim_s **itnim_drv)
{
	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
	if (*itnim_drv == NULL)
		return -ENOMEM;

	(*itnim_drv)->im = bfad->im;
	*itnim = &(*itnim_drv)->fcs_itnim;
	(*itnim_drv)->state = ITNIM_STATE_NONE;

	/*
	 * Initialize the itnim_work
	 */
	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
	return 0;
}

/*
 * BFA FCS itnim free callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];
	struct bfad_im_s *im = itnim_drv->im;

	/* online to free state transition should not happen */
	WARN_ON(itnim_drv->state == ITNIM_STATE_ONLINE);

	itnim_drv->queue_work = 1;
	/* offline request is not yet done, use the same request to free */
	if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
		itnim_drv->queue_work = 0;

	itnim_drv->state = ITNIM_STATE_FREE;
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->im_port = port->im_port;
	wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
	wwn2str(wwpn_str, wwpn);
	fcid2str(fcid_str, fcid);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
		port->im_port->shost->host_no,
		fcid_str, wwpn_str);

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
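
/*
 * The state/queue_work pair above and below forms a small state
 * machine: the FCS callbacks run in interrupt context with bfad_lock
 * held and only record the transition, while the sleeping FC transport
 * work (fc_remote_port_add()/fc_remote_port_delete()) is deferred to
 * bfad_im_itnim_work_handler() on the drv_workq workqueue.
 */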
/*
 * BFA FCS itnim online callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	struct bfad_im_s *im = itnim_drv->im;

	itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->state = ITNIM_STATE_ONLINE;
	itnim_drv->queue_work = 1;
	itnim_drv->im_port = port->im_port;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * BFA FCS itnim offline callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	struct bfad_s *bfad;
	struct bfad_im_s *im = itnim_drv->im;

	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	bfad = port->bfad;
	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
		 (port->flags & BFAD_PORT_DELETE)) {
		itnim_drv->state = ITNIM_STATE_OFFLINE;
		return;
	}
	itnim_drv->im_port = port->im_port;
	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
	itnim_drv->queue_work = 1;

	/* ITNIM processing */
	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}

/*
 * Allocate a Scsi_Host for a port.
 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
			struct device *dev)
{
	struct bfad_im_port_pointer *im_portp;
	int error;

	mutex_lock(&bfad_mutex);
	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
	if (error < 0) {
		mutex_unlock(&bfad_mutex);
		printk(KERN_WARNING "idr_alloc failure\n");
		goto out;
	}
	im_port->idr_id = error;
	mutex_unlock(&bfad_mutex);

	im_port->shost = bfad_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost) {
		error = 1;
		goto out_free_idr;
	}

	im_portp = shost_priv(im_port->shost);
	im_portp->p = im_port;
	im_port->shost->unique_id = im_port->idr_id;
	im_port->shost->this_id = -1;
	im_port->shost->max_id = MAX_FCP_TARGET;
	im_port->shost->max_lun = MAX_FCP_LUN;
	im_port->shost->max_cmd_len = 16;
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		im_port->shost->transportt = bfad_im_scsi_transport_template;
	else
		im_port->shost->transportt =
				bfad_im_scsi_vport_transport_template;

	error = scsi_add_host_with_dma(im_port->shost, dev, &bfad->pcidev->dev);
	if (error) {
		printk(KERN_WARNING "scsi_add_host failure %d\n", error);
		goto out_fc_rel;
	}

	return 0;

out_fc_rel:
	scsi_host_put(im_port->shost);
	im_port->shost = NULL;
out_free_idr:
	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
out:
	return error;
}
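
/*
 * Teardown order below matters: fc_remove_host() must run before
 * scsi_remove_host() so the FC transport detaches its rports before
 * the SCSI host itself is unregistered.
 */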
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	bfa_trc(bfad, bfad->inst_no);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Free scsi%d\n",
			im_port->shost->host_no);

	fc_remove_host(im_port->shost);

	scsi_remove_host(im_port->shost);
	scsi_host_put(im_port->shost);

	mutex_lock(&bfad_mutex);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	mutex_unlock(&bfad_mutex);
}

static void
bfad_im_port_delete_handler(struct work_struct *work)
{
	struct bfad_im_port_s *im_port =
		container_of(work, struct bfad_im_port_s, port_delete_work);

	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
		im_port->flags |= BFAD_PORT_DELETE;
		fc_vport_terminate(im_port->fc_vport);
	}
}

bfa_status_t
bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
{
	int rc = BFA_STATUS_OK;
	struct bfad_im_port_s *im_port;

	im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
	if (im_port == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}
	port->im_port = im_port;
	im_port->port = port;
	im_port->bfad = bfad;

	INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
	INIT_LIST_HEAD(&im_port->itnim_mapped_list);
	INIT_LIST_HEAD(&im_port->binding_list);

ext:
	return rc;
}

void
bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;

	queue_work(bfad->im->drv_workq,
				&im_port->port_delete_work);
}

void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
	struct bfad_fcp_binding *bp, *bp_new;
	unsigned long flags;
	struct bfad_s *bfad = im_port->bfad;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
					list_entry) {
		list_del(&bp->list_entry);
		kfree(bp);
	}

	/* the itnim_mapped_list must be empty at this time */
	WARN_ON(!list_empty(&im_port->itnim_mapped_list));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

static void bfad_aen_im_notify_handler(struct work_struct *work)
{
	struct bfad_im_s *im =
		container_of(work, struct bfad_im_s, aen_im_notify_work);
	struct bfa_aen_entry_s *aen_entry;
	struct bfad_s *bfad = im->bfad;
	struct Scsi_Host *shost = bfad->pport.im_port->shost;
	void *event_data;
	unsigned long flags;

	while (!list_empty(&bfad->active_aen_q)) {
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		bfa_q_deq(&bfad->active_aen_q, &aen_entry);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
		event_data = (char *)aen_entry + sizeof(struct list_head);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(struct bfa_aen_entry_s) -
					  sizeof(struct list_head),
					  (char *)event_data,
					  BFAD_NL_VENDOR_ID);
		spin_lock_irqsave(&bfad->bfad_aen_spinlock, flags);
		list_add_tail(&aen_entry->qe, &bfad->free_aen_q);
		spin_unlock_irqrestore(&bfad->bfad_aen_spinlock, flags);
	}
}

bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
	struct bfad_im_s *im;

	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
	if (im == NULL)
		return BFA_STATUS_ENOMEM;

	bfad->im = im;
	im->bfad = bfad;

	if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
		kfree(im);
		return BFA_STATUS_FAILED;
	}

	INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
	return BFA_STATUS_OK;
}

void
bfad_im_probe_undo(struct bfad_s *bfad)
{
	if (bfad->im) {
		bfad_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}
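
/*
 * max_xfer_size is a module parameter expressed in KB (per the bfad
 * module parameter description), while shost->max_sectors counts
 * 512-byte sectors, so the "<< 1" below converts KB to sectors; the
 * default of BFAD_MAX_SECTORS >> 1 is left untouched.
 */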
struct Scsi_Host *
bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct scsi_host_template *sht;

	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		sht = &bfad_im_scsi_host_template;
	else
		sht = &bfad_im_vport_template;

	if (max_xfer_size != BFAD_MAX_SECTORS >> 1)
		sht->max_sectors = max_xfer_size << 1;

	sht->sg_tablesize = bfad->cfg_data.io_max_sge;

	return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer));
}

void
bfad_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	if (!(im_port->flags & BFAD_PORT_DELETE))
		flush_workqueue(bfad->im->drv_workq);
	bfad_im_scsi_host_free(im_port->bfad, im_port);
	bfad_im_port_clean(im_port);
	kfree(im_port);
}

void
bfad_destroy_workq(struct bfad_im_s *im)
{
	if (im && im->drv_workq) {
		flush_workqueue(im->drv_workq);
		destroy_workqueue(im->drv_workq);
		im->drv_workq = NULL;
	}
}

bfa_status_t
bfad_thread_workq(struct bfad_s *bfad)
{
	struct bfad_im_s *im = bfad->im;

	bfa_trc(bfad, 0);
	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
		 bfad->inst_no);
	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
	if (!im->drv_workq)
		return BFA_STATUS_FAILED;

	return BFA_STATUS_OK;
}

/*
 * Scsi_Host template entry.
 *
 * Description:
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan.
 * Return non-zero on failure.
 */
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
	scsi_change_queue_depth(sdev, bfa_lun_queue_depth);
	return 0;
}

struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.cmd_size = sizeof(struct bfad_cmd_priv),
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_target_reset_handler = bfad_im_reset_target_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.shost_groups = bfad_im_host_groups,
	.max_sectors = BFAD_MAX_SECTORS,
	.vendor_id = BFA_PCI_VENDOR_ID_BROCADE,
};

struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.cmd_size = sizeof(struct bfad_cmd_priv),
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_target_reset_handler = bfad_im_reset_target_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.shost_groups = bfad_im_vport_groups,
	.max_sectors = BFAD_MAX_SECTORS,
};

bfa_status_t
bfad_im_module_init(void)
{
	bfad_im_scsi_transport_template =
		fc_attach_transport(&bfad_im_fc_function_template);
	if (!bfad_im_scsi_transport_template)
		return BFA_STATUS_ENOMEM;

	bfad_im_scsi_vport_transport_template =
		fc_attach_transport(&bfad_im_vport_fc_function_template);
	if (!bfad_im_scsi_vport_transport_template) {
		fc_release_transport(bfad_im_scsi_transport_template);
		return BFA_STATUS_ENOMEM;
	}

	return BFA_STATUS_OK;
}
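
/*
 * Module exit releases both transport templates and destroys the port
 * index IDR; by this point every im_port must already have removed its
 * own IDR entry in bfad_im_scsi_host_free().
 */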
void
bfad_im_module_exit(void)
{
	if (bfad_im_scsi_transport_template)
		fc_release_transport(bfad_im_scsi_transport_template);

	if (bfad_im_scsi_vport_transport_template)
		fc_release_transport(bfad_im_scsi_vport_transport_template);

	idr_destroy(&bfad_im_port_index);
}

void
bfad_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	if (((jiffies - itnim->last_ramp_up_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
		((jiffies - itnim->last_queue_full_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				scsi_change_queue_depth(tmp_sdev,
					tmp_sdev->queue_depth + 1);

				itnim->last_ramp_up_time = jiffies;
			}
		}
	}
}

void
bfad_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	itnim->last_queue_full_time = jiffies;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->id != sdev->id)
			continue;
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}

struct bfad_itnim_s *
bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
{
	struct bfad_itnim_s *itnim = NULL;

	/* Search the mapped list for this target ID */
	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
		if (id == itnim->scsi_tgt_id)
			return itnim;
	}

	return NULL;
}

/*
 * Function is invoked from the SCSI Host Template slave_alloc() entry point.
 * Has the logic to query the LUN Mask database to check if this LUN needs to
 * be made visible to the SCSI mid-layer or not.
 *
 * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
 * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
 */
static int
bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
				  struct fc_rport *rport)
{
	struct bfad_itnim_data_s *itnim_data =
				(struct bfad_itnim_data_s *) rport->dd_data;
	struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
	struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
	struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
	int i = 0, ret = -ENXIO;

	for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
		if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
		    scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
		    lun_list[i].rp_tag == bfa_rport->rport_tag &&
		    lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
			ret = BFA_STATUS_OK;
			break;
		}
	}
	return ret;
}
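
/*
 * slave_alloc below never masks LUN 0: hiding it would make the whole
 * target invisible to the midlayer. Instead, BLIST_NOREPORTLUN and
 * BLIST_SPARSELUN force a sequential LUN scan, so masked LUNs simply
 * fail to attach one by one.
 */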
/*
 * Scsi_Host template entry slave_alloc
 */
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct bfad_itnim_data_s *itnim_data;
	struct bfa_s *bfa;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	itnim_data = (struct bfad_itnim_data_s *) rport->dd_data;
	bfa = itnim_data->itnim->bfa_itnim->bfa;

	if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
		/*
		 * We should not mask LUN 0 - since this will translate
		 * to no LUN / TARGET for SCSI ml resulting in no scan.
		 */
		if (sdev->lun == 0) {
			sdev->sdev_bflags |= BLIST_NOREPORTLUN |
					     BLIST_SPARSELUN;
			goto done;
		}

		/*
		 * Query LUN Mask configuration - to expose this LUN
		 * to the SCSI mid-layer or to mask it.
		 */
		if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
				BFA_STATUS_OK)
			return -ENXIO;
	}
done:
	sdev->hostdata = rport->dd_data;

	return 0;
}

u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *ioc_attr;
	u32 supported_speed = 0;

	ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
	if (!ioc_attr)
		return 0;

	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_16GBPS)
		supported_speed |= FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT;
	else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
		if (ioc_attr->adapter_attr.is_mezz) {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
		} else {
			supported_speed |= FC_PORTSPEED_8GBIT |
				FC_PORTSPEED_4GBIT |
				FC_PORTSPEED_2GBIT;
		}
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
		supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
				FC_PORTSPEED_1GBIT;
	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
		supported_speed |= FC_PORTSPEED_10GBIT;
	}
	kfree(ioc_attr);
	return supported_speed;
}

void
bfad_fc_host_init(struct bfad_im_port_s *im_port)
{
	struct Scsi_Host *host = im_port->shost;
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_port_s *port = im_port->port;
	char symname[BFA_SYMNAME_MAXLEN];
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);

	fc_host_node_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
	fc_host_port_name(host) =
		cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);

	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
		/* For FCP type 0x08 */
		fc_host_supported_fc4s(host)[2] = 1;
	/* For fibre channel services type 0x20 */
	fc_host_supported_fc4s(host)[7] = 1;

	strlcpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
		BFA_SYMNAME_MAXLEN);
	sprintf(fc_host_symbolic_name(host), "%s", symname);

	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
	fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
}
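
/*
 * The rport is registered with FC_RPORT_ROLE_UNKNOWN first and only
 * then promoted to FC_RPORT_ROLE_FCP_TARGET via
 * fc_remote_port_rolechg(); the role change is what lets the FC
 * transport kick off the SCSI target scan.
 */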
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	rport_ids.node_name =
		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	itnim->channel = fc_rport->channel;
}
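
/*
 * The handler below drops bfad_lock around the FC transport calls
 * (fc_remote_port_add()/fc_remote_port_delete()), which may sleep,
 * and re-acquires it before touching driver state again.
 */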
/*
 * Work queue handler using FC transport service
 * Context: kernel
 */
static void
bfad_im_itnim_work_handler(struct work_struct *work)
{
	struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
							itnim_work);
	struct bfad_im_s *im = itnim->im;
	struct bfad_s *bfad = im->bfad;
	struct bfad_im_port_s *im_port;
	unsigned long flags;
	struct fc_rport *fc_rport;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	im_port = itnim->im_port;
	bfa_trc(bfad, itnim->state);
	switch (itnim->state) {
	case ITNIM_STATE_ONLINE:
		if (!itnim->fc_rport) {
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			bfad_im_fc_rport_add(im_port, itnim);
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_add_tail(&itnim->list_entry,
				&im_port->itnim_mapped_list);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM ONLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		} else {
			printk(KERN_WARNING
				"%s: itnim %llx is already in online state\n",
				__func__,
				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
		}

		break;
	case ITNIM_STATE_OFFLINE_PENDING:
		itnim->state = ITNIM_STATE_OFFLINE;
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock,
						       flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_del(&itnim->list_entry);
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"ITNIM OFFLINE Target: %d:0:%d "
				"FCID: %s WWPN: %s\n",
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		}
		break;
	case ITNIM_STATE_FREE:
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock,
						       flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			list_del(&itnim->list_entry);
		}

		kfree(itnim);
		break;
	default:
		WARN_ON(1);
		break;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

/*
 * Scsi_Host template entry, queue a SCSI command to the BFAD.
 */
static int bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd)
{
	void (*done)(struct scsi_cmnd *) = scsi_done;
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_itnim_s *itnim;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	int rc;
	int sg_cnt = 0;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	if (bfad->bfad_flags & BFAD_EEH_BUSY) {
		if (bfad->bfad_flags & BFAD_EEH_PCI_CHANNEL_IO_PERM_FAILURE)
			cmnd->result = DID_NO_CONNECT << 16;
		else
			cmnd->result = DID_REQUEUE << 16;
		done(cmnd);
		return 0;
	}

	sg_cnt = scsi_dma_map(cmnd);
	if (sg_cnt < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		printk(KERN_WARNING
			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
		cmnd->result = DID_NO_CONNECT << 16;
		goto out_fail_cmd;
	}

	itnim = itnim_data->itnim;
	if (!itnim) {
		cmnd->result = DID_IMM_RETRY << 16;
		goto out_fail_cmd;
	}

	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				    itnim->bfa_itnim, sg_cnt);
	if (!hal_io) {
		printk(KERN_WARNING "hal_io failure\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		scsi_dma_unmap(cmnd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmnd->host_scribble = (char *)hal_io;
	bfa_ioim_start(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;

out_fail_cmd:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	scsi_dma_unmap(cmnd);
	if (done)
		done(cmnd);

	return 0;
}

static DEF_SCSI_QCMD(bfad_im_queuecommand)

void
bfad_rport_online_wait(struct bfad_s *bfad)
{
	int i;
	int rport_delay = 10;

	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
		&& i < bfa_linkup_delay; i++) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ);
	}

	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
		rport_delay = rport_delay < bfa_linkup_delay ?
			rport_delay : bfa_linkup_delay;
		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
			&& i < rport_delay; i++) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(HZ);
		}

		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(rport_delay * HZ);
		}
	}
}
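
/*
 * bfad_rport_online_wait() above stalls driver initialization until
 * the port and its first remote port come online (bounded by
 * bfa_linkup_delay), so that boot-over-SAN setups see their boot LUNs
 * as soon as the host comes up.
 */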
int
bfad_get_linkup_delay(struct bfad_s *bfad)
{
	u8	nwwns = 0;
	wwn_t	wwns[BFA_PREBOOT_BOOTLUN_MAX];
	int	linkup_delay;

	/*
	 * Query the boot target port wwns from the boot information
	 * stored in flash:
	 * if nwwns > 0 => boot over SAN, set linkup_delay = 30
	 * else         => local boot machine, set linkup_delay = 0
	 */

	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

	if (nwwns > 0)
		/* If boot over SAN, set linkup_delay = 30sec */
		linkup_delay = 30;
	else
		/* If local boot, no linkup_delay */
		linkup_delay = 0;

	return linkup_delay;
}