/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"

/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag) \
	lpfc_printf_log(phba, \
		KERN_INFO, \
		LOG_MBOX | LOG_SLI, \
		"%d:0311 Mailbox command x%x cannot issue " \
		"Data: x%x x%x x%x\n", \
		phba->brd_no, \
		mb->mbxCommand, \
		phba->hba_state, \
		psli->sli_flag, \
		flag);


/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba * phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}

void
lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
{
	size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
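
/*
 * Usage note (illustrative, not part of the driver flow): iocbq objects
 * cycle between the free list and the rings, and callers pair the two
 * helpers above, e.g.:
 *
 *	piocb = lpfc_sli_get_iocbq(phba);
 *	if (piocb) {
 *		... fill in piocb->iocb and issue it ...
 *		... lpfc_sli_release_iocbq(phba, piocb) when done ...
 *	}
 *
 * Note that lpfc_sli_release_iocbq() deliberately clears only the fields
 * from the embedded IOCB onward, so the iotag (and its slot in the iotag
 * lookup array) survives recycling.
 */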

/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

static int
lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *pmbox = &pmb->mb;
	int i, rc;

	for (i = 0; i < psli->num_rings; i++) {
		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0446 Adapter failed to init, "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					i);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ENXIO;
		}
	}
	return 0;
}

static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if (unlikely(pring->ringno == LPFC_ELS_RING))
		mod_timer(&phba->els_tmofunc,
			  jiffies + HZ * (phba->fc_ratov << 1));

	return (0);
}
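
/*
 * lpfc_sli_ringtx_get: remove and return the IOCB at the head of the ring's
 * transmit queue (txq), or NULL if the queue is empty.  Pending IOCBs wait
 * on the txq until a command ring slot opens up; once handed to the HBA they
 * move to the txcmplq (see lpfc_sli_ringtxcmpl_put above) to await
 * completion.
 */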
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	struct list_head *dlp;
	struct lpfc_iocbq *cmd_iocb;

	dlp = &pring->txq;
	cmd_iocb = NULL;
	list_remove_head((&pring->txq), cmd_iocb,
			 struct lpfc_iocbq,
			 list);
	if (cmd_iocb) {
		/* If the first ptr is not equal to the list header,
		 * dequeue the IOCBQ_t and return it.
		 */
		pring->txq_cnt--;
	}
	return (cmd_iocb);
}

static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;
	IOCB_t *iocb = NULL;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					phba->brd_no, pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;
			if (phba->work_wait)
				wake_up(phba->work_wait);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);

	return iocb;
}

uint16_t
lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(phba->host->host_lock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(phba->host->host_lock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(phba->host->host_lock);
		new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			memset((char *)new_arr, 0,
			       new_len * sizeof (struct lpfc_iocbq *));
			spin_lock_irq(phba->host->host_lock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(phba->host->host_lock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(phba->host->host_lock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(phba->host->host_lock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(phba->host->host_lock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			phba->brd_no, psli->last_iotag);

	return 0;
}
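
/*
 * Note on the iotag allocator above: iotags index directly into
 * psli->iocbq_lookup, so the response path can map a completed iotag back
 * to its command IOCB with a single array dereference (see
 * lpfc_sli_iocbq_lookup below).  The lookup array is grown in
 * LPFC_IOCBQ_LOOKUP_INCREMENT steps with the host lock dropped around the
 * allocation; the "new_len <= iocbq_lookup_len" re-check covers the window
 * where another thread grew the array first.
 */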

static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, phba->MBslimaddr
	       + (SLIMOFF + (pring->ringno * 2)) * 4);
}

static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,
			  struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

static void
lpfc_sli_update_ring(struct lpfc_hba * phba,
		     struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	wmb();
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}

static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    (phba->hba_state > LPFC_LINK_DOWN) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];

	/* If the ring is active, flag it */
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
			spin_unlock_irq(phba->host->host_lock);
		}
	}
}

static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_SLIM:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_FLASH_WR_ULA:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return (ret);
}

static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	wait_queue_head_t *pdone_q;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	return;
}
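
/*
 * Sketch of the wait side of the handshake above.  The synchronous issuer
 * (lpfc_sli_issue_mbox_wait, outside this excerpt) is the intended user;
 * the details below are illustrative only.  A waiter points context1 at a
 * local wait queue, issues the command with MBX_NOWAIT, and sleeps until
 * the completion path calls lpfc_sli_wake_mbox_wait():
 *
 *	DECLARE_WAIT_QUEUE_HEAD(done_q);
 *	pmboxq->context1 = &done_q;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_event_interruptible_timeout(done_q,
 *			pmboxq->mbox_flag & LPFC_MBX_WAKE, timeout);
 *
 * A waiter that times out clears context1 before giving up, which is why
 * the handler above tolerates a NULL pdone_q.
 */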

void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_dmabuf *mp;
	uint16_t rpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->fc_flag & FC_UNLOADING) &&
	    (pmb->mb.mbxCommand == MBX_REG_LOGIN64) &&
	    (!pmb->mb.mbxStatus)) {

		rpi = pmb->mb.un.varWords[0];
		lpfc_unreg_login(phba, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
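
/*
 * lpfc_sli_handle_mb_event: process a mailbox completion.  Called when the
 * HBA signals mailbox attention.  It copies the completed command out of
 * host SLIM, validates ownership and the command code, retries resource
 * errors, invokes the command's completion handler, and then issues the
 * next queued mailbox command (or re-enables IOCB processing on all rings
 * if the queue is empty).
 */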
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_sli *psli;
	int i, rc;
	uint32_t process_next;

	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	phba->sli.slistat.mbox_event++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = phba->sli.mbox_active)) {
		pmbox = &pmb->mb;
		mbox = &phba->slim2p->mbx;

		/* First check out the status word */
		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/* Let's try for a while */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
						      sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
			   <status> */
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			spin_lock_irq(phba->host->host_lock);
			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irq(phba->host->host_lock);
			return (1);
		}

	mbout:
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {

			/* Unknown mailbox command compl */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"%d:0323 Unknown Mailbox command %x Cmpl\n",
					phba->brd_no,
					pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			return (0);
		}

		phba->sli.mbox_active = NULL;
		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba,
					KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"%d:0305 Mbox cmd cmpl error - "
					"RETRYing Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					phba->hba_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(phba->host->host_lock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					return (0);
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl) {
			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
			pmb->mbox_cmpl(phba, pmb);
		}
	}

	do {
		process_next = 0;	/* by default don't loop */
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			spin_unlock_irq(phba->host->host_lock);
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
				pmb->mbox_cmpl(phba, pmb);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			spin_unlock_irq(phba->host->host_lock);
			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++)
				lpfc_sli_turn_on_ring(phba, i);
		}

	} while (process_next);

	return (0);
}

static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}
	/* Unsolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
					(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
				match = 1;
				break;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				phba->brd_no,
				pring->ringno,
				Rctl,
				Type);
	}
	return (1);
}

static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
		      struct lpfc_sli_ring * pring,
		      struct lpfc_iocbq * prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			phba->brd_no, iotag,
			phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
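
/*
 * lpfc_sli_process_sol_iocb (below) resolves a response back to its
 * originating command via the iotag lookup above, then either invokes the
 * command's iocb_cmpl callback (dropping the host lock across the call) or,
 * when no callback was registered, simply recycles the iocbq.  Driver-aborted
 * ELS commands are flagged so the callback sees
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 */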
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(phba->host->host_lock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;
				}
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			}
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0322 Ring %d handler: unexpected "
				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				pring->ringno,
				saveq->iocb.ulpIoTag,
				saveq->iocb.ulpStatus,
				saveq->iocb.un.ulpWord[4],
				saveq->iocb.ulpCommand,
				saveq->iocb.ulpContext);
		}
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}

static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
					struct lpfc_sli_ring * pring)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];

	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			phba->brd_no, pring->ringno,
			le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->hba_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;
	if (phba->work_wait)
		wake_up(phba->work_wait);

	return;
}
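
/*
 * lpfc_sli_poll_fcp_ring: service the FCP response ring directly instead of
 * waiting for an interrupt.  It walks the ring much like the fast-path
 * handler further below, but is called from the polling path and does not
 * take the host lock.
 */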
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	struct lpfc_pgp *pgp;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int type;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;
	uint32_t ha_copy;

	pring->stats.iocb_event++;

	/* The driver assumes SLI-2 mode */
	pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		return;
	}

	rmb();
	while (pring->rspidx != portRspPut) {

		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbq.iocb;
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"%d:0314 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n", phba->brd_no,
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no, type,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	ha_copy = readl(phba->HAregaddr);
	ha_copy >>= (LPFC_FCP_RING * 4);

	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((ha_copy & HA_R0CE_RSP) &&
	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	return;
}
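
/*
 * The handler below is the interrupt-driven counterpart of the polling
 * routine above.  One behavioral difference worth noting: when
 * ENABLE_FCP_RING_POLLING is set in phba->cfg_poll, completion callbacks
 * are invoked with the host lock still held; otherwise the lock is dropped
 * around each callback and re-acquired afterwards.
 */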

/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0], irsp->un.ulpWord[1],
					irsp->un.ulpWord[2], irsp->un.ulpWord[3],
					irsp->un.ulpWord[4], irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"%d:0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n", phba->brd_no,
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
				} else {
					spin_unlock_irqrestore(
						phba->host->host_lock, iflag);
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
					spin_lock_irqsave(phba->host->host_lock,
							  iflag);
				}
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(phba->host->host_lock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(phba->host->host_lock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no, type, irsp->ulpCommand,
						irsp->ulpStatus, irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writel(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}


int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
		 * than rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_SLI,
				"%d:0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no,
				pring->ringno, portRspPut, portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(phba->host->host_lock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		rspiocbp = lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
					      + 1) * 4;
		writel(pring->rspidx, to_slim);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			pring->stats.iocb_rsp++;

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"%d:0328 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no,
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
							       saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
								 saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_iocbq_lookup(phba, pring,
							    saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
						       phba->host->host_lock,
						       iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							     cmdiocbp, saveq);
						spin_lock_irqsave(
							  phba->host->host_lock,
							  iflag);
					} else
						lpfc_sli_release_iocbq(phba,
								     cmdiocbp);
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"%d:0335 Unknown IOCB command "
						"Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
				}
			}

			if (free_saveq) {
				if (!list_empty(&saveq->list)) {
					list_for_each_entry_safe(rspiocbp,
								 next_iocb,
								 &saveq->list,
								 list) {
						list_del(&rspiocbp->list);
						lpfc_sli_release_iocbq(phba,
								     rspiocbp);
					}
				}
				lpfc_sli_release_iocbq(phba, saveq);
			}
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}

int
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd = NULL;
	int errcnt;

	errcnt = 0;

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	spin_unlock_irq(phba->host->host_lock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;
		list_del(&iocb->list);

		if (iocb->iocb_cmpl) {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}

	return errcnt;
}
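
/*
 * Note the two-phase pattern above: queued IOCBs are spliced onto a private
 * "completions" list while the host lock is held, then failed with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED after the lock is dropped, so that
 * completion callbacks never run under the lock.  IOCBs already with the
 * HBA (on the txcmplq) are instead aborted via ABTS and finish through the
 * normal response path.
 */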

int
lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/*
	 * Check status register every 10ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		phba->hba_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}

#define BARRIER_TEST_PATTERN	(0xdeadbeef)

void lpfc_reset_barrier(struct lpfc_hba * phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;
	int i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->fc_flag |= FC_IGNORE_ERATT;

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	for (i = 0;
	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
		mdelay(1);

	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
		    phba->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
		mdelay(1);

clear_errat:

	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
		mdelay(1);

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->stopped = 1;
	}

restore_hc:
	phba->fc_flag &= ~FC_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
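
/*
 * lpfc_sli_brdkill: stop the HBA via a KILL_BOARD mailbox command.  There
 * is no completion for KILL_BOARD, so the routine polls the host attention
 * register for ERATT (every 100ms for up to 3 seconds) and then forces the
 * HBA into the error state; the board state is undefined if ERATT never
 * arrives.
 */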
int
lpfc_sli_brdkill(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba,
		KERN_INFO,
		LOG_SLI,
		"%d:0329 Kill HBA Data: x%x x%x\n",
		phba->brd_no,
		phba->hba_state,
		psli->sli_flag);

	if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL)) == 0)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(phba->host->host_lock);
	status = readl(phba->HCregaddr);
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->fc_flag |= FC_IGNORE_ERATT;
	spin_unlock_irq(phba->host->host_lock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		spin_lock_irq(phba->host->host_lock);
		phba->fc_flag &= ~FC_IGNORE_ERATT;
		spin_unlock_irq(phba->host->host_lock);
		return 1;
	}

	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd.  Check for an
	 * error attention every 100ms for 3 seconds.  If we don't get ERATT
	 * after 3 seconds we still set HBA_ERROR state because the status of
	 * the board is now undefined.
	 */
	ha_copy = readl(phba->HAregaddr);

	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		ha_copy = readl(phba->HAregaddr);
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->stopped = 1;
	}
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->fc_flag &= ~FC_IGNORE_ERATT;
	spin_unlock_irq(phba->host->host_lock);

	psli->mbox_active = NULL;
	lpfc_hba_down_post(phba);
	phba->hba_state = LPFC_HBA_ERROR;

	return (ha_copy & HA_ERATT ? 0 : 1);
}
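
/*
 * lpfc_sli_brdreset (below) performs the actual hardware reset: parity and
 * SERR reporting are masked in PCI config space while the INITFF bit in the
 * Host Control register is pulsed, then the PCI command register is
 * restored and the per-ring SLI indices are zeroed.
 */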
int
lpfc_sli_brdreset(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
			phba->hba_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->fc_myDID = 0;
	phba->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->hba_state = LPFC_WARM_START;
	return 0;
}

int
lpfc_sli_brdrestart(struct lpfc_hba * phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint16_t skip_post;
	volatile uint32_t word0;
	void __iomem *to_slim;

	spin_lock_irq(phba->host->host_lock);

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
			phba->hba_state, psli->sli_flag);

	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->hba_state) {
		skip_post = 1;
		word0 = 1;	/* This is really setting up word1 */
	} else {
		skip_post = 0;
		word0 = 0;	/* This is really setting up word1 */
	}
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->stopped = 0;
	phba->hba_state = LPFC_INIT_START;

	spin_unlock_irq(phba->host->host_lock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	if (skip_post)
		mdelay(100);
	else
		mdelay(2000);

	lpfc_hba_down_post(phba);

	return 0;
}
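
/*
 * lpfc_sli_chipset_init: bring the chip to the point where mailbox commands
 * can be issued, by polling the host status register for HS_FFRDY|HS_MBRDY.
 * It uses the same back-off schedule as lpfc_sli_brdready(), restarting the
 * board once at the 15th retry, and fails on HS_FFERM or after 20 retries.
 */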
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 5 retries, then every 500ms for 5,
		 * then every 2.5 sec for 5, then reset board and every
		 * 2.5 sec for 4.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 5) {
			msleep(10);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->hba_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}

int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	while (resetcount < 2 && !done) {
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
		phba->hba_state = LPFC_STATE_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		msleep(2500);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(phba->host->host_lock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization.  A
		 * value of 0 means the call was successful.  Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->hba_state = 0;
			continue;
		} else if (rc) {
			break;
		}

		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc == MBX_SUCCESS)
			done = 1;
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				phba->brd_no, pmb->mb.mbxCommand,
				pmb->mb.mbxStatus, 0);
			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
		}
	}
	if (!done)
		goto lpfc_sli_hba_setup_error;

	rc = lpfc_sli_ring_map(phba, pmb);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	phba->sli.sli_flag |= LPFC_PROCESS_LA;

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	goto lpfc_sli_hba_setup_exit;
lpfc_sli_hba_setup_error:
	phba->hba_state = LPFC_HBA_ERROR;
lpfc_sli_hba_setup_exit:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}

/*! lpfc_mbox_timeout
 *
 * \pre
 * \post
 * \param hba Pointer to per struct lpfc_hba structure
 * \param l1  Pointer to the driver's mailbox queue.
 * \return
 *   void
 *
 * \b Description:
 *
 * This routine handles mailbox timeout events at timer interrupt context.
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(phba->host->host_lock, iflag);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		phba->work_hba_events |= WORKER_MBOX_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
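
/*
 * lpfc_mbox_timeout_handler: worker-thread side of the mailbox timeout.
 * The timer handler above only marks WORKER_MBOX_TMO and wakes the worker;
 * the heavy lifting (failing outstanding I/O, taking the HBA offline,
 * restarting it) happens here in process context.
 */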
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
        LPFC_MBOXQ_t *pmbox;
        MAILBOX_t *mb;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;

        spin_lock_irq(phba->host->host_lock);
        if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
                spin_unlock_irq(phba->host->host_lock);
                return;
        }

        pmbox = phba->sli.mbox_active;
        mb = &pmbox->mb;

        /* Mbox cmd <mbxCommand> timeout */
        lpfc_printf_log(phba,
                KERN_ERR,
                LOG_MBOX | LOG_SLI,
                "%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
                phba->brd_no,
                mb->mbxCommand,
                phba->hba_state,
                phba->sli.sli_flag,
                phba->sli.mbox_active);

        /* Set the state unknown so lpfc_sli_abort_iocb_ring
         * gets IOCB_ERROR from lpfc_sli_issue_iocb, allowing
         * it to fail all outstanding SCSI IO.
         */
        phba->hba_state = LPFC_STATE_UNKNOWN;
        phba->work_hba_events &= ~WORKER_MBOX_TMO;
        phba->fc_flag |= FC_ESTABLISH_LINK;
        psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
        spin_unlock_irq(phba->host->host_lock);

        pring = &psli->ring[psli->fcp_ring];
        lpfc_sli_abort_iocb_ring(phba, pring);

        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                        "%d:0316 Resetting board due to mailbox timeout\n",
                        phba->brd_no);
        /*
         * lpfc_offline calls lpfc_sli_hba_down, which will clean up
         * any outstanding mailbox commands.
         */
        lpfc_offline_prep(phba);
        lpfc_offline(phba);
        lpfc_sli_brdrestart(phba);
        if (lpfc_online(phba) == 0)             /* Initialize the HBA */
                mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
        lpfc_unblock_mgmt_io(phba);
        return;
}
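/*
 * Summary of the contract implemented by lpfc_sli_issue_mbox() below: with
 * MBX_POLL the routine busy-waits (sleeping 1ms between register reads)
 * until the adapter releases ownership, then returns the command's
 * mbxStatus; with MBX_NOWAIT it returns MBX_SUCCESS once the command is
 * handed to the adapter, MBX_BUSY if it was queued behind an active
 * command, or MBX_NOT_FINISHED if it could not be issued at all.
 */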
2156 */ 2157 lpfc_mbox_put(phba, pmbox); 2158 2159 /* Mbox cmd issue - BUSY */ 2160 lpfc_printf_log(phba, 2161 KERN_INFO, 2162 LOG_MBOX | LOG_SLI, 2163 "%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n", 2164 phba->brd_no, 2165 mb->mbxCommand, 2166 phba->hba_state, 2167 psli->sli_flag, 2168 flag); 2169 2170 psli->slistat.mbox_busy++; 2171 spin_unlock_irqrestore(phba->host->host_lock, 2172 drvr_flag); 2173 2174 return (MBX_BUSY); 2175 } 2176 2177 /* Handle STOP IOCB processing flag. This is only meaningful 2178 * if we are not polling for mbox completion. 2179 */ 2180 if (flag & MBX_STOP_IOCB) { 2181 flag &= ~MBX_STOP_IOCB; 2182 if (flag == MBX_NOWAIT) { 2183 /* Now flag each ring */ 2184 for (i = 0; i < psli->num_rings; i++) { 2185 /* If the ring is active, flag it */ 2186 if (psli->ring[i].cmdringaddr) { 2187 psli->ring[i].flag |= 2188 LPFC_STOP_IOCB_MBX; 2189 } 2190 } 2191 } 2192 } 2193 2194 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2195 2196 /* If we are not polling, we MUST be in SLI2 mode */ 2197 if (flag != MBX_POLL) { 2198 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2199 (mb->mbxCommand != MBX_KILL_BOARD)) { 2200 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2201 spin_unlock_irqrestore(phba->host->host_lock, 2202 drvr_flag); 2203 /* Mbox command <mbxCommand> cannot issue */ 2204 LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag); 2205 return (MBX_NOT_FINISHED); 2206 } 2207 /* timeout active mbox command */ 2208 mod_timer(&psli->mbox_tmo, (jiffies + 2209 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 2210 } 2211 2212 /* Mailbox cmd <cmd> issue */ 2213 lpfc_printf_log(phba, 2214 KERN_INFO, 2215 LOG_MBOX | LOG_SLI, 2216 "%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n", 2217 phba->brd_no, 2218 mb->mbxCommand, 2219 phba->hba_state, 2220 psli->sli_flag, 2221 flag); 2222 2223 psli->slistat.mbox_cmd++; 2224 evtctr = psli->slistat.mbox_event; 2225 2226 /* next set own bit for the adapter and copy over command word */ 2227 mb->mbxOwner = OWN_CHIP; 2228 2229 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2230 /* First copy command data to host SLIM area */ 2231 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 2232 } else { 2233 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2234 /* copy command data into host mbox for cmpl */ 2235 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2236 MAILBOX_CMD_SIZE); 2237 } 2238 2239 /* First copy mbox command data to HBA SLIM, skip past first 2240 word */ 2241 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2242 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 2243 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 2244 2245 /* Next copy over first word, with mbxOwner set */ 2246 ldata = *((volatile uint32_t *)mb); 2247 to_slim = phba->MBslimaddr; 2248 writel(ldata, to_slim); 2249 readl(to_slim); /* flush */ 2250 2251 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2252 /* switch over to host mailbox */ 2253 psli->sli_flag |= LPFC_SLI2_ACTIVE; 2254 } 2255 } 2256 2257 wmb(); 2258 /* interrupt board to doit right away */ 2259 writel(CA_MBATT, phba->CAregaddr); 2260 readl(phba->CAregaddr); /* flush */ 2261 2262 switch (flag) { 2263 case MBX_NOWAIT: 2264 /* Don't wait for it to finish, just return */ 2265 psli->mbox_active = pmbox; 2266 break; 2267 2268 case MBX_POLL: 2269 psli->mbox_active = NULL; 2270 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2271 /* First read mbox status word */ 2272 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2273 word0 = le32_to_cpu(word0); 2274 } else { 2275 /* First read mbox status word */ 2276 word0 = readl(phba->MBslimaddr); 2277 } 2278 
static int
lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
                    struct lpfc_iocbq * piocb)
{
        /* Insert the caller's iocb in the txq tail for later processing. */
        list_add_tail(&piocb->list, &pring->txq);
        pring->txq_cnt++;
        return (0);
}

static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                   struct lpfc_iocbq ** piocb)
{
        struct lpfc_iocbq * nextiocb;

        nextiocb = lpfc_sli_ringtx_get(phba, pring);
        if (!nextiocb) {
                nextiocb = *piocb;
                *piocb = NULL;
        }

        return nextiocb;
}

int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                    struct lpfc_iocbq *piocb, uint32_t flag)
{
        struct lpfc_iocbq *nextiocb;
        IOCB_t *iocb;

        /* If the PCI channel is in offline state, do not post iocbs. */
        if (unlikely(pci_channel_offline(phba->pcidev)))
                return IOCB_ERROR;

        /*
         * We should never get an IOCB if we are in a < LINK_DOWN state
         */
        if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
                return IOCB_ERROR;

        /*
         * Check to see if we are blocking IOCB processing because of an
         * outstanding mbox command.
         */
        if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
                goto iocb_busy;

        if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
                /*
                 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
                 * can be issued if the link is not up.
                 */
                switch (piocb->iocb.ulpCommand) {
                case CMD_QUE_RING_BUF_CN:
                case CMD_QUE_RING_BUF64_CN:
                        /*
                         * For IOCBs, like QUE_RING_BUF, that have no rsp ring
                         * completion, iocb_cmpl MUST be 0.
                         */
                        if (piocb->iocb_cmpl)
                                piocb->iocb_cmpl = NULL;
                        /*FALLTHROUGH*/
                case CMD_CREATE_XRI_CR:
                case CMD_CLOSE_XRI_CN:
                case CMD_CLOSE_XRI_CX:
                        break;
                default:
                        goto iocb_busy;
                }

        /*
         * For FCP commands, we must be in a state where we can process link
         * attention events.
         */
        } else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
                   !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
                goto iocb_busy;

        while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
               (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
                lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

        if (iocb)
                lpfc_sli_update_ring(phba, pring);
        else
                lpfc_sli_update_full_ring(phba, pring);

        if (!piocb)
                return IOCB_SUCCESS;

        goto out_busy;

iocb_busy:
        pring->stats.iocb_cmd_delay++;

out_busy:

        if (!(flag & SLI_IOCB_RET_IOCB)) {
                lpfc_sli_ringtx_put(phba, pring, piocb);
                return IOCB_SUCCESS;
        }

        return IOCB_BUSY;
}
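/*
 * Reader's sketch (not compiled): the three outcomes of
 * lpfc_sli_issue_iocb() as seen by a hypothetical caller.  Without
 * SLI_IOCB_RET_IOCB a busy iocb is silently parked on the txq and
 * IOCB_SUCCESS is returned; with the flag the iocb is handed back so the
 * caller decides what to do with it.
 */
#if 0
static int example_issue_no_queue(struct lpfc_hba *phba,
                                  struct lpfc_sli_ring *pring,
                                  struct lpfc_iocbq *piocb)
{
        int rc;

        rc = lpfc_sli_issue_iocb(phba, pring, piocb, SLI_IOCB_RET_IOCB);
        if (rc == IOCB_SUCCESS)
                return 0;

        /* IOCB_BUSY or IOCB_ERROR: the iocb is still ours to clean up */
        lpfc_sli_release_iocbq(phba, piocb);
        return -EIO;
}
#endif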
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;

        psli = &phba->sli;

        /* Adjust cmd/rsp ring iocb entries more evenly */

        /* Take some away from the FCP ring */
        pring = &psli->ring[psli->fcp_ring];
        pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
        pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
        pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
        pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

        /* and give them to the extra ring */
        pring = &psli->ring[psli->extra_ring];

        pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
        pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
        pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
        pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

        /* Setup default profile for this ring */
        pring->iotag_max = 4096;
        pring->num_mask = 1;
        pring->prt[0].profile = 0;      /* Mask 0 */
        pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
        pring->prt[0].type = phba->cfg_multi_ring_type;
        pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
        return 0;
}

int
lpfc_sli_setup(struct lpfc_hba *phba)
{
        int i, totiocb = 0;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;

        psli->num_rings = MAX_CONFIGURED_RINGS;
        psli->sli_flag = 0;
        psli->fcp_ring = LPFC_FCP_RING;
        psli->next_ring = LPFC_FCP_NEXT_RING;
        psli->extra_ring = LPFC_EXTRA_RING;

        psli->iocbq_lookup = NULL;
        psli->iocbq_lookup_len = 0;
        psli->last_iotag = 0;

        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                switch (i) {
                case LPFC_FCP_RING:     /* ring 0 - FCP */
                        /* numCiocb and numRiocb are used in config_port */
                        pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
                        pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
                        pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
                        pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
                        pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
                        pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
                        pring->iotag_ctr = 0;
                        pring->iotag_max =
                            (phba->cfg_hba_queue_depth * 2);
                        pring->fast_iotag = pring->iotag_max;
                        pring->num_mask = 0;
                        break;
                case LPFC_EXTRA_RING:   /* ring 1 - EXTRA */
                        /* numCiocb and numRiocb are used in config_port */
                        pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
                        pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
                        pring->num_mask = 0;
                        break;
                case LPFC_ELS_RING:     /* ring 2 - ELS / CT */
                        /* numCiocb and numRiocb are used in config_port */
                        pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
                        pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
                        pring->fast_iotag = 0;
                        pring->iotag_ctr = 0;
                        pring->iotag_max = 4096;
                        pring->num_mask = 4;
                        pring->prt[0].profile = 0;      /* Mask 0 */
                        pring->prt[0].rctl = FC_ELS_REQ;
                        pring->prt[0].type = FC_ELS_DATA;
                        pring->prt[0].lpfc_sli_rcv_unsol_event =
                            lpfc_els_unsol_event;
                        pring->prt[1].profile = 0;      /* Mask 1 */
                        pring->prt[1].rctl = FC_ELS_RSP;
                        pring->prt[1].type = FC_ELS_DATA;
                        pring->prt[1].lpfc_sli_rcv_unsol_event =
                            lpfc_els_unsol_event;
                        pring->prt[2].profile = 0;      /* Mask 2 */
                        /* NameServer Inquiry */
                        pring->prt[2].rctl = FC_UNSOL_CTL;
                        /* NameServer */
                        pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
                        pring->prt[2].lpfc_sli_rcv_unsol_event =
                            lpfc_ct_unsol_event;
                        pring->prt[3].profile = 0;      /* Mask 3 */
                        /* NameServer response */
                        pring->prt[3].rctl = FC_SOL_CTL;
                        /* NameServer */
                        pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
                        pring->prt[3].lpfc_sli_rcv_unsol_event =
                            lpfc_ct_unsol_event;
                        break;
                }
                totiocb += (pring->numCiocb + pring->numRiocb);
        }
        if (totiocb > MAX_SLI2_IOCB) {
                /* Too many cmd / rsp ring entries in SLI2 SLIM */
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "%d:0462 Too many cmd / rsp ring entries in "
                                "SLI2 SLIM Data: x%x x%x\n",
                                phba->brd_no, totiocb, MAX_SLI2_IOCB);
        }
        if (phba->cfg_multi_ring_support == 2)
                lpfc_extra_ring_setup(phba);

        return 0;
}
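/*
 * Note on the prt[] masks configured above: when an unsolicited frame
 * arrives on a ring, the SLI layer compares the frame's R_CTL and TYPE
 * against each of the ring's num_mask prt[] entries and hands the frame to
 * the matching entry's lpfc_sli_rcv_unsol_event handler; for the ELS ring
 * that routes ELS request/response traffic to lpfc_els_unsol_event and
 * NameServer (common transport) traffic to lpfc_ct_unsol_event.
 */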
int
lpfc_sli_queue_setup(struct lpfc_hba * phba)
{
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;
        int i;

        psli = &phba->sli;
        spin_lock_irq(phba->host->host_lock);
        INIT_LIST_HEAD(&psli->mboxq);
        /* Initialize list headers for txq and txcmplq as doubly
           linked lists */
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                pring->ringno = i;
                pring->next_cmdidx = 0;
                pring->local_getidx = 0;
                pring->cmdidx = 0;
                INIT_LIST_HEAD(&pring->txq);
                INIT_LIST_HEAD(&pring->txcmplq);
                INIT_LIST_HEAD(&pring->iocb_continueq);
                INIT_LIST_HEAD(&pring->postbufq);
        }
        spin_unlock_irq(phba->host->host_lock);
        return (1);
}

int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
        LIST_HEAD(completions);
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;
        LPFC_MBOXQ_t *pmb;
        struct lpfc_iocbq *iocb;
        IOCB_t *cmd = NULL;
        int i;
        unsigned long flags = 0;

        psli = &phba->sli;
        lpfc_hba_down_prep(phba);

        spin_lock_irqsave(phba->host->host_lock, flags);
        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                pring->flag |= LPFC_DEFERRED_RING_EVENT;

                /*
                 * Error everything on the txq since these iocbs have not
                 * been given to the FW yet.
                 */
                list_splice_init(&pring->txq, &completions);
                pring->txq_cnt = 0;

        }
        spin_unlock_irqrestore(phba->host->host_lock, flags);

        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
                cmd = &iocb->iocb;
                list_del(&iocb->list);

                if (iocb->iocb_cmpl) {
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                } else
                        lpfc_sli_release_iocbq(phba, iocb);
        }

        /* Return any active mbox cmds */
        del_timer_sync(&psli->mbox_tmo);
        spin_lock_irqsave(phba->host->host_lock, flags);
        phba->work_hba_events &= ~WORKER_MBOX_TMO;
        if (psli->mbox_active) {
                pmb = psli->mbox_active;
                pmb->mb.mbxStatus = MBX_NOT_FINISHED;
                if (pmb->mbox_cmpl) {
                        spin_unlock_irqrestore(phba->host->host_lock, flags);
                        pmb->mbox_cmpl(phba, pmb);
                        spin_lock_irqsave(phba->host->host_lock, flags);
                }
        }
        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        psli->mbox_active = NULL;

        /* Return any pending mbox cmds */
        while ((pmb = lpfc_mbox_get(phba)) != NULL) {
                pmb->mb.mbxStatus = MBX_NOT_FINISHED;
                if (pmb->mbox_cmpl) {
                        spin_unlock_irqrestore(phba->host->host_lock, flags);
                        pmb->mbox_cmpl(phba, pmb);
                        spin_lock_irqsave(phba->host->host_lock, flags);
                }
        }

        INIT_LIST_HEAD(&psli->mboxq);

        spin_unlock_irqrestore(phba->host->host_lock, flags);

        return 1;
}
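/*
 * Reader's sketch (not compiled): the iocb_cmpl contract used by the txq
 * flush above.  A completion handler receives the command iocbq and a
 * response iocbq (here the command itself, with ulpStatus/ulpWord[4] faked
 * to IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN) and owns the command's cleanup.
 * The handler name is hypothetical.
 */
#if 0
static void example_iocb_cmpl(struct lpfc_hba *phba,
                              struct lpfc_iocbq *cmdiocb,
                              struct lpfc_iocbq *rspiocb)
{
        if (rspiocb->iocb.ulpStatus == IOSTAT_LOCAL_REJECT &&
            rspiocb->iocb.un.ulpWord[4] == IOERR_SLI_DOWN) {
                /* the HBA went down underneath this command */
        }
        lpfc_sli_release_iocbq(phba, cmdiocb);
}
#endif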
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint32_t *src = srcp;
        uint32_t *dest = destp;
        uint32_t ldata;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
                ldata = *src;
                ldata = le32_to_cpu(ldata);
                *dest = ldata;
                src++;
                dest++;
        }
}

int
lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
                         struct lpfc_dmabuf * mp)
{
        /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
           up later */
        list_add_tail(&mp->list, &pring->postbufq);

        pring->postbufq_cnt++;
        return 0;
}


struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         dma_addr_t phys)
{
        struct lpfc_dmabuf *mp, *next_mp;
        struct list_head *slp = &pring->postbufq;

        /* Search postbufq, from the beginning, looking for a match on phys */
        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                if (mp->phys == phys) {
                        list_del_init(&mp->list);
                        pring->postbufq_cnt--;
                        return mp;
                }
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "%d:0410 Cannot find virtual addr for mapped buf on "
                        "ring %d Data x%llx x%p x%p x%x\n",
                        phba->brd_no, pring->ringno, (unsigned long long)phys,
                        slp->next, slp->prev, pring->postbufq_cnt);
        return NULL;
}
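/*
 * Reader's sketch (not compiled): the postbufq pair above gives the driver
 * a DMA-address-to-buffer lookup for buffers posted to the HBA.  A
 * hypothetical round trip: post the mapping, then recover the lpfc_dmabuf
 * when the hardware later reports only the physical address.
 */
#if 0
static void example_postbuf_roundtrip(struct lpfc_hba *phba,
                                      struct lpfc_sli_ring *pring,
                                      struct lpfc_dmabuf *mp)
{
        dma_addr_t phys = mp->phys;

        lpfc_sli_ringpostbuf_put(phba, pring, mp);
        /* ... the HBA consumes the buffer and hands back only phys ... */
        mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
}
#endif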
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                        struct lpfc_iocbq * rspiocb)
{
        IOCB_t *irsp;
        uint16_t abort_iotag, abort_context;
        struct lpfc_iocbq *abort_iocb, *rsp_ab_iocb;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        abort_iocb = NULL;
        irsp = &rspiocb->iocb;

        spin_lock_irq(phba->host->host_lock);

        if (irsp->ulpStatus) {
                abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
                abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

                if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
                        abort_iocb = phba->sli.iocbq_lookup[abort_iotag];

                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "%d:0327 Cannot abort els iocb %p"
                                " with tag %x context %x\n",
                                phba->brd_no, abort_iocb,
                                abort_iotag, abort_context);

                /*
                 * Make sure we have the right iocbq before taking it
                 * off the txcmplq and trying to call the completion routine.
                 */
                if (abort_iocb &&
                    abort_iocb->iocb.ulpContext == abort_context &&
                    abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
                        list_del(&abort_iocb->list);
                        pring->txcmplq_cnt--;

                        rsp_ab_iocb = lpfc_sli_get_iocbq(phba);
                        if (rsp_ab_iocb == NULL)
                                lpfc_sli_release_iocbq(phba, abort_iocb);
                        else {
                                abort_iocb->iocb_flag &=
                                        ~LPFC_DRIVER_ABORTED;
                                rsp_ab_iocb->iocb.ulpStatus =
                                        IOSTAT_LOCAL_REJECT;
                                rsp_ab_iocb->iocb.un.ulpWord[4] =
                                        IOERR_SLI_ABORTED;
                                spin_unlock_irq(phba->host->host_lock);
                                (abort_iocb->iocb_cmpl)
                                        (phba, abort_iocb, rsp_ab_iocb);
                                spin_lock_irq(phba->host->host_lock);
                                lpfc_sli_release_iocbq(phba, rsp_ab_iocb);
                        }
                }
        }

        lpfc_sli_release_iocbq(phba, cmdiocb);
        spin_unlock_irq(phba->host->host_lock);
        return;
}

int
lpfc_sli_issue_abort_iotag(struct lpfc_hba * phba,
                           struct lpfc_sli_ring * pring,
                           struct lpfc_iocbq * cmdiocb)
{
        struct lpfc_iocbq *abtsiocbp;
        IOCB_t *icmd = NULL;
        IOCB_t *iabt = NULL;
        int retval = IOCB_ERROR;

        /* There are certain command types we don't want
         * to abort.
         */
        icmd = &cmdiocb->iocb;
        if ((icmd->ulpCommand == CMD_ABORT_XRI_CN) ||
            (icmd->ulpCommand == CMD_CLOSE_XRI_CN))
                return 0;

        /* If we're unloading, interrupts are disabled so we
         * need to clean up the iocb here.
         */
        if (phba->fc_flag & FC_UNLOADING)
                goto abort_iotag_exit;

        /* issue ABTS for this IOCB based on iotag */
        abtsiocbp = lpfc_sli_get_iocbq(phba);
        if (abtsiocbp == NULL)
                return 0;

        /* This flag signals the response handler to set the correct status
         * before calling the completion handler.
         */
        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

        iabt = &abtsiocbp->iocb;
        iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
        iabt->un.acxri.abortContextTag = icmd->ulpContext;
        iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
        iabt->ulpLe = 1;
        iabt->ulpClass = icmd->ulpClass;

        if (phba->hba_state >= LPFC_LINK_UP)
                iabt->ulpCommand = CMD_ABORT_XRI_CN;
        else
                iabt->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "%d:0339 Abort xri x%x, original iotag x%x, abort "
                        "cmd iotag x%x\n",
                        phba->brd_no, iabt->un.acxri.abortContextTag,
                        iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
        retval = lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);

abort_iotag_exit:

        /* If we could not issue an abort, dequeue the iocb and handle
         * the completion here.
         */
        if (retval == IOCB_ERROR) {
                list_del(&cmdiocb->list);
                pring->txcmplq_cnt--;

                if (cmdiocb->iocb_cmpl) {
                        icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        spin_unlock_irq(phba->host->host_lock);
                        (cmdiocb->iocb_cmpl) (phba, cmdiocb, cmdiocb);
                        spin_lock_irq(phba->host->host_lock);
                } else
                        lpfc_sli_release_iocbq(phba, cmdiocb);
        }

        return 1;
}
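/*
 * Reader's sketch (not compiled): aborting an outstanding ELS command by
 * iotag.  The host_lock is taken around the call on the assumption that
 * callers hold it, which the unlock/lock pair inside
 * lpfc_sli_issue_abort_iotag() above implies.
 */
#if 0
static void example_abort_els(struct lpfc_hba *phba,
                              struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        spin_lock_irq(phba->host->host_lock);
        lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
        spin_unlock_irq(phba->host->host_lock);
}
#endif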
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
                           uint64_t lun_id, uint32_t ctx,
                           lpfc_ctx_cmd ctx_cmd)
{
        struct lpfc_scsi_buf *lpfc_cmd;
        struct scsi_cmnd *cmnd;
        int rc = 1;

        if (!(iocbq->iocb_flag & LPFC_IO_FCP))
                return rc;

        lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
        cmnd = lpfc_cmd->pCmd;

        if (cmnd == NULL)
                return rc;

        switch (ctx_cmd) {
        case LPFC_CTX_LUN:
                if ((cmnd->device->id == tgt_id) &&
                    (cmnd->device->lun == lun_id))
                        rc = 0;
                break;
        case LPFC_CTX_TGT:
                if (cmnd->device->id == tgt_id)
                        rc = 0;
                break;
        case LPFC_CTX_CTX:
                if (iocbq->iocb.ulpContext == ctx)
                        rc = 0;
                break;
        case LPFC_CTX_HOST:
                rc = 0;
                break;
        default:
                printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
                       __FUNCTION__, ctx_cmd);
                break;
        }

        return rc;
}

int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                  uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
        struct lpfc_iocbq *iocbq;
        int sum, i;

        for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id,
                                               0, ctx_cmd) == 0)
                        sum++;
        }

        return sum;
}

void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
                        struct lpfc_iocbq * rspiocb)
{
        unsigned long iflags;

        spin_lock_irqsave(phba->host->host_lock, iflags);
        lpfc_sli_release_iocbq(phba, cmdiocb);
        spin_unlock_irqrestore(phba->host->host_lock, iflags);
        return;
}

int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
                    lpfc_ctx_cmd abort_cmd)
{
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *abtsiocb;
        IOCB_t *cmd = NULL;
        int errcnt = 0, ret_val = 0;
        int i;

        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb(iocbq, tgt_id, lun_id,
                                               0, abort_cmd) != 0)
                        continue;

                /* issue ABTS for this IOCB based on iotag */
                abtsiocb = lpfc_sli_get_iocbq(phba);
                if (abtsiocb == NULL) {
                        errcnt++;
                        continue;
                }

                cmd = &iocbq->iocb;
                abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
                abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
                abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
                abtsiocb->iocb.ulpLe = 1;
                abtsiocb->iocb.ulpClass = cmd->ulpClass;

                if (phba->hba_state >= LPFC_LINK_UP)
                        abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
                else
                        abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

                /* Setup callback routine and issue the command. */
                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
                ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
                if (ret_val == IOCB_ERROR) {
                        lpfc_sli_release_iocbq(phba, abtsiocb);
                        errcnt++;
                        continue;
                }
        }

        return errcnt;
}
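/*
 * Reader's sketch (not compiled): lpfc_sli_abort_iocb() and
 * lpfc_sli_sum_iocb() are the building blocks for SCSI reset handling.  A
 * hypothetical LUN flush aborts everything outstanding on the LUN and then
 * polls the count down to zero, bounded by a timeout.
 */
#if 0
static int example_flush_lun(struct lpfc_hba *phba,
                             struct lpfc_sli_ring *pring,
                             uint16_t tgt_id, uint64_t lun_id)
{
        int wait_ms = 10000;

        lpfc_sli_abort_iocb(phba, pring, tgt_id, lun_id, 0, LPFC_CTX_LUN);
        while (wait_ms > 0 &&
               lpfc_sli_sum_iocb(phba, pring, tgt_id, lun_id, LPFC_CTX_LUN)) {
                msleep(10);
                wait_ms -= 10;
        }
        return (wait_ms > 0) ? 0 : -ETIMEDOUT;
}
#endif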
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        wait_queue_head_t *pdone_q;
        unsigned long iflags;

        spin_lock_irqsave(phba->host->host_lock, iflags);
        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
        if (cmdiocbq->context2 && rspiocbq)
                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                       &rspiocbq->iocb, sizeof(IOCB_t));

        pdone_q = cmdiocbq->context_un.wait_queue;
        spin_unlock_irqrestore(phba->host->host_lock, iflags);
        if (pdone_q)
                wake_up(pdone_q);
        return;
}

/*
 * Issue the caller's iocb and wait for its completion, but no longer than
 * the caller's timeout.  Note that iocb_flag is cleared before the
 * lpfc_sli_issue_iocb call, since the wake routine sets a unique value and
 * by definition this is a wait function.
 */
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
                         struct lpfc_sli_ring * pring,
                         struct lpfc_iocbq * piocb,
                         struct lpfc_iocbq * prspiocbq,
                         uint32_t timeout)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
        long timeleft, timeout_req = 0;
        int retval = IOCB_SUCCESS;
        uint32_t creg_val;

        /*
         * If the caller has provided a response iocbq buffer, then context2
         * must be NULL or it is an error.
         */
        if (prspiocbq) {
                if (piocb->context2)
                        return IOCB_ERROR;
                piocb->context2 = prspiocbq;
        }

        piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
        piocb->context_un.wait_queue = &done_q;
        piocb->iocb_flag &= ~LPFC_IO_WAKE;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                creg_val = readl(phba->HCregaddr);
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
        if (retval == IOCB_SUCCESS) {
                timeout_req = timeout * HZ;
                spin_unlock_irq(phba->host->host_lock);
                timeleft = wait_event_timeout(done_q,
                                piocb->iocb_flag & LPFC_IO_WAKE,
                                timeout_req);
                spin_lock_irq(phba->host->host_lock);

                if (piocb->iocb_flag & LPFC_IO_WAKE) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "%d:0331 IOCB wake signaled\n",
                                        phba->brd_no);
                } else if (timeleft == 0) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "%d:0338 IOCB wait timeout error - no "
                                        "wake response Data x%x\n",
                                        phba->brd_no, timeout);
                        retval = IOCB_TIMEDOUT;
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "%d:0330 IOCB wake NOT set, "
                                        "Data x%x x%lx\n", phba->brd_no,
                                        timeout, timeleft);
                        retval = IOCB_TIMEDOUT;
                }
        } else {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "%d:0332 IOCB wait issue failed, Data x%x\n",
                                phba->brd_no, retval);
                retval = IOCB_ERROR;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                creg_val = readl(phba->HCregaddr);
                creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (prspiocbq)
                piocb->context2 = NULL;

        piocb->context_un.wait_queue = NULL;
        piocb->iocb_cmpl = NULL;
        return retval;
}
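/*
 * Reader's sketch (not compiled): synchronous use of the wait wrapper
 * above.  The caller supplies a second iocbq to receive the response and is
 * assumed to hold host_lock, which the wrapper drops while it sleeps.  A
 * zero ulpStatus (IOSTAT_SUCCESS) indicates the command completed cleanly.
 */
#if 0
static int example_issue_sync(struct lpfc_hba *phba,
                              struct lpfc_sli_ring *pring,
                              struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_iocbq *rspiocb;
        int rc = -EIO;

        rspiocb = lpfc_sli_get_iocbq(phba);
        if (!rspiocb)
                return -ENOMEM;

        if (lpfc_sli_issue_iocb_wait(phba, pring, cmdiocb, rspiocb,
                                     30) == IOCB_SUCCESS &&
            rspiocb->iocb.ulpStatus == 0)
                rc = 0;

        lpfc_sli_release_iocbq(phba, rspiocb);
        return rc;
}
#endif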
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
                         uint32_t timeout)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
        int retval;

        /* The caller must leave context1 empty. */
        if (pmboxq->context1 != 0) {
                return (MBX_NOT_FINISHED);
        }

        /* setup wake call as mailbox completion callback */
        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
        /* setup context field to pass wait_queue pointer to wake function */
        pmboxq->context1 = &done_q;

        /* now issue the command */
        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_event_interruptible_timeout(done_q,
                                pmboxq->mbox_flag & LPFC_MBX_WAKE,
                                timeout * HZ);

                pmboxq->context1 = NULL;
                /*
                 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
                 * otherwise do not free the resources.
                 */
                if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
                        retval = MBX_SUCCESS;
                else
                        retval = MBX_TIMEOUT;
        }

        return retval;
}

int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
        int i = 0;

        while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
                if (i++ > LPFC_MBOX_TMO * 1000)
                        return 1;

                if (lpfc_sli_handle_mb_event(phba) == 0)
                        i = 0;

                msleep(1);
        }

        return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}
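/*
 * Reader's sketch (not compiled): synchronous mailbox use via
 * lpfc_sli_issue_mbox_wait().  On MBX_TIMEOUT the mailbox memory must not
 * be freed, since the command may still complete later; on any other return
 * the caller owns the mailbox again.
 */
#if 0
static int example_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        int rc;

        rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
        if (rc != MBX_TIMEOUT)
                mempool_free(pmb, phba->mbox_mem_pool);
        return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif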
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
        struct lpfc_hba *phba;
        uint32_t ha_copy;
        uint32_t work_ha_copy;
        unsigned long status;
        int i;
        uint32_t control;

        /*
         * Get the driver's phba structure from the dev_id and
         * assume the HBA is not interrupting.
         */
        phba = (struct lpfc_hba *) dev_id;

        if (unlikely(!phba))
                return IRQ_NONE;

        /* If the pci channel is offline, ignore all the interrupts. */
        if (unlikely(pci_channel_offline(phba->pcidev)))
                return IRQ_NONE;

        phba->sli.slistat.sli_intr++;

        /*
         * Call the HBA to see if it is interrupting.  If not, don't claim
         * the interrupt.
         */

        /* Ignore all interrupts during initialization. */
        if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
                return IRQ_NONE;

        /*
         * Read host attention register to determine interrupt source.
         * Clear Attention Sources, except Error Attention (to
         * preserve status) and Link Attention.
         */
        spin_lock(phba->host->host_lock);
        ha_copy = readl(phba->HAregaddr);
        /* If somebody is waiting to handle an eratt, don't process it
         * here.  The brdkill function will do this.
         */
        if (phba->fc_flag & FC_IGNORE_ERATT)
                ha_copy &= ~HA_ERATT;
        writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock(phba->host->host_lock);

        if (unlikely(!ha_copy))
                return IRQ_NONE;

        work_ha_copy = ha_copy & phba->work_ha_mask;

        if (unlikely(work_ha_copy)) {
                if (work_ha_copy & HA_LATT) {
                        if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
                                /*
                                 * Turn off Link Attention interrupts
                                 * until CLEAR_LA done
                                 */
                                spin_lock(phba->host->host_lock);
                                phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
                                control = readl(phba->HCregaddr);
                                control &= ~HC_LAINT_ENA;
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                                spin_unlock(phba->host->host_lock);
                        } else
                                work_ha_copy &= ~HA_LATT;
                }

                if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
                        for (i = 0; i < phba->sli.num_rings; i++) {
                                if (work_ha_copy & (HA_RXATT << (4*i))) {
                                        /*
                                         * Turn off Slow Rings interrupts
                                         */
                                        spin_lock(phba->host->host_lock);
                                        control = readl(phba->HCregaddr);
                                        control &= ~(HC_R0INT_ENA << i);
                                        writel(control, phba->HCregaddr);
                                        readl(phba->HCregaddr); /* flush */
                                        spin_unlock(phba->host->host_lock);
                                }
                        }
                }

                if (work_ha_copy & HA_ERATT) {
                        phba->hba_state = LPFC_HBA_ERROR;
                        /*
                         * There was a link/board error.  Read the
                         * status register to retrieve the error event
                         * and process it.
                         */
                        phba->sli.slistat.err_attn_event++;
                        /* Save status info */
                        phba->work_hs = readl(phba->HSregaddr);
                        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
                        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

                        /* Clear Chip error bit */
                        writel(HA_ERATT, phba->HAregaddr);
                        readl(phba->HAregaddr); /* flush */
                        phba->stopped = 1;
                }

                spin_lock(phba->host->host_lock);
                phba->work_ha |= work_ha_copy;
                if (phba->work_wait)
                        wake_up(phba->work_wait);
                spin_unlock(phba->host->host_lock);
        }

        ha_copy &= ~(phba->work_ha_mask);

        /*
         * Process all events on the FCP ring.  Take the optimized path for
         * FCP IO.  Any other IO is slow path and is handled by the worker
         * thread.
         */
        status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
        status >>= (4*LPFC_FCP_RING);
        if (status & HA_RXATT)
                lpfc_sli_handle_fast_ring_event(phba,
                                                &phba->sli.ring[LPFC_FCP_RING],
                                                status);

        if (phba->cfg_multi_ring_support == 2) {
                /*
                 * Process all events on the extra ring.  Take the optimized
                 * path for extra ring IO.  Any other IO is slow path and is
                 * handled by the worker thread.
                 */
                status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
                status >>= (4*LPFC_EXTRA_RING);
                if (status & HA_RXATT) {
                        lpfc_sli_handle_fast_ring_event(phba,
                                        &phba->sli.ring[LPFC_EXTRA_RING],
                                        status);
                }
        }
        return IRQ_HANDLED;

} /* lpfc_intr_handler */
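/*
 * Reader's sketch (not compiled): lpfc_intr_handler() is registered at
 * attach time with dev_id pointing at the lpfc_hba, which is why the
 * handler above casts dev_id straight back.  Registration is assumed to
 * live in lpfc_init.c and to look roughly like this (IRQF_SHARED since the
 * interrupt line may be shared; LPFC_DRIVER_NAME is the driver's name
 * string).
 */
#if 0
static int example_register_irq(struct lpfc_hba *phba)
{
        return request_irq(phba->pcidev->irq, lpfc_intr_handler,
                           IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}
#endif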