1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2007 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 27 #include <scsi/scsi.h> 28 #include <scsi/scsi_cmnd.h> 29 #include <scsi/scsi_device.h> 30 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_transport_fc.h> 32 33 #include "lpfc_hw.h" 34 #include "lpfc_sli.h" 35 #include "lpfc_disc.h" 36 #include "lpfc_scsi.h" 37 #include "lpfc.h" 38 #include "lpfc_crtn.h" 39 #include "lpfc_logmsg.h" 40 #include "lpfc_compat.h" 41 #include "lpfc_debugfs.h" 42 43 /* 44 * Define macro to log: Mailbox command x%x cannot issue Data 45 * This allows multiple uses of lpfc_msgBlk0311 46 * w/o perturbing log msg utility. 47 */ 48 #define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \ 49 lpfc_printf_log(phba, \ 50 KERN_INFO, \ 51 LOG_MBOX | LOG_SLI, \ 52 "(%d):0311 Mailbox command x%x cannot " \ 53 "issue Data: x%x x%x x%x\n", \ 54 pmbox->vport ? pmbox->vport->vpi : 0, \ 55 pmbox->mb.mbxCommand, \ 56 phba->pport->port_state, \ 57 psli->sli_flag, \ 58 flag) 59 60 61 /* There are only four IOCB completion types. */ 62 typedef enum _lpfc_iocb_type { 63 LPFC_UNKNOWN_IOCB, 64 LPFC_UNSOL_IOCB, 65 LPFC_SOL_IOCB, 66 LPFC_ABORT_IOCB 67 } lpfc_iocb_type; 68 69 /* SLI-2/SLI-3 provide different sized iocbs. Given a pointer 70 * to the start of the ring, and the slot number of the 71 * desired iocb entry, calc a pointer to that entry. 72 */ 73 static inline IOCB_t * 74 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 75 { 76 return (IOCB_t *) (((char *) pring->cmdringaddr) + 77 pring->cmdidx * phba->iocb_cmd_size); 78 } 79 80 static inline IOCB_t * 81 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 82 { 83 return (IOCB_t *) (((char *) pring->rspringaddr) + 84 pring->rspidx * phba->iocb_rsp_size); 85 } 86 87 static struct lpfc_iocbq * 88 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 89 { 90 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 91 struct lpfc_iocbq * iocbq = NULL; 92 93 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 94 return iocbq; 95 } 96 97 struct lpfc_iocbq * 98 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 99 { 100 struct lpfc_iocbq * iocbq = NULL; 101 unsigned long iflags; 102 103 spin_lock_irqsave(&phba->hbalock, iflags); 104 iocbq = __lpfc_sli_get_iocbq(phba); 105 spin_unlock_irqrestore(&phba->hbalock, iflags); 106 return iocbq; 107 } 108 109 static void 110 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 111 { 112 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 113 114 /* 115 * Clean all volatile data fields, preserve iotag and node struct. 116 */ 117 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 118 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 119 } 120 121 void 122 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 123 { 124 unsigned long iflags; 125 126 /* 127 * Clean all volatile data fields, preserve iotag and node struct. 128 */ 129 spin_lock_irqsave(&phba->hbalock, iflags); 130 __lpfc_sli_release_iocbq(phba, iocbq); 131 spin_unlock_irqrestore(&phba->hbalock, iflags); 132 } 133 134 /* 135 * Translate the iocb command to an iocb command type used to decide the final 136 * disposition of each completed IOCB. 137 */ 138 static lpfc_iocb_type 139 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 140 { 141 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 142 143 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 144 return 0; 145 146 switch (iocb_cmnd) { 147 case CMD_XMIT_SEQUENCE_CR: 148 case CMD_XMIT_SEQUENCE_CX: 149 case CMD_XMIT_BCAST_CN: 150 case CMD_XMIT_BCAST_CX: 151 case CMD_ELS_REQUEST_CR: 152 case CMD_ELS_REQUEST_CX: 153 case CMD_CREATE_XRI_CR: 154 case CMD_CREATE_XRI_CX: 155 case CMD_GET_RPI_CN: 156 case CMD_XMIT_ELS_RSP_CX: 157 case CMD_GET_RPI_CR: 158 case CMD_FCP_IWRITE_CR: 159 case CMD_FCP_IWRITE_CX: 160 case CMD_FCP_IREAD_CR: 161 case CMD_FCP_IREAD_CX: 162 case CMD_FCP_ICMND_CR: 163 case CMD_FCP_ICMND_CX: 164 case CMD_FCP_TSEND_CX: 165 case CMD_FCP_TRSP_CX: 166 case CMD_FCP_TRECEIVE_CX: 167 case CMD_FCP_AUTO_TRSP_CX: 168 case CMD_ADAPTER_MSG: 169 case CMD_ADAPTER_DUMP: 170 case CMD_XMIT_SEQUENCE64_CR: 171 case CMD_XMIT_SEQUENCE64_CX: 172 case CMD_XMIT_BCAST64_CN: 173 case CMD_XMIT_BCAST64_CX: 174 case CMD_ELS_REQUEST64_CR: 175 case CMD_ELS_REQUEST64_CX: 176 case CMD_FCP_IWRITE64_CR: 177 case CMD_FCP_IWRITE64_CX: 178 case CMD_FCP_IREAD64_CR: 179 case CMD_FCP_IREAD64_CX: 180 case CMD_FCP_ICMND64_CR: 181 case CMD_FCP_ICMND64_CX: 182 case CMD_FCP_TSEND64_CX: 183 case CMD_FCP_TRSP64_CX: 184 case CMD_FCP_TRECEIVE64_CX: 185 case CMD_GEN_REQUEST64_CR: 186 case CMD_GEN_REQUEST64_CX: 187 case CMD_XMIT_ELS_RSP64_CX: 188 type = LPFC_SOL_IOCB; 189 break; 190 case CMD_ABORT_XRI_CN: 191 case CMD_ABORT_XRI_CX: 192 case CMD_CLOSE_XRI_CN: 193 case CMD_CLOSE_XRI_CX: 194 case CMD_XRI_ABORTED_CX: 195 case CMD_ABORT_MXRI64_CN: 196 type = LPFC_ABORT_IOCB; 197 break; 198 case CMD_RCV_SEQUENCE_CX: 199 case CMD_RCV_ELS_REQ_CX: 200 case CMD_RCV_SEQUENCE64_CX: 201 case CMD_RCV_ELS_REQ64_CX: 202 case CMD_ASYNC_STATUS: 203 case CMD_IOCB_RCV_SEQ64_CX: 204 case CMD_IOCB_RCV_ELS64_CX: 205 case CMD_IOCB_RCV_CONT64_CX: 206 type = LPFC_UNSOL_IOCB; 207 break; 208 default: 209 type = LPFC_UNKNOWN_IOCB; 210 break; 211 } 212 213 return type; 214 } 215 216 static int 217 lpfc_sli_ring_map(struct lpfc_hba *phba) 218 { 219 struct lpfc_sli *psli = &phba->sli; 220 LPFC_MBOXQ_t *pmb; 221 MAILBOX_t *pmbox; 222 int i, rc, ret = 0; 223 224 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 225 if (!pmb) 226 return -ENOMEM; 227 pmbox = &pmb->mb; 228 phba->link_state = LPFC_INIT_MBX_CMDS; 229 for (i = 0; i < psli->num_rings; i++) { 230 lpfc_config_ring(phba, i, pmb); 231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 232 if (rc != MBX_SUCCESS) { 233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 234 "0446 Adapter failed to init (%d), " 235 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 236 "ring %d\n", 237 rc, pmbox->mbxCommand, 238 pmbox->mbxStatus, i); 239 phba->link_state = LPFC_HBA_ERROR; 240 ret = -ENXIO; 241 break; 242 } 243 } 244 mempool_free(pmb, phba->mbox_mem_pool); 245 return ret; 246 } 247 248 static int 249 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 250 struct lpfc_iocbq *piocb) 251 { 252 list_add_tail(&piocb->list, &pring->txcmplq); 253 pring->txcmplq_cnt++; 254 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 255 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 256 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 257 if (!piocb->vport) 258 BUG(); 259 else 260 mod_timer(&piocb->vport->els_tmofunc, 261 jiffies + HZ * (phba->fc_ratov << 1)); 262 } 263 264 265 return 0; 266 } 267 268 static struct lpfc_iocbq * 269 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 270 { 271 struct lpfc_iocbq *cmd_iocb; 272 273 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 274 if (cmd_iocb != NULL) 275 pring->txq_cnt--; 276 return cmd_iocb; 277 } 278 279 static IOCB_t * 280 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 281 { 282 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 283 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 284 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 285 uint32_t max_cmd_idx = pring->numCiocb; 286 287 if ((pring->next_cmdidx == pring->cmdidx) && 288 (++pring->next_cmdidx >= max_cmd_idx)) 289 pring->next_cmdidx = 0; 290 291 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 292 293 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 294 295 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 297 "0315 Ring %d issue: portCmdGet %d " 298 "is bigger then cmd ring %d\n", 299 pring->ringno, 300 pring->local_getidx, max_cmd_idx); 301 302 phba->link_state = LPFC_HBA_ERROR; 303 /* 304 * All error attention handlers are posted to 305 * worker thread 306 */ 307 phba->work_ha |= HA_ERATT; 308 phba->work_hs = HS_FFER3; 309 310 /* hbalock should already be held */ 311 if (phba->work_wait) 312 lpfc_worker_wake_up(phba); 313 314 return NULL; 315 } 316 317 if (pring->local_getidx == pring->next_cmdidx) 318 return NULL; 319 } 320 321 return lpfc_cmd_iocb(phba, pring); 322 } 323 324 uint16_t 325 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 326 { 327 struct lpfc_iocbq **new_arr; 328 struct lpfc_iocbq **old_arr; 329 size_t new_len; 330 struct lpfc_sli *psli = &phba->sli; 331 uint16_t iotag; 332 333 spin_lock_irq(&phba->hbalock); 334 iotag = psli->last_iotag; 335 if(++iotag < psli->iocbq_lookup_len) { 336 psli->last_iotag = iotag; 337 psli->iocbq_lookup[iotag] = iocbq; 338 spin_unlock_irq(&phba->hbalock); 339 iocbq->iotag = iotag; 340 return iotag; 341 } else if (psli->iocbq_lookup_len < (0xffff 342 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 343 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 344 spin_unlock_irq(&phba->hbalock); 345 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 346 GFP_KERNEL); 347 if (new_arr) { 348 spin_lock_irq(&phba->hbalock); 349 old_arr = psli->iocbq_lookup; 350 if (new_len <= psli->iocbq_lookup_len) { 351 /* highly unprobable case */ 352 kfree(new_arr); 353 iotag = psli->last_iotag; 354 if(++iotag < psli->iocbq_lookup_len) { 355 psli->last_iotag = iotag; 356 psli->iocbq_lookup[iotag] = iocbq; 357 spin_unlock_irq(&phba->hbalock); 358 iocbq->iotag = iotag; 359 return iotag; 360 } 361 spin_unlock_irq(&phba->hbalock); 362 return 0; 363 } 364 if (psli->iocbq_lookup) 365 memcpy(new_arr, old_arr, 366 ((psli->last_iotag + 1) * 367 sizeof (struct lpfc_iocbq *))); 368 psli->iocbq_lookup = new_arr; 369 psli->iocbq_lookup_len = new_len; 370 psli->last_iotag = iotag; 371 psli->iocbq_lookup[iotag] = iocbq; 372 spin_unlock_irq(&phba->hbalock); 373 iocbq->iotag = iotag; 374 kfree(old_arr); 375 return iotag; 376 } 377 } else 378 spin_unlock_irq(&phba->hbalock); 379 380 lpfc_printf_log(phba, KERN_ERR,LOG_SLI, 381 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 382 psli->last_iotag); 383 384 return 0; 385 } 386 387 static void 388 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 389 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 390 { 391 /* 392 * Set up an iotag 393 */ 394 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 395 396 if (pring->ringno == LPFC_ELS_RING) { 397 lpfc_debugfs_slow_ring_trc(phba, 398 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 399 *(((uint32_t *) &nextiocb->iocb) + 4), 400 *(((uint32_t *) &nextiocb->iocb) + 6), 401 *(((uint32_t *) &nextiocb->iocb) + 7)); 402 } 403 404 /* 405 * Issue iocb command to adapter 406 */ 407 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 408 wmb(); 409 pring->stats.iocb_cmd++; 410 411 /* 412 * If there is no completion routine to call, we can release the 413 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 414 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 415 */ 416 if (nextiocb->iocb_cmpl) 417 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 418 else 419 __lpfc_sli_release_iocbq(phba, nextiocb); 420 421 /* 422 * Let the HBA know what IOCB slot will be the next one the 423 * driver will put a command into. 424 */ 425 pring->cmdidx = pring->next_cmdidx; 426 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 427 } 428 429 static void 430 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 431 { 432 int ringno = pring->ringno; 433 434 pring->flag |= LPFC_CALL_RING_AVAILABLE; 435 436 wmb(); 437 438 /* 439 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 440 * The HBA will tell us when an IOCB entry is available. 441 */ 442 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 443 readl(phba->CAregaddr); /* flush */ 444 445 pring->stats.iocb_cmd_full++; 446 } 447 448 static void 449 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 450 { 451 int ringno = pring->ringno; 452 453 /* 454 * Tell the HBA that there is work to do in this ring. 455 */ 456 wmb(); 457 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 458 readl(phba->CAregaddr); /* flush */ 459 } 460 461 static void 462 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 463 { 464 IOCB_t *iocb; 465 struct lpfc_iocbq *nextiocb; 466 467 /* 468 * Check to see if: 469 * (a) there is anything on the txq to send 470 * (b) link is up 471 * (c) link attention events can be processed (fcp ring only) 472 * (d) IOCB processing is not blocked by the outstanding mbox command. 473 */ 474 if (pring->txq_cnt && 475 lpfc_is_link_up(phba) && 476 (pring->ringno != phba->sli.fcp_ring || 477 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 478 479 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 480 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 481 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 482 483 if (iocb) 484 lpfc_sli_update_ring(phba, pring); 485 else 486 lpfc_sli_update_full_ring(phba, pring); 487 } 488 489 return; 490 } 491 492 static struct lpfc_hbq_entry * 493 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 494 { 495 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 496 497 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 498 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 499 hbqp->next_hbqPutIdx = 0; 500 501 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 502 uint32_t raw_index = phba->hbq_get[hbqno]; 503 uint32_t getidx = le32_to_cpu(raw_index); 504 505 hbqp->local_hbqGetIdx = getidx; 506 507 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 508 lpfc_printf_log(phba, KERN_ERR, 509 LOG_SLI | LOG_VPORT, 510 "1802 HBQ %d: local_hbqGetIdx " 511 "%u is > than hbqp->entry_count %u\n", 512 hbqno, hbqp->local_hbqGetIdx, 513 hbqp->entry_count); 514 515 phba->link_state = LPFC_HBA_ERROR; 516 return NULL; 517 } 518 519 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 520 return NULL; 521 } 522 523 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 524 hbqp->hbqPutIdx; 525 } 526 527 void 528 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 529 { 530 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 531 struct hbq_dmabuf *hbq_buf; 532 int i, hbq_count; 533 534 hbq_count = lpfc_sli_hbq_count(); 535 /* Return all memory used by all HBQs */ 536 for (i = 0; i < hbq_count; ++i) { 537 list_for_each_entry_safe(dmabuf, next_dmabuf, 538 &phba->hbqs[i].hbq_buffer_list, list) { 539 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 540 list_del(&hbq_buf->dbuf.list); 541 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 542 } 543 phba->hbqs[i].buffer_count = 0; 544 } 545 } 546 547 static struct lpfc_hbq_entry * 548 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 549 struct hbq_dmabuf *hbq_buf) 550 { 551 struct lpfc_hbq_entry *hbqe; 552 dma_addr_t physaddr = hbq_buf->dbuf.phys; 553 554 /* Get next HBQ entry slot to use */ 555 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 556 if (hbqe) { 557 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 558 559 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 560 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 561 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 562 hbqe->bde.tus.f.bdeFlags = 0; 563 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 564 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 565 /* Sync SLIM */ 566 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 567 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 568 /* flush */ 569 readl(phba->hbq_put + hbqno); 570 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 571 } 572 return hbqe; 573 } 574 575 static struct lpfc_hbq_init lpfc_els_hbq = { 576 .rn = 1, 577 .entry_count = 200, 578 .mask_count = 0, 579 .profile = 0, 580 .ring_mask = (1 << LPFC_ELS_RING), 581 .buffer_count = 0, 582 .init_count = 20, 583 .add_count = 5, 584 }; 585 586 static struct lpfc_hbq_init lpfc_extra_hbq = { 587 .rn = 1, 588 .entry_count = 200, 589 .mask_count = 0, 590 .profile = 0, 591 .ring_mask = (1 << LPFC_EXTRA_RING), 592 .buffer_count = 0, 593 .init_count = 0, 594 .add_count = 5, 595 }; 596 597 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 598 &lpfc_els_hbq, 599 &lpfc_extra_hbq, 600 }; 601 602 static int 603 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 604 { 605 uint32_t i, start, end; 606 struct hbq_dmabuf *hbq_buffer; 607 608 if (!phba->hbqs[hbqno].hbq_alloc_buffer) { 609 return 0; 610 } 611 612 start = phba->hbqs[hbqno].buffer_count; 613 end = count + start; 614 if (end > lpfc_hbq_defs[hbqno]->entry_count) { 615 end = lpfc_hbq_defs[hbqno]->entry_count; 616 } 617 618 /* Populate HBQ entries */ 619 for (i = start; i < end; i++) { 620 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 621 if (!hbq_buffer) 622 return 1; 623 hbq_buffer->tag = (i | (hbqno << 16)); 624 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 625 phba->hbqs[hbqno].buffer_count++; 626 else 627 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 628 } 629 return 0; 630 } 631 632 int 633 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 634 { 635 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 636 lpfc_hbq_defs[qno]->add_count)); 637 } 638 639 static int 640 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 641 { 642 return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 643 lpfc_hbq_defs[qno]->init_count)); 644 } 645 646 static struct hbq_dmabuf * 647 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 648 { 649 struct lpfc_dmabuf *d_buf; 650 struct hbq_dmabuf *hbq_buf; 651 uint32_t hbqno; 652 653 hbqno = tag >> 16; 654 if (hbqno >= LPFC_MAX_HBQS) 655 return NULL; 656 657 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 658 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 659 if (hbq_buf->tag == tag) { 660 return hbq_buf; 661 } 662 } 663 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 664 "1803 Bad hbq tag. Data: x%x x%x\n", 665 tag, phba->hbqs[tag >> 16].buffer_count); 666 return NULL; 667 } 668 669 void 670 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 671 { 672 uint32_t hbqno; 673 674 if (hbq_buffer) { 675 hbqno = hbq_buffer->tag >> 16; 676 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 677 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 678 } 679 } 680 } 681 682 static int 683 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 684 { 685 uint8_t ret; 686 687 switch (mbxCommand) { 688 case MBX_LOAD_SM: 689 case MBX_READ_NV: 690 case MBX_WRITE_NV: 691 case MBX_WRITE_VPARMS: 692 case MBX_RUN_BIU_DIAG: 693 case MBX_INIT_LINK: 694 case MBX_DOWN_LINK: 695 case MBX_CONFIG_LINK: 696 case MBX_CONFIG_RING: 697 case MBX_RESET_RING: 698 case MBX_READ_CONFIG: 699 case MBX_READ_RCONFIG: 700 case MBX_READ_SPARM: 701 case MBX_READ_STATUS: 702 case MBX_READ_RPI: 703 case MBX_READ_XRI: 704 case MBX_READ_REV: 705 case MBX_READ_LNK_STAT: 706 case MBX_REG_LOGIN: 707 case MBX_UNREG_LOGIN: 708 case MBX_READ_LA: 709 case MBX_CLEAR_LA: 710 case MBX_DUMP_MEMORY: 711 case MBX_DUMP_CONTEXT: 712 case MBX_RUN_DIAGS: 713 case MBX_RESTART: 714 case MBX_UPDATE_CFG: 715 case MBX_DOWN_LOAD: 716 case MBX_DEL_LD_ENTRY: 717 case MBX_RUN_PROGRAM: 718 case MBX_SET_MASK: 719 case MBX_SET_VARIABLE: 720 case MBX_UNREG_D_ID: 721 case MBX_KILL_BOARD: 722 case MBX_CONFIG_FARP: 723 case MBX_BEACON: 724 case MBX_LOAD_AREA: 725 case MBX_RUN_BIU_DIAG64: 726 case MBX_CONFIG_PORT: 727 case MBX_READ_SPARM64: 728 case MBX_READ_RPI64: 729 case MBX_REG_LOGIN64: 730 case MBX_READ_LA64: 731 case MBX_WRITE_WWN: 732 case MBX_SET_DEBUG: 733 case MBX_LOAD_EXP_ROM: 734 case MBX_ASYNCEVT_ENABLE: 735 case MBX_REG_VPI: 736 case MBX_UNREG_VPI: 737 case MBX_HEARTBEAT: 738 ret = mbxCommand; 739 break; 740 default: 741 ret = MBX_SHUTDOWN; 742 break; 743 } 744 return ret; 745 } 746 static void 747 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 748 { 749 wait_queue_head_t *pdone_q; 750 unsigned long drvr_flag; 751 752 /* 753 * If pdone_q is empty, the driver thread gave up waiting and 754 * continued running. 755 */ 756 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 757 spin_lock_irqsave(&phba->hbalock, drvr_flag); 758 pdone_q = (wait_queue_head_t *) pmboxq->context1; 759 if (pdone_q) 760 wake_up_interruptible(pdone_q); 761 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 762 return; 763 } 764 765 void 766 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 767 { 768 struct lpfc_dmabuf *mp; 769 uint16_t rpi; 770 int rc; 771 772 mp = (struct lpfc_dmabuf *) (pmb->context1); 773 774 if (mp) { 775 lpfc_mbuf_free(phba, mp->virt, mp->phys); 776 kfree(mp); 777 } 778 779 /* 780 * If a REG_LOGIN succeeded after node is destroyed or node 781 * is in re-discovery driver need to cleanup the RPI. 782 */ 783 if (!(phba->pport->load_flag & FC_UNLOADING) && 784 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 785 !pmb->mb.mbxStatus) { 786 787 rpi = pmb->mb.un.varWords[0]; 788 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 789 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 790 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 791 if (rc != MBX_NOT_FINISHED) 792 return; 793 } 794 795 mempool_free(pmb, phba->mbox_mem_pool); 796 return; 797 } 798 799 int 800 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 801 { 802 MAILBOX_t *pmbox; 803 LPFC_MBOXQ_t *pmb; 804 int rc; 805 LIST_HEAD(cmplq); 806 807 phba->sli.slistat.mbox_event++; 808 809 /* Get all completed mailboxe buffers into the cmplq */ 810 spin_lock_irq(&phba->hbalock); 811 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 812 spin_unlock_irq(&phba->hbalock); 813 814 /* Get a Mailbox buffer to setup mailbox commands for callback */ 815 do { 816 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 817 if (pmb == NULL) 818 break; 819 820 pmbox = &pmb->mb; 821 822 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 823 if (pmb->vport) { 824 lpfc_debugfs_disc_trc(pmb->vport, 825 LPFC_DISC_TRC_MBOX_VPORT, 826 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 827 (uint32_t)pmbox->mbxCommand, 828 pmbox->un.varWords[0], 829 pmbox->un.varWords[1]); 830 } 831 else { 832 lpfc_debugfs_disc_trc(phba->pport, 833 LPFC_DISC_TRC_MBOX, 834 "MBOX cmpl: cmd:x%x mb:x%x x%x", 835 (uint32_t)pmbox->mbxCommand, 836 pmbox->un.varWords[0], 837 pmbox->un.varWords[1]); 838 } 839 } 840 841 /* 842 * It is a fatal error if unknown mbox command completion. 843 */ 844 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 845 MBX_SHUTDOWN) { 846 /* Unknow mailbox command compl */ 847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 848 "(%d):0323 Unknown Mailbox command " 849 "%x Cmpl\n", 850 pmb->vport ? pmb->vport->vpi : 0, 851 pmbox->mbxCommand); 852 phba->link_state = LPFC_HBA_ERROR; 853 phba->work_hs = HS_FFER3; 854 lpfc_handle_eratt(phba); 855 continue; 856 } 857 858 if (pmbox->mbxStatus) { 859 phba->sli.slistat.mbox_stat_err++; 860 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 861 /* Mbox cmd cmpl error - RETRYing */ 862 lpfc_printf_log(phba, KERN_INFO, 863 LOG_MBOX | LOG_SLI, 864 "(%d):0305 Mbox cmd cmpl " 865 "error - RETRYing Data: x%x " 866 "x%x x%x x%x\n", 867 pmb->vport ? pmb->vport->vpi :0, 868 pmbox->mbxCommand, 869 pmbox->mbxStatus, 870 pmbox->un.varWords[0], 871 pmb->vport->port_state); 872 pmbox->mbxStatus = 0; 873 pmbox->mbxOwner = OWN_HOST; 874 spin_lock_irq(&phba->hbalock); 875 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 876 spin_unlock_irq(&phba->hbalock); 877 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 878 if (rc == MBX_SUCCESS) 879 continue; 880 } 881 } 882 883 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 884 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 885 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 886 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 887 pmb->vport ? pmb->vport->vpi : 0, 888 pmbox->mbxCommand, 889 pmb->mbox_cmpl, 890 *((uint32_t *) pmbox), 891 pmbox->un.varWords[0], 892 pmbox->un.varWords[1], 893 pmbox->un.varWords[2], 894 pmbox->un.varWords[3], 895 pmbox->un.varWords[4], 896 pmbox->un.varWords[5], 897 pmbox->un.varWords[6], 898 pmbox->un.varWords[7]); 899 900 if (pmb->mbox_cmpl) 901 pmb->mbox_cmpl(phba,pmb); 902 } while (1); 903 return 0; 904 } 905 906 static struct lpfc_dmabuf * 907 lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag) 908 { 909 struct hbq_dmabuf *hbq_entry, *new_hbq_entry; 910 uint32_t hbqno; 911 void *virt; /* virtual address ptr */ 912 dma_addr_t phys; /* mapped address */ 913 914 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 915 if (hbq_entry == NULL) 916 return NULL; 917 list_del(&hbq_entry->dbuf.list); 918 919 hbqno = tag >> 16; 920 new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 921 if (new_hbq_entry == NULL) 922 return &hbq_entry->dbuf; 923 new_hbq_entry->tag = -1; 924 phys = new_hbq_entry->dbuf.phys; 925 virt = new_hbq_entry->dbuf.virt; 926 new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys; 927 new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt; 928 hbq_entry->dbuf.phys = phys; 929 hbq_entry->dbuf.virt = virt; 930 lpfc_sli_free_hbq(phba, hbq_entry); 931 return &new_hbq_entry->dbuf; 932 } 933 934 static struct lpfc_dmabuf * 935 lpfc_sli_get_buff(struct lpfc_hba *phba, 936 struct lpfc_sli_ring *pring, 937 uint32_t tag) 938 { 939 if (tag & QUE_BUFTAG_BIT) 940 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 941 else 942 return lpfc_sli_replace_hbqbuff(phba, tag); 943 } 944 945 static int 946 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 947 struct lpfc_iocbq *saveq) 948 { 949 IOCB_t * irsp; 950 WORD5 * w5p; 951 uint32_t Rctl, Type; 952 uint32_t match, i; 953 struct lpfc_iocbq *iocbq; 954 955 match = 0; 956 irsp = &(saveq->iocb); 957 958 if (irsp->ulpStatus == IOSTAT_NEED_BUFFER) 959 return 1; 960 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 961 if (pring->lpfc_sli_rcv_async_status) 962 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 963 else 964 lpfc_printf_log(phba, 965 KERN_WARNING, 966 LOG_SLI, 967 "0316 Ring %d handler: unexpected " 968 "ASYNC_STATUS iocb received evt_code " 969 "0x%x\n", 970 pring->ringno, 971 irsp->un.asyncstat.evt_code); 972 return 1; 973 } 974 975 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 976 if (irsp->ulpBdeCount != 0) { 977 saveq->context2 = lpfc_sli_get_buff(phba, pring, 978 irsp->un.ulpWord[3]); 979 if (!saveq->context2) 980 lpfc_printf_log(phba, 981 KERN_ERR, 982 LOG_SLI, 983 "0341 Ring %d Cannot find buffer for " 984 "an unsolicited iocb. tag 0x%x\n", 985 pring->ringno, 986 irsp->un.ulpWord[3]); 987 } 988 if (irsp->ulpBdeCount == 2) { 989 saveq->context3 = lpfc_sli_get_buff(phba, pring, 990 irsp->unsli3.sli3Words[7]); 991 if (!saveq->context3) 992 lpfc_printf_log(phba, 993 KERN_ERR, 994 LOG_SLI, 995 "0342 Ring %d Cannot find buffer for an" 996 " unsolicited iocb. tag 0x%x\n", 997 pring->ringno, 998 irsp->unsli3.sli3Words[7]); 999 } 1000 list_for_each_entry(iocbq, &saveq->list, list) { 1001 irsp = &(iocbq->iocb); 1002 if (irsp->ulpBdeCount != 0) { 1003 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 1004 irsp->un.ulpWord[3]); 1005 if (!iocbq->context2) 1006 lpfc_printf_log(phba, 1007 KERN_ERR, 1008 LOG_SLI, 1009 "0343 Ring %d Cannot find " 1010 "buffer for an unsolicited iocb" 1011 ". tag 0x%x\n", pring->ringno, 1012 irsp->un.ulpWord[3]); 1013 } 1014 if (irsp->ulpBdeCount == 2) { 1015 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 1016 irsp->unsli3.sli3Words[7]); 1017 if (!iocbq->context3) 1018 lpfc_printf_log(phba, 1019 KERN_ERR, 1020 LOG_SLI, 1021 "0344 Ring %d Cannot find " 1022 "buffer for an unsolicited " 1023 "iocb. tag 0x%x\n", 1024 pring->ringno, 1025 irsp->unsli3.sli3Words[7]); 1026 } 1027 } 1028 } 1029 if (irsp->ulpBdeCount != 0 && 1030 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 1031 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 1032 int found = 0; 1033 1034 /* search continue save q for same XRI */ 1035 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 1036 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 1037 list_add_tail(&saveq->list, &iocbq->list); 1038 found = 1; 1039 break; 1040 } 1041 } 1042 if (!found) 1043 list_add_tail(&saveq->clist, 1044 &pring->iocb_continue_saveq); 1045 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 1046 list_del_init(&iocbq->clist); 1047 saveq = iocbq; 1048 irsp = &(saveq->iocb); 1049 } else 1050 return 0; 1051 } 1052 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 1053 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 1054 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 1055 Rctl = FC_ELS_REQ; 1056 Type = FC_ELS_DATA; 1057 } else { 1058 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 1059 Rctl = w5p->hcsw.Rctl; 1060 Type = w5p->hcsw.Type; 1061 1062 /* Firmware Workaround */ 1063 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 1064 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 1065 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 1066 Rctl = FC_ELS_REQ; 1067 Type = FC_ELS_DATA; 1068 w5p->hcsw.Rctl = Rctl; 1069 w5p->hcsw.Type = Type; 1070 } 1071 } 1072 1073 /* unSolicited Responses */ 1074 if (pring->prt[0].profile) { 1075 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 1076 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 1077 saveq); 1078 match = 1; 1079 } else { 1080 /* We must search, based on rctl / type 1081 for the right routine */ 1082 for (i = 0; i < pring->num_mask; i++) { 1083 if ((pring->prt[i].rctl == Rctl) 1084 && (pring->prt[i].type == Type)) { 1085 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 1086 (pring->prt[i].lpfc_sli_rcv_unsol_event) 1087 (phba, pring, saveq); 1088 match = 1; 1089 break; 1090 } 1091 } 1092 } 1093 if (match == 0) { 1094 /* Unexpected Rctl / Type received */ 1095 /* Ring <ringno> handler: unexpected 1096 Rctl <Rctl> Type <Type> received */ 1097 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1098 "0313 Ring %d handler: unexpected Rctl x%x " 1099 "Type x%x received\n", 1100 pring->ringno, Rctl, Type); 1101 } 1102 return 1; 1103 } 1104 1105 static struct lpfc_iocbq * 1106 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 1107 struct lpfc_sli_ring *pring, 1108 struct lpfc_iocbq *prspiocb) 1109 { 1110 struct lpfc_iocbq *cmd_iocb = NULL; 1111 uint16_t iotag; 1112 1113 iotag = prspiocb->iocb.ulpIoTag; 1114 1115 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 1116 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 1117 list_del_init(&cmd_iocb->list); 1118 pring->txcmplq_cnt--; 1119 return cmd_iocb; 1120 } 1121 1122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1123 "0317 iotag x%x is out off " 1124 "range: max iotag x%x wd0 x%x\n", 1125 iotag, phba->sli.last_iotag, 1126 *(((uint32_t *) &prspiocb->iocb) + 7)); 1127 return NULL; 1128 } 1129 1130 static int 1131 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1132 struct lpfc_iocbq *saveq) 1133 { 1134 struct lpfc_iocbq *cmdiocbp; 1135 int rc = 1; 1136 unsigned long iflag; 1137 1138 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 1139 spin_lock_irqsave(&phba->hbalock, iflag); 1140 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 1141 spin_unlock_irqrestore(&phba->hbalock, iflag); 1142 1143 if (cmdiocbp) { 1144 if (cmdiocbp->iocb_cmpl) { 1145 /* 1146 * Post all ELS completions to the worker thread. 1147 * All other are passed to the completion callback. 1148 */ 1149 if (pring->ringno == LPFC_ELS_RING) { 1150 if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) { 1151 cmdiocbp->iocb_flag &= 1152 ~LPFC_DRIVER_ABORTED; 1153 saveq->iocb.ulpStatus = 1154 IOSTAT_LOCAL_REJECT; 1155 saveq->iocb.un.ulpWord[4] = 1156 IOERR_SLI_ABORTED; 1157 1158 /* Firmware could still be in progress 1159 * of DMAing payload, so don't free data 1160 * buffer till after a hbeat. 1161 */ 1162 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 1163 } 1164 } 1165 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 1166 } else 1167 lpfc_sli_release_iocbq(phba, cmdiocbp); 1168 } else { 1169 /* 1170 * Unknown initiating command based on the response iotag. 1171 * This could be the case on the ELS ring because of 1172 * lpfc_els_abort(). 1173 */ 1174 if (pring->ringno != LPFC_ELS_RING) { 1175 /* 1176 * Ring <ringno> handler: unexpected completion IoTag 1177 * <IoTag> 1178 */ 1179 lpfc_printf_vlog(cmdiocbp->vport, KERN_WARNING, LOG_SLI, 1180 "0322 Ring %d handler: " 1181 "unexpected completion IoTag x%x " 1182 "Data: x%x x%x x%x x%x\n", 1183 pring->ringno, 1184 saveq->iocb.ulpIoTag, 1185 saveq->iocb.ulpStatus, 1186 saveq->iocb.un.ulpWord[4], 1187 saveq->iocb.ulpCommand, 1188 saveq->iocb.ulpContext); 1189 } 1190 } 1191 1192 return rc; 1193 } 1194 1195 static void 1196 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1197 { 1198 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1199 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1200 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1201 /* 1202 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1203 * rsp ring <portRspMax> 1204 */ 1205 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1206 "0312 Ring %d handler: portRspPut %d " 1207 "is bigger then rsp ring %d\n", 1208 pring->ringno, le32_to_cpu(pgp->rspPutInx), 1209 pring->numRiocb); 1210 1211 phba->link_state = LPFC_HBA_ERROR; 1212 1213 /* 1214 * All error attention handlers are posted to 1215 * worker thread 1216 */ 1217 phba->work_ha |= HA_ERATT; 1218 phba->work_hs = HS_FFER3; 1219 1220 /* hbalock should already be held */ 1221 if (phba->work_wait) 1222 lpfc_worker_wake_up(phba); 1223 1224 return; 1225 } 1226 1227 void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba) 1228 { 1229 struct lpfc_sli *psli = &phba->sli; 1230 struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING]; 1231 IOCB_t *irsp = NULL; 1232 IOCB_t *entry = NULL; 1233 struct lpfc_iocbq *cmdiocbq = NULL; 1234 struct lpfc_iocbq rspiocbq; 1235 struct lpfc_pgp *pgp; 1236 uint32_t status; 1237 uint32_t portRspPut, portRspMax; 1238 int type; 1239 uint32_t rsp_cmpl = 0; 1240 uint32_t ha_copy; 1241 unsigned long iflags; 1242 1243 pring->stats.iocb_event++; 1244 1245 pgp = (phba->sli_rev == 3) ? 1246 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1247 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1248 1249 1250 /* 1251 * The next available response entry should never exceed the maximum 1252 * entries. If it does, treat it as an adapter hardware error. 1253 */ 1254 portRspMax = pring->numRiocb; 1255 portRspPut = le32_to_cpu(pgp->rspPutInx); 1256 if (unlikely(portRspPut >= portRspMax)) { 1257 lpfc_sli_rsp_pointers_error(phba, pring); 1258 return; 1259 } 1260 1261 rmb(); 1262 while (pring->rspidx != portRspPut) { 1263 entry = lpfc_resp_iocb(phba, pring); 1264 if (++pring->rspidx >= portRspMax) 1265 pring->rspidx = 0; 1266 1267 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1268 (uint32_t *) &rspiocbq.iocb, 1269 phba->iocb_rsp_size); 1270 irsp = &rspiocbq.iocb; 1271 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1272 pring->stats.iocb_rsp++; 1273 rsp_cmpl++; 1274 1275 if (unlikely(irsp->ulpStatus)) { 1276 /* Rsp ring <ringno> error: IOCB */ 1277 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1278 "0326 Rsp Ring %d error: IOCB Data: " 1279 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1280 pring->ringno, 1281 irsp->un.ulpWord[0], 1282 irsp->un.ulpWord[1], 1283 irsp->un.ulpWord[2], 1284 irsp->un.ulpWord[3], 1285 irsp->un.ulpWord[4], 1286 irsp->un.ulpWord[5], 1287 *(((uint32_t *) irsp) + 6), 1288 *(((uint32_t *) irsp) + 7)); 1289 } 1290 1291 switch (type) { 1292 case LPFC_ABORT_IOCB: 1293 case LPFC_SOL_IOCB: 1294 /* 1295 * Idle exchange closed via ABTS from port. No iocb 1296 * resources need to be recovered. 1297 */ 1298 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1299 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1300 "0314 IOCB cmd 0x%x " 1301 "processed. Skipping " 1302 "completion", 1303 irsp->ulpCommand); 1304 break; 1305 } 1306 1307 spin_lock_irqsave(&phba->hbalock, iflags); 1308 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1309 &rspiocbq); 1310 spin_unlock_irqrestore(&phba->hbalock, iflags); 1311 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1312 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1313 &rspiocbq); 1314 } 1315 break; 1316 default: 1317 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1318 char adaptermsg[LPFC_MAX_ADPTMSG]; 1319 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1320 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1321 MAX_MSG_DATA); 1322 dev_warn(&((phba->pcidev)->dev), 1323 "lpfc%d: %s\n", 1324 phba->brd_no, adaptermsg); 1325 } else { 1326 /* Unknown IOCB command */ 1327 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1328 "0321 Unknown IOCB command " 1329 "Data: x%x, x%x x%x x%x x%x\n", 1330 type, irsp->ulpCommand, 1331 irsp->ulpStatus, 1332 irsp->ulpIoTag, 1333 irsp->ulpContext); 1334 } 1335 break; 1336 } 1337 1338 /* 1339 * The response IOCB has been processed. Update the ring 1340 * pointer in SLIM. If the port response put pointer has not 1341 * been updated, sync the pgp->rspPutInx and fetch the new port 1342 * response put pointer. 1343 */ 1344 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1345 1346 if (pring->rspidx == portRspPut) 1347 portRspPut = le32_to_cpu(pgp->rspPutInx); 1348 } 1349 1350 ha_copy = readl(phba->HAregaddr); 1351 ha_copy >>= (LPFC_FCP_RING * 4); 1352 1353 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1354 spin_lock_irqsave(&phba->hbalock, iflags); 1355 pring->stats.iocb_rsp_full++; 1356 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1357 writel(status, phba->CAregaddr); 1358 readl(phba->CAregaddr); 1359 spin_unlock_irqrestore(&phba->hbalock, iflags); 1360 } 1361 if ((ha_copy & HA_R0CE_RSP) && 1362 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1363 spin_lock_irqsave(&phba->hbalock, iflags); 1364 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1365 pring->stats.iocb_cmd_empty++; 1366 1367 /* Force update of the local copy of cmdGetInx */ 1368 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1369 lpfc_sli_resume_iocb(phba, pring); 1370 1371 if ((pring->lpfc_sli_cmd_available)) 1372 (pring->lpfc_sli_cmd_available) (phba, pring); 1373 1374 spin_unlock_irqrestore(&phba->hbalock, iflags); 1375 } 1376 1377 return; 1378 } 1379 1380 /* 1381 * This routine presumes LPFC_FCP_RING handling and doesn't bother 1382 * to check it explicitly. 1383 */ 1384 static int 1385 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 1386 struct lpfc_sli_ring *pring, uint32_t mask) 1387 { 1388 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1389 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1390 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1391 IOCB_t *irsp = NULL; 1392 IOCB_t *entry = NULL; 1393 struct lpfc_iocbq *cmdiocbq = NULL; 1394 struct lpfc_iocbq rspiocbq; 1395 uint32_t status; 1396 uint32_t portRspPut, portRspMax; 1397 int rc = 1; 1398 lpfc_iocb_type type; 1399 unsigned long iflag; 1400 uint32_t rsp_cmpl = 0; 1401 1402 spin_lock_irqsave(&phba->hbalock, iflag); 1403 pring->stats.iocb_event++; 1404 1405 /* 1406 * The next available response entry should never exceed the maximum 1407 * entries. If it does, treat it as an adapter hardware error. 1408 */ 1409 portRspMax = pring->numRiocb; 1410 portRspPut = le32_to_cpu(pgp->rspPutInx); 1411 if (unlikely(portRspPut >= portRspMax)) { 1412 lpfc_sli_rsp_pointers_error(phba, pring); 1413 spin_unlock_irqrestore(&phba->hbalock, iflag); 1414 return 1; 1415 } 1416 1417 rmb(); 1418 while (pring->rspidx != portRspPut) { 1419 /* 1420 * Fetch an entry off the ring and copy it into a local data 1421 * structure. The copy involves a byte-swap since the 1422 * network byte order and pci byte orders are different. 1423 */ 1424 entry = lpfc_resp_iocb(phba, pring); 1425 phba->last_completion_time = jiffies; 1426 1427 if (++pring->rspidx >= portRspMax) 1428 pring->rspidx = 0; 1429 1430 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1431 (uint32_t *) &rspiocbq.iocb, 1432 phba->iocb_rsp_size); 1433 INIT_LIST_HEAD(&(rspiocbq.list)); 1434 irsp = &rspiocbq.iocb; 1435 1436 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1437 pring->stats.iocb_rsp++; 1438 rsp_cmpl++; 1439 1440 if (unlikely(irsp->ulpStatus)) { 1441 /* 1442 * If resource errors reported from HBA, reduce 1443 * queuedepths of the SCSI device. 1444 */ 1445 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1446 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 1447 spin_unlock_irqrestore(&phba->hbalock, iflag); 1448 lpfc_adjust_queue_depth(phba); 1449 spin_lock_irqsave(&phba->hbalock, iflag); 1450 } 1451 1452 /* Rsp ring <ringno> error: IOCB */ 1453 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1454 "0336 Rsp Ring %d error: IOCB Data: " 1455 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1456 pring->ringno, 1457 irsp->un.ulpWord[0], 1458 irsp->un.ulpWord[1], 1459 irsp->un.ulpWord[2], 1460 irsp->un.ulpWord[3], 1461 irsp->un.ulpWord[4], 1462 irsp->un.ulpWord[5], 1463 *(((uint32_t *) irsp) + 6), 1464 *(((uint32_t *) irsp) + 7)); 1465 } 1466 1467 switch (type) { 1468 case LPFC_ABORT_IOCB: 1469 case LPFC_SOL_IOCB: 1470 /* 1471 * Idle exchange closed via ABTS from port. No iocb 1472 * resources need to be recovered. 1473 */ 1474 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1475 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1476 "0333 IOCB cmd 0x%x" 1477 " processed. Skipping" 1478 " completion\n", 1479 irsp->ulpCommand); 1480 break; 1481 } 1482 1483 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1484 &rspiocbq); 1485 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1486 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1487 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1488 &rspiocbq); 1489 } else { 1490 spin_unlock_irqrestore(&phba->hbalock, 1491 iflag); 1492 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1493 &rspiocbq); 1494 spin_lock_irqsave(&phba->hbalock, 1495 iflag); 1496 } 1497 } 1498 break; 1499 case LPFC_UNSOL_IOCB: 1500 spin_unlock_irqrestore(&phba->hbalock, iflag); 1501 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 1502 spin_lock_irqsave(&phba->hbalock, iflag); 1503 break; 1504 default: 1505 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1506 char adaptermsg[LPFC_MAX_ADPTMSG]; 1507 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1508 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1509 MAX_MSG_DATA); 1510 dev_warn(&((phba->pcidev)->dev), 1511 "lpfc%d: %s\n", 1512 phba->brd_no, adaptermsg); 1513 } else { 1514 /* Unknown IOCB command */ 1515 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1516 "0334 Unknown IOCB command " 1517 "Data: x%x, x%x x%x x%x x%x\n", 1518 type, irsp->ulpCommand, 1519 irsp->ulpStatus, 1520 irsp->ulpIoTag, 1521 irsp->ulpContext); 1522 } 1523 break; 1524 } 1525 1526 /* 1527 * The response IOCB has been processed. Update the ring 1528 * pointer in SLIM. If the port response put pointer has not 1529 * been updated, sync the pgp->rspPutInx and fetch the new port 1530 * response put pointer. 1531 */ 1532 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1533 1534 if (pring->rspidx == portRspPut) 1535 portRspPut = le32_to_cpu(pgp->rspPutInx); 1536 } 1537 1538 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 1539 pring->stats.iocb_rsp_full++; 1540 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 1541 writel(status, phba->CAregaddr); 1542 readl(phba->CAregaddr); 1543 } 1544 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1545 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1546 pring->stats.iocb_cmd_empty++; 1547 1548 /* Force update of the local copy of cmdGetInx */ 1549 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1550 lpfc_sli_resume_iocb(phba, pring); 1551 1552 if ((pring->lpfc_sli_cmd_available)) 1553 (pring->lpfc_sli_cmd_available) (phba, pring); 1554 1555 } 1556 1557 spin_unlock_irqrestore(&phba->hbalock, iflag); 1558 return rc; 1559 } 1560 1561 int 1562 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 1563 struct lpfc_sli_ring *pring, uint32_t mask) 1564 { 1565 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1566 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1567 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1568 IOCB_t *entry; 1569 IOCB_t *irsp = NULL; 1570 struct lpfc_iocbq *rspiocbp = NULL; 1571 struct lpfc_iocbq *next_iocb; 1572 struct lpfc_iocbq *cmdiocbp; 1573 struct lpfc_iocbq *saveq; 1574 uint8_t iocb_cmd_type; 1575 lpfc_iocb_type type; 1576 uint32_t status, free_saveq; 1577 uint32_t portRspPut, portRspMax; 1578 int rc = 1; 1579 unsigned long iflag; 1580 1581 spin_lock_irqsave(&phba->hbalock, iflag); 1582 pring->stats.iocb_event++; 1583 1584 /* 1585 * The next available response entry should never exceed the maximum 1586 * entries. If it does, treat it as an adapter hardware error. 1587 */ 1588 portRspMax = pring->numRiocb; 1589 portRspPut = le32_to_cpu(pgp->rspPutInx); 1590 if (portRspPut >= portRspMax) { 1591 /* 1592 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then 1593 * rsp ring <portRspMax> 1594 */ 1595 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1596 "0303 Ring %d handler: portRspPut %d " 1597 "is bigger then rsp ring %d\n", 1598 pring->ringno, portRspPut, portRspMax); 1599 1600 phba->link_state = LPFC_HBA_ERROR; 1601 spin_unlock_irqrestore(&phba->hbalock, iflag); 1602 1603 phba->work_hs = HS_FFER3; 1604 lpfc_handle_eratt(phba); 1605 1606 return 1; 1607 } 1608 1609 rmb(); 1610 while (pring->rspidx != portRspPut) { 1611 /* 1612 * Build a completion list and call the appropriate handler. 1613 * The process is to get the next available response iocb, get 1614 * a free iocb from the list, copy the response data into the 1615 * free iocb, insert to the continuation list, and update the 1616 * next response index to slim. This process makes response 1617 * iocb's in the ring available to DMA as fast as possible but 1618 * pays a penalty for a copy operation. Since the iocb is 1619 * only 32 bytes, this penalty is considered small relative to 1620 * the PCI reads for register values and a slim write. When 1621 * the ulpLe field is set, the entire Command has been 1622 * received. 1623 */ 1624 entry = lpfc_resp_iocb(phba, pring); 1625 1626 phba->last_completion_time = jiffies; 1627 rspiocbp = __lpfc_sli_get_iocbq(phba); 1628 if (rspiocbp == NULL) { 1629 printk(KERN_ERR "%s: out of buffers! Failing " 1630 "completion.\n", __FUNCTION__); 1631 break; 1632 } 1633 1634 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 1635 phba->iocb_rsp_size); 1636 irsp = &rspiocbp->iocb; 1637 1638 if (++pring->rspidx >= portRspMax) 1639 pring->rspidx = 0; 1640 1641 if (pring->ringno == LPFC_ELS_RING) { 1642 lpfc_debugfs_slow_ring_trc(phba, 1643 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1644 *(((uint32_t *) irsp) + 4), 1645 *(((uint32_t *) irsp) + 6), 1646 *(((uint32_t *) irsp) + 7)); 1647 } 1648 1649 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1650 1651 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 1652 1653 pring->iocb_continueq_cnt++; 1654 if (irsp->ulpLe) { 1655 /* 1656 * By default, the driver expects to free all resources 1657 * associated with this iocb completion. 1658 */ 1659 free_saveq = 1; 1660 saveq = list_get_first(&pring->iocb_continueq, 1661 struct lpfc_iocbq, list); 1662 irsp = &(saveq->iocb); 1663 list_del_init(&pring->iocb_continueq); 1664 pring->iocb_continueq_cnt = 0; 1665 1666 pring->stats.iocb_rsp++; 1667 1668 /* 1669 * If resource errors reported from HBA, reduce 1670 * queuedepths of the SCSI device. 1671 */ 1672 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1673 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 1674 spin_unlock_irqrestore(&phba->hbalock, iflag); 1675 lpfc_adjust_queue_depth(phba); 1676 spin_lock_irqsave(&phba->hbalock, iflag); 1677 } 1678 1679 if (irsp->ulpStatus) { 1680 /* Rsp ring <ringno> error: IOCB */ 1681 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1682 "0328 Rsp Ring %d error: " 1683 "IOCB Data: " 1684 "x%x x%x x%x x%x " 1685 "x%x x%x x%x x%x " 1686 "x%x x%x x%x x%x " 1687 "x%x x%x x%x x%x\n", 1688 pring->ringno, 1689 irsp->un.ulpWord[0], 1690 irsp->un.ulpWord[1], 1691 irsp->un.ulpWord[2], 1692 irsp->un.ulpWord[3], 1693 irsp->un.ulpWord[4], 1694 irsp->un.ulpWord[5], 1695 *(((uint32_t *) irsp) + 6), 1696 *(((uint32_t *) irsp) + 7), 1697 *(((uint32_t *) irsp) + 8), 1698 *(((uint32_t *) irsp) + 9), 1699 *(((uint32_t *) irsp) + 10), 1700 *(((uint32_t *) irsp) + 11), 1701 *(((uint32_t *) irsp) + 12), 1702 *(((uint32_t *) irsp) + 13), 1703 *(((uint32_t *) irsp) + 14), 1704 *(((uint32_t *) irsp) + 15)); 1705 } 1706 1707 /* 1708 * Fetch the IOCB command type and call the correct 1709 * completion routine. Solicited and Unsolicited 1710 * IOCBs on the ELS ring get freed back to the 1711 * lpfc_iocb_list by the discovery kernel thread. 1712 */ 1713 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 1714 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 1715 if (type == LPFC_SOL_IOCB) { 1716 spin_unlock_irqrestore(&phba->hbalock, iflag); 1717 rc = lpfc_sli_process_sol_iocb(phba, pring, 1718 saveq); 1719 spin_lock_irqsave(&phba->hbalock, iflag); 1720 } else if (type == LPFC_UNSOL_IOCB) { 1721 spin_unlock_irqrestore(&phba->hbalock, iflag); 1722 rc = lpfc_sli_process_unsol_iocb(phba, pring, 1723 saveq); 1724 spin_lock_irqsave(&phba->hbalock, iflag); 1725 if (!rc) 1726 free_saveq = 0; 1727 } else if (type == LPFC_ABORT_IOCB) { 1728 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && 1729 ((cmdiocbp = 1730 lpfc_sli_iocbq_lookup(phba, pring, 1731 saveq)))) { 1732 /* Call the specified completion 1733 routine */ 1734 if (cmdiocbp->iocb_cmpl) { 1735 spin_unlock_irqrestore( 1736 &phba->hbalock, 1737 iflag); 1738 (cmdiocbp->iocb_cmpl) (phba, 1739 cmdiocbp, saveq); 1740 spin_lock_irqsave( 1741 &phba->hbalock, 1742 iflag); 1743 } else 1744 __lpfc_sli_release_iocbq(phba, 1745 cmdiocbp); 1746 } 1747 } else if (type == LPFC_UNKNOWN_IOCB) { 1748 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1749 1750 char adaptermsg[LPFC_MAX_ADPTMSG]; 1751 1752 memset(adaptermsg, 0, 1753 LPFC_MAX_ADPTMSG); 1754 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1755 MAX_MSG_DATA); 1756 dev_warn(&((phba->pcidev)->dev), 1757 "lpfc%d: %s\n", 1758 phba->brd_no, adaptermsg); 1759 } else { 1760 /* Unknown IOCB command */ 1761 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1762 "0335 Unknown IOCB " 1763 "command Data: x%x " 1764 "x%x x%x x%x\n", 1765 irsp->ulpCommand, 1766 irsp->ulpStatus, 1767 irsp->ulpIoTag, 1768 irsp->ulpContext); 1769 } 1770 } 1771 1772 if (free_saveq) { 1773 list_for_each_entry_safe(rspiocbp, next_iocb, 1774 &saveq->list, list) { 1775 list_del(&rspiocbp->list); 1776 __lpfc_sli_release_iocbq(phba, 1777 rspiocbp); 1778 } 1779 __lpfc_sli_release_iocbq(phba, saveq); 1780 } 1781 rspiocbp = NULL; 1782 } 1783 1784 /* 1785 * If the port response put pointer has not been updated, sync 1786 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 1787 * response put pointer. 1788 */ 1789 if (pring->rspidx == portRspPut) { 1790 portRspPut = le32_to_cpu(pgp->rspPutInx); 1791 } 1792 } /* while (pring->rspidx != portRspPut) */ 1793 1794 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 1795 /* At least one response entry has been freed */ 1796 pring->stats.iocb_rsp_full++; 1797 /* SET RxRE_RSP in Chip Att register */ 1798 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 1799 writel(status, phba->CAregaddr); 1800 readl(phba->CAregaddr); /* flush */ 1801 } 1802 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1803 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1804 pring->stats.iocb_cmd_empty++; 1805 1806 /* Force update of the local copy of cmdGetInx */ 1807 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1808 lpfc_sli_resume_iocb(phba, pring); 1809 1810 if ((pring->lpfc_sli_cmd_available)) 1811 (pring->lpfc_sli_cmd_available) (phba, pring); 1812 1813 } 1814 1815 spin_unlock_irqrestore(&phba->hbalock, iflag); 1816 return rc; 1817 } 1818 1819 void 1820 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1821 { 1822 LIST_HEAD(completions); 1823 struct lpfc_iocbq *iocb, *next_iocb; 1824 IOCB_t *cmd = NULL; 1825 1826 if (pring->ringno == LPFC_ELS_RING) { 1827 lpfc_fabric_abort_hba(phba); 1828 } 1829 1830 /* Error everything on txq and txcmplq 1831 * First do the txq. 1832 */ 1833 spin_lock_irq(&phba->hbalock); 1834 list_splice_init(&pring->txq, &completions); 1835 pring->txq_cnt = 0; 1836 1837 /* Next issue ABTS for everything on the txcmplq */ 1838 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 1839 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 1840 1841 spin_unlock_irq(&phba->hbalock); 1842 1843 while (!list_empty(&completions)) { 1844 iocb = list_get_first(&completions, struct lpfc_iocbq, list); 1845 cmd = &iocb->iocb; 1846 list_del_init(&iocb->list); 1847 1848 if (!iocb->iocb_cmpl) 1849 lpfc_sli_release_iocbq(phba, iocb); 1850 else { 1851 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 1852 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED; 1853 (iocb->iocb_cmpl) (phba, iocb, iocb); 1854 } 1855 } 1856 } 1857 1858 int 1859 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 1860 { 1861 uint32_t status; 1862 int i = 0; 1863 int retval = 0; 1864 1865 /* Read the HBA Host Status Register */ 1866 status = readl(phba->HSregaddr); 1867 1868 /* 1869 * Check status register every 100ms for 5 retries, then every 1870 * 500ms for 5, then every 2.5 sec for 5, then reset board and 1871 * every 2.5 sec for 4. 1872 * Break our of the loop if errors occurred during init. 1873 */ 1874 while (((status & mask) != mask) && 1875 !(status & HS_FFERM) && 1876 i++ < 20) { 1877 1878 if (i <= 5) 1879 msleep(10); 1880 else if (i <= 10) 1881 msleep(500); 1882 else 1883 msleep(2500); 1884 1885 if (i == 15) { 1886 /* Do post */ 1887 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 1888 lpfc_sli_brdrestart(phba); 1889 } 1890 /* Read the HBA Host Status Register */ 1891 status = readl(phba->HSregaddr); 1892 } 1893 1894 /* Check to see if any errors occurred during init */ 1895 if ((status & HS_FFERM) || (i >= 20)) { 1896 phba->link_state = LPFC_HBA_ERROR; 1897 retval = 1; 1898 } 1899 1900 return retval; 1901 } 1902 1903 #define BARRIER_TEST_PATTERN (0xdeadbeef) 1904 1905 void lpfc_reset_barrier(struct lpfc_hba *phba) 1906 { 1907 uint32_t __iomem *resp_buf; 1908 uint32_t __iomem *mbox_buf; 1909 volatile uint32_t mbox; 1910 uint32_t hc_copy; 1911 int i; 1912 uint8_t hdrtype; 1913 1914 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 1915 if (hdrtype != 0x80 || 1916 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 1917 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 1918 return; 1919 1920 /* 1921 * Tell the other part of the chip to suspend temporarily all 1922 * its DMA activity. 1923 */ 1924 resp_buf = phba->MBslimaddr; 1925 1926 /* Disable the error attention */ 1927 hc_copy = readl(phba->HCregaddr); 1928 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 1929 readl(phba->HCregaddr); /* flush */ 1930 phba->link_flag |= LS_IGNORE_ERATT; 1931 1932 if (readl(phba->HAregaddr) & HA_ERATT) { 1933 /* Clear Chip error bit */ 1934 writel(HA_ERATT, phba->HAregaddr); 1935 phba->pport->stopped = 1; 1936 } 1937 1938 mbox = 0; 1939 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 1940 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 1941 1942 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 1943 mbox_buf = phba->MBslimaddr; 1944 writel(mbox, mbox_buf); 1945 1946 for (i = 0; 1947 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 1948 mdelay(1); 1949 1950 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 1951 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 1952 phba->pport->stopped) 1953 goto restore_hc; 1954 else 1955 goto clear_errat; 1956 } 1957 1958 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 1959 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 1960 mdelay(1); 1961 1962 clear_errat: 1963 1964 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 1965 mdelay(1); 1966 1967 if (readl(phba->HAregaddr) & HA_ERATT) { 1968 writel(HA_ERATT, phba->HAregaddr); 1969 phba->pport->stopped = 1; 1970 } 1971 1972 restore_hc: 1973 phba->link_flag &= ~LS_IGNORE_ERATT; 1974 writel(hc_copy, phba->HCregaddr); 1975 readl(phba->HCregaddr); /* flush */ 1976 } 1977 1978 int 1979 lpfc_sli_brdkill(struct lpfc_hba *phba) 1980 { 1981 struct lpfc_sli *psli; 1982 LPFC_MBOXQ_t *pmb; 1983 uint32_t status; 1984 uint32_t ha_copy; 1985 int retval; 1986 int i = 0; 1987 1988 psli = &phba->sli; 1989 1990 /* Kill HBA */ 1991 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1992 "0329 Kill HBA Data: x%x x%x\n", 1993 phba->pport->port_state, psli->sli_flag); 1994 1995 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1996 if (!pmb) 1997 return 1; 1998 1999 /* Disable the error attention */ 2000 spin_lock_irq(&phba->hbalock); 2001 status = readl(phba->HCregaddr); 2002 status &= ~HC_ERINT_ENA; 2003 writel(status, phba->HCregaddr); 2004 readl(phba->HCregaddr); /* flush */ 2005 phba->link_flag |= LS_IGNORE_ERATT; 2006 spin_unlock_irq(&phba->hbalock); 2007 2008 lpfc_kill_board(phba, pmb); 2009 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2010 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2011 2012 if (retval != MBX_SUCCESS) { 2013 if (retval != MBX_BUSY) 2014 mempool_free(pmb, phba->mbox_mem_pool); 2015 spin_lock_irq(&phba->hbalock); 2016 phba->link_flag &= ~LS_IGNORE_ERATT; 2017 spin_unlock_irq(&phba->hbalock); 2018 return 1; 2019 } 2020 2021 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2022 2023 mempool_free(pmb, phba->mbox_mem_pool); 2024 2025 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 2026 * attention every 100ms for 3 seconds. If we don't get ERATT after 2027 * 3 seconds we still set HBA_ERROR state because the status of the 2028 * board is now undefined. 2029 */ 2030 ha_copy = readl(phba->HAregaddr); 2031 2032 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 2033 mdelay(100); 2034 ha_copy = readl(phba->HAregaddr); 2035 } 2036 2037 del_timer_sync(&psli->mbox_tmo); 2038 if (ha_copy & HA_ERATT) { 2039 writel(HA_ERATT, phba->HAregaddr); 2040 phba->pport->stopped = 1; 2041 } 2042 spin_lock_irq(&phba->hbalock); 2043 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2044 phba->link_flag &= ~LS_IGNORE_ERATT; 2045 spin_unlock_irq(&phba->hbalock); 2046 2047 psli->mbox_active = NULL; 2048 lpfc_hba_down_post(phba); 2049 phba->link_state = LPFC_HBA_ERROR; 2050 2051 return ha_copy & HA_ERATT ? 0 : 1; 2052 } 2053 2054 int 2055 lpfc_sli_brdreset(struct lpfc_hba *phba) 2056 { 2057 struct lpfc_sli *psli; 2058 struct lpfc_sli_ring *pring; 2059 uint16_t cfg_value; 2060 int i; 2061 2062 psli = &phba->sli; 2063 2064 /* Reset HBA */ 2065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2066 "0325 Reset HBA Data: x%x x%x\n", 2067 phba->pport->port_state, psli->sli_flag); 2068 2069 /* perform board reset */ 2070 phba->fc_eventTag = 0; 2071 phba->pport->fc_myDID = 0; 2072 phba->pport->fc_prevDID = 0; 2073 2074 /* Turn off parity checking and serr during the physical reset */ 2075 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 2076 pci_write_config_word(phba->pcidev, PCI_COMMAND, 2077 (cfg_value & 2078 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 2079 2080 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 2081 /* Now toggle INITFF bit in the Host Control Register */ 2082 writel(HC_INITFF, phba->HCregaddr); 2083 mdelay(1); 2084 readl(phba->HCregaddr); /* flush */ 2085 writel(0, phba->HCregaddr); 2086 readl(phba->HCregaddr); /* flush */ 2087 2088 /* Restore PCI cmd register */ 2089 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 2090 2091 /* Initialize relevant SLI info */ 2092 for (i = 0; i < psli->num_rings; i++) { 2093 pring = &psli->ring[i]; 2094 pring->flag = 0; 2095 pring->rspidx = 0; 2096 pring->next_cmdidx = 0; 2097 pring->local_getidx = 0; 2098 pring->cmdidx = 0; 2099 pring->missbufcnt = 0; 2100 } 2101 2102 phba->link_state = LPFC_WARM_START; 2103 return 0; 2104 } 2105 2106 int 2107 lpfc_sli_brdrestart(struct lpfc_hba *phba) 2108 { 2109 MAILBOX_t *mb; 2110 struct lpfc_sli *psli; 2111 uint16_t skip_post; 2112 volatile uint32_t word0; 2113 void __iomem *to_slim; 2114 2115 spin_lock_irq(&phba->hbalock); 2116 2117 psli = &phba->sli; 2118 2119 /* Restart HBA */ 2120 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2121 "0337 Restart HBA Data: x%x x%x\n", 2122 phba->pport->port_state, psli->sli_flag); 2123 2124 word0 = 0; 2125 mb = (MAILBOX_t *) &word0; 2126 mb->mbxCommand = MBX_RESTART; 2127 mb->mbxHc = 1; 2128 2129 lpfc_reset_barrier(phba); 2130 2131 to_slim = phba->MBslimaddr; 2132 writel(*(uint32_t *) mb, to_slim); 2133 readl(to_slim); /* flush */ 2134 2135 /* Only skip post after fc_ffinit is completed */ 2136 if (phba->pport->port_state) { 2137 skip_post = 1; 2138 word0 = 1; /* This is really setting up word1 */ 2139 } else { 2140 skip_post = 0; 2141 word0 = 0; /* This is really setting up word1 */ 2142 } 2143 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2144 writel(*(uint32_t *) mb, to_slim); 2145 readl(to_slim); /* flush */ 2146 2147 lpfc_sli_brdreset(phba); 2148 phba->pport->stopped = 0; 2149 phba->link_state = LPFC_INIT_START; 2150 2151 spin_unlock_irq(&phba->hbalock); 2152 2153 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2154 psli->stats_start = get_seconds(); 2155 2156 if (skip_post) 2157 mdelay(100); 2158 else 2159 mdelay(2000); 2160 2161 lpfc_hba_down_post(phba); 2162 2163 return 0; 2164 } 2165 2166 static int 2167 lpfc_sli_chipset_init(struct lpfc_hba *phba) 2168 { 2169 uint32_t status, i = 0; 2170 2171 /* Read the HBA Host Status Register */ 2172 status = readl(phba->HSregaddr); 2173 2174 /* Check status register to see what current state is */ 2175 i = 0; 2176 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 2177 2178 /* Check every 100ms for 5 retries, then every 500ms for 5, then 2179 * every 2.5 sec for 5, then reset board and every 2.5 sec for 2180 * 4. 2181 */ 2182 if (i++ >= 20) { 2183 /* Adapter failed to init, timeout, status reg 2184 <status> */ 2185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2186 "0436 Adapter failed to init, " 2187 "timeout, status reg x%x, " 2188 "FW Data: A8 x%x AC x%x\n", status, 2189 readl(phba->MBslimaddr + 0xa8), 2190 readl(phba->MBslimaddr + 0xac)); 2191 phba->link_state = LPFC_HBA_ERROR; 2192 return -ETIMEDOUT; 2193 } 2194 2195 /* Check to see if any errors occurred during init */ 2196 if (status & HS_FFERM) { 2197 /* ERROR: During chipset initialization */ 2198 /* Adapter failed to init, chipset, status reg 2199 <status> */ 2200 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2201 "0437 Adapter failed to init, " 2202 "chipset, status reg x%x, " 2203 "FW Data: A8 x%x AC x%x\n", status, 2204 readl(phba->MBslimaddr + 0xa8), 2205 readl(phba->MBslimaddr + 0xac)); 2206 phba->link_state = LPFC_HBA_ERROR; 2207 return -EIO; 2208 } 2209 2210 if (i <= 5) { 2211 msleep(10); 2212 } else if (i <= 10) { 2213 msleep(500); 2214 } else { 2215 msleep(2500); 2216 } 2217 2218 if (i == 15) { 2219 /* Do post */ 2220 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2221 lpfc_sli_brdrestart(phba); 2222 } 2223 /* Read the HBA Host Status Register */ 2224 status = readl(phba->HSregaddr); 2225 } 2226 2227 /* Check to see if any errors occurred during init */ 2228 if (status & HS_FFERM) { 2229 /* ERROR: During chipset initialization */ 2230 /* Adapter failed to init, chipset, status reg <status> */ 2231 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2232 "0438 Adapter failed to init, chipset, " 2233 "status reg x%x, " 2234 "FW Data: A8 x%x AC x%x\n", status, 2235 readl(phba->MBslimaddr + 0xa8), 2236 readl(phba->MBslimaddr + 0xac)); 2237 phba->link_state = LPFC_HBA_ERROR; 2238 return -EIO; 2239 } 2240 2241 /* Clear all interrupt enable conditions */ 2242 writel(0, phba->HCregaddr); 2243 readl(phba->HCregaddr); /* flush */ 2244 2245 /* setup host attn register */ 2246 writel(0xffffffff, phba->HAregaddr); 2247 readl(phba->HAregaddr); /* flush */ 2248 return 0; 2249 } 2250 2251 int 2252 lpfc_sli_hbq_count(void) 2253 { 2254 return ARRAY_SIZE(lpfc_hbq_defs); 2255 } 2256 2257 static int 2258 lpfc_sli_hbq_entry_count(void) 2259 { 2260 int hbq_count = lpfc_sli_hbq_count(); 2261 int count = 0; 2262 int i; 2263 2264 for (i = 0; i < hbq_count; ++i) 2265 count += lpfc_hbq_defs[i]->entry_count; 2266 return count; 2267 } 2268 2269 int 2270 lpfc_sli_hbq_size(void) 2271 { 2272 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 2273 } 2274 2275 static int 2276 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 2277 { 2278 int hbq_count = lpfc_sli_hbq_count(); 2279 LPFC_MBOXQ_t *pmb; 2280 MAILBOX_t *pmbox; 2281 uint32_t hbqno; 2282 uint32_t hbq_entry_index; 2283 2284 /* Get a Mailbox buffer to setup mailbox 2285 * commands for HBA initialization 2286 */ 2287 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2288 2289 if (!pmb) 2290 return -ENOMEM; 2291 2292 pmbox = &pmb->mb; 2293 2294 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 2295 phba->link_state = LPFC_INIT_MBX_CMDS; 2296 2297 hbq_entry_index = 0; 2298 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 2299 phba->hbqs[hbqno].next_hbqPutIdx = 0; 2300 phba->hbqs[hbqno].hbqPutIdx = 0; 2301 phba->hbqs[hbqno].local_hbqGetIdx = 0; 2302 phba->hbqs[hbqno].entry_count = 2303 lpfc_hbq_defs[hbqno]->entry_count; 2304 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 2305 hbq_entry_index, pmb); 2306 hbq_entry_index += phba->hbqs[hbqno].entry_count; 2307 2308 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 2309 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 2310 mbxStatus <status>, ring <num> */ 2311 2312 lpfc_printf_log(phba, KERN_ERR, 2313 LOG_SLI | LOG_VPORT, 2314 "1805 Adapter failed to init. " 2315 "Data: x%x x%x x%x\n", 2316 pmbox->mbxCommand, 2317 pmbox->mbxStatus, hbqno); 2318 2319 phba->link_state = LPFC_HBA_ERROR; 2320 mempool_free(pmb, phba->mbox_mem_pool); 2321 return ENXIO; 2322 } 2323 } 2324 phba->hbq_count = hbq_count; 2325 2326 mempool_free(pmb, phba->mbox_mem_pool); 2327 2328 /* Initially populate or replenish the HBQs */ 2329 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 2330 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno)) 2331 return -ENOMEM; 2332 } 2333 return 0; 2334 } 2335 2336 static int 2337 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) 2338 { 2339 LPFC_MBOXQ_t *pmb; 2340 uint32_t resetcount = 0, rc = 0, done = 0; 2341 2342 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2343 if (!pmb) { 2344 phba->link_state = LPFC_HBA_ERROR; 2345 return -ENOMEM; 2346 } 2347 2348 phba->sli_rev = sli_mode; 2349 while (resetcount < 2 && !done) { 2350 spin_lock_irq(&phba->hbalock); 2351 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2352 spin_unlock_irq(&phba->hbalock); 2353 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2354 lpfc_sli_brdrestart(phba); 2355 msleep(2500); 2356 rc = lpfc_sli_chipset_init(phba); 2357 if (rc) 2358 break; 2359 2360 spin_lock_irq(&phba->hbalock); 2361 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2362 spin_unlock_irq(&phba->hbalock); 2363 resetcount++; 2364 2365 /* Call pre CONFIG_PORT mailbox command initialization. A 2366 * value of 0 means the call was successful. Any other 2367 * nonzero value is a failure, but if ERESTART is returned, 2368 * the driver may reset the HBA and try again. 2369 */ 2370 rc = lpfc_config_port_prep(phba); 2371 if (rc == -ERESTART) { 2372 phba->link_state = LPFC_LINK_UNKNOWN; 2373 continue; 2374 } else if (rc) { 2375 break; 2376 } 2377 2378 phba->link_state = LPFC_INIT_MBX_CMDS; 2379 lpfc_config_port(phba, pmb); 2380 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2381 if (rc != MBX_SUCCESS) { 2382 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2383 "0442 Adapter failed to init, mbxCmd x%x " 2384 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 2385 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 2386 spin_lock_irq(&phba->hbalock); 2387 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 2388 spin_unlock_irq(&phba->hbalock); 2389 rc = -ENXIO; 2390 } else { 2391 done = 1; 2392 phba->max_vpi = (phba->max_vpi && 2393 pmb->mb.un.varCfgPort.gmv) != 0 2394 ? pmb->mb.un.varCfgPort.max_vpi 2395 : 0; 2396 } 2397 } 2398 2399 if (!done) { 2400 rc = -EINVAL; 2401 goto do_prep_failed; 2402 } 2403 2404 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2405 (!pmb->mb.un.varCfgPort.cMA)) { 2406 rc = -ENXIO; 2407 goto do_prep_failed; 2408 } 2409 return rc; 2410 2411 do_prep_failed: 2412 mempool_free(pmb, phba->mbox_mem_pool); 2413 return rc; 2414 } 2415 2416 int 2417 lpfc_sli_hba_setup(struct lpfc_hba *phba) 2418 { 2419 uint32_t rc; 2420 int mode = 3; 2421 2422 switch (lpfc_sli_mode) { 2423 case 2: 2424 if (phba->cfg_enable_npiv) { 2425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 2426 "1824 NPIV enabled: Override lpfc_sli_mode " 2427 "parameter (%d) to auto (0).\n", 2428 lpfc_sli_mode); 2429 break; 2430 } 2431 mode = 2; 2432 break; 2433 case 0: 2434 case 3: 2435 break; 2436 default: 2437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 2438 "1819 Unrecognized lpfc_sli_mode " 2439 "parameter: %d.\n", lpfc_sli_mode); 2440 2441 break; 2442 } 2443 2444 rc = lpfc_do_config_port(phba, mode); 2445 if (rc && lpfc_sli_mode == 3) 2446 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 2447 "1820 Unable to select SLI-3. " 2448 "Not supported by adapter.\n"); 2449 if (rc && mode != 2) 2450 rc = lpfc_do_config_port(phba, 2); 2451 if (rc) 2452 goto lpfc_sli_hba_setup_error; 2453 2454 if (phba->sli_rev == 3) { 2455 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 2456 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 2457 phba->sli3_options |= LPFC_SLI3_ENABLED; 2458 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 2459 2460 } else { 2461 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 2462 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 2463 phba->sli3_options = 0; 2464 } 2465 2466 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2467 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 2468 phba->sli_rev, phba->max_vpi); 2469 rc = lpfc_sli_ring_map(phba); 2470 2471 if (rc) 2472 goto lpfc_sli_hba_setup_error; 2473 2474 /* Init HBQs */ 2475 2476 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2477 rc = lpfc_sli_hbq_setup(phba); 2478 if (rc) 2479 goto lpfc_sli_hba_setup_error; 2480 } 2481 2482 phba->sli.sli_flag |= LPFC_PROCESS_LA; 2483 2484 rc = lpfc_config_port_post(phba); 2485 if (rc) 2486 goto lpfc_sli_hba_setup_error; 2487 2488 return rc; 2489 2490 lpfc_sli_hba_setup_error: 2491 phba->link_state = LPFC_HBA_ERROR; 2492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2493 "0445 Firmware initialization failed\n"); 2494 return rc; 2495 } 2496 2497 /*! lpfc_mbox_timeout 2498 * 2499 * \pre 2500 * \post 2501 * \param hba Pointer to per struct lpfc_hba structure 2502 * \param l1 Pointer to the driver's mailbox queue. 2503 * \return 2504 * void 2505 * 2506 * \b Description: 2507 * 2508 * This routine handles mailbox timeout events at timer interrupt context. 2509 */ 2510 void 2511 lpfc_mbox_timeout(unsigned long ptr) 2512 { 2513 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 2514 unsigned long iflag; 2515 uint32_t tmo_posted; 2516 2517 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 2518 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 2519 if (!tmo_posted) 2520 phba->pport->work_port_events |= WORKER_MBOX_TMO; 2521 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 2522 2523 if (!tmo_posted) { 2524 spin_lock_irqsave(&phba->hbalock, iflag); 2525 if (phba->work_wait) 2526 lpfc_worker_wake_up(phba); 2527 spin_unlock_irqrestore(&phba->hbalock, iflag); 2528 } 2529 } 2530 2531 void 2532 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 2533 { 2534 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 2535 MAILBOX_t *mb = &pmbox->mb; 2536 struct lpfc_sli *psli = &phba->sli; 2537 struct lpfc_sli_ring *pring; 2538 2539 if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) { 2540 return; 2541 } 2542 2543 /* Mbox cmd <mbxCommand> timeout */ 2544 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2545 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 2546 mb->mbxCommand, 2547 phba->pport->port_state, 2548 phba->sli.sli_flag, 2549 phba->sli.mbox_active); 2550 2551 /* Setting state unknown so lpfc_sli_abort_iocb_ring 2552 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 2553 * it to fail all oustanding SCSI IO. 2554 */ 2555 spin_lock_irq(&phba->pport->work_port_lock); 2556 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 2557 spin_unlock_irq(&phba->pport->work_port_lock); 2558 spin_lock_irq(&phba->hbalock); 2559 phba->link_state = LPFC_LINK_UNKNOWN; 2560 phba->pport->fc_flag |= FC_ESTABLISH_LINK; 2561 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2562 spin_unlock_irq(&phba->hbalock); 2563 2564 pring = &psli->ring[psli->fcp_ring]; 2565 lpfc_sli_abort_iocb_ring(phba, pring); 2566 2567 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2568 "0345 Resetting board due to mailbox timeout\n"); 2569 /* 2570 * lpfc_offline calls lpfc_sli_hba_down which will clean up 2571 * on oustanding mailbox commands. 2572 */ 2573 /* If resets are disabled then set error state and return. */ 2574 if (!phba->cfg_enable_hba_reset) { 2575 phba->link_state = LPFC_HBA_ERROR; 2576 return; 2577 } 2578 lpfc_offline_prep(phba); 2579 lpfc_offline(phba); 2580 lpfc_sli_brdrestart(phba); 2581 if (lpfc_online(phba) == 0) /* Initialize the HBA */ 2582 mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60); 2583 lpfc_unblock_mgmt_io(phba); 2584 return; 2585 } 2586 2587 int 2588 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 2589 { 2590 MAILBOX_t *mb; 2591 struct lpfc_sli *psli = &phba->sli; 2592 uint32_t status, evtctr; 2593 uint32_t ha_copy; 2594 int i; 2595 unsigned long timeout; 2596 unsigned long drvr_flag = 0; 2597 volatile uint32_t word0, ldata; 2598 void __iomem *to_slim; 2599 2600 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 2601 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 2602 if(!pmbox->vport) { 2603 lpfc_printf_log(phba, KERN_ERR, 2604 LOG_MBOX | LOG_VPORT, 2605 "1806 Mbox x%x failed. No vport\n", 2606 pmbox->mb.mbxCommand); 2607 dump_stack(); 2608 return MBX_NOT_FINISHED; 2609 } 2610 } 2611 2612 2613 /* If the PCI channel is in offline state, do not post mbox. */ 2614 if (unlikely(pci_channel_offline(phba->pcidev))) 2615 return MBX_NOT_FINISHED; 2616 2617 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2618 psli = &phba->sli; 2619 2620 2621 mb = &pmbox->mb; 2622 status = MBX_SUCCESS; 2623 2624 if (phba->link_state == LPFC_HBA_ERROR) { 2625 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2626 2627 /* Mbox command <mbxCommand> cannot issue */ 2628 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) 2629 return MBX_NOT_FINISHED; 2630 } 2631 2632 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2633 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2634 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2635 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) 2636 return MBX_NOT_FINISHED; 2637 } 2638 2639 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2640 /* Polling for a mbox command when another one is already active 2641 * is not allowed in SLI. Also, the driver must have established 2642 * SLI2 mode to queue and process multiple mbox commands. 2643 */ 2644 2645 if (flag & MBX_POLL) { 2646 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2647 2648 /* Mbox command <mbxCommand> cannot issue */ 2649 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2650 return MBX_NOT_FINISHED; 2651 } 2652 2653 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2654 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2655 /* Mbox command <mbxCommand> cannot issue */ 2656 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2657 return MBX_NOT_FINISHED; 2658 } 2659 2660 /* Another mailbox command is still being processed, queue this 2661 * command to be processed later. 2662 */ 2663 lpfc_mbox_put(phba, pmbox); 2664 2665 /* Mbox cmd issue - BUSY */ 2666 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2667 "(%d):0308 Mbox cmd issue - BUSY Data: " 2668 "x%x x%x x%x x%x\n", 2669 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 2670 mb->mbxCommand, phba->pport->port_state, 2671 psli->sli_flag, flag); 2672 2673 psli->slistat.mbox_busy++; 2674 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2675 2676 if (pmbox->vport) { 2677 lpfc_debugfs_disc_trc(pmbox->vport, 2678 LPFC_DISC_TRC_MBOX_VPORT, 2679 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 2680 (uint32_t)mb->mbxCommand, 2681 mb->un.varWords[0], mb->un.varWords[1]); 2682 } 2683 else { 2684 lpfc_debugfs_disc_trc(phba->pport, 2685 LPFC_DISC_TRC_MBOX, 2686 "MBOX Bsy: cmd:x%x mb:x%x x%x", 2687 (uint32_t)mb->mbxCommand, 2688 mb->un.varWords[0], mb->un.varWords[1]); 2689 } 2690 2691 return MBX_BUSY; 2692 } 2693 2694 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2695 2696 /* If we are not polling, we MUST be in SLI2 mode */ 2697 if (flag != MBX_POLL) { 2698 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2699 (mb->mbxCommand != MBX_KILL_BOARD)) { 2700 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2701 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2702 /* Mbox command <mbxCommand> cannot issue */ 2703 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2704 return MBX_NOT_FINISHED; 2705 } 2706 /* timeout active mbox command */ 2707 mod_timer(&psli->mbox_tmo, (jiffies + 2708 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 2709 } 2710 2711 /* Mailbox cmd <cmd> issue */ 2712 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2713 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 2714 "x%x\n", 2715 pmbox->vport ? pmbox->vport->vpi : 0, 2716 mb->mbxCommand, phba->pport->port_state, 2717 psli->sli_flag, flag); 2718 2719 if (mb->mbxCommand != MBX_HEARTBEAT) { 2720 if (pmbox->vport) { 2721 lpfc_debugfs_disc_trc(pmbox->vport, 2722 LPFC_DISC_TRC_MBOX_VPORT, 2723 "MBOX Send vport: cmd:x%x mb:x%x x%x", 2724 (uint32_t)mb->mbxCommand, 2725 mb->un.varWords[0], mb->un.varWords[1]); 2726 } 2727 else { 2728 lpfc_debugfs_disc_trc(phba->pport, 2729 LPFC_DISC_TRC_MBOX, 2730 "MBOX Send: cmd:x%x mb:x%x x%x", 2731 (uint32_t)mb->mbxCommand, 2732 mb->un.varWords[0], mb->un.varWords[1]); 2733 } 2734 } 2735 2736 psli->slistat.mbox_cmd++; 2737 evtctr = psli->slistat.mbox_event; 2738 2739 /* next set own bit for the adapter and copy over command word */ 2740 mb->mbxOwner = OWN_CHIP; 2741 2742 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2743 /* First copy command data to host SLIM area */ 2744 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 2745 } else { 2746 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2747 /* copy command data into host mbox for cmpl */ 2748 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2749 MAILBOX_CMD_SIZE); 2750 } 2751 2752 /* First copy mbox command data to HBA SLIM, skip past first 2753 word */ 2754 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2755 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 2756 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 2757 2758 /* Next copy over first word, with mbxOwner set */ 2759 ldata = *((volatile uint32_t *)mb); 2760 to_slim = phba->MBslimaddr; 2761 writel(ldata, to_slim); 2762 readl(to_slim); /* flush */ 2763 2764 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2765 /* switch over to host mailbox */ 2766 psli->sli_flag |= LPFC_SLI2_ACTIVE; 2767 } 2768 } 2769 2770 wmb(); 2771 2772 switch (flag) { 2773 case MBX_NOWAIT: 2774 /* Set up reference to mailbox command */ 2775 psli->mbox_active = pmbox; 2776 /* Interrupt board to do it */ 2777 writel(CA_MBATT, phba->CAregaddr); 2778 readl(phba->CAregaddr); /* flush */ 2779 /* Don't wait for it to finish, just return */ 2780 break; 2781 2782 case MBX_POLL: 2783 /* Set up null reference to mailbox command */ 2784 psli->mbox_active = NULL; 2785 /* Interrupt board to do it */ 2786 writel(CA_MBATT, phba->CAregaddr); 2787 readl(phba->CAregaddr); /* flush */ 2788 2789 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2790 /* First read mbox status word */ 2791 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2792 word0 = le32_to_cpu(word0); 2793 } else { 2794 /* First read mbox status word */ 2795 word0 = readl(phba->MBslimaddr); 2796 } 2797 2798 /* Read the HBA Host Attention Register */ 2799 ha_copy = readl(phba->HAregaddr); 2800 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2801 mb->mbxCommand) * 2802 1000) + jiffies; 2803 i = 0; 2804 /* Wait for command to complete */ 2805 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2806 (!(ha_copy & HA_MBATT) && 2807 (phba->link_state > LPFC_WARM_START))) { 2808 if (time_after(jiffies, timeout)) { 2809 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2810 spin_unlock_irqrestore(&phba->hbalock, 2811 drvr_flag); 2812 return MBX_NOT_FINISHED; 2813 } 2814 2815 /* Check if we took a mbox interrupt while we were 2816 polling */ 2817 if (((word0 & OWN_CHIP) != OWN_CHIP) 2818 && (evtctr != psli->slistat.mbox_event)) 2819 break; 2820 2821 if (i++ > 10) { 2822 spin_unlock_irqrestore(&phba->hbalock, 2823 drvr_flag); 2824 msleep(1); 2825 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2826 } 2827 2828 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2829 /* First copy command data */ 2830 word0 = *((volatile uint32_t *) 2831 &phba->slim2p->mbx); 2832 word0 = le32_to_cpu(word0); 2833 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2834 MAILBOX_t *slimmb; 2835 volatile uint32_t slimword0; 2836 /* Check real SLIM for any errors */ 2837 slimword0 = readl(phba->MBslimaddr); 2838 slimmb = (MAILBOX_t *) & slimword0; 2839 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 2840 && slimmb->mbxStatus) { 2841 psli->sli_flag &= 2842 ~LPFC_SLI2_ACTIVE; 2843 word0 = slimword0; 2844 } 2845 } 2846 } else { 2847 /* First copy command data */ 2848 word0 = readl(phba->MBslimaddr); 2849 } 2850 /* Read the HBA Host Attention Register */ 2851 ha_copy = readl(phba->HAregaddr); 2852 } 2853 2854 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2855 /* copy results back to user */ 2856 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2857 MAILBOX_CMD_SIZE); 2858 } else { 2859 /* First copy command data */ 2860 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2861 MAILBOX_CMD_SIZE); 2862 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 2863 pmbox->context2) { 2864 lpfc_memcpy_from_slim((void *)pmbox->context2, 2865 phba->MBslimaddr + DMP_RSP_OFFSET, 2866 mb->un.varDmp.word_cnt); 2867 } 2868 } 2869 2870 writel(HA_MBATT, phba->HAregaddr); 2871 readl(phba->HAregaddr); /* flush */ 2872 2873 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2874 status = mb->mbxStatus; 2875 } 2876 2877 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2878 return status; 2879 } 2880 2881 /* 2882 * Caller needs to hold lock. 2883 */ 2884 static void 2885 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2886 struct lpfc_iocbq *piocb) 2887 { 2888 /* Insert the caller's iocb in the txq tail for later processing. */ 2889 list_add_tail(&piocb->list, &pring->txq); 2890 pring->txq_cnt++; 2891 } 2892 2893 static struct lpfc_iocbq * 2894 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2895 struct lpfc_iocbq **piocb) 2896 { 2897 struct lpfc_iocbq * nextiocb; 2898 2899 nextiocb = lpfc_sli_ringtx_get(phba, pring); 2900 if (!nextiocb) { 2901 nextiocb = *piocb; 2902 *piocb = NULL; 2903 } 2904 2905 return nextiocb; 2906 } 2907 2908 /* 2909 * Lockless version of lpfc_sli_issue_iocb. 2910 */ 2911 static int 2912 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2913 struct lpfc_iocbq *piocb, uint32_t flag) 2914 { 2915 struct lpfc_iocbq *nextiocb; 2916 IOCB_t *iocb; 2917 2918 if (piocb->iocb_cmpl && (!piocb->vport) && 2919 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 2920 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 2921 lpfc_printf_log(phba, KERN_ERR, 2922 LOG_SLI | LOG_VPORT, 2923 "1807 IOCB x%x failed. No vport\n", 2924 piocb->iocb.ulpCommand); 2925 dump_stack(); 2926 return IOCB_ERROR; 2927 } 2928 2929 2930 /* If the PCI channel is in offline state, do not post iocbs. */ 2931 if (unlikely(pci_channel_offline(phba->pcidev))) 2932 return IOCB_ERROR; 2933 2934 /* 2935 * We should never get an IOCB if we are in a < LINK_DOWN state 2936 */ 2937 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 2938 return IOCB_ERROR; 2939 2940 /* 2941 * Check to see if we are blocking IOCB processing because of a 2942 * outstanding event. 2943 */ 2944 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 2945 goto iocb_busy; 2946 2947 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 2948 /* 2949 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 2950 * can be issued if the link is not up. 2951 */ 2952 switch (piocb->iocb.ulpCommand) { 2953 case CMD_QUE_RING_BUF_CN: 2954 case CMD_QUE_RING_BUF64_CN: 2955 /* 2956 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 2957 * completion, iocb_cmpl MUST be 0. 2958 */ 2959 if (piocb->iocb_cmpl) 2960 piocb->iocb_cmpl = NULL; 2961 /*FALLTHROUGH*/ 2962 case CMD_CREATE_XRI_CR: 2963 case CMD_CLOSE_XRI_CN: 2964 case CMD_CLOSE_XRI_CX: 2965 break; 2966 default: 2967 goto iocb_busy; 2968 } 2969 2970 /* 2971 * For FCP commands, we must be in a state where we can process link 2972 * attention events. 2973 */ 2974 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 2975 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 2976 goto iocb_busy; 2977 } 2978 2979 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 2980 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 2981 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 2982 2983 if (iocb) 2984 lpfc_sli_update_ring(phba, pring); 2985 else 2986 lpfc_sli_update_full_ring(phba, pring); 2987 2988 if (!piocb) 2989 return IOCB_SUCCESS; 2990 2991 goto out_busy; 2992 2993 iocb_busy: 2994 pring->stats.iocb_cmd_delay++; 2995 2996 out_busy: 2997 2998 if (!(flag & SLI_IOCB_RET_IOCB)) { 2999 __lpfc_sli_ringtx_put(phba, pring, piocb); 3000 return IOCB_SUCCESS; 3001 } 3002 3003 return IOCB_BUSY; 3004 } 3005 3006 3007 int 3008 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3009 struct lpfc_iocbq *piocb, uint32_t flag) 3010 { 3011 unsigned long iflags; 3012 int rc; 3013 3014 spin_lock_irqsave(&phba->hbalock, iflags); 3015 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 3016 spin_unlock_irqrestore(&phba->hbalock, iflags); 3017 3018 return rc; 3019 } 3020 3021 static int 3022 lpfc_extra_ring_setup( struct lpfc_hba *phba) 3023 { 3024 struct lpfc_sli *psli; 3025 struct lpfc_sli_ring *pring; 3026 3027 psli = &phba->sli; 3028 3029 /* Adjust cmd/rsp ring iocb entries more evenly */ 3030 3031 /* Take some away from the FCP ring */ 3032 pring = &psli->ring[psli->fcp_ring]; 3033 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3034 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3035 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3036 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3037 3038 /* and give them to the extra ring */ 3039 pring = &psli->ring[psli->extra_ring]; 3040 3041 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3042 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3043 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3044 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3045 3046 /* Setup default profile for this ring */ 3047 pring->iotag_max = 4096; 3048 pring->num_mask = 1; 3049 pring->prt[0].profile = 0; /* Mask 0 */ 3050 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 3051 pring->prt[0].type = phba->cfg_multi_ring_type; 3052 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 3053 return 0; 3054 } 3055 3056 static void 3057 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 3058 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 3059 { 3060 IOCB_t *icmd; 3061 uint16_t evt_code; 3062 uint16_t temp; 3063 struct temp_event temp_event_data; 3064 struct Scsi_Host *shost; 3065 3066 icmd = &iocbq->iocb; 3067 evt_code = icmd->un.asyncstat.evt_code; 3068 temp = icmd->ulpContext; 3069 3070 if ((evt_code != ASYNC_TEMP_WARN) && 3071 (evt_code != ASYNC_TEMP_SAFE)) { 3072 lpfc_printf_log(phba, 3073 KERN_ERR, 3074 LOG_SLI, 3075 "0346 Ring %d handler: unexpected ASYNC_STATUS" 3076 " evt_code 0x%x\n", 3077 pring->ringno, 3078 icmd->un.asyncstat.evt_code); 3079 return; 3080 } 3081 temp_event_data.data = (uint32_t)temp; 3082 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 3083 if (evt_code == ASYNC_TEMP_WARN) { 3084 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 3085 lpfc_printf_log(phba, 3086 KERN_ERR, 3087 LOG_TEMP, 3088 "0347 Adapter is very hot, please take " 3089 "corrective action. temperature : %d Celsius\n", 3090 temp); 3091 } 3092 if (evt_code == ASYNC_TEMP_SAFE) { 3093 temp_event_data.event_code = LPFC_NORMAL_TEMP; 3094 lpfc_printf_log(phba, 3095 KERN_ERR, 3096 LOG_TEMP, 3097 "0340 Adapter temperature is OK now. " 3098 "temperature : %d Celsius\n", 3099 temp); 3100 } 3101 3102 /* Send temperature change event to applications */ 3103 shost = lpfc_shost_from_vport(phba->pport); 3104 fc_host_post_vendor_event(shost, fc_get_event_number(), 3105 sizeof(temp_event_data), (char *) &temp_event_data, 3106 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 3107 3108 } 3109 3110 3111 int 3112 lpfc_sli_setup(struct lpfc_hba *phba) 3113 { 3114 int i, totiocbsize = 0; 3115 struct lpfc_sli *psli = &phba->sli; 3116 struct lpfc_sli_ring *pring; 3117 3118 psli->num_rings = MAX_CONFIGURED_RINGS; 3119 psli->sli_flag = 0; 3120 psli->fcp_ring = LPFC_FCP_RING; 3121 psli->next_ring = LPFC_FCP_NEXT_RING; 3122 psli->extra_ring = LPFC_EXTRA_RING; 3123 3124 psli->iocbq_lookup = NULL; 3125 psli->iocbq_lookup_len = 0; 3126 psli->last_iotag = 0; 3127 3128 for (i = 0; i < psli->num_rings; i++) { 3129 pring = &psli->ring[i]; 3130 switch (i) { 3131 case LPFC_FCP_RING: /* ring 0 - FCP */ 3132 /* numCiocb and numRiocb are used in config_port */ 3133 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 3134 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 3135 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3136 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3137 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3138 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3139 pring->sizeCiocb = (phba->sli_rev == 3) ? 3140 SLI3_IOCB_CMD_SIZE : 3141 SLI2_IOCB_CMD_SIZE; 3142 pring->sizeRiocb = (phba->sli_rev == 3) ? 3143 SLI3_IOCB_RSP_SIZE : 3144 SLI2_IOCB_RSP_SIZE; 3145 pring->iotag_ctr = 0; 3146 pring->iotag_max = 3147 (phba->cfg_hba_queue_depth * 2); 3148 pring->fast_iotag = pring->iotag_max; 3149 pring->num_mask = 0; 3150 break; 3151 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 3152 /* numCiocb and numRiocb are used in config_port */ 3153 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 3154 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 3155 pring->sizeCiocb = (phba->sli_rev == 3) ? 3156 SLI3_IOCB_CMD_SIZE : 3157 SLI2_IOCB_CMD_SIZE; 3158 pring->sizeRiocb = (phba->sli_rev == 3) ? 3159 SLI3_IOCB_RSP_SIZE : 3160 SLI2_IOCB_RSP_SIZE; 3161 pring->iotag_max = phba->cfg_hba_queue_depth; 3162 pring->num_mask = 0; 3163 break; 3164 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 3165 /* numCiocb and numRiocb are used in config_port */ 3166 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 3167 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 3168 pring->sizeCiocb = (phba->sli_rev == 3) ? 3169 SLI3_IOCB_CMD_SIZE : 3170 SLI2_IOCB_CMD_SIZE; 3171 pring->sizeRiocb = (phba->sli_rev == 3) ? 3172 SLI3_IOCB_RSP_SIZE : 3173 SLI2_IOCB_RSP_SIZE; 3174 pring->fast_iotag = 0; 3175 pring->iotag_ctr = 0; 3176 pring->iotag_max = 4096; 3177 pring->lpfc_sli_rcv_async_status = 3178 lpfc_sli_async_event_handler; 3179 pring->num_mask = 4; 3180 pring->prt[0].profile = 0; /* Mask 0 */ 3181 pring->prt[0].rctl = FC_ELS_REQ; 3182 pring->prt[0].type = FC_ELS_DATA; 3183 pring->prt[0].lpfc_sli_rcv_unsol_event = 3184 lpfc_els_unsol_event; 3185 pring->prt[1].profile = 0; /* Mask 1 */ 3186 pring->prt[1].rctl = FC_ELS_RSP; 3187 pring->prt[1].type = FC_ELS_DATA; 3188 pring->prt[1].lpfc_sli_rcv_unsol_event = 3189 lpfc_els_unsol_event; 3190 pring->prt[2].profile = 0; /* Mask 2 */ 3191 /* NameServer Inquiry */ 3192 pring->prt[2].rctl = FC_UNSOL_CTL; 3193 /* NameServer */ 3194 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 3195 pring->prt[2].lpfc_sli_rcv_unsol_event = 3196 lpfc_ct_unsol_event; 3197 pring->prt[3].profile = 0; /* Mask 3 */ 3198 /* NameServer response */ 3199 pring->prt[3].rctl = FC_SOL_CTL; 3200 /* NameServer */ 3201 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 3202 pring->prt[3].lpfc_sli_rcv_unsol_event = 3203 lpfc_ct_unsol_event; 3204 break; 3205 } 3206 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 3207 (pring->numRiocb * pring->sizeRiocb); 3208 } 3209 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 3210 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3211 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 3212 "SLI2 SLIM Data: x%x x%lx\n", 3213 phba->brd_no, totiocbsize, 3214 (unsigned long) MAX_SLIM_IOCB_SIZE); 3215 } 3216 if (phba->cfg_multi_ring_support == 2) 3217 lpfc_extra_ring_setup(phba); 3218 3219 return 0; 3220 } 3221 3222 int 3223 lpfc_sli_queue_setup(struct lpfc_hba *phba) 3224 { 3225 struct lpfc_sli *psli; 3226 struct lpfc_sli_ring *pring; 3227 int i; 3228 3229 psli = &phba->sli; 3230 spin_lock_irq(&phba->hbalock); 3231 INIT_LIST_HEAD(&psli->mboxq); 3232 INIT_LIST_HEAD(&psli->mboxq_cmpl); 3233 /* Initialize list headers for txq and txcmplq as double linked lists */ 3234 for (i = 0; i < psli->num_rings; i++) { 3235 pring = &psli->ring[i]; 3236 pring->ringno = i; 3237 pring->next_cmdidx = 0; 3238 pring->local_getidx = 0; 3239 pring->cmdidx = 0; 3240 INIT_LIST_HEAD(&pring->txq); 3241 INIT_LIST_HEAD(&pring->txcmplq); 3242 INIT_LIST_HEAD(&pring->iocb_continueq); 3243 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 3244 INIT_LIST_HEAD(&pring->postbufq); 3245 } 3246 spin_unlock_irq(&phba->hbalock); 3247 return 1; 3248 } 3249 3250 int 3251 lpfc_sli_host_down(struct lpfc_vport *vport) 3252 { 3253 LIST_HEAD(completions); 3254 struct lpfc_hba *phba = vport->phba; 3255 struct lpfc_sli *psli = &phba->sli; 3256 struct lpfc_sli_ring *pring; 3257 struct lpfc_iocbq *iocb, *next_iocb; 3258 int i; 3259 unsigned long flags = 0; 3260 uint16_t prev_pring_flag; 3261 3262 lpfc_cleanup_discovery_resources(vport); 3263 3264 spin_lock_irqsave(&phba->hbalock, flags); 3265 for (i = 0; i < psli->num_rings; i++) { 3266 pring = &psli->ring[i]; 3267 prev_pring_flag = pring->flag; 3268 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3269 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3270 /* 3271 * Error everything on the txq since these iocbs have not been 3272 * given to the FW yet. 3273 */ 3274 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 3275 if (iocb->vport != vport) 3276 continue; 3277 list_move_tail(&iocb->list, &completions); 3278 pring->txq_cnt--; 3279 } 3280 3281 /* Next issue ABTS for everything on the txcmplq */ 3282 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 3283 list) { 3284 if (iocb->vport != vport) 3285 continue; 3286 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3287 } 3288 3289 pring->flag = prev_pring_flag; 3290 } 3291 3292 spin_unlock_irqrestore(&phba->hbalock, flags); 3293 3294 while (!list_empty(&completions)) { 3295 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 3296 3297 if (!iocb->iocb_cmpl) 3298 lpfc_sli_release_iocbq(phba, iocb); 3299 else { 3300 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3301 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN; 3302 (iocb->iocb_cmpl) (phba, iocb, iocb); 3303 } 3304 } 3305 return 1; 3306 } 3307 3308 int 3309 lpfc_sli_hba_down(struct lpfc_hba *phba) 3310 { 3311 LIST_HEAD(completions); 3312 struct lpfc_sli *psli = &phba->sli; 3313 struct lpfc_sli_ring *pring; 3314 struct lpfc_dmabuf *buf_ptr; 3315 LPFC_MBOXQ_t *pmb; 3316 struct lpfc_iocbq *iocb; 3317 IOCB_t *cmd = NULL; 3318 int i; 3319 unsigned long flags = 0; 3320 3321 lpfc_hba_down_prep(phba); 3322 3323 lpfc_fabric_abort_hba(phba); 3324 3325 spin_lock_irqsave(&phba->hbalock, flags); 3326 for (i = 0; i < psli->num_rings; i++) { 3327 pring = &psli->ring[i]; 3328 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3329 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3330 3331 /* 3332 * Error everything on the txq since these iocbs have not been 3333 * given to the FW yet. 3334 */ 3335 list_splice_init(&pring->txq, &completions); 3336 pring->txq_cnt = 0; 3337 3338 } 3339 spin_unlock_irqrestore(&phba->hbalock, flags); 3340 3341 while (!list_empty(&completions)) { 3342 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 3343 cmd = &iocb->iocb; 3344 3345 if (!iocb->iocb_cmpl) 3346 lpfc_sli_release_iocbq(phba, iocb); 3347 else { 3348 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3349 cmd->un.ulpWord[4] = IOERR_SLI_DOWN; 3350 (iocb->iocb_cmpl) (phba, iocb, iocb); 3351 } 3352 } 3353 3354 spin_lock_irqsave(&phba->hbalock, flags); 3355 list_splice_init(&phba->elsbuf, &completions); 3356 phba->elsbuf_cnt = 0; 3357 phba->elsbuf_prev_cnt = 0; 3358 spin_unlock_irqrestore(&phba->hbalock, flags); 3359 3360 while (!list_empty(&completions)) { 3361 list_remove_head(&completions, buf_ptr, 3362 struct lpfc_dmabuf, list); 3363 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3364 kfree(buf_ptr); 3365 } 3366 3367 /* Return any active mbox cmds */ 3368 del_timer_sync(&psli->mbox_tmo); 3369 spin_lock_irqsave(&phba->hbalock, flags); 3370 3371 spin_lock(&phba->pport->work_port_lock); 3372 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 3373 spin_unlock(&phba->pport->work_port_lock); 3374 3375 if (psli->mbox_active) { 3376 list_add_tail(&psli->mbox_active->list, &completions); 3377 psli->mbox_active = NULL; 3378 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3379 } 3380 3381 /* Return any pending or completed mbox cmds */ 3382 list_splice_init(&phba->sli.mboxq, &completions); 3383 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 3384 INIT_LIST_HEAD(&psli->mboxq); 3385 INIT_LIST_HEAD(&psli->mboxq_cmpl); 3386 3387 spin_unlock_irqrestore(&phba->hbalock, flags); 3388 3389 while (!list_empty(&completions)) { 3390 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 3391 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3392 if (pmb->mbox_cmpl) { 3393 pmb->mbox_cmpl(phba,pmb); 3394 } 3395 } 3396 return 1; 3397 } 3398 3399 void 3400 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 3401 { 3402 uint32_t *src = srcp; 3403 uint32_t *dest = destp; 3404 uint32_t ldata; 3405 int i; 3406 3407 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 3408 ldata = *src; 3409 ldata = le32_to_cpu(ldata); 3410 *dest = ldata; 3411 src++; 3412 dest++; 3413 } 3414 } 3415 3416 int 3417 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3418 struct lpfc_dmabuf *mp) 3419 { 3420 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 3421 later */ 3422 spin_lock_irq(&phba->hbalock); 3423 list_add_tail(&mp->list, &pring->postbufq); 3424 pring->postbufq_cnt++; 3425 spin_unlock_irq(&phba->hbalock); 3426 return 0; 3427 } 3428 3429 uint32_t 3430 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 3431 { 3432 spin_lock_irq(&phba->hbalock); 3433 phba->buffer_tag_count++; 3434 /* 3435 * Always set the QUE_BUFTAG_BIT to distiguish between 3436 * a tag assigned by HBQ. 3437 */ 3438 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 3439 spin_unlock_irq(&phba->hbalock); 3440 return phba->buffer_tag_count; 3441 } 3442 3443 struct lpfc_dmabuf * 3444 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3445 uint32_t tag) 3446 { 3447 struct lpfc_dmabuf *mp, *next_mp; 3448 struct list_head *slp = &pring->postbufq; 3449 3450 /* Search postbufq, from the begining, looking for a match on tag */ 3451 spin_lock_irq(&phba->hbalock); 3452 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 3453 if (mp->buffer_tag == tag) { 3454 list_del_init(&mp->list); 3455 pring->postbufq_cnt--; 3456 spin_unlock_irq(&phba->hbalock); 3457 return mp; 3458 } 3459 } 3460 3461 spin_unlock_irq(&phba->hbalock); 3462 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3463 "0410 Cannot find virtual addr for buffer tag on " 3464 "ring %d Data x%lx x%p x%p x%x\n", 3465 pring->ringno, (unsigned long) tag, 3466 slp->next, slp->prev, pring->postbufq_cnt); 3467 3468 return NULL; 3469 } 3470 3471 struct lpfc_dmabuf * 3472 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3473 dma_addr_t phys) 3474 { 3475 struct lpfc_dmabuf *mp, *next_mp; 3476 struct list_head *slp = &pring->postbufq; 3477 3478 /* Search postbufq, from the begining, looking for a match on phys */ 3479 spin_lock_irq(&phba->hbalock); 3480 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 3481 if (mp->phys == phys) { 3482 list_del_init(&mp->list); 3483 pring->postbufq_cnt--; 3484 spin_unlock_irq(&phba->hbalock); 3485 return mp; 3486 } 3487 } 3488 3489 spin_unlock_irq(&phba->hbalock); 3490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3491 "0410 Cannot find virtual addr for mapped buf on " 3492 "ring %d Data x%llx x%p x%p x%x\n", 3493 pring->ringno, (unsigned long long)phys, 3494 slp->next, slp->prev, pring->postbufq_cnt); 3495 return NULL; 3496 } 3497 3498 static void 3499 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3500 struct lpfc_iocbq *rspiocb) 3501 { 3502 IOCB_t *irsp = &rspiocb->iocb; 3503 uint16_t abort_iotag, abort_context; 3504 struct lpfc_iocbq *abort_iocb; 3505 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 3506 3507 abort_iocb = NULL; 3508 3509 if (irsp->ulpStatus) { 3510 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 3511 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 3512 3513 spin_lock_irq(&phba->hbalock); 3514 if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag) 3515 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 3516 3517 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 3518 "0327 Cannot abort els iocb %p " 3519 "with tag %x context %x, abort status %x, " 3520 "abort code %x\n", 3521 abort_iocb, abort_iotag, abort_context, 3522 irsp->ulpStatus, irsp->un.ulpWord[4]); 3523 3524 /* 3525 * make sure we have the right iocbq before taking it 3526 * off the txcmplq and try to call completion routine. 3527 */ 3528 if (!abort_iocb || 3529 abort_iocb->iocb.ulpContext != abort_context || 3530 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 3531 spin_unlock_irq(&phba->hbalock); 3532 else { 3533 list_del_init(&abort_iocb->list); 3534 pring->txcmplq_cnt--; 3535 spin_unlock_irq(&phba->hbalock); 3536 3537 /* Firmware could still be in progress of DMAing 3538 * payload, so don't free data buffer till after 3539 * a hbeat. 3540 */ 3541 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 3542 3543 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3544 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3545 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 3546 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 3547 } 3548 } 3549 3550 lpfc_sli_release_iocbq(phba, cmdiocb); 3551 return; 3552 } 3553 3554 static void 3555 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3556 struct lpfc_iocbq *rspiocb) 3557 { 3558 IOCB_t *irsp = &rspiocb->iocb; 3559 3560 /* ELS cmd tag <ulpIoTag> completes */ 3561 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3562 "0133 Ignoring ELS cmd tag x%x completion Data: " 3563 "x%x x%x x%x\n", 3564 irsp->ulpIoTag, irsp->ulpStatus, 3565 irsp->un.ulpWord[4], irsp->ulpTimeout); 3566 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 3567 lpfc_ct_free_iocb(phba, cmdiocb); 3568 else 3569 lpfc_els_free_iocb(phba, cmdiocb); 3570 return; 3571 } 3572 3573 int 3574 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3575 struct lpfc_iocbq *cmdiocb) 3576 { 3577 struct lpfc_vport *vport = cmdiocb->vport; 3578 struct lpfc_iocbq *abtsiocbp; 3579 IOCB_t *icmd = NULL; 3580 IOCB_t *iabt = NULL; 3581 int retval = IOCB_ERROR; 3582 3583 /* 3584 * There are certain command types we don't want to abort. And we 3585 * don't want to abort commands that are already in the process of 3586 * being aborted. 3587 */ 3588 icmd = &cmdiocb->iocb; 3589 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 3590 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 3591 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 3592 return 0; 3593 3594 /* If we're unloading, don't abort iocb on the ELS ring, but change the 3595 * callback so that nothing happens when it finishes. 3596 */ 3597 if ((vport->load_flag & FC_UNLOADING) && 3598 (pring->ringno == LPFC_ELS_RING)) { 3599 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 3600 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 3601 else 3602 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 3603 goto abort_iotag_exit; 3604 } 3605 3606 /* issue ABTS for this IOCB based on iotag */ 3607 abtsiocbp = __lpfc_sli_get_iocbq(phba); 3608 if (abtsiocbp == NULL) 3609 return 0; 3610 3611 /* This signals the response to set the correct status 3612 * before calling the completion handler. 3613 */ 3614 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 3615 3616 iabt = &abtsiocbp->iocb; 3617 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 3618 iabt->un.acxri.abortContextTag = icmd->ulpContext; 3619 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 3620 iabt->ulpLe = 1; 3621 iabt->ulpClass = icmd->ulpClass; 3622 3623 if (phba->link_state >= LPFC_LINK_UP) 3624 iabt->ulpCommand = CMD_ABORT_XRI_CN; 3625 else 3626 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 3627 3628 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3629 3630 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3631 "0339 Abort xri x%x, original iotag x%x, " 3632 "abort cmd iotag x%x\n", 3633 iabt->un.acxri.abortContextTag, 3634 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3635 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3636 3637 abort_iotag_exit: 3638 /* 3639 * Caller to this routine should check for IOCB_ERROR 3640 * and handle it properly. This routine no longer removes 3641 * iocb off txcmplq and call compl in case of IOCB_ERROR. 3642 */ 3643 return retval; 3644 } 3645 3646 static int 3647 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 3648 uint16_t tgt_id, uint64_t lun_id, 3649 lpfc_ctx_cmd ctx_cmd) 3650 { 3651 struct lpfc_scsi_buf *lpfc_cmd; 3652 struct scsi_cmnd *cmnd; 3653 int rc = 1; 3654 3655 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 3656 return rc; 3657 3658 if (iocbq->vport != vport) 3659 return rc; 3660 3661 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 3662 cmnd = lpfc_cmd->pCmd; 3663 3664 if (cmnd == NULL) 3665 return rc; 3666 3667 switch (ctx_cmd) { 3668 case LPFC_CTX_LUN: 3669 if ((cmnd->device->id == tgt_id) && 3670 (cmnd->device->lun == lun_id)) 3671 rc = 0; 3672 break; 3673 case LPFC_CTX_TGT: 3674 if (cmnd->device->id == tgt_id) 3675 rc = 0; 3676 break; 3677 case LPFC_CTX_HOST: 3678 rc = 0; 3679 break; 3680 default: 3681 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 3682 __FUNCTION__, ctx_cmd); 3683 break; 3684 } 3685 3686 return rc; 3687 } 3688 3689 int 3690 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 3691 lpfc_ctx_cmd ctx_cmd) 3692 { 3693 struct lpfc_hba *phba = vport->phba; 3694 struct lpfc_iocbq *iocbq; 3695 int sum, i; 3696 3697 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 3698 iocbq = phba->sli.iocbq_lookup[i]; 3699 3700 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 3701 ctx_cmd) == 0) 3702 sum++; 3703 } 3704 3705 return sum; 3706 } 3707 3708 void 3709 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3710 struct lpfc_iocbq *rspiocb) 3711 { 3712 lpfc_sli_release_iocbq(phba, cmdiocb); 3713 return; 3714 } 3715 3716 int 3717 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 3718 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 3719 { 3720 struct lpfc_hba *phba = vport->phba; 3721 struct lpfc_iocbq *iocbq; 3722 struct lpfc_iocbq *abtsiocb; 3723 IOCB_t *cmd = NULL; 3724 int errcnt = 0, ret_val = 0; 3725 int i; 3726 3727 for (i = 1; i <= phba->sli.last_iotag; i++) { 3728 iocbq = phba->sli.iocbq_lookup[i]; 3729 3730 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 3731 abort_cmd) != 0) 3732 continue; 3733 3734 /* issue ABTS for this IOCB based on iotag */ 3735 abtsiocb = lpfc_sli_get_iocbq(phba); 3736 if (abtsiocb == NULL) { 3737 errcnt++; 3738 continue; 3739 } 3740 3741 cmd = &iocbq->iocb; 3742 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 3743 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 3744 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 3745 abtsiocb->iocb.ulpLe = 1; 3746 abtsiocb->iocb.ulpClass = cmd->ulpClass; 3747 abtsiocb->vport = phba->pport; 3748 3749 if (lpfc_is_link_up(phba)) 3750 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 3751 else 3752 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 3753 3754 /* Setup callback routine and issue the command. */ 3755 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 3756 ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); 3757 if (ret_val == IOCB_ERROR) { 3758 lpfc_sli_release_iocbq(phba, abtsiocb); 3759 errcnt++; 3760 continue; 3761 } 3762 } 3763 3764 return errcnt; 3765 } 3766 3767 static void 3768 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 3769 struct lpfc_iocbq *cmdiocbq, 3770 struct lpfc_iocbq *rspiocbq) 3771 { 3772 wait_queue_head_t *pdone_q; 3773 unsigned long iflags; 3774 3775 spin_lock_irqsave(&phba->hbalock, iflags); 3776 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 3777 if (cmdiocbq->context2 && rspiocbq) 3778 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 3779 &rspiocbq->iocb, sizeof(IOCB_t)); 3780 3781 pdone_q = cmdiocbq->context_un.wait_queue; 3782 if (pdone_q) 3783 wake_up(pdone_q); 3784 spin_unlock_irqrestore(&phba->hbalock, iflags); 3785 return; 3786 } 3787 3788 /* 3789 * Issue the caller's iocb and wait for its completion, but no longer than the 3790 * caller's timeout. Note that iocb_flags is cleared before the 3791 * lpfc_sli_issue_call since the wake routine sets a unique value and by 3792 * definition this is a wait function. 3793 */ 3794 3795 int 3796 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 3797 struct lpfc_sli_ring *pring, 3798 struct lpfc_iocbq *piocb, 3799 struct lpfc_iocbq *prspiocbq, 3800 uint32_t timeout) 3801 { 3802 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3803 long timeleft, timeout_req = 0; 3804 int retval = IOCB_SUCCESS; 3805 uint32_t creg_val; 3806 3807 /* 3808 * If the caller has provided a response iocbq buffer, then context2 3809 * is NULL or its an error. 3810 */ 3811 if (prspiocbq) { 3812 if (piocb->context2) 3813 return IOCB_ERROR; 3814 piocb->context2 = prspiocbq; 3815 } 3816 3817 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 3818 piocb->context_un.wait_queue = &done_q; 3819 piocb->iocb_flag &= ~LPFC_IO_WAKE; 3820 3821 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3822 creg_val = readl(phba->HCregaddr); 3823 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 3824 writel(creg_val, phba->HCregaddr); 3825 readl(phba->HCregaddr); /* flush */ 3826 } 3827 3828 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 3829 if (retval == IOCB_SUCCESS) { 3830 timeout_req = timeout * HZ; 3831 timeleft = wait_event_timeout(done_q, 3832 piocb->iocb_flag & LPFC_IO_WAKE, 3833 timeout_req); 3834 3835 if (piocb->iocb_flag & LPFC_IO_WAKE) { 3836 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3837 "0331 IOCB wake signaled\n"); 3838 } else if (timeleft == 0) { 3839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3840 "0338 IOCB wait timeout error - no " 3841 "wake response Data x%x\n", timeout); 3842 retval = IOCB_TIMEDOUT; 3843 } else { 3844 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3845 "0330 IOCB wake NOT set, " 3846 "Data x%x x%lx\n", 3847 timeout, (timeleft / jiffies)); 3848 retval = IOCB_TIMEDOUT; 3849 } 3850 } else { 3851 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3852 ":0332 IOCB wait issue failed, Data x%x\n", 3853 retval); 3854 retval = IOCB_ERROR; 3855 } 3856 3857 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3858 creg_val = readl(phba->HCregaddr); 3859 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 3860 writel(creg_val, phba->HCregaddr); 3861 readl(phba->HCregaddr); /* flush */ 3862 } 3863 3864 if (prspiocbq) 3865 piocb->context2 = NULL; 3866 3867 piocb->context_un.wait_queue = NULL; 3868 piocb->iocb_cmpl = NULL; 3869 return retval; 3870 } 3871 3872 int 3873 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 3874 uint32_t timeout) 3875 { 3876 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3877 int retval; 3878 unsigned long flag; 3879 3880 /* The caller must leave context1 empty. */ 3881 if (pmboxq->context1) 3882 return MBX_NOT_FINISHED; 3883 3884 /* setup wake call as IOCB callback */ 3885 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3886 /* setup context field to pass wait_queue pointer to wake function */ 3887 pmboxq->context1 = &done_q; 3888 3889 /* now issue the command */ 3890 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 3891 3892 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 3893 wait_event_interruptible_timeout(done_q, 3894 pmboxq->mbox_flag & LPFC_MBX_WAKE, 3895 timeout * HZ); 3896 3897 spin_lock_irqsave(&phba->hbalock, flag); 3898 pmboxq->context1 = NULL; 3899 /* 3900 * if LPFC_MBX_WAKE flag is set the mailbox is completed 3901 * else do not free the resources. 3902 */ 3903 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) 3904 retval = MBX_SUCCESS; 3905 else { 3906 retval = MBX_TIMEOUT; 3907 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3908 } 3909 spin_unlock_irqrestore(&phba->hbalock, flag); 3910 } 3911 3912 return retval; 3913 } 3914 3915 int 3916 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 3917 { 3918 struct lpfc_vport *vport = phba->pport; 3919 int i = 0; 3920 uint32_t ha_copy; 3921 3922 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 3923 if (i++ > LPFC_MBOX_TMO * 1000) 3924 return 1; 3925 3926 /* 3927 * Call lpfc_sli_handle_mb_event only if a mailbox cmd 3928 * did finish. This way we won't get the misleading 3929 * "Stray Mailbox Interrupt" message. 3930 */ 3931 spin_lock_irq(&phba->hbalock); 3932 ha_copy = phba->work_ha; 3933 phba->work_ha &= ~HA_MBATT; 3934 spin_unlock_irq(&phba->hbalock); 3935 3936 if (ha_copy & HA_MBATT) 3937 if (lpfc_sli_handle_mb_event(phba) == 0) 3938 i = 0; 3939 3940 msleep(1); 3941 } 3942 3943 return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; 3944 } 3945 3946 irqreturn_t 3947 lpfc_intr_handler(int irq, void *dev_id) 3948 { 3949 struct lpfc_hba *phba; 3950 uint32_t ha_copy; 3951 uint32_t work_ha_copy; 3952 unsigned long status; 3953 uint32_t control; 3954 3955 MAILBOX_t *mbox, *pmbox; 3956 struct lpfc_vport *vport; 3957 struct lpfc_nodelist *ndlp; 3958 struct lpfc_dmabuf *mp; 3959 LPFC_MBOXQ_t *pmb; 3960 int rc; 3961 3962 /* 3963 * Get the driver's phba structure from the dev_id and 3964 * assume the HBA is not interrupting. 3965 */ 3966 phba = (struct lpfc_hba *) dev_id; 3967 3968 if (unlikely(!phba)) 3969 return IRQ_NONE; 3970 3971 /* If the pci channel is offline, ignore all the interrupts. */ 3972 if (unlikely(pci_channel_offline(phba->pcidev))) 3973 return IRQ_NONE; 3974 3975 phba->sli.slistat.sli_intr++; 3976 3977 /* 3978 * Call the HBA to see if it is interrupting. If not, don't claim 3979 * the interrupt 3980 */ 3981 3982 /* Ignore all interrupts during initialization. */ 3983 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 3984 return IRQ_NONE; 3985 3986 /* 3987 * Read host attention register to determine interrupt source 3988 * Clear Attention Sources, except Error Attention (to 3989 * preserve status) and Link Attention 3990 */ 3991 spin_lock(&phba->hbalock); 3992 ha_copy = readl(phba->HAregaddr); 3993 /* If somebody is waiting to handle an eratt don't process it 3994 * here. The brdkill function will do this. 3995 */ 3996 if (phba->link_flag & LS_IGNORE_ERATT) 3997 ha_copy &= ~HA_ERATT; 3998 writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 3999 readl(phba->HAregaddr); /* flush */ 4000 spin_unlock(&phba->hbalock); 4001 4002 if (unlikely(!ha_copy)) 4003 return IRQ_NONE; 4004 4005 work_ha_copy = ha_copy & phba->work_ha_mask; 4006 4007 if (unlikely(work_ha_copy)) { 4008 if (work_ha_copy & HA_LATT) { 4009 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 4010 /* 4011 * Turn off Link Attention interrupts 4012 * until CLEAR_LA done 4013 */ 4014 spin_lock(&phba->hbalock); 4015 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 4016 control = readl(phba->HCregaddr); 4017 control &= ~HC_LAINT_ENA; 4018 writel(control, phba->HCregaddr); 4019 readl(phba->HCregaddr); /* flush */ 4020 spin_unlock(&phba->hbalock); 4021 } 4022 else 4023 work_ha_copy &= ~HA_LATT; 4024 } 4025 4026 if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) { 4027 /* 4028 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 4029 * the only slow ring. 4030 */ 4031 status = (work_ha_copy & 4032 (HA_RXMASK << (4*LPFC_ELS_RING))); 4033 status >>= (4*LPFC_ELS_RING); 4034 if (status & HA_RXMASK) { 4035 spin_lock(&phba->hbalock); 4036 control = readl(phba->HCregaddr); 4037 4038 lpfc_debugfs_slow_ring_trc(phba, 4039 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 4040 control, status, 4041 (uint32_t)phba->sli.slistat.sli_intr); 4042 4043 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 4044 lpfc_debugfs_slow_ring_trc(phba, 4045 "ISR Disable ring:" 4046 "pwork:x%x hawork:x%x wait:x%x", 4047 phba->work_ha, work_ha_copy, 4048 (uint32_t)((unsigned long) 4049 phba->work_wait)); 4050 4051 control &= 4052 ~(HC_R0INT_ENA << LPFC_ELS_RING); 4053 writel(control, phba->HCregaddr); 4054 readl(phba->HCregaddr); /* flush */ 4055 } 4056 else { 4057 lpfc_debugfs_slow_ring_trc(phba, 4058 "ISR slow ring: pwork:" 4059 "x%x hawork:x%x wait:x%x", 4060 phba->work_ha, work_ha_copy, 4061 (uint32_t)((unsigned long) 4062 phba->work_wait)); 4063 } 4064 spin_unlock(&phba->hbalock); 4065 } 4066 } 4067 4068 if (work_ha_copy & HA_ERATT) { 4069 /* 4070 * There was a link/board error. Read the 4071 * status register to retrieve the error event 4072 * and process it. 4073 */ 4074 phba->sli.slistat.err_attn_event++; 4075 /* Save status info */ 4076 phba->work_hs = readl(phba->HSregaddr); 4077 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); 4078 phba->work_status[1] = readl(phba->MBslimaddr + 0xac); 4079 4080 /* Clear Chip error bit */ 4081 writel(HA_ERATT, phba->HAregaddr); 4082 readl(phba->HAregaddr); /* flush */ 4083 phba->pport->stopped = 1; 4084 } 4085 4086 if ((work_ha_copy & HA_MBATT) && 4087 (phba->sli.mbox_active)) { 4088 pmb = phba->sli.mbox_active; 4089 pmbox = &pmb->mb; 4090 mbox = &phba->slim2p->mbx; 4091 vport = pmb->vport; 4092 4093 /* First check out the status word */ 4094 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 4095 if (pmbox->mbxOwner != OWN_HOST) { 4096 /* 4097 * Stray Mailbox Interrupt, mbxCommand <cmd> 4098 * mbxStatus <status> 4099 */ 4100 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 4101 LOG_SLI, 4102 "(%d):0304 Stray Mailbox " 4103 "Interrupt mbxCommand x%x " 4104 "mbxStatus x%x\n", 4105 (vport ? vport->vpi : 0), 4106 pmbox->mbxCommand, 4107 pmbox->mbxStatus); 4108 /* clear mailbox attention bit */ 4109 work_ha_copy &= ~HA_MBATT; 4110 } else { 4111 phba->last_completion_time = jiffies; 4112 del_timer(&phba->sli.mbox_tmo); 4113 4114 phba->sli.mbox_active = NULL; 4115 if (pmb->mbox_cmpl) { 4116 lpfc_sli_pcimem_bcopy(mbox, pmbox, 4117 MAILBOX_CMD_SIZE); 4118 } 4119 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 4120 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 4121 4122 lpfc_debugfs_disc_trc(vport, 4123 LPFC_DISC_TRC_MBOX_VPORT, 4124 "MBOX dflt rpi: : " 4125 "status:x%x rpi:x%x", 4126 (uint32_t)pmbox->mbxStatus, 4127 pmbox->un.varWords[0], 0); 4128 4129 if (!pmbox->mbxStatus) { 4130 mp = (struct lpfc_dmabuf *) 4131 (pmb->context1); 4132 ndlp = (struct lpfc_nodelist *) 4133 pmb->context2; 4134 4135 /* Reg_LOGIN of dflt RPI was 4136 * successful. new lets get 4137 * rid of the RPI using the 4138 * same mbox buffer. 4139 */ 4140 lpfc_unreg_login(phba, 4141 vport->vpi, 4142 pmbox->un.varWords[0], 4143 pmb); 4144 pmb->mbox_cmpl = 4145 lpfc_mbx_cmpl_dflt_rpi; 4146 pmb->context1 = mp; 4147 pmb->context2 = ndlp; 4148 pmb->vport = vport; 4149 spin_lock(&phba->hbalock); 4150 phba->sli.sli_flag &= 4151 ~LPFC_SLI_MBOX_ACTIVE; 4152 spin_unlock(&phba->hbalock); 4153 goto send_current_mbox; 4154 } 4155 } 4156 spin_lock(&phba->pport->work_port_lock); 4157 phba->pport->work_port_events &= 4158 ~WORKER_MBOX_TMO; 4159 spin_unlock(&phba->pport->work_port_lock); 4160 lpfc_mbox_cmpl_put(phba, pmb); 4161 } 4162 } 4163 if ((work_ha_copy & HA_MBATT) && 4164 (phba->sli.mbox_active == NULL)) { 4165 send_next_mbox: 4166 spin_lock(&phba->hbalock); 4167 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4168 pmb = lpfc_mbox_get(phba); 4169 spin_unlock(&phba->hbalock); 4170 send_current_mbox: 4171 /* Process next mailbox command if there is one */ 4172 if (pmb != NULL) { 4173 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4174 if (rc == MBX_NOT_FINISHED) { 4175 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 4176 lpfc_mbox_cmpl_put(phba, pmb); 4177 goto send_next_mbox; 4178 } 4179 } 4180 4181 } 4182 4183 spin_lock(&phba->hbalock); 4184 phba->work_ha |= work_ha_copy; 4185 if (phba->work_wait) 4186 lpfc_worker_wake_up(phba); 4187 spin_unlock(&phba->hbalock); 4188 } 4189 4190 ha_copy &= ~(phba->work_ha_mask); 4191 4192 /* 4193 * Process all events on FCP ring. Take the optimized path for 4194 * FCP IO. Any other IO is slow path and is handled by 4195 * the worker thread. 4196 */ 4197 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 4198 status >>= (4*LPFC_FCP_RING); 4199 if (status & HA_RXMASK) 4200 lpfc_sli_handle_fast_ring_event(phba, 4201 &phba->sli.ring[LPFC_FCP_RING], 4202 status); 4203 4204 if (phba->cfg_multi_ring_support == 2) { 4205 /* 4206 * Process all events on extra ring. Take the optimized path 4207 * for extra ring IO. Any other IO is slow path and is handled 4208 * by the worker thread. 4209 */ 4210 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 4211 status >>= (4*LPFC_EXTRA_RING); 4212 if (status & HA_RXMASK) { 4213 lpfc_sli_handle_fast_ring_event(phba, 4214 &phba->sli.ring[LPFC_EXTRA_RING], 4215 status); 4216 } 4217 } 4218 return IRQ_HANDLED; 4219 4220 } /* lpfc_intr_handler */ 4221