/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"

/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
        lpfc_printf_log(phba, \
                KERN_INFO, \
                LOG_MBOX | LOG_SLI, \
                "(%d):0311 Mailbox command x%x cannot " \
                "issue Data: x%x x%x x%x\n", \
                pmbox->vport ? pmbox->vport->vpi : 0, \
                pmbox->mb.mbxCommand, \
                phba->pport->port_state, \
                psli->sli_flag, \
                flag)


/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;
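/*
 * Ring slot addresses are computed from the per-HBA IOCB entry size rather
 * than by indexing a fixed-size array.  With a 32-byte command entry and
 * cmdidx == 3, for example, lpfc_cmd_iocb() resolves to
 * cmdringaddr + 3 * 32.
 */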
/* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
 * to the start of the ring, and the slot number of the
 * desired iocb entry, calc a pointer to that entry.
 */
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->cmdringaddr) +
                           pring->cmdidx * phba->iocb_cmd_size);
}

static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->rspringaddr) +
                           pring->rspidx * phba->iocb_rsp_size);
}

static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        return iocbq;
}

struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}
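/*
 * Releasing an iocbq deliberately zeroes only the fields that change per
 * command (everything from the iocb member onward); the iotag assigned by
 * lpfc_sli_next_iotag() survives the memset, so the iocbq_lookup[] slot
 * claimed for this buffer stays valid when the buffer is reused.
 */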
/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return 0;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}
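/*
 * Pending commands wait on a ring's txq until a command ring slot opens up;
 * once issued to the HBA they are tracked on txcmplq until the matching
 * response IOCB completes them.  The helpers below move iocbs through that
 * life cycle.
 */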
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        list_add_tail(&piocb->list, &pring->txcmplq);
        pring->txcmplq_cnt++;
        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                if (!piocb->vport)
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies + HZ * (phba->fc_ratov << 1));
        }

        return 0;
}

static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        if (cmd_iocb != NULL)
                pring->txq_cnt--;
        return cmd_iocb;
}

static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        uint32_t max_cmd_idx = pring->numCiocb;

        if ((pring->next_cmdidx == pring->cmdidx) &&
            (++pring->next_cmdidx >= max_cmd_idx))
                pring->next_cmdidx = 0;

        if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->local_getidx, max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->local_getidx == pring->next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}
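/*
 * lpfc_sli_next_iocb_slot() above treats the command ring as full when
 * advancing next_cmdidx would collide with the driver's cached copy of the
 * HBA get index; it then re-reads cmdGetInx from the port get pointer block
 * before giving up, so a consumer that has made progress since the last
 * read is noticed without an extra interrupt round trip.
 */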
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                             - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof (struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}

static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
        /*
         * Set up an iotag
         */
        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_debugfs_slow_ring_trc(phba,
                        "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
                        *(((uint32_t *) &nextiocb->iocb) + 4),
                        *(((uint32_t *) &nextiocb->iocb) + 6),
                        *(((uint32_t *) &nextiocb->iocb) + 7));
        }

        /*
         * Issue iocb command to adapter
         */
        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
        wmb();
        pring->stats.iocb_cmd++;

        /*
         * If there is no completion routine to call, we can release the
         * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
         */
        if (nextiocb->iocb_cmpl)
                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
        else
                __lpfc_sli_release_iocbq(phba, nextiocb);

        /*
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
        pring->cmdidx = pring->next_cmdidx;
        writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        pring->flag |= LPFC_CALL_RING_AVAILABLE;

        wmb();

        /*
         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
         * The HBA will tell us when an IOCB entry is available.
         */
        writel((CA_R0ATT | CA_R0CE_REQ) << (ringno * 4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        pring->stats.iocb_cmd_full++;
}

static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        /*
         * Tell the HBA that there is work to do in this ring.
         */
        wmb();
        writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */
}
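/*
 * Both update helpers above ring the same doorbell: each ring owns a 4-bit
 * field in the Chip Attention register, so the attention bits are shifted
 * by (ringno * 4).  The full-ring variant additionally sets CA_R0CE_REQ so
 * the HBA raises an attention once command entries free up, at which point
 * lpfc_sli_resume_iocb() below drains the txq.
 */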
488 */ 489 if (pring->txq_cnt && 490 lpfc_is_link_up(phba) && 491 (pring->ringno != phba->sli.fcp_ring || 492 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 493 494 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 495 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 496 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 497 498 if (iocb) 499 lpfc_sli_update_ring(phba, pring); 500 else 501 lpfc_sli_update_full_ring(phba, pring); 502 } 503 504 return; 505 } 506 507 static struct lpfc_hbq_entry * 508 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 509 { 510 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 511 512 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 513 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 514 hbqp->next_hbqPutIdx = 0; 515 516 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 517 uint32_t raw_index = phba->hbq_get[hbqno]; 518 uint32_t getidx = le32_to_cpu(raw_index); 519 520 hbqp->local_hbqGetIdx = getidx; 521 522 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 523 lpfc_printf_log(phba, KERN_ERR, 524 LOG_SLI | LOG_VPORT, 525 "1802 HBQ %d: local_hbqGetIdx " 526 "%u is > than hbqp->entry_count %u\n", 527 hbqno, hbqp->local_hbqGetIdx, 528 hbqp->entry_count); 529 530 phba->link_state = LPFC_HBA_ERROR; 531 return NULL; 532 } 533 534 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 535 return NULL; 536 } 537 538 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 539 hbqp->hbqPutIdx; 540 } 541 542 void 543 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 544 { 545 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 546 struct hbq_dmabuf *hbq_buf; 547 unsigned long flags; 548 int i, hbq_count; 549 uint32_t hbqno; 550 551 hbq_count = lpfc_sli_hbq_count(); 552 /* Return all memory used by all HBQs */ 553 spin_lock_irqsave(&phba->hbalock, flags); 554 for (i = 0; i < hbq_count; ++i) { 555 list_for_each_entry_safe(dmabuf, next_dmabuf, 556 &phba->hbqs[i].hbq_buffer_list, list) { 557 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 558 list_del(&hbq_buf->dbuf.list); 559 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 560 } 561 phba->hbqs[i].buffer_count = 0; 562 } 563 /* Return all HBQ buffer that are in-fly */ 564 list_for_each_entry_safe(dmabuf, next_dmabuf, 565 &phba->hbqbuf_in_list, list) { 566 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 567 list_del(&hbq_buf->dbuf.list); 568 if (hbq_buf->tag == -1) { 569 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 570 (phba, hbq_buf); 571 } else { 572 hbqno = hbq_buf->tag >> 16; 573 if (hbqno >= LPFC_MAX_HBQS) 574 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 575 (phba, hbq_buf); 576 else 577 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 578 hbq_buf); 579 } 580 } 581 582 /* Mark the HBQs not in use */ 583 phba->hbq_in_use = 0; 584 spin_unlock_irqrestore(&phba->hbalock, flags); 585 } 586 587 static struct lpfc_hbq_entry * 588 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 589 struct hbq_dmabuf *hbq_buf) 590 { 591 struct lpfc_hbq_entry *hbqe; 592 dma_addr_t physaddr = hbq_buf->dbuf.phys; 593 594 /* Get next HBQ entry slot to use */ 595 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 596 if (hbqe) { 597 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 598 599 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 600 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 601 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 602 hbqe->bde.tus.f.bdeFlags = 0; 603 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 604 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 605 /* Sync SLIM */ 606 hbqp->hbqPutIdx = 
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
                         struct hbq_dmabuf *hbq_buf)
{
        struct lpfc_hbq_entry *hbqe;
        dma_addr_t physaddr = hbq_buf->dbuf.phys;

        /* Get next HBQ entry slot to use */
        hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
        if (hbqe) {
                struct hbq_s *hbqp = &phba->hbqs[hbqno];

                hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
                hbqe->bde.tus.f.bdeSize = hbq_buf->size;
                hbqe->bde.tus.f.bdeFlags = 0;
                hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
                hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
                /* Sync SLIM */
                hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
                writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
                /* flush */
                readl(phba->hbq_put + hbqno);
                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
        }
        return hbqe;
}

static struct lpfc_hbq_init lpfc_els_hbq = {
        .rn = 1,
        .entry_count = 200,
        .mask_count = 0,
        .profile = 0,
        .ring_mask = (1 << LPFC_ELS_RING),
        .buffer_count = 0,
        .init_count = 20,
        .add_count = 5,
};

static struct lpfc_hbq_init lpfc_extra_hbq = {
        .rn = 1,
        .entry_count = 200,
        .mask_count = 0,
        .profile = 0,
        .ring_mask = (1 << LPFC_EXTRA_RING),
        .buffer_count = 0,
        .init_count = 0,
        .add_count = 5,
};

struct lpfc_hbq_init *lpfc_hbq_defs[] = {
        &lpfc_els_hbq,
        &lpfc_extra_hbq,
};

static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
        uint32_t i, start, end;
        unsigned long flags;
        struct hbq_dmabuf *hbq_buffer;

        if (!phba->hbqs[hbqno].hbq_alloc_buffer)
                return 0;

        start = phba->hbqs[hbqno].buffer_count;
        end = count + start;
        if (end > lpfc_hbq_defs[hbqno]->entry_count)
                end = lpfc_hbq_defs[hbqno]->entry_count;

        /* Check whether HBQ is still in use */
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!phba->hbq_in_use)
                goto out;

        /* Populate HBQ entries */
        for (i = start; i < end; i++) {
                hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
                if (!hbq_buffer)
                        goto err;
                hbq_buffer->tag = (i | (hbqno << 16));
                if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
                        phba->hbqs[hbqno].buffer_count++;
                else
                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
        }

out:
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return 0;
err:
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return 1;
}

int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
        return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                         lpfc_hbq_defs[qno]->add_count));
}

static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
        return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                         lpfc_hbq_defs[qno]->init_count));
}
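/*
 * Each HBQ is seeded with init_count buffers at setup and topped up with
 * add_count buffers at a time as the firmware consumes them;
 * lpfc_sli_hbqbuf_fill_hbqs() clamps the total at the queue's entry_count,
 * so posting is harmless once the ring is already full.
 */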
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
        struct lpfc_dmabuf *d_buf;
        struct hbq_dmabuf *hbq_buf;
        uint32_t hbqno;

        hbqno = tag >> 16;
        if (hbqno >= LPFC_MAX_HBQS)
                return NULL;

        list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
                hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
                if (hbq_buf->tag == tag) {
                        return hbq_buf;
                }
        }
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
                        "1803 Bad hbq tag. Data: x%x x%x\n",
                        tag, phba->hbqs[tag >> 16].buffer_count);
        return NULL;
}

void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
        uint32_t hbqno;

        if (hbq_buffer) {
                hbqno = hbq_buffer->tag >> 16;
                if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
                }
        }
}

static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
        uint8_t ret;

        switch (mbxCommand) {
        case MBX_LOAD_SM:
        case MBX_READ_NV:
        case MBX_WRITE_NV:
        case MBX_WRITE_VPARMS:
        case MBX_RUN_BIU_DIAG:
        case MBX_INIT_LINK:
        case MBX_DOWN_LINK:
        case MBX_CONFIG_LINK:
        case MBX_CONFIG_RING:
        case MBX_RESET_RING:
        case MBX_READ_CONFIG:
        case MBX_READ_RCONFIG:
        case MBX_READ_SPARM:
        case MBX_READ_STATUS:
        case MBX_READ_RPI:
        case MBX_READ_XRI:
        case MBX_READ_REV:
        case MBX_READ_LNK_STAT:
        case MBX_REG_LOGIN:
        case MBX_UNREG_LOGIN:
        case MBX_READ_LA:
        case MBX_CLEAR_LA:
        case MBX_DUMP_MEMORY:
        case MBX_DUMP_CONTEXT:
        case MBX_RUN_DIAGS:
        case MBX_RESTART:
        case MBX_UPDATE_CFG:
        case MBX_DOWN_LOAD:
        case MBX_DEL_LD_ENTRY:
        case MBX_RUN_PROGRAM:
        case MBX_SET_MASK:
        case MBX_SET_VARIABLE:
        case MBX_UNREG_D_ID:
        case MBX_KILL_BOARD:
        case MBX_CONFIG_FARP:
        case MBX_BEACON:
        case MBX_LOAD_AREA:
        case MBX_RUN_BIU_DIAG64:
        case MBX_CONFIG_PORT:
        case MBX_READ_SPARM64:
        case MBX_READ_RPI64:
        case MBX_REG_LOGIN64:
        case MBX_READ_LA64:
        case MBX_WRITE_WWN:
        case MBX_SET_DEBUG:
        case MBX_LOAD_EXP_ROM:
        case MBX_ASYNCEVT_ENABLE:
        case MBX_REG_VPI:
        case MBX_UNREG_VPI:
        case MBX_HEARTBEAT:
                ret = mbxCommand;
                break;
        default:
                ret = MBX_SHUTDOWN;
                break;
        }
        return ret;
}

static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        wait_queue_head_t *pdone_q;
        unsigned long drvr_flag;

        /*
         * If pdone_q is empty, the driver thread gave up waiting and
         * continued running.
         */
        pmboxq->mbox_flag |= LPFC_MBX_WAKE;
        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        pdone_q = (wait_queue_head_t *) pmboxq->context1;
        if (pdone_q)
                wake_up_interruptible(pdone_q);
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
        return;
}
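/*
 * Default mailbox completion handler.  It frees the DMA buffer attached in
 * context1 and, for a REG_LOGIN64 that completed after its node went away,
 * reuses the same mailbox to issue the compensating UNREG_LOGIN before the
 * mailbox memory is returned to the pool.
 */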
832 */ 833 if (!(phba->pport->load_flag & FC_UNLOADING) && 834 pmb->mb.mbxCommand == MBX_REG_LOGIN64 && 835 !pmb->mb.mbxStatus) { 836 837 rpi = pmb->mb.un.varWords[0]; 838 lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); 839 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 840 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 841 if (rc != MBX_NOT_FINISHED) 842 return; 843 } 844 845 mempool_free(pmb, phba->mbox_mem_pool); 846 return; 847 } 848 849 int 850 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 851 { 852 MAILBOX_t *pmbox; 853 LPFC_MBOXQ_t *pmb; 854 int rc; 855 LIST_HEAD(cmplq); 856 857 phba->sli.slistat.mbox_event++; 858 859 /* Get all completed mailboxe buffers into the cmplq */ 860 spin_lock_irq(&phba->hbalock); 861 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 862 spin_unlock_irq(&phba->hbalock); 863 864 /* Get a Mailbox buffer to setup mailbox commands for callback */ 865 do { 866 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 867 if (pmb == NULL) 868 break; 869 870 pmbox = &pmb->mb; 871 872 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 873 if (pmb->vport) { 874 lpfc_debugfs_disc_trc(pmb->vport, 875 LPFC_DISC_TRC_MBOX_VPORT, 876 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 877 (uint32_t)pmbox->mbxCommand, 878 pmbox->un.varWords[0], 879 pmbox->un.varWords[1]); 880 } 881 else { 882 lpfc_debugfs_disc_trc(phba->pport, 883 LPFC_DISC_TRC_MBOX, 884 "MBOX cmpl: cmd:x%x mb:x%x x%x", 885 (uint32_t)pmbox->mbxCommand, 886 pmbox->un.varWords[0], 887 pmbox->un.varWords[1]); 888 } 889 } 890 891 /* 892 * It is a fatal error if unknown mbox command completion. 893 */ 894 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 895 MBX_SHUTDOWN) { 896 /* Unknow mailbox command compl */ 897 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 898 "(%d):0323 Unknown Mailbox command " 899 "%x Cmpl\n", 900 pmb->vport ? pmb->vport->vpi : 0, 901 pmbox->mbxCommand); 902 phba->link_state = LPFC_HBA_ERROR; 903 phba->work_hs = HS_FFER3; 904 lpfc_handle_eratt(phba); 905 continue; 906 } 907 908 if (pmbox->mbxStatus) { 909 phba->sli.slistat.mbox_stat_err++; 910 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 911 /* Mbox cmd cmpl error - RETRYing */ 912 lpfc_printf_log(phba, KERN_INFO, 913 LOG_MBOX | LOG_SLI, 914 "(%d):0305 Mbox cmd cmpl " 915 "error - RETRYing Data: x%x " 916 "x%x x%x x%x\n", 917 pmb->vport ? pmb->vport->vpi :0, 918 pmbox->mbxCommand, 919 pmbox->mbxStatus, 920 pmbox->un.varWords[0], 921 pmb->vport->port_state); 922 pmbox->mbxStatus = 0; 923 pmbox->mbxOwner = OWN_HOST; 924 spin_lock_irq(&phba->hbalock); 925 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 926 spin_unlock_irq(&phba->hbalock); 927 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 928 if (rc == MBX_SUCCESS) 929 continue; 930 } 931 } 932 933 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 934 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 935 "(%d):0307 Mailbox cmd x%x Cmpl x%p " 936 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 937 pmb->vport ? 

                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "(%d):0307 Mailbox cmd x%x Cmpl x%p "
                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
                                pmb->mbox_cmpl,
                                *((uint32_t *) pmbox),
                                pmbox->un.varWords[0],
                                pmbox->un.varWords[1],
                                pmbox->un.varWords[2],
                                pmbox->un.varWords[3],
                                pmbox->un.varWords[4],
                                pmbox->un.varWords[5],
                                pmbox->un.varWords[6],
                                pmbox->un.varWords[7]);

                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba, pmb);
        } while (1);
        return 0;
}

static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
        struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
        uint32_t hbqno;
        void *virt;             /* virtual address ptr */
        dma_addr_t phys;        /* mapped address */
        unsigned long flags;

        /* Check whether HBQ is still in use */
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!phba->hbq_in_use) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return NULL;
        }

        hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
        if (hbq_entry == NULL) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return NULL;
        }
        list_del(&hbq_entry->dbuf.list);

        hbqno = tag >> 16;
        new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
        if (new_hbq_entry == NULL) {
                list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return &hbq_entry->dbuf;
        }
        new_hbq_entry->tag = -1;
        phys = new_hbq_entry->dbuf.phys;
        virt = new_hbq_entry->dbuf.virt;
        new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
        new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
        hbq_entry->dbuf.phys = phys;
        hbq_entry->dbuf.virt = virt;
        lpfc_sli_free_hbq(phba, hbq_entry);
        list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        return &new_hbq_entry->dbuf;
}

static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
                  struct lpfc_sli_ring *pring,
                  uint32_t tag)
{
        if (tag & QUE_BUFTAG_BIT)
                return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
        else
                return lpfc_sli_replace_hbqbuff(phba, tag);
}
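/*
 * Unsolicited IOCB handling below: resolve any DMA buffers referenced by
 * the response (HBQ or QUE_RING_BUF tagged), stitch multi-IOCB sequences
 * back together on iocb_continue_saveq, and dispatch the completed sequence
 * to the ring handler registered for its R_CTL/TYPE pair.
 */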
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                            struct lpfc_iocbq *saveq)
{
        IOCB_t *irsp;
        WORD5 *w5p;
        uint32_t Rctl, Type;
        uint32_t match, i;
        struct lpfc_iocbq *iocbq;
        struct lpfc_dmabuf *dmzbuf;

        match = 0;
        irsp = &(saveq->iocb);

        if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
                return 1;
        if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
                if (pring->lpfc_sli_rcv_async_status)
                        pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
                else
                        lpfc_printf_log(phba,
                                        KERN_WARNING,
                                        LOG_SLI,
                                        "0316 Ring %d handler: unexpected "
                                        "ASYNC_STATUS iocb received evt_code "
                                        "0x%x\n",
                                        pring->ringno,
                                        irsp->un.asyncstat.evt_code);
                return 1;
        }

        if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
            (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
                if (irsp->ulpBdeCount > 0) {
                        dmzbuf = lpfc_sli_get_buff(phba, pring,
                                                   irsp->un.ulpWord[3]);
                        lpfc_in_buf_free(phba, dmzbuf);
                }

                if (irsp->ulpBdeCount > 1) {
                        dmzbuf = lpfc_sli_get_buff(phba, pring,
                                                   irsp->unsli3.sli3Words[3]);
                        lpfc_in_buf_free(phba, dmzbuf);
                }

                if (irsp->ulpBdeCount > 2) {
                        dmzbuf = lpfc_sli_get_buff(phba, pring,
                                                   irsp->unsli3.sli3Words[7]);
                        lpfc_in_buf_free(phba, dmzbuf);
                }

                return 1;
        }

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                if (irsp->ulpBdeCount != 0) {
                        saveq->context2 = lpfc_sli_get_buff(phba, pring,
                                                            irsp->un.ulpWord[3]);
                        if (!saveq->context2)
                                lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_SLI,
                                        "0341 Ring %d Cannot find buffer for "
                                        "an unsolicited iocb. tag 0x%x\n",
                                        pring->ringno,
                                        irsp->un.ulpWord[3]);
                }
                if (irsp->ulpBdeCount == 2) {
                        saveq->context3 = lpfc_sli_get_buff(phba, pring,
                                                            irsp->unsli3.sli3Words[7]);
                        if (!saveq->context3)
                                lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_SLI,
                                        "0342 Ring %d Cannot find buffer for an"
                                        " unsolicited iocb. tag 0x%x\n",
                                        pring->ringno,
                                        irsp->unsli3.sli3Words[7]);
                }
                list_for_each_entry(iocbq, &saveq->list, list) {
                        irsp = &(iocbq->iocb);
                        if (irsp->ulpBdeCount != 0) {
                                iocbq->context2 = lpfc_sli_get_buff(phba, pring,
                                                        irsp->un.ulpWord[3]);
                                if (!iocbq->context2)
                                        lpfc_printf_log(phba,
                                                KERN_ERR,
                                                LOG_SLI,
                                                "0343 Ring %d Cannot find "
                                                "buffer for an unsolicited iocb"
                                                ". tag 0x%x\n", pring->ringno,
                                                irsp->un.ulpWord[3]);
                        }
                        if (irsp->ulpBdeCount == 2) {
                                iocbq->context3 = lpfc_sli_get_buff(phba, pring,
                                                        irsp->unsli3.sli3Words[7]);
                                if (!iocbq->context3)
                                        lpfc_printf_log(phba,
                                                KERN_ERR,
                                                LOG_SLI,
                                                "0344 Ring %d Cannot find "
                                                "buffer for an unsolicited "
                                                "iocb. tag 0x%x\n",
                                                pring->ringno,
                                                irsp->unsli3.sli3Words[7]);
                        }
                }
        }
        if (irsp->ulpBdeCount != 0 &&
            (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
             irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
                int found = 0;

                /* search continue save q for same XRI */
                list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
                        if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
                                list_add_tail(&saveq->list, &iocbq->list);
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        list_add_tail(&saveq->clist,
                                      &pring->iocb_continue_saveq);
                if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
                        list_del_init(&iocbq->clist);
                        saveq = iocbq;
                        irsp = &(saveq->iocb);
                } else
                        return 0;
        }
        if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
            (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
            (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
                Rctl = FC_ELS_REQ;
                Type = FC_ELS_DATA;
        } else {
                w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
                Rctl = w5p->hcsw.Rctl;
                Type = w5p->hcsw.Type;

                /* Firmware Workaround */
                if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
                    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
                     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
                        Rctl = FC_ELS_REQ;
                        Type = FC_ELS_DATA;
                        w5p->hcsw.Rctl = Rctl;
                        w5p->hcsw.Type = Type;
                }
        }

        /* Unsolicited Responses */
        if (pring->prt[0].profile) {
                if (pring->prt[0].lpfc_sli_rcv_unsol_event)
                        (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
                                                                  saveq);
                match = 1;
        } else {
                /* We must search, based on rctl / type
                   for the right routine */
                for (i = 0; i < pring->num_mask; i++) {
                        if ((pring->prt[i].rctl == Rctl)
                            && (pring->prt[i].type == Type)) {
                                if (pring->prt[i].lpfc_sli_rcv_unsol_event)
                                        (pring->prt[i].lpfc_sli_rcv_unsol_event)
                                                (phba, pring, saveq);
                                match = 1;
                                break;
                        }
                }
        }
        if (match == 0) {
                /* Unexpected Rctl / Type received */
                /* Ring <ringno> handler: unexpected
                   Rctl <Rctl> Type <Type> received */
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0313 Ring %d handler: unexpected Rctl x%x "
                                "Type x%x received\n",
                                pring->ringno, Rctl, Type);
        }
        return 1;
}
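/*
 * The response's ulpIoTag indexes straight into psli->iocbq_lookup[], so
 * matching a completion to its originating command is O(1); a tag outside
 * 1..last_iotag indicates a response the driver never issued.
 */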
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
                      struct lpfc_sli_ring *pring,
                      struct lpfc_iocbq *prspiocb)
{
        struct lpfc_iocbq *cmd_iocb = NULL;
        uint16_t iotag;

        iotag = prspiocb->iocb.ulpIoTag;

        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
                cmd_iocb = phba->sli.iocbq_lookup[iotag];
                list_del_init(&cmd_iocb->list);
                pring->txcmplq_cnt--;
                return cmd_iocb;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0317 iotag x%x is out of "
                        "range: max iotag x%x wd0 x%x\n",
                        iotag, phba->sli.last_iotag,
                        *(((uint32_t *) &prspiocb->iocb) + 7));
        return NULL;
}

static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                          struct lpfc_iocbq *saveq)
{
        struct lpfc_iocbq *cmdiocbp;
        int rc = 1;
        unsigned long iflag;

        /* Based on the iotag field, get the cmd IOCB from the txcmplq */
        spin_lock_irqsave(&phba->hbalock, iflag);
        cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
        spin_unlock_irqrestore(&phba->hbalock, iflag);

        if (cmdiocbp) {
                if (cmdiocbp->iocb_cmpl) {
                        /*
                         * Post all ELS completions to the worker thread.
                         * All other are passed to the completion callback.
                         */
                        if (pring->ringno == LPFC_ELS_RING) {
                                if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
                                        cmdiocbp->iocb_flag &=
                                                ~LPFC_DRIVER_ABORTED;
                                        saveq->iocb.ulpStatus =
                                                IOSTAT_LOCAL_REJECT;
                                        saveq->iocb.un.ulpWord[4] =
                                                IOERR_SLI_ABORTED;

                                        /* Firmware could still be in progress
                                         * of DMAing payload, so don't free data
                                         * buffer till after a hbeat.
                                         */
                                        saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
                                }
                        }
                        (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
                } else
                        lpfc_sli_release_iocbq(phba, cmdiocbp);
        } else {
                /*
                 * Unknown initiating command based on the response iotag.
                 * This could be the case on the ELS ring because of
                 * lpfc_els_abort().
                 */
                if (pring->ringno != LPFC_ELS_RING) {
                        /*
                         * Ring <ringno> handler: unexpected completion IoTag
                         * <IoTag>
                         */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "0322 Ring %d handler: "
                                        "unexpected completion IoTag x%x "
                                        "Data: x%x x%x x%x x%x\n",
                                        pring->ringno,
                                        saveq->iocb.ulpIoTag,
                                        saveq->iocb.ulpStatus,
                                        saveq->iocb.un.ulpWord[4],
                                        saveq->iocb.ulpCommand,
                                        saveq->iocb.ulpContext);
                }
        }

        return rc;
}
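/*
 * A port-reported rspPutInx beyond the ring size means the host and HBA
 * have lost agreement on the response ring state; the handlers below treat
 * it as a board error, flag HA_ERATT/HS_FFER3 and defer recovery to the
 * worker thread.
 */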
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        /*
         * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
         * rsp ring <portRspMax>
         */
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0312 Ring %d handler: portRspPut %d "
                        "is bigger than rsp ring %d\n",
                        pring->ringno, le32_to_cpu(pgp->rspPutInx),
                        pring->numRiocb);

        phba->link_state = LPFC_HBA_ERROR;

        /*
         * All error attention handlers are posted to
         * worker thread
         */
        phba->work_ha |= HA_ERATT;
        phba->work_hs = HS_FFER3;

        lpfc_worker_wake_up(phba);

        return;
}

void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
        IOCB_t *irsp = NULL;
        IOCB_t *entry = NULL;
        struct lpfc_iocbq *cmdiocbq = NULL;
        struct lpfc_iocbq rspiocbq;
        struct lpfc_pgp *pgp;
        uint32_t status;
        uint32_t portRspPut, portRspMax;
        int type;
        uint32_t rsp_cmpl = 0;
        uint32_t ha_copy;
        unsigned long iflags;

        pring->stats.iocb_event++;

        pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];


        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (unlikely(portRspPut >= portRspMax)) {
                lpfc_sli_rsp_pointers_error(phba, pring);
                return;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                entry = lpfc_resp_iocb(phba, pring);
                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
                                      (uint32_t *) &rspiocbq.iocb,
                                      phba->iocb_rsp_size);
                irsp = &rspiocbq.iocb;
                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
                pring->stats.iocb_rsp++;
                rsp_cmpl++;

                if (unlikely(irsp->ulpStatus)) {
                        /* Rsp ring <ringno> error: IOCB */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "0326 Rsp Ring %d error: IOCB Data: "
                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                        pring->ringno,
                                        irsp->un.ulpWord[0],
                                        irsp->un.ulpWord[1],
                                        irsp->un.ulpWord[2],
                                        irsp->un.ulpWord[3],
                                        irsp->un.ulpWord[4],
                                        irsp->un.ulpWord[5],
                                        *(((uint32_t *) irsp) + 6),
                                        *(((uint32_t *) irsp) + 7));
                }

                switch (type) {
                case LPFC_ABORT_IOCB:
                case LPFC_SOL_IOCB:
                        /*
                         * Idle exchange closed via ABTS from port.  No iocb
                         * resources need to be recovered.
                         */
                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                                "0314 IOCB cmd 0x%x "
                                                "processed. Skipping "
                                                "completion\n",
                                                irsp->ulpCommand);
                                break;
                        }

                        spin_lock_irqsave(&phba->hbalock, iflags);
                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
                                                         &rspiocbq);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
                                                      &rspiocbq);
                        }
                        break;
                default:
                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
                                char adaptermsg[LPFC_MAX_ADPTMSG];
                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                       MAX_MSG_DATA);
                                dev_warn(&((phba->pcidev)->dev),
                                         "lpfc%d: %s\n",
                                         phba->brd_no, adaptermsg);
                        } else {
                                /* Unknown IOCB command */
                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                "0321 Unknown IOCB command "
                                                "Data: x%x, x%x x%x x%x x%x\n",
                                                type, irsp->ulpCommand,
                                                irsp->ulpStatus,
                                                irsp->ulpIoTag,
                                                irsp->ulpContext);
                        }
                        break;
                }

                /*
                 * The response IOCB has been processed.  Update the ring
                 * pointer in SLIM.  If the port response put pointer has not
                 * been updated, sync the pgp->rspPutInx and fetch the new port
                 * response put pointer.
                 */
                writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

                if (pring->rspidx == portRspPut)
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
        }

        ha_copy = readl(phba->HAregaddr);
        ha_copy >>= (LPFC_FCP_RING * 4);

        if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
                spin_lock_irqsave(&phba->hbalock, iflags);
                pring->stats.iocb_rsp_full++;
                status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr);
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }
        if ((ha_copy & HA_R0CE_RSP) &&
            (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                spin_lock_irqsave(&phba->hbalock, iflags);
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }

        return;
}
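/*
 * lpfc_sli_poll_fcp_ring() above is the polled-mode counterpart of the
 * fast-ring handler that follows.  It walks the same response ring but is
 * driven by the driver's polling logic rather than by a host-attention
 * interrupt, so it samples HAregaddr itself to decide whether ring-full or
 * ring-available housekeeping is due.
 */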
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                                struct lpfc_sli_ring *pring, uint32_t mask)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        IOCB_t *irsp = NULL;
        IOCB_t *entry = NULL;
        struct lpfc_iocbq *cmdiocbq = NULL;
        struct lpfc_iocbq rspiocbq;
        uint32_t status;
        uint32_t portRspPut, portRspMax;
        int rc = 1;
        lpfc_iocb_type type;
        unsigned long iflag;
        uint32_t rsp_cmpl = 0;

        spin_lock_irqsave(&phba->hbalock, iflag);
        pring->stats.iocb_event++;

        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (unlikely(portRspPut >= portRspMax)) {
                lpfc_sli_rsp_pointers_error(phba, pring);
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                return 1;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                /*
                 * Fetch an entry off the ring and copy it into a local data
                 * structure.  The copy involves a byte-swap since the
                 * network byte order and pci byte orders are different.
                 */
                entry = lpfc_resp_iocb(phba, pring);
                phba->last_completion_time = jiffies;

                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
                                      (uint32_t *) &rspiocbq.iocb,
                                      phba->iocb_rsp_size);
                INIT_LIST_HEAD(&(rspiocbq.list));
                irsp = &rspiocbq.iocb;

                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
                pring->stats.iocb_rsp++;
                rsp_cmpl++;

                if (unlikely(irsp->ulpStatus)) {
                        /*
                         * If resource errors reported from HBA, reduce
                         * queuedepths of the SCSI device.
                         */
                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
                            (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                lpfc_adjust_queue_depth(phba);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        }

                        /* Rsp ring <ringno> error: IOCB */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "0336 Rsp Ring %d error: IOCB Data: "
                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                        pring->ringno,
                                        irsp->un.ulpWord[0],
                                        irsp->un.ulpWord[1],
                                        irsp->un.ulpWord[2],
                                        irsp->un.ulpWord[3],
                                        irsp->un.ulpWord[4],
                                        irsp->un.ulpWord[5],
                                        *(((uint32_t *) irsp) + 6),
                                        *(((uint32_t *) irsp) + 7));
                }

                switch (type) {
                case LPFC_ABORT_IOCB:
                case LPFC_SOL_IOCB:
                        /*
                         * Idle exchange closed via ABTS from port.  No iocb
                         * resources need to be recovered.
                         */
                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                                "0333 IOCB cmd 0x%x"
                                                " processed. Skipping"
                                                " completion\n",
                                                irsp->ulpCommand);
                                break;
                        }

                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
                                                         &rspiocbq);
                        if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
                                if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
                                                              &rspiocbq);
                                } else {
                                        spin_unlock_irqrestore(&phba->hbalock,
                                                               iflag);
                                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
                                                              &rspiocbq);
                                        spin_lock_irqsave(&phba->hbalock,
                                                          iflag);
                                }
                        }
                        break;
                case LPFC_UNSOL_IOCB:
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
                        spin_lock_irqsave(&phba->hbalock, iflag);
                        break;
                default:
                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
                                char adaptermsg[LPFC_MAX_ADPTMSG];
                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                       MAX_MSG_DATA);
                                dev_warn(&((phba->pcidev)->dev),
                                         "lpfc%d: %s\n",
                                         phba->brd_no, adaptermsg);
                        } else {
                                /* Unknown IOCB command */
                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                "0334 Unknown IOCB command "
                                                "Data: x%x, x%x x%x x%x x%x\n",
                                                type, irsp->ulpCommand,
                                                irsp->ulpStatus,
                                                irsp->ulpIoTag,
                                                irsp->ulpContext);
                        }
                        break;
                }

                /*
                 * The response IOCB has been processed.  Update the ring
                 * pointer in SLIM.  If the port response put pointer has not
                 * been updated, sync the pgp->rspPutInx and fetch the new port
                 * response put pointer.
                 */
                writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

                if (pring->rspidx == portRspPut)
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
        }

        if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
                pring->stats.iocb_rsp_full++;
                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr);
        }
        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

        }

        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return rc;
}
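/*
 * Slow-path (ELS/CT) responses differ from the fast path above: entries may
 * chain across several IOCBs until ulpLe is set, so each one is copied into
 * a driver iocbq and collected on iocb_continueq before the completed
 * sequence is dispatched by type.
 */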
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
                                struct lpfc_sli_ring *pring, uint32_t mask)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        IOCB_t *entry;
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *rspiocbp = NULL;
        struct lpfc_iocbq *next_iocb;
        struct lpfc_iocbq *cmdiocbp;
        struct lpfc_iocbq *saveq;
        uint8_t iocb_cmd_type;
        lpfc_iocb_type type;
        uint32_t status, free_saveq;
        uint32_t portRspPut, portRspMax;
        int rc = 1;
        unsigned long iflag;

        spin_lock_irqsave(&phba->hbalock, iflag);
        pring->stats.iocb_event++;

        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (portRspPut >= portRspMax) {
                /*
                 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
                 * than rsp ring <portRspMax>
                 */
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0303 Ring %d handler: portRspPut %d "
                                "is bigger than rsp ring %d\n",
                                pring->ringno, portRspPut, portRspMax);

                phba->link_state = LPFC_HBA_ERROR;
                spin_unlock_irqrestore(&phba->hbalock, iflag);

                phba->work_hs = HS_FFER3;
                lpfc_handle_eratt(phba);

                return 1;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                /*
                 * Build a completion list and call the appropriate handler.
                 * The process is to get the next available response iocb, get
                 * a free iocb from the list, copy the response data into the
                 * free iocb, insert to the continuation list, and update the
                 * next response index to slim.  This process makes response
                 * iocb's in the ring available to DMA as fast as possible but
                 * pays a penalty for a copy operation.  Since the iocb is
                 * only 32 bytes, this penalty is considered small relative to
                 * the PCI reads for register values and a slim write.  When
                 * the ulpLe field is set, the entire Command has been
                 * received.
                 */
                entry = lpfc_resp_iocb(phba, pring);

                phba->last_completion_time = jiffies;
                rspiocbp = __lpfc_sli_get_iocbq(phba);
                if (rspiocbp == NULL) {
                        printk(KERN_ERR "%s: out of buffers! Failing "
                               "completion.\n", __func__);
                        break;
                }

                lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
                                      phba->iocb_rsp_size);
                irsp = &rspiocbp->iocb;

                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                if (pring->ringno == LPFC_ELS_RING) {
                        lpfc_debugfs_slow_ring_trc(phba,
                        "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
                                *(((uint32_t *) irsp) + 4),
                                *(((uint32_t *) irsp) + 6),
                                *(((uint32_t *) irsp) + 7));
                }

                writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

                list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));

                pring->iocb_continueq_cnt++;
                if (irsp->ulpLe) {
                        /*
                         * By default, the driver expects to free all resources
                         * associated with this iocb completion.
                         */
                        free_saveq = 1;
                        saveq = list_get_first(&pring->iocb_continueq,
                                               struct lpfc_iocbq, list);
                        irsp = &(saveq->iocb);
                        list_del_init(&pring->iocb_continueq);
                        pring->iocb_continueq_cnt = 0;

                        pring->stats.iocb_rsp++;

                        /*
                         * If resource errors reported from HBA, reduce
                         * queuedepths of the SCSI device.
                         */
                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
                            (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                lpfc_adjust_queue_depth(phba);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        }

                        if (irsp->ulpStatus) {
                                /* Rsp ring <ringno> error: IOCB */
                                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                                "0328 Rsp Ring %d error: "
                                                "IOCB Data: "
                                                "x%x x%x x%x x%x "
                                                "x%x x%x x%x x%x "
                                                "x%x x%x x%x x%x "
                                                "x%x x%x x%x x%x\n",
                                                pring->ringno,
                                                irsp->un.ulpWord[0],
                                                irsp->un.ulpWord[1],
                                                irsp->un.ulpWord[2],
                                                irsp->un.ulpWord[3],
                                                irsp->un.ulpWord[4],
                                                irsp->un.ulpWord[5],
                                                *(((uint32_t *) irsp) + 6),
                                                *(((uint32_t *) irsp) + 7),
                                                *(((uint32_t *) irsp) + 8),
                                                *(((uint32_t *) irsp) + 9),
                                                *(((uint32_t *) irsp) + 10),
                                                *(((uint32_t *) irsp) + 11),
                                                *(((uint32_t *) irsp) + 12),
                                                *(((uint32_t *) irsp) + 13),
                                                *(((uint32_t *) irsp) + 14),
                                                *(((uint32_t *) irsp) + 15));
                        }

                        /*
                         * Fetch the IOCB command type and call the correct
                         * completion routine.  Solicited and Unsolicited
                         * IOCBs on the ELS ring get freed back to the
                         * lpfc_iocb_list by the discovery kernel thread.
                         */
                        iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
                        type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
                        if (type == LPFC_SOL_IOCB) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                rc = lpfc_sli_process_sol_iocb(phba, pring,
                                                               saveq);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        } else if (type == LPFC_UNSOL_IOCB) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                rc = lpfc_sli_process_unsol_iocb(phba, pring,
                                                                 saveq);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                                if (!rc)
                                        free_saveq = 0;
                        } else if (type == LPFC_ABORT_IOCB) {
                                if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
                                    ((cmdiocbp =
                                      lpfc_sli_iocbq_lookup(phba, pring,
                                                            saveq)))) {
                                        /* Call the specified completion
                                           routine */
                                        if (cmdiocbp->iocb_cmpl) {
                                                spin_unlock_irqrestore(
                                                        &phba->hbalock,
                                                        iflag);
                                                (cmdiocbp->iocb_cmpl) (phba,
                                                        cmdiocbp, saveq);
                                                spin_lock_irqsave(
                                                        &phba->hbalock,
                                                        iflag);
                                        } else
                                                __lpfc_sli_release_iocbq(phba,
                                                        cmdiocbp);
                                }
                        } else if (type == LPFC_UNKNOWN_IOCB) {
                                if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

                                        char adaptermsg[LPFC_MAX_ADPTMSG];

                                        memset(adaptermsg, 0,
                                               LPFC_MAX_ADPTMSG);
                                        memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                               MAX_MSG_DATA);
                                        dev_warn(&((phba->pcidev)->dev),
                                                 "lpfc%d: %s\n",
                                                 phba->brd_no, adaptermsg);
                                } else {
                                        /* Unknown IOCB command */
                                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                        "0335 Unknown IOCB "
                                                        "command Data: x%x "
                                                        "x%x x%x x%x\n",
                                                        irsp->ulpCommand,
                                                        irsp->ulpStatus,
                                                        irsp->ulpIoTag,
                                                        irsp->ulpContext);
                                }
                        }

                        if (free_saveq) {
                                list_for_each_entry_safe(rspiocbp, next_iocb,
                                                         &saveq->list, list) {
                                        list_del(&rspiocbp->list);
                                        __lpfc_sli_release_iocbq(phba,
                                                                 rspiocbp);
                                }
                                __lpfc_sli_release_iocbq(phba, saveq);
                        }
                        rspiocbp = NULL;
                }

                /*
                 * If the port response put pointer has not been updated, sync
                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
                 * response put pointer.
                 */
                if (pring->rspidx == portRspPut) {
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
                }
        } /* while (pring->rspidx != portRspPut) */

        if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
                /* At least one response entry has been freed */
                pring->stats.iocb_rsp_full++;
                /* SET RxRE_RSP in Chip Att register */
                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
        }
        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

        }

        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return rc;
}
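/*
 * lpfc_sli_abort_iocb_ring() below fails everything queued on one ring:
 * iocbs still sitting on the txq are completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, while those already with the HBA
 * on the txcmplq get an ABTS via lpfc_sli_issue_abort_iotag().
 */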
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);
        struct lpfc_iocbq *iocb, *next_iocb;
        IOCB_t *cmd = NULL;

        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_fabric_abort_hba(phba);
        }

        /* Error everything on txq and txcmplq
         * First do the txq.
         */
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&pring->txq, &completions);
        pring->txq_cnt = 0;

        /* Next issue ABTS for everything on the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
                lpfc_sli_issue_abort_iotag(phba, pring, iocb);

        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
                cmd = &iocb->iocb;
                list_del_init(&iocb->list);

                if (!iocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, iocb);
                else {
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                }
        }
}
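/*
 * lpfc_sli_brdready() below polls the Host Status register for the caller's
 * mask with an escalating backoff, restarting the board once along the way;
 * it returns nonzero if HS_FFERM is raised or the board never becomes
 * ready.
 */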
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
        uint32_t status;
        int i = 0;
        int retval = 0;

        /* Read the HBA Host Status Register */
        status = readl(phba->HSregaddr);

        /*
         * Check status register every 10ms for 5 retries, then every
         * 500ms for 5, then every 2.5 sec for 5, then reset board and
         * every 2.5 sec for 4.
         * Break out of the loop if errors occurred during init.
         */
        while (((status & mask) != mask) &&
               !(status & HS_FFERM) &&
               i++ < 20) {

                if (i <= 5)
                        msleep(10);
                else if (i <= 10)
                        msleep(500);
                else
                        msleep(2500);

                if (i == 15) {
                        /* Do post */
                        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
                        lpfc_sli_brdrestart(phba);
                }
                /* Read the HBA Host Status Register */
                status = readl(phba->HSregaddr);
        }

        /* Check to see if any errors occurred during init */
        if ((status & HS_FFERM) || (i >= 20)) {
                phba->link_state = LPFC_HBA_ERROR;
                retval = 1;
        }

        return retval;
}

#define BARRIER_TEST_PATTERN (0xdeadbeef)
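/*
 * The reset barrier below quiesces the other side of the chip before a
 * board reset on Helios/Thor parts: error attention is masked, a
 * MBX_KILL_BOARD mailbox is written through SLIM, and the driver waits for
 * the test pattern at resp_buf + 1 to read back complemented before
 * restoring the HC register.
 */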
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
        uint32_t __iomem *resp_buf;
        uint32_t __iomem *mbox_buf;
        volatile uint32_t mbox;
        uint32_t hc_copy;
        int i;
        uint8_t hdrtype;

        pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
        if (hdrtype != 0x80 ||
            (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
             FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
                return;

        /*
         * Tell the other part of the chip to suspend temporarily all
         * its DMA activity.
         */
        resp_buf = phba->MBslimaddr;

        /* Disable the error attention */
        hc_copy = readl(phba->HCregaddr);
        writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        phba->link_flag |= LS_IGNORE_ERATT;

        if (readl(phba->HAregaddr) & HA_ERATT) {
                /* Clear Chip error bit */
                writel(HA_ERATT, phba->HAregaddr);
                phba->pport->stopped = 1;
        }

        mbox = 0;
        ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
        ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

        writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
        mbox_buf = phba->MBslimaddr;
        writel(mbox, mbox_buf);

        for (i = 0;
             readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
                mdelay(1);

        if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
                if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
                    phba->pport->stopped)
                        goto restore_hc;
                else
                        goto clear_errat;
        }

        ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
        for (i = 0; readl(resp_buf) != mbox && i < 500; i++)
                mdelay(1);

clear_errat:

        while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
                mdelay(1);

        if (readl(phba->HAregaddr) & HA_ERATT) {
                writel(HA_ERATT, phba->HAregaddr);
                phba->pport->stopped = 1;
        }

restore_hc:
        phba->link_flag &= ~LS_IGNORE_ERATT;
        writel(hc_copy, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
}
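/*
 * Killing the board has no mailbox completion, so lpfc_sli_brdkill() below
 * polls HAregaddr for ERATT for up to three seconds after issuing
 * MBX_KILL_BOARD and marks the HBA errored either way, since the board
 * state is undefined if the attention never arrives.
 */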
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli;
        LPFC_MBOXQ_t *pmb;
        uint32_t status;
        uint32_t ha_copy;
        int retval;
        int i = 0;

        psli = &phba->sli;

        /* Kill HBA */
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "0329 Kill HBA Data: x%x x%x\n",
                        phba->pport->port_state, psli->sli_flag);

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return 1;

        /* Disable the error attention */
        spin_lock_irq(&phba->hbalock);
        status = readl(phba->HCregaddr);
        status &= ~HC_ERINT_ENA;
        writel(status, phba->HCregaddr);
        readl(phba->HCregaddr); /* flush */
        phba->link_flag |= LS_IGNORE_ERATT;
        spin_unlock_irq(&phba->hbalock);

        lpfc_kill_board(phba, pmb);
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

        if (retval != MBX_SUCCESS) {
                if (retval != MBX_BUSY)
                        mempool_free(pmb, phba->mbox_mem_pool);
                spin_lock_irq(&phba->hbalock);
                phba->link_flag &= ~LS_IGNORE_ERATT;
                spin_unlock_irq(&phba->hbalock);
                return 1;
        }

        psli->sli_flag &= ~LPFC_SLI2_ACTIVE;

        mempool_free(pmb, phba->mbox_mem_pool);

        /* There is no completion for a KILL_BOARD mbox cmd.  Check for an
         * error attention every 100ms for 3 seconds.  If we don't get ERATT
         * after 3 seconds we still set HBA_ERROR state because the status
         * of the board is now undefined.
         */
        ha_copy = readl(phba->HAregaddr);

        while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
                mdelay(100);
                ha_copy = readl(phba->HAregaddr);
        }

        del_timer_sync(&psli->mbox_tmo);
        if (ha_copy & HA_ERATT) {
                writel(HA_ERATT, phba->HAregaddr);
                phba->pport->stopped = 1;
        }
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        phba->link_flag &= ~LS_IGNORE_ERATT;
        spin_unlock_irq(&phba->hbalock);

        psli->mbox_active = NULL;
        lpfc_hba_down_post(phba);
        phba->link_state = LPFC_HBA_ERROR;

        return ha_copy & HA_ERATT ? 0 : 1;
}
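/*
 * lpfc_sli_brdreset() below performs the physical reset: parity/SERR
 * reporting is masked off in PCI config space while INITFF is toggled in
 * the Host Control register, then the per-ring SLI indices are cleared so
 * the rings restart from slot zero.
 */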
2011 */ 2012 resp_buf = phba->MBslimaddr; 2013 2014 /* Disable the error attention */ 2015 hc_copy = readl(phba->HCregaddr); 2016 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 2017 readl(phba->HCregaddr); /* flush */ 2018 phba->link_flag |= LS_IGNORE_ERATT; 2019 2020 if (readl(phba->HAregaddr) & HA_ERATT) { 2021 /* Clear Chip error bit */ 2022 writel(HA_ERATT, phba->HAregaddr); 2023 phba->pport->stopped = 1; 2024 } 2025 2026 mbox = 0; 2027 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 2028 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 2029 2030 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 2031 mbox_buf = phba->MBslimaddr; 2032 writel(mbox, mbox_buf); 2033 2034 for (i = 0; 2035 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 2036 mdelay(1); 2037 2038 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 2039 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 2040 phba->pport->stopped) 2041 goto restore_hc; 2042 else 2043 goto clear_errat; 2044 } 2045 2046 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 2047 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 2048 mdelay(1); 2049 2050 clear_errat: 2051 2052 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 2053 mdelay(1); 2054 2055 if (readl(phba->HAregaddr) & HA_ERATT) { 2056 writel(HA_ERATT, phba->HAregaddr); 2057 phba->pport->stopped = 1; 2058 } 2059 2060 restore_hc: 2061 phba->link_flag &= ~LS_IGNORE_ERATT; 2062 writel(hc_copy, phba->HCregaddr); 2063 readl(phba->HCregaddr); /* flush */ 2064 } 2065 2066 int 2067 lpfc_sli_brdkill(struct lpfc_hba *phba) 2068 { 2069 struct lpfc_sli *psli; 2070 LPFC_MBOXQ_t *pmb; 2071 uint32_t status; 2072 uint32_t ha_copy; 2073 int retval; 2074 int i = 0; 2075 2076 psli = &phba->sli; 2077 2078 /* Kill HBA */ 2079 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2080 "0329 Kill HBA Data: x%x x%x\n", 2081 phba->pport->port_state, psli->sli_flag); 2082 2083 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2084 if (!pmb) 2085 return 1; 2086 2087 /* Disable the error attention */ 2088 spin_lock_irq(&phba->hbalock); 2089 status = readl(phba->HCregaddr); 2090 status &= ~HC_ERINT_ENA; 2091 writel(status, phba->HCregaddr); 2092 readl(phba->HCregaddr); /* flush */ 2093 phba->link_flag |= LS_IGNORE_ERATT; 2094 spin_unlock_irq(&phba->hbalock); 2095 2096 lpfc_kill_board(phba, pmb); 2097 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2098 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2099 2100 if (retval != MBX_SUCCESS) { 2101 if (retval != MBX_BUSY) 2102 mempool_free(pmb, phba->mbox_mem_pool); 2103 spin_lock_irq(&phba->hbalock); 2104 phba->link_flag &= ~LS_IGNORE_ERATT; 2105 spin_unlock_irq(&phba->hbalock); 2106 return 1; 2107 } 2108 2109 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2110 2111 mempool_free(pmb, phba->mbox_mem_pool); 2112 2113 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 2114 * attention every 100ms for 3 seconds. If we don't get ERATT after 2115 * 3 seconds we still set HBA_ERROR state because the status of the 2116 * board is now undefined. 
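	 * The 3 second window comes from polling the HA register up to 30
	 * times with a 100ms delay between reads.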
2117 */ 2118 ha_copy = readl(phba->HAregaddr); 2119 2120 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 2121 mdelay(100); 2122 ha_copy = readl(phba->HAregaddr); 2123 } 2124 2125 del_timer_sync(&psli->mbox_tmo); 2126 if (ha_copy & HA_ERATT) { 2127 writel(HA_ERATT, phba->HAregaddr); 2128 phba->pport->stopped = 1; 2129 } 2130 spin_lock_irq(&phba->hbalock); 2131 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2132 phba->link_flag &= ~LS_IGNORE_ERATT; 2133 spin_unlock_irq(&phba->hbalock); 2134 2135 psli->mbox_active = NULL; 2136 lpfc_hba_down_post(phba); 2137 phba->link_state = LPFC_HBA_ERROR; 2138 2139 return ha_copy & HA_ERATT ? 0 : 1; 2140 } 2141 2142 int 2143 lpfc_sli_brdreset(struct lpfc_hba *phba) 2144 { 2145 struct lpfc_sli *psli; 2146 struct lpfc_sli_ring *pring; 2147 uint16_t cfg_value; 2148 int i; 2149 2150 psli = &phba->sli; 2151 2152 /* Reset HBA */ 2153 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2154 "0325 Reset HBA Data: x%x x%x\n", 2155 phba->pport->port_state, psli->sli_flag); 2156 2157 /* perform board reset */ 2158 phba->fc_eventTag = 0; 2159 phba->pport->fc_myDID = 0; 2160 phba->pport->fc_prevDID = 0; 2161 2162 /* Turn off parity checking and serr during the physical reset */ 2163 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 2164 pci_write_config_word(phba->pcidev, PCI_COMMAND, 2165 (cfg_value & 2166 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 2167 2168 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 2169 /* Now toggle INITFF bit in the Host Control Register */ 2170 writel(HC_INITFF, phba->HCregaddr); 2171 mdelay(1); 2172 readl(phba->HCregaddr); /* flush */ 2173 writel(0, phba->HCregaddr); 2174 readl(phba->HCregaddr); /* flush */ 2175 2176 /* Restore PCI cmd register */ 2177 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 2178 2179 /* Initialize relevant SLI info */ 2180 for (i = 0; i < psli->num_rings; i++) { 2181 pring = &psli->ring[i]; 2182 pring->flag = 0; 2183 pring->rspidx = 0; 2184 pring->next_cmdidx = 0; 2185 pring->local_getidx = 0; 2186 pring->cmdidx = 0; 2187 pring->missbufcnt = 0; 2188 } 2189 2190 phba->link_state = LPFC_WARM_START; 2191 return 0; 2192 } 2193 2194 int 2195 lpfc_sli_brdrestart(struct lpfc_hba *phba) 2196 { 2197 MAILBOX_t *mb; 2198 struct lpfc_sli *psli; 2199 uint16_t skip_post; 2200 volatile uint32_t word0; 2201 void __iomem *to_slim; 2202 2203 spin_lock_irq(&phba->hbalock); 2204 2205 psli = &phba->sli; 2206 2207 /* Restart HBA */ 2208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2209 "0337 Restart HBA Data: x%x x%x\n", 2210 phba->pport->port_state, psli->sli_flag); 2211 2212 word0 = 0; 2213 mb = (MAILBOX_t *) &word0; 2214 mb->mbxCommand = MBX_RESTART; 2215 mb->mbxHc = 1; 2216 2217 lpfc_reset_barrier(phba); 2218 2219 to_slim = phba->MBslimaddr; 2220 writel(*(uint32_t *) mb, to_slim); 2221 readl(to_slim); /* flush */ 2222 2223 /* Only skip post after fc_ffinit is completed */ 2224 if (phba->pport->port_state) { 2225 skip_post = 1; 2226 word0 = 1; /* This is really setting up word1 */ 2227 } else { 2228 skip_post = 0; 2229 word0 = 0; /* This is really setting up word1 */ 2230 } 2231 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2232 writel(*(uint32_t *) mb, to_slim); 2233 readl(to_slim); /* flush */ 2234 2235 lpfc_sli_brdreset(phba); 2236 phba->pport->stopped = 0; 2237 phba->link_state = LPFC_INIT_START; 2238 2239 spin_unlock_irq(&phba->hbalock); 2240 2241 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2242 psli->stats_start = get_seconds(); 2243 2244 if (skip_post) 2245 mdelay(100); 2246 else 2247 
		mdelay(2000);
2248 
2249 	lpfc_hba_down_post(phba);
2250 
2251 	return 0;
2252 }
2253 
2254 static int
2255 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2256 {
2257 	uint32_t status, i = 0;
2258 
2259 	/* Read the HBA Host Status Register */
2260 	status = readl(phba->HSregaddr);
2261 
2262 	/* Check status register to see what current state is */
2263 	i = 0;
2264 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2265 
2266 		/* Check every 10ms for 5 retries, then every 500ms for 5, then
2267 		 * every 2.5 sec for 5, then reset board and every 2.5 sec for
2268 		 * 4.
2269 		 */
2270 		if (i++ >= 20) {
2271 			/* Adapter failed to init, timeout, status reg
2272 			   <status> */
2273 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2274 					"0436 Adapter failed to init, "
2275 					"timeout, status reg x%x, "
2276 					"FW Data: A8 x%x AC x%x\n", status,
2277 					readl(phba->MBslimaddr + 0xa8),
2278 					readl(phba->MBslimaddr + 0xac));
2279 			phba->link_state = LPFC_HBA_ERROR;
2280 			return -ETIMEDOUT;
2281 		}
2282 
2283 		/* Check to see if any errors occurred during init */
2284 		if (status & HS_FFERM) {
2285 			/* ERROR: During chipset initialization */
2286 			/* Adapter failed to init, chipset, status reg
2287 			   <status> */
2288 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2289 					"0437 Adapter failed to init, "
2290 					"chipset, status reg x%x, "
2291 					"FW Data: A8 x%x AC x%x\n", status,
2292 					readl(phba->MBslimaddr + 0xa8),
2293 					readl(phba->MBslimaddr + 0xac));
2294 			phba->link_state = LPFC_HBA_ERROR;
2295 			return -EIO;
2296 		}
2297 
2298 		if (i <= 5) {
2299 			msleep(10);
2300 		} else if (i <= 10) {
2301 			msleep(500);
2302 		} else {
2303 			msleep(2500);
2304 		}
2305 
2306 		if (i == 15) {
2307 			/* Do post */
2308 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2309 			lpfc_sli_brdrestart(phba);
2310 		}
2311 		/* Read the HBA Host Status Register */
2312 		status = readl(phba->HSregaddr);
2313 	}
2314 
2315 	/* Check to see if any errors occurred during init */
2316 	if (status & HS_FFERM) {
2317 		/* ERROR: During chipset initialization */
2318 		/* Adapter failed to init, chipset, status reg <status> */
2319 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2320 				"0438 Adapter failed to init, chipset, "
2321 				"status reg x%x, "
2322 				"FW Data: A8 x%x AC x%x\n", status,
2323 				readl(phba->MBslimaddr + 0xa8),
2324 				readl(phba->MBslimaddr + 0xac));
2325 		phba->link_state = LPFC_HBA_ERROR;
2326 		return -EIO;
2327 	}
2328 
2329 	/* Clear all interrupt enable conditions */
2330 	writel(0, phba->HCregaddr);
2331 	readl(phba->HCregaddr); /* flush */
2332 
2333 	/* setup host attn register */
2334 	writel(0xffffffff, phba->HAregaddr);
2335 	readl(phba->HAregaddr); /* flush */
2336 	return 0;
2337 }
2338 
2339 int
2340 lpfc_sli_hbq_count(void)
2341 {
2342 	return ARRAY_SIZE(lpfc_hbq_defs);
2343 }
2344 
2345 static int
2346 lpfc_sli_hbq_entry_count(void)
2347 {
2348 	int hbq_count = lpfc_sli_hbq_count();
2349 	int count = 0;
2350 	int i;
2351 
2352 	for (i = 0; i < hbq_count; ++i)
2353 		count += lpfc_hbq_defs[i]->entry_count;
2354 	return count;
2355 }
2356 
2357 int
2358 lpfc_sli_hbq_size(void)
2359 {
2360 	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2361 }
2362 
2363 static int
2364 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2365 {
2366 	int hbq_count = lpfc_sli_hbq_count();
2367 	LPFC_MBOXQ_t *pmb;
2368 	MAILBOX_t *pmbox;
2369 	uint32_t hbqno;
2370 	uint32_t hbq_entry_index;
2371 
2372 	/* Get a Mailbox buffer to setup mailbox
2373 	 * commands for HBA initialization
2374 	 */
2375 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2376 
2377 	if (!pmb)
2378 		return -ENOMEM;
2379 
2380 	pmbox = &pmb->mb;
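
	/*
	 * Each mailbox command built by lpfc_config_hbq() below sets up one
	 * host buffer queue; hbq_entry_index tracks where the next queue's
	 * entries begin within the shared HBQ entry area.
	 */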
2381 
2382 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
2383 	phba->link_state = LPFC_INIT_MBX_CMDS;
2384 	phba->hbq_in_use = 1;
2385 
2386 	hbq_entry_index = 0;
2387 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2388 		phba->hbqs[hbqno].next_hbqPutIdx = 0;
2389 		phba->hbqs[hbqno].hbqPutIdx = 0;
2390 		phba->hbqs[hbqno].local_hbqGetIdx = 0;
2391 		phba->hbqs[hbqno].entry_count =
2392 			lpfc_hbq_defs[hbqno]->entry_count;
2393 		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2394 				hbq_entry_index, pmb);
2395 		hbq_entry_index += phba->hbqs[hbqno].entry_count;
2396 
2397 		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2398 			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2399 			   mbxStatus <status>, ring <num> */
2400 
2401 			lpfc_printf_log(phba, KERN_ERR,
2402 					LOG_SLI | LOG_VPORT,
2403 					"1805 Adapter failed to init. "
2404 					"Data: x%x x%x x%x\n",
2405 					pmbox->mbxCommand,
2406 					pmbox->mbxStatus, hbqno);
2407 
2408 			phba->link_state = LPFC_HBA_ERROR;
2409 			mempool_free(pmb, phba->mbox_mem_pool);
2410 			return -ENXIO;
2411 		}
2412 	}
2413 	phba->hbq_count = hbq_count;
2414 
2415 	mempool_free(pmb, phba->mbox_mem_pool);
2416 
2417 	/* Initially populate or replenish the HBQs */
2418 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2419 		if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2420 			return -ENOMEM;
2421 	}
2422 	return 0;
2423 }
2424 
2425 static int
2426 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2427 {
2428 	LPFC_MBOXQ_t *pmb;
2429 	uint32_t resetcount = 0, rc = 0, done = 0;
2430 
2431 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2432 	if (!pmb) {
2433 		phba->link_state = LPFC_HBA_ERROR;
2434 		return -ENOMEM;
2435 	}
2436 
2437 	phba->sli_rev = sli_mode;
2438 	while (resetcount < 2 && !done) {
2439 		spin_lock_irq(&phba->hbalock);
2440 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2441 		spin_unlock_irq(&phba->hbalock);
2442 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2443 		lpfc_sli_brdrestart(phba);
2444 		msleep(2500);
2445 		rc = lpfc_sli_chipset_init(phba);
2446 		if (rc)
2447 			break;
2448 
2449 		spin_lock_irq(&phba->hbalock);
2450 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2451 		spin_unlock_irq(&phba->hbalock);
2452 		resetcount++;
2453 
2454 		/* Call pre CONFIG_PORT mailbox command initialization. A
2455 		 * value of 0 means the call was successful. Any other
2456 		 * nonzero value is a failure, but if ERESTART is returned,
2457 		 * the driver may reset the HBA and try again.
2458 		 */
2459 		rc = lpfc_config_port_prep(phba);
2460 		if (rc == -ERESTART) {
2461 			phba->link_state = LPFC_LINK_UNKNOWN;
2462 			continue;
2463 		} else if (rc) {
2464 			break;
2465 		}
2466 
2467 		phba->link_state = LPFC_INIT_MBX_CMDS;
2468 		lpfc_config_port(phba, pmb);
2469 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2470 		if (rc != MBX_SUCCESS) {
2471 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2472 				"0442 Adapter failed to init, mbxCmd x%x "
2473 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2474 				pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
2475 			spin_lock_irq(&phba->hbalock);
2476 			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2477 			spin_unlock_irq(&phba->hbalock);
2478 			rc = -ENXIO;
2479 		} else {
2480 			done = 1;
2481 			phba->max_vpi = (phba->max_vpi &&
2482 					 pmb->mb.un.varCfgPort.gmv) != 0
2483 					?
					pmb->mb.un.varCfgPort.max_vpi
2484 					: 0;
2485 		}
2486 	}
2487 
2488 	if (!done) {
2489 		rc = -EINVAL;
2490 		goto do_prep_failed;
2491 	}
2492 
2493 	if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2494 	    (!pmb->mb.un.varCfgPort.cMA)) {
2495 		rc = -ENXIO;
2496 	}
2497 
2498 do_prep_failed:
2499 	mempool_free(pmb, phba->mbox_mem_pool);
2500 	return rc;
2501 }
2502 
2503 int
2504 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2505 {
2506 	uint32_t rc;
2507 	int mode = 3;
2508 
2509 	switch (lpfc_sli_mode) {
2510 	case 2:
2511 		if (phba->cfg_enable_npiv) {
2512 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2513 				"1824 NPIV enabled: Override lpfc_sli_mode "
2514 				"parameter (%d) to auto (0).\n",
2515 				lpfc_sli_mode);
2516 			break;
2517 		}
2518 		mode = 2;
2519 		break;
2520 	case 0:
2521 	case 3:
2522 		break;
2523 	default:
2524 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2525 				"1819 Unrecognized lpfc_sli_mode "
2526 				"parameter: %d.\n", lpfc_sli_mode);
2527 
2528 		break;
2529 	}
2530 
2531 	rc = lpfc_do_config_port(phba, mode);
2532 	if (rc && lpfc_sli_mode == 3)
2533 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2534 				"1820 Unable to select SLI-3. "
2535 				"Not supported by adapter.\n");
2536 	if (rc && mode != 2)
2537 		rc = lpfc_do_config_port(phba, 2);
2538 	if (rc)
2539 		goto lpfc_sli_hba_setup_error;
2540 
2541 	if (phba->sli_rev == 3) {
2542 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2543 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2544 		phba->sli3_options |= LPFC_SLI3_ENABLED;
2545 		phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2546 
2547 	} else {
2548 		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2549 		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2550 		phba->sli3_options = 0;
2551 	}
2552 
2553 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2554 			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
2555 			phba->sli_rev, phba->max_vpi);
2556 	rc = lpfc_sli_ring_map(phba);
2557 
2558 	if (rc)
2559 		goto lpfc_sli_hba_setup_error;
2560 
2561 	/* Init HBQs */
2562 
2563 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2564 		rc = lpfc_sli_hbq_setup(phba);
2565 		if (rc)
2566 			goto lpfc_sli_hba_setup_error;
2567 	}
2568 
2569 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
2570 
2571 	rc = lpfc_config_port_post(phba);
2572 	if (rc)
2573 		goto lpfc_sli_hba_setup_error;
2574 
2575 	return rc;
2576 
2577 lpfc_sli_hba_setup_error:
2578 	phba->link_state = LPFC_HBA_ERROR;
2579 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2580 			"0445 Firmware initialization failed\n");
2581 	return rc;
2582 }
2583 
2584 /*! lpfc_mbox_timeout
2585  *
2586  * \pre
2587  * \post
2588  * \param ptr Pointer to the driver's struct lpfc_hba structure, cast
2589  *            to unsigned long by the timer code.
2590  * \return
2591  * void
2592  *
2593  * \b Description:
2594  *
2595  * This routine handles mailbox timeout events at timer interrupt context.
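 * Because it runs in timer context it only posts WORKER_MBOX_TMO to the
 * port's work_port_events and wakes the worker thread; the actual
 * recovery work is done later in lpfc_mbox_timeout_handler.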
2596  */
2597 void
2598 lpfc_mbox_timeout(unsigned long ptr)
2599 {
2600 	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2601 	unsigned long iflag;
2602 	uint32_t tmo_posted;
2603 
2604 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2605 	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2606 	if (!tmo_posted)
2607 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
2608 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2609 
2610 	if (!tmo_posted)
2611 		lpfc_worker_wake_up(phba);
2612 	return;
2613 }
2614 
2615 void
2616 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2617 {
2618 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2619 	MAILBOX_t *mb = &pmbox->mb;
2620 	struct lpfc_sli *psli = &phba->sli;
2621 	struct lpfc_sli_ring *pring;
2622 
2623 	if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2624 		return;
2625 	}
2626 
2627 	/* Mbox cmd <mbxCommand> timeout */
2628 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2629 			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2630 			mb->mbxCommand,
2631 			phba->pport->port_state,
2632 			phba->sli.sli_flag,
2633 			phba->sli.mbox_active);
2634 
2635 	/* Setting state unknown so lpfc_sli_abort_iocb_ring
2636 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2637 	 * it to fail all outstanding SCSI IO.
2638 	 */
2639 	spin_lock_irq(&phba->pport->work_port_lock);
2640 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2641 	spin_unlock_irq(&phba->pport->work_port_lock);
2642 	spin_lock_irq(&phba->hbalock);
2643 	phba->link_state = LPFC_LINK_UNKNOWN;
2644 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2645 	spin_unlock_irq(&phba->hbalock);
2646 
2647 	pring = &psli->ring[psli->fcp_ring];
2648 	lpfc_sli_abort_iocb_ring(phba, pring);
2649 
2650 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2651 			"0345 Resetting board due to mailbox timeout\n");
2652 	/*
2653 	 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2654 	 * outstanding mailbox commands.
2655 	 */
2656 	/* If resets are disabled then set error state and return. */
2657 	if (!phba->cfg_enable_hba_reset) {
2658 		phba->link_state = LPFC_HBA_ERROR;
2659 		return;
2660 	}
2661 	lpfc_offline_prep(phba);
2662 	lpfc_offline(phba);
2663 	lpfc_sli_brdrestart(phba);
2664 	lpfc_online(phba);
2665 	lpfc_unblock_mgmt_io(phba);
2666 	return;
2667 }
2668 
2669 int
2670 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2671 {
2672 	MAILBOX_t *mb;
2673 	struct lpfc_sli *psli = &phba->sli;
2674 	uint32_t status, evtctr;
2675 	uint32_t ha_copy;
2676 	int i;
2677 	unsigned long timeout;
2678 	unsigned long drvr_flag = 0;
2679 	volatile uint32_t word0, ldata;
2680 	void __iomem *to_slim;
2681 	int processing_queue = 0;
2682 
2683 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2684 	if (!pmbox) {
2685 		/* processing mbox queue from intr_handler */
2686 		processing_queue = 1;
2687 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2688 		pmbox = lpfc_mbox_get(phba);
2689 		if (!pmbox) {
2690 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2691 			return MBX_SUCCESS;
2692 		}
2693 	}
2694 
2695 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2696 	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2697 		if (!pmbox->vport) {
2698 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2699 			lpfc_printf_log(phba, KERN_ERR,
2700 					LOG_MBOX | LOG_VPORT,
2701 					"1806 Mbox x%x failed. No vport\n",
2702 					pmbox->mb.mbxCommand);
2703 			dump_stack();
2704 			goto out_not_finished;
2705 		}
2706 	}
2707 
2708 	/* If the PCI channel is in offline state, do not post mbox.
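	 * The channel goes offline when PCI error recovery has isolated
	 * the device, so touching the hardware would be pointless.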
*/ 2709 if (unlikely(pci_channel_offline(phba->pcidev))) { 2710 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2711 goto out_not_finished; 2712 } 2713 2714 psli = &phba->sli; 2715 2716 mb = &pmbox->mb; 2717 status = MBX_SUCCESS; 2718 2719 if (phba->link_state == LPFC_HBA_ERROR) { 2720 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2721 2722 /* Mbox command <mbxCommand> cannot issue */ 2723 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2724 goto out_not_finished; 2725 } 2726 2727 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2728 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2729 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2730 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2731 goto out_not_finished; 2732 } 2733 2734 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2735 /* Polling for a mbox command when another one is already active 2736 * is not allowed in SLI. Also, the driver must have established 2737 * SLI2 mode to queue and process multiple mbox commands. 2738 */ 2739 2740 if (flag & MBX_POLL) { 2741 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2742 2743 /* Mbox command <mbxCommand> cannot issue */ 2744 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2745 goto out_not_finished; 2746 } 2747 2748 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2749 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2750 /* Mbox command <mbxCommand> cannot issue */ 2751 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2752 goto out_not_finished; 2753 } 2754 2755 /* Another mailbox command is still being processed, queue this 2756 * command to be processed later. 2757 */ 2758 lpfc_mbox_put(phba, pmbox); 2759 2760 /* Mbox cmd issue - BUSY */ 2761 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2762 "(%d):0308 Mbox cmd issue - BUSY Data: " 2763 "x%x x%x x%x x%x\n", 2764 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 2765 mb->mbxCommand, phba->pport->port_state, 2766 psli->sli_flag, flag); 2767 2768 psli->slistat.mbox_busy++; 2769 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2770 2771 if (pmbox->vport) { 2772 lpfc_debugfs_disc_trc(pmbox->vport, 2773 LPFC_DISC_TRC_MBOX_VPORT, 2774 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 2775 (uint32_t)mb->mbxCommand, 2776 mb->un.varWords[0], mb->un.varWords[1]); 2777 } 2778 else { 2779 lpfc_debugfs_disc_trc(phba->pport, 2780 LPFC_DISC_TRC_MBOX, 2781 "MBOX Bsy: cmd:x%x mb:x%x x%x", 2782 (uint32_t)mb->mbxCommand, 2783 mb->un.varWords[0], mb->un.varWords[1]); 2784 } 2785 2786 return MBX_BUSY; 2787 } 2788 2789 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2790 2791 /* If we are not polling, we MUST be in SLI2 mode */ 2792 if (flag != MBX_POLL) { 2793 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2794 (mb->mbxCommand != MBX_KILL_BOARD)) { 2795 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2796 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2797 /* Mbox command <mbxCommand> cannot issue */ 2798 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2799 goto out_not_finished; 2800 } 2801 /* timeout active mbox command */ 2802 mod_timer(&psli->mbox_tmo, (jiffies + 2803 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 2804 } 2805 2806 /* Mailbox cmd <cmd> issue */ 2807 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2808 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 2809 "x%x\n", 2810 pmbox->vport ? 
pmbox->vport->vpi : 0, 2811 mb->mbxCommand, phba->pport->port_state, 2812 psli->sli_flag, flag); 2813 2814 if (mb->mbxCommand != MBX_HEARTBEAT) { 2815 if (pmbox->vport) { 2816 lpfc_debugfs_disc_trc(pmbox->vport, 2817 LPFC_DISC_TRC_MBOX_VPORT, 2818 "MBOX Send vport: cmd:x%x mb:x%x x%x", 2819 (uint32_t)mb->mbxCommand, 2820 mb->un.varWords[0], mb->un.varWords[1]); 2821 } 2822 else { 2823 lpfc_debugfs_disc_trc(phba->pport, 2824 LPFC_DISC_TRC_MBOX, 2825 "MBOX Send: cmd:x%x mb:x%x x%x", 2826 (uint32_t)mb->mbxCommand, 2827 mb->un.varWords[0], mb->un.varWords[1]); 2828 } 2829 } 2830 2831 psli->slistat.mbox_cmd++; 2832 evtctr = psli->slistat.mbox_event; 2833 2834 /* next set own bit for the adapter and copy over command word */ 2835 mb->mbxOwner = OWN_CHIP; 2836 2837 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2838 /* First copy command data to host SLIM area */ 2839 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 2840 } else { 2841 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2842 /* copy command data into host mbox for cmpl */ 2843 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2844 MAILBOX_CMD_SIZE); 2845 } 2846 2847 /* First copy mbox command data to HBA SLIM, skip past first 2848 word */ 2849 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2850 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 2851 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 2852 2853 /* Next copy over first word, with mbxOwner set */ 2854 ldata = *((volatile uint32_t *)mb); 2855 to_slim = phba->MBslimaddr; 2856 writel(ldata, to_slim); 2857 readl(to_slim); /* flush */ 2858 2859 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2860 /* switch over to host mailbox */ 2861 psli->sli_flag |= LPFC_SLI2_ACTIVE; 2862 } 2863 } 2864 2865 wmb(); 2866 2867 switch (flag) { 2868 case MBX_NOWAIT: 2869 /* Set up reference to mailbox command */ 2870 psli->mbox_active = pmbox; 2871 /* Interrupt board to do it */ 2872 writel(CA_MBATT, phba->CAregaddr); 2873 readl(phba->CAregaddr); /* flush */ 2874 /* Don't wait for it to finish, just return */ 2875 break; 2876 2877 case MBX_POLL: 2878 /* Set up null reference to mailbox command */ 2879 psli->mbox_active = NULL; 2880 /* Interrupt board to do it */ 2881 writel(CA_MBATT, phba->CAregaddr); 2882 readl(phba->CAregaddr); /* flush */ 2883 2884 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2885 /* First read mbox status word */ 2886 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2887 word0 = le32_to_cpu(word0); 2888 } else { 2889 /* First read mbox status word */ 2890 word0 = readl(phba->MBslimaddr); 2891 } 2892 2893 /* Read the HBA Host Attention Register */ 2894 ha_copy = readl(phba->HAregaddr); 2895 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2896 mb->mbxCommand) * 2897 1000) + jiffies; 2898 i = 0; 2899 /* Wait for command to complete */ 2900 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2901 (!(ha_copy & HA_MBATT) && 2902 (phba->link_state > LPFC_WARM_START))) { 2903 if (time_after(jiffies, timeout)) { 2904 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2905 spin_unlock_irqrestore(&phba->hbalock, 2906 drvr_flag); 2907 goto out_not_finished; 2908 } 2909 2910 /* Check if we took a mbox interrupt while we were 2911 polling */ 2912 if (((word0 & OWN_CHIP) != OWN_CHIP) 2913 && (evtctr != psli->slistat.mbox_event)) 2914 break; 2915 2916 if (i++ > 10) { 2917 spin_unlock_irqrestore(&phba->hbalock, 2918 drvr_flag); 2919 msleep(1); 2920 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2921 } 2922 2923 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2924 /* First copy command data */ 2925 word0 = *((volatile uint32_t *) 2926 
						  &phba->slim2p->mbx);
2927 				word0 = le32_to_cpu(word0);
2928 				if (mb->mbxCommand == MBX_CONFIG_PORT) {
2929 					MAILBOX_t *slimmb;
2930 					volatile uint32_t slimword0;
2931 					/* Check real SLIM for any errors */
2932 					slimword0 = readl(phba->MBslimaddr);
2933 					slimmb = (MAILBOX_t *) &slimword0;
2934 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2935 					    && slimmb->mbxStatus) {
2936 						psli->sli_flag &=
2937 							~LPFC_SLI2_ACTIVE;
2938 						word0 = slimword0;
2939 					}
2940 				}
2941 			} else {
2942 				/* First copy command data */
2943 				word0 = readl(phba->MBslimaddr);
2944 			}
2945 			/* Read the HBA Host Attention Register */
2946 			ha_copy = readl(phba->HAregaddr);
2947 		}
2948 
2949 		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2950 			/* copy results back to user */
2951 			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2952 					      MAILBOX_CMD_SIZE);
2953 		} else {
2954 			/* First copy command data */
2955 			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2956 					      MAILBOX_CMD_SIZE);
2957 			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2958 			    pmbox->context2) {
2959 				lpfc_memcpy_from_slim((void *)pmbox->context2,
2960 					phba->MBslimaddr + DMP_RSP_OFFSET,
2961 					mb->un.varDmp.word_cnt);
2962 			}
2963 		}
2964 
2965 		writel(HA_MBATT, phba->HAregaddr);
2966 		readl(phba->HAregaddr); /* flush */
2967 
2968 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2969 		status = mb->mbxStatus;
2970 	}
2971 
2972 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2973 	return status;
2974 
2975 out_not_finished:
2976 	if (processing_queue) {
2977 		pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
2978 		lpfc_mbox_cmpl_put(phba, pmbox);
2979 	}
2980 	return MBX_NOT_FINISHED;
2981 }
2982 
2983 /*
2984  * Caller needs to hold lock.
2985  */
2986 static void
2987 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2988 		      struct lpfc_iocbq *piocb)
2989 {
2990 	/* Insert the caller's iocb in the txq tail for later processing. */
2991 	list_add_tail(&piocb->list, &pring->txq);
2992 	pring->txq_cnt++;
2993 }
2994 
2995 static struct lpfc_iocbq *
2996 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2997 		   struct lpfc_iocbq **piocb)
2998 {
2999 	struct lpfc_iocbq *nextiocb;
3000 
3001 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
3002 	if (!nextiocb) {
3003 		nextiocb = *piocb;
3004 		*piocb = NULL;
3005 	}
3006 
3007 	return nextiocb;
3008 }
3009 
3010 /*
3011  * Lockless version of lpfc_sli_issue_iocb.
3012  */
3013 static int
3014 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3015 		      struct lpfc_iocbq *piocb, uint32_t flag)
3016 {
3017 	struct lpfc_iocbq *nextiocb;
3018 	IOCB_t *iocb;
3019 
3020 	if (piocb->iocb_cmpl && (!piocb->vport) &&
3021 	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
3022 	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
3023 		lpfc_printf_log(phba, KERN_ERR,
3024 				LOG_SLI | LOG_VPORT,
3025 				"1807 IOCB x%x failed. No vport\n",
3026 				piocb->iocb.ulpCommand);
3027 		dump_stack();
3028 		return IOCB_ERROR;
3029 	}
3030 
3031 
3032 	/* If the PCI channel is in offline state, do not post iocbs. */
3033 	if (unlikely(pci_channel_offline(phba->pcidev)))
3034 		return IOCB_ERROR;
3035 
3036 	/*
3037 	 * We should never get an IOCB if we are in a < LINK_DOWN state
3038 	 */
3039 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3040 		return IOCB_ERROR;
3041 
3042 	/*
3043 	 * Check to see if we are blocking IOCB processing because of an
3044 	 * outstanding event.
3045 	 */
3046 	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
3047 		goto iocb_busy;
3048 
3049 	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
3050 		/*
3051 		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
3052 		 * can be issued if the link is not up.
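		 * Everything else is rejected here and, unless the caller
		 * passed SLI_IOCB_RET_IOCB, parked on the txq until the
		 * link comes back.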
3053 */ 3054 switch (piocb->iocb.ulpCommand) { 3055 case CMD_QUE_RING_BUF_CN: 3056 case CMD_QUE_RING_BUF64_CN: 3057 /* 3058 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 3059 * completion, iocb_cmpl MUST be 0. 3060 */ 3061 if (piocb->iocb_cmpl) 3062 piocb->iocb_cmpl = NULL; 3063 /*FALLTHROUGH*/ 3064 case CMD_CREATE_XRI_CR: 3065 case CMD_CLOSE_XRI_CN: 3066 case CMD_CLOSE_XRI_CX: 3067 break; 3068 default: 3069 goto iocb_busy; 3070 } 3071 3072 /* 3073 * For FCP commands, we must be in a state where we can process link 3074 * attention events. 3075 */ 3076 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 3077 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 3078 goto iocb_busy; 3079 } 3080 3081 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 3082 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 3083 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 3084 3085 if (iocb) 3086 lpfc_sli_update_ring(phba, pring); 3087 else 3088 lpfc_sli_update_full_ring(phba, pring); 3089 3090 if (!piocb) 3091 return IOCB_SUCCESS; 3092 3093 goto out_busy; 3094 3095 iocb_busy: 3096 pring->stats.iocb_cmd_delay++; 3097 3098 out_busy: 3099 3100 if (!(flag & SLI_IOCB_RET_IOCB)) { 3101 __lpfc_sli_ringtx_put(phba, pring, piocb); 3102 return IOCB_SUCCESS; 3103 } 3104 3105 return IOCB_BUSY; 3106 } 3107 3108 3109 int 3110 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3111 struct lpfc_iocbq *piocb, uint32_t flag) 3112 { 3113 unsigned long iflags; 3114 int rc; 3115 3116 spin_lock_irqsave(&phba->hbalock, iflags); 3117 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 3118 spin_unlock_irqrestore(&phba->hbalock, iflags); 3119 3120 return rc; 3121 } 3122 3123 static int 3124 lpfc_extra_ring_setup( struct lpfc_hba *phba) 3125 { 3126 struct lpfc_sli *psli; 3127 struct lpfc_sli_ring *pring; 3128 3129 psli = &phba->sli; 3130 3131 /* Adjust cmd/rsp ring iocb entries more evenly */ 3132 3133 /* Take some away from the FCP ring */ 3134 pring = &psli->ring[psli->fcp_ring]; 3135 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3136 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3137 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3138 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3139 3140 /* and give them to the extra ring */ 3141 pring = &psli->ring[psli->extra_ring]; 3142 3143 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3144 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3145 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3146 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3147 3148 /* Setup default profile for this ring */ 3149 pring->iotag_max = 4096; 3150 pring->num_mask = 1; 3151 pring->prt[0].profile = 0; /* Mask 0 */ 3152 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 3153 pring->prt[0].type = phba->cfg_multi_ring_type; 3154 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 3155 return 0; 3156 } 3157 3158 static void 3159 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 3160 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 3161 { 3162 IOCB_t *icmd; 3163 uint16_t evt_code; 3164 uint16_t temp; 3165 struct temp_event temp_event_data; 3166 struct Scsi_Host *shost; 3167 3168 icmd = &iocbq->iocb; 3169 evt_code = icmd->un.asyncstat.evt_code; 3170 temp = icmd->ulpContext; 3171 3172 if ((evt_code != ASYNC_TEMP_WARN) && 3173 (evt_code != ASYNC_TEMP_SAFE)) { 3174 lpfc_printf_log(phba, 3175 KERN_ERR, 3176 LOG_SLI, 3177 "0346 Ring %d handler: unexpected ASYNC_STATUS" 3178 " evt_code 0x%x\n", 3179 pring->ringno, 3180 icmd->un.asyncstat.evt_code); 3181 return; 
3182 } 3183 temp_event_data.data = (uint32_t)temp; 3184 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 3185 if (evt_code == ASYNC_TEMP_WARN) { 3186 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 3187 lpfc_printf_log(phba, 3188 KERN_ERR, 3189 LOG_TEMP, 3190 "0347 Adapter is very hot, please take " 3191 "corrective action. temperature : %d Celsius\n", 3192 temp); 3193 } 3194 if (evt_code == ASYNC_TEMP_SAFE) { 3195 temp_event_data.event_code = LPFC_NORMAL_TEMP; 3196 lpfc_printf_log(phba, 3197 KERN_ERR, 3198 LOG_TEMP, 3199 "0340 Adapter temperature is OK now. " 3200 "temperature : %d Celsius\n", 3201 temp); 3202 } 3203 3204 /* Send temperature change event to applications */ 3205 shost = lpfc_shost_from_vport(phba->pport); 3206 fc_host_post_vendor_event(shost, fc_get_event_number(), 3207 sizeof(temp_event_data), (char *) &temp_event_data, 3208 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 3209 3210 } 3211 3212 3213 int 3214 lpfc_sli_setup(struct lpfc_hba *phba) 3215 { 3216 int i, totiocbsize = 0; 3217 struct lpfc_sli *psli = &phba->sli; 3218 struct lpfc_sli_ring *pring; 3219 3220 psli->num_rings = MAX_CONFIGURED_RINGS; 3221 psli->sli_flag = 0; 3222 psli->fcp_ring = LPFC_FCP_RING; 3223 psli->next_ring = LPFC_FCP_NEXT_RING; 3224 psli->extra_ring = LPFC_EXTRA_RING; 3225 3226 psli->iocbq_lookup = NULL; 3227 psli->iocbq_lookup_len = 0; 3228 psli->last_iotag = 0; 3229 3230 for (i = 0; i < psli->num_rings; i++) { 3231 pring = &psli->ring[i]; 3232 switch (i) { 3233 case LPFC_FCP_RING: /* ring 0 - FCP */ 3234 /* numCiocb and numRiocb are used in config_port */ 3235 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 3236 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 3237 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3238 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3239 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3240 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3241 pring->sizeCiocb = (phba->sli_rev == 3) ? 3242 SLI3_IOCB_CMD_SIZE : 3243 SLI2_IOCB_CMD_SIZE; 3244 pring->sizeRiocb = (phba->sli_rev == 3) ? 3245 SLI3_IOCB_RSP_SIZE : 3246 SLI2_IOCB_RSP_SIZE; 3247 pring->iotag_ctr = 0; 3248 pring->iotag_max = 3249 (phba->cfg_hba_queue_depth * 2); 3250 pring->fast_iotag = pring->iotag_max; 3251 pring->num_mask = 0; 3252 break; 3253 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 3254 /* numCiocb and numRiocb are used in config_port */ 3255 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 3256 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 3257 pring->sizeCiocb = (phba->sli_rev == 3) ? 3258 SLI3_IOCB_CMD_SIZE : 3259 SLI2_IOCB_CMD_SIZE; 3260 pring->sizeRiocb = (phba->sli_rev == 3) ? 3261 SLI3_IOCB_RSP_SIZE : 3262 SLI2_IOCB_RSP_SIZE; 3263 pring->iotag_max = phba->cfg_hba_queue_depth; 3264 pring->num_mask = 0; 3265 break; 3266 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 3267 /* numCiocb and numRiocb are used in config_port */ 3268 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 3269 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 3270 pring->sizeCiocb = (phba->sli_rev == 3) ? 3271 SLI3_IOCB_CMD_SIZE : 3272 SLI2_IOCB_CMD_SIZE; 3273 pring->sizeRiocb = (phba->sli_rev == 3) ? 
3274 SLI3_IOCB_RSP_SIZE : 3275 SLI2_IOCB_RSP_SIZE; 3276 pring->fast_iotag = 0; 3277 pring->iotag_ctr = 0; 3278 pring->iotag_max = 4096; 3279 pring->lpfc_sli_rcv_async_status = 3280 lpfc_sli_async_event_handler; 3281 pring->num_mask = 4; 3282 pring->prt[0].profile = 0; /* Mask 0 */ 3283 pring->prt[0].rctl = FC_ELS_REQ; 3284 pring->prt[0].type = FC_ELS_DATA; 3285 pring->prt[0].lpfc_sli_rcv_unsol_event = 3286 lpfc_els_unsol_event; 3287 pring->prt[1].profile = 0; /* Mask 1 */ 3288 pring->prt[1].rctl = FC_ELS_RSP; 3289 pring->prt[1].type = FC_ELS_DATA; 3290 pring->prt[1].lpfc_sli_rcv_unsol_event = 3291 lpfc_els_unsol_event; 3292 pring->prt[2].profile = 0; /* Mask 2 */ 3293 /* NameServer Inquiry */ 3294 pring->prt[2].rctl = FC_UNSOL_CTL; 3295 /* NameServer */ 3296 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 3297 pring->prt[2].lpfc_sli_rcv_unsol_event = 3298 lpfc_ct_unsol_event; 3299 pring->prt[3].profile = 0; /* Mask 3 */ 3300 /* NameServer response */ 3301 pring->prt[3].rctl = FC_SOL_CTL; 3302 /* NameServer */ 3303 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 3304 pring->prt[3].lpfc_sli_rcv_unsol_event = 3305 lpfc_ct_unsol_event; 3306 break; 3307 } 3308 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 3309 (pring->numRiocb * pring->sizeRiocb); 3310 } 3311 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 3312 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3313 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 3314 "SLI2 SLIM Data: x%x x%lx\n", 3315 phba->brd_no, totiocbsize, 3316 (unsigned long) MAX_SLIM_IOCB_SIZE); 3317 } 3318 if (phba->cfg_multi_ring_support == 2) 3319 lpfc_extra_ring_setup(phba); 3320 3321 return 0; 3322 } 3323 3324 int 3325 lpfc_sli_queue_setup(struct lpfc_hba *phba) 3326 { 3327 struct lpfc_sli *psli; 3328 struct lpfc_sli_ring *pring; 3329 int i; 3330 3331 psli = &phba->sli; 3332 spin_lock_irq(&phba->hbalock); 3333 INIT_LIST_HEAD(&psli->mboxq); 3334 INIT_LIST_HEAD(&psli->mboxq_cmpl); 3335 /* Initialize list headers for txq and txcmplq as double linked lists */ 3336 for (i = 0; i < psli->num_rings; i++) { 3337 pring = &psli->ring[i]; 3338 pring->ringno = i; 3339 pring->next_cmdidx = 0; 3340 pring->local_getidx = 0; 3341 pring->cmdidx = 0; 3342 INIT_LIST_HEAD(&pring->txq); 3343 INIT_LIST_HEAD(&pring->txcmplq); 3344 INIT_LIST_HEAD(&pring->iocb_continueq); 3345 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 3346 INIT_LIST_HEAD(&pring->postbufq); 3347 } 3348 spin_unlock_irq(&phba->hbalock); 3349 return 1; 3350 } 3351 3352 int 3353 lpfc_sli_host_down(struct lpfc_vport *vport) 3354 { 3355 LIST_HEAD(completions); 3356 struct lpfc_hba *phba = vport->phba; 3357 struct lpfc_sli *psli = &phba->sli; 3358 struct lpfc_sli_ring *pring; 3359 struct lpfc_iocbq *iocb, *next_iocb; 3360 int i; 3361 unsigned long flags = 0; 3362 uint16_t prev_pring_flag; 3363 3364 lpfc_cleanup_discovery_resources(vport); 3365 3366 spin_lock_irqsave(&phba->hbalock, flags); 3367 for (i = 0; i < psli->num_rings; i++) { 3368 pring = &psli->ring[i]; 3369 prev_pring_flag = pring->flag; 3370 /* Only slow rings */ 3371 if (pring->ringno == LPFC_ELS_RING) { 3372 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3373 /* Set the lpfc data pending flag */ 3374 set_bit(LPFC_DATA_READY, &phba->data_flags); 3375 } 3376 /* 3377 * Error everything on the txq since these iocbs have not been 3378 * given to the FW yet. 
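		 * They can be completed immediately with LOCAL_REJECT /
		 * IOERR_SLI_DOWN status and never need an ABTS on the wire.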
3379 */ 3380 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 3381 if (iocb->vport != vport) 3382 continue; 3383 list_move_tail(&iocb->list, &completions); 3384 pring->txq_cnt--; 3385 } 3386 3387 /* Next issue ABTS for everything on the txcmplq */ 3388 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 3389 list) { 3390 if (iocb->vport != vport) 3391 continue; 3392 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3393 } 3394 3395 pring->flag = prev_pring_flag; 3396 } 3397 3398 spin_unlock_irqrestore(&phba->hbalock, flags); 3399 3400 while (!list_empty(&completions)) { 3401 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 3402 3403 if (!iocb->iocb_cmpl) 3404 lpfc_sli_release_iocbq(phba, iocb); 3405 else { 3406 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3407 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN; 3408 (iocb->iocb_cmpl) (phba, iocb, iocb); 3409 } 3410 } 3411 return 1; 3412 } 3413 3414 int 3415 lpfc_sli_hba_down(struct lpfc_hba *phba) 3416 { 3417 LIST_HEAD(completions); 3418 struct lpfc_sli *psli = &phba->sli; 3419 struct lpfc_sli_ring *pring; 3420 struct lpfc_dmabuf *buf_ptr; 3421 LPFC_MBOXQ_t *pmb; 3422 struct lpfc_iocbq *iocb; 3423 IOCB_t *cmd = NULL; 3424 int i; 3425 unsigned long flags = 0; 3426 3427 lpfc_hba_down_prep(phba); 3428 3429 lpfc_fabric_abort_hba(phba); 3430 3431 spin_lock_irqsave(&phba->hbalock, flags); 3432 for (i = 0; i < psli->num_rings; i++) { 3433 pring = &psli->ring[i]; 3434 /* Only slow rings */ 3435 if (pring->ringno == LPFC_ELS_RING) { 3436 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3437 /* Set the lpfc data pending flag */ 3438 set_bit(LPFC_DATA_READY, &phba->data_flags); 3439 } 3440 3441 /* 3442 * Error everything on the txq since these iocbs have not been 3443 * given to the FW yet. 3444 */ 3445 list_splice_init(&pring->txq, &completions); 3446 pring->txq_cnt = 0; 3447 3448 } 3449 spin_unlock_irqrestore(&phba->hbalock, flags); 3450 3451 while (!list_empty(&completions)) { 3452 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 3453 cmd = &iocb->iocb; 3454 3455 if (!iocb->iocb_cmpl) 3456 lpfc_sli_release_iocbq(phba, iocb); 3457 else { 3458 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3459 cmd->un.ulpWord[4] = IOERR_SLI_DOWN; 3460 (iocb->iocb_cmpl) (phba, iocb, iocb); 3461 } 3462 } 3463 3464 spin_lock_irqsave(&phba->hbalock, flags); 3465 list_splice_init(&phba->elsbuf, &completions); 3466 phba->elsbuf_cnt = 0; 3467 phba->elsbuf_prev_cnt = 0; 3468 spin_unlock_irqrestore(&phba->hbalock, flags); 3469 3470 while (!list_empty(&completions)) { 3471 list_remove_head(&completions, buf_ptr, 3472 struct lpfc_dmabuf, list); 3473 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3474 kfree(buf_ptr); 3475 } 3476 3477 /* Return any active mbox cmds */ 3478 del_timer_sync(&psli->mbox_tmo); 3479 spin_lock_irqsave(&phba->hbalock, flags); 3480 3481 spin_lock(&phba->pport->work_port_lock); 3482 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 3483 spin_unlock(&phba->pport->work_port_lock); 3484 3485 /* Return any pending or completed mbox cmds */ 3486 list_splice_init(&phba->sli.mboxq, &completions); 3487 if (psli->mbox_active) { 3488 list_add_tail(&psli->mbox_active->list, &completions); 3489 psli->mbox_active = NULL; 3490 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3491 } 3492 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 3493 spin_unlock_irqrestore(&phba->hbalock, flags); 3494 3495 while (!list_empty(&completions)) { 3496 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 3497 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3498 if 
	    (pmb->mbox_cmpl)
3499 			pmb->mbox_cmpl(phba, pmb);
3500 	}
3501 	return 1;
3502 }
3503 
3504 void
3505 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3506 {
3507 	uint32_t *src = srcp;
3508 	uint32_t *dest = destp;
3509 	uint32_t ldata;
3510 	int i;
3511 
3512 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3513 		ldata = *src;
3514 		ldata = le32_to_cpu(ldata);
3515 		*dest = ldata;
3516 		src++;
3517 		dest++;
3518 	}
3519 }
3520 
3521 int
3522 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3523 			 struct lpfc_dmabuf *mp)
3524 {
3525 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3526 	   later */
3527 	spin_lock_irq(&phba->hbalock);
3528 	list_add_tail(&mp->list, &pring->postbufq);
3529 	pring->postbufq_cnt++;
3530 	spin_unlock_irq(&phba->hbalock);
3531 	return 0;
3532 }
3533 
3534 uint32_t
3535 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
3536 {
3537 	spin_lock_irq(&phba->hbalock);
3538 	phba->buffer_tag_count++;
3539 	/*
3540 	 * Always set the QUE_BUFTAG_BIT to distinguish these driver-assigned
3541 	 * tags from the ones assigned by the HBQ.
3542 	 */
3543 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
3544 	spin_unlock_irq(&phba->hbalock);
3545 	return phba->buffer_tag_count;
3546 }
3547 
3548 struct lpfc_dmabuf *
3549 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3550 			    uint32_t tag)
3551 {
3552 	struct lpfc_dmabuf *mp, *next_mp;
3553 	struct list_head *slp = &pring->postbufq;
3554 
3555 	/* Search postbufq, from the beginning, looking for a match on tag */
3556 	spin_lock_irq(&phba->hbalock);
3557 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3558 		if (mp->buffer_tag == tag) {
3559 			list_del_init(&mp->list);
3560 			pring->postbufq_cnt--;
3561 			spin_unlock_irq(&phba->hbalock);
3562 			return mp;
3563 		}
3564 	}
3565 
3566 	spin_unlock_irq(&phba->hbalock);
3567 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3568 			"0410 Cannot find virtual addr for buffer tag on "
3569 			"ring %d Data x%lx x%p x%p x%x\n",
3570 			pring->ringno, (unsigned long) tag,
3571 			slp->next, slp->prev, pring->postbufq_cnt);
3572 
3573 	return NULL;
3574 }
3575 
3576 struct lpfc_dmabuf *
3577 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3578 			 dma_addr_t phys)
3579 {
3580 	struct lpfc_dmabuf *mp, *next_mp;
3581 	struct list_head *slp = &pring->postbufq;
3582 
3583 	/* Search postbufq, from the beginning, looking for a match on phys */
3584 	spin_lock_irq(&phba->hbalock);
3585 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3586 		if (mp->phys == phys) {
3587 			list_del_init(&mp->list);
3588 			pring->postbufq_cnt--;
3589 			spin_unlock_irq(&phba->hbalock);
3590 			return mp;
3591 		}
3592 	}
3593 
3594 	spin_unlock_irq(&phba->hbalock);
3595 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3596 			"0410 Cannot find virtual addr for mapped buf on "
3597 			"ring %d Data x%llx x%p x%p x%x\n",
3598 			pring->ringno, (unsigned long long)phys,
3599 			slp->next, slp->prev, pring->postbufq_cnt);
3600 	return NULL;
3601 }
3602 
3603 static void
3604 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3605 			struct lpfc_iocbq *rspiocb)
3606 {
3607 	IOCB_t *irsp = &rspiocb->iocb;
3608 	uint16_t abort_iotag, abort_context;
3609 	struct lpfc_iocbq *abort_iocb;
3610 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3611 
3612 	abort_iocb = NULL;
3613 
3614 	if (irsp->ulpStatus) {
3615 		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3616 		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3617 
3618 		spin_lock_irq(&phba->hbalock);
3619 		if (abort_iotag != 0 && abort_iotag <=
phba->sli.last_iotag) 3620 abort_iocb = phba->sli.iocbq_lookup[abort_iotag]; 3621 3622 lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI, 3623 "0327 Cannot abort els iocb %p " 3624 "with tag %x context %x, abort status %x, " 3625 "abort code %x\n", 3626 abort_iocb, abort_iotag, abort_context, 3627 irsp->ulpStatus, irsp->un.ulpWord[4]); 3628 3629 /* 3630 * If the iocb is not found in Firmware queue the iocb 3631 * might have completed already. Do not free it again. 3632 */ 3633 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 3634 spin_unlock_irq(&phba->hbalock); 3635 lpfc_sli_release_iocbq(phba, cmdiocb); 3636 return; 3637 } 3638 /* 3639 * make sure we have the right iocbq before taking it 3640 * off the txcmplq and try to call completion routine. 3641 */ 3642 if (!abort_iocb || 3643 abort_iocb->iocb.ulpContext != abort_context || 3644 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 3645 spin_unlock_irq(&phba->hbalock); 3646 else { 3647 list_del_init(&abort_iocb->list); 3648 pring->txcmplq_cnt--; 3649 spin_unlock_irq(&phba->hbalock); 3650 3651 /* Firmware could still be in progress of DMAing 3652 * payload, so don't free data buffer till after 3653 * a hbeat. 3654 */ 3655 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 3656 3657 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3658 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3659 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 3660 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 3661 } 3662 } 3663 3664 lpfc_sli_release_iocbq(phba, cmdiocb); 3665 return; 3666 } 3667 3668 static void 3669 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3670 struct lpfc_iocbq *rspiocb) 3671 { 3672 IOCB_t *irsp = &rspiocb->iocb; 3673 3674 /* ELS cmd tag <ulpIoTag> completes */ 3675 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3676 "0133 Ignoring ELS cmd tag x%x completion Data: " 3677 "x%x x%x x%x\n", 3678 irsp->ulpIoTag, irsp->ulpStatus, 3679 irsp->un.ulpWord[4], irsp->ulpTimeout); 3680 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 3681 lpfc_ct_free_iocb(phba, cmdiocb); 3682 else 3683 lpfc_els_free_iocb(phba, cmdiocb); 3684 return; 3685 } 3686 3687 int 3688 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3689 struct lpfc_iocbq *cmdiocb) 3690 { 3691 struct lpfc_vport *vport = cmdiocb->vport; 3692 struct lpfc_iocbq *abtsiocbp; 3693 IOCB_t *icmd = NULL; 3694 IOCB_t *iabt = NULL; 3695 int retval = IOCB_ERROR; 3696 3697 /* 3698 * There are certain command types we don't want to abort. And we 3699 * don't want to abort commands that are already in the process of 3700 * being aborted. 3701 */ 3702 icmd = &cmdiocb->iocb; 3703 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 3704 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 3705 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 3706 return 0; 3707 3708 /* If we're unloading, don't abort iocb on the ELS ring, but change the 3709 * callback so that nothing happens when it finishes. 3710 */ 3711 if ((vport->load_flag & FC_UNLOADING) && 3712 (pring->ringno == LPFC_ELS_RING)) { 3713 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 3714 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 3715 else 3716 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 3717 goto abort_iotag_exit; 3718 } 3719 3720 /* issue ABTS for this IOCB based on iotag */ 3721 abtsiocbp = __lpfc_sli_get_iocbq(phba); 3722 if (abtsiocbp == NULL) 3723 return 0; 3724 3725 /* This signals the response to set the correct status 3726 * before calling the completion handler. 
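	 * Setting LPFC_DRIVER_ABORTED also keeps this routine from being
	 * entered a second time for the same command while the abort is
	 * still outstanding.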
3727 */ 3728 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 3729 3730 iabt = &abtsiocbp->iocb; 3731 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 3732 iabt->un.acxri.abortContextTag = icmd->ulpContext; 3733 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 3734 iabt->ulpLe = 1; 3735 iabt->ulpClass = icmd->ulpClass; 3736 3737 if (phba->link_state >= LPFC_LINK_UP) 3738 iabt->ulpCommand = CMD_ABORT_XRI_CN; 3739 else 3740 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 3741 3742 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3743 3744 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3745 "0339 Abort xri x%x, original iotag x%x, " 3746 "abort cmd iotag x%x\n", 3747 iabt->un.acxri.abortContextTag, 3748 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3749 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3750 3751 abort_iotag_exit: 3752 /* 3753 * Caller to this routine should check for IOCB_ERROR 3754 * and handle it properly. This routine no longer removes 3755 * iocb off txcmplq and call compl in case of IOCB_ERROR. 3756 */ 3757 return retval; 3758 } 3759 3760 static int 3761 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 3762 uint16_t tgt_id, uint64_t lun_id, 3763 lpfc_ctx_cmd ctx_cmd) 3764 { 3765 struct lpfc_scsi_buf *lpfc_cmd; 3766 int rc = 1; 3767 3768 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 3769 return rc; 3770 3771 if (iocbq->vport != vport) 3772 return rc; 3773 3774 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 3775 3776 if (lpfc_cmd->pCmd == NULL) 3777 return rc; 3778 3779 switch (ctx_cmd) { 3780 case LPFC_CTX_LUN: 3781 if ((lpfc_cmd->rdata->pnode) && 3782 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 3783 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 3784 rc = 0; 3785 break; 3786 case LPFC_CTX_TGT: 3787 if ((lpfc_cmd->rdata->pnode) && 3788 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 3789 rc = 0; 3790 break; 3791 case LPFC_CTX_HOST: 3792 rc = 0; 3793 break; 3794 default: 3795 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 3796 __func__, ctx_cmd); 3797 break; 3798 } 3799 3800 return rc; 3801 } 3802 3803 int 3804 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 3805 lpfc_ctx_cmd ctx_cmd) 3806 { 3807 struct lpfc_hba *phba = vport->phba; 3808 struct lpfc_iocbq *iocbq; 3809 int sum, i; 3810 3811 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 3812 iocbq = phba->sli.iocbq_lookup[i]; 3813 3814 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 3815 ctx_cmd) == 0) 3816 sum++; 3817 } 3818 3819 return sum; 3820 } 3821 3822 void 3823 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3824 struct lpfc_iocbq *rspiocb) 3825 { 3826 lpfc_sli_release_iocbq(phba, cmdiocb); 3827 return; 3828 } 3829 3830 int 3831 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 3832 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 3833 { 3834 struct lpfc_hba *phba = vport->phba; 3835 struct lpfc_iocbq *iocbq; 3836 struct lpfc_iocbq *abtsiocb; 3837 IOCB_t *cmd = NULL; 3838 int errcnt = 0, ret_val = 0; 3839 int i; 3840 3841 for (i = 1; i <= phba->sli.last_iotag; i++) { 3842 iocbq = phba->sli.iocbq_lookup[i]; 3843 3844 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 3845 abort_cmd) != 0) 3846 continue; 3847 3848 /* issue ABTS for this IOCB based on iotag */ 3849 abtsiocb = lpfc_sli_get_iocbq(phba); 3850 if (abtsiocb == NULL) { 3851 errcnt++; 3852 continue; 3853 } 3854 3855 cmd = &iocbq->iocb; 3856 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 
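		/* Mirror the context tag and iotag of the IOCB being aborted
		 * so the adapter can match the ABTS to the right exchange.
		 */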
3857 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3858 		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3859 		abtsiocb->iocb.ulpLe = 1;
3860 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
3861 		abtsiocb->vport = phba->pport;
3862 
3863 		if (lpfc_is_link_up(phba))
3864 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3865 		else
3866 			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3867 
3868 		/* Setup callback routine and issue the command. */
3869 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3870 		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3871 		if (ret_val == IOCB_ERROR) {
3872 			lpfc_sli_release_iocbq(phba, abtsiocb);
3873 			errcnt++;
3874 			continue;
3875 		}
3876 	}
3877 
3878 	return errcnt;
3879 }
3880 
3881 static void
3882 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3883 			struct lpfc_iocbq *cmdiocbq,
3884 			struct lpfc_iocbq *rspiocbq)
3885 {
3886 	wait_queue_head_t *pdone_q;
3887 	unsigned long iflags;
3888 
3889 	spin_lock_irqsave(&phba->hbalock, iflags);
3890 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3891 	if (cmdiocbq->context2 && rspiocbq)
3892 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3893 		       &rspiocbq->iocb, sizeof(IOCB_t));
3894 
3895 	pdone_q = cmdiocbq->context_un.wait_queue;
3896 	if (pdone_q)
3897 		wake_up(pdone_q);
3898 	spin_unlock_irqrestore(&phba->hbalock, iflags);
3899 	return;
3900 }
3901 
3902 /*
3903  * Issue the caller's iocb and wait for its completion, but no longer than the
3904  * caller's timeout. Note that LPFC_IO_WAKE is cleared from iocb_flag before
3905  * the lpfc_sli_issue_iocb call, since the wake routine sets it and by
3906  * definition this is a wait function.
3907  */
3908 
3909 int
3910 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3911 			 struct lpfc_sli_ring *pring,
3912 			 struct lpfc_iocbq *piocb,
3913 			 struct lpfc_iocbq *prspiocbq,
3914 			 uint32_t timeout)
3915 {
3916 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3917 	long timeleft, timeout_req = 0;
3918 	int retval = IOCB_SUCCESS;
3919 	uint32_t creg_val;
3920 
3921 	/*
3922 	 * If the caller has provided a response iocbq buffer, then context2
3923 	 * must be NULL or it is an error.
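	 * context2 is borrowed to carry the response back: on completion,
	 * lpfc_sli_wake_iocb_wait copies the response IOCB into the iocbq
	 * that context2 points to.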
3924 	 */
3925 	if (prspiocbq) {
3926 		if (piocb->context2)
3927 			return IOCB_ERROR;
3928 		piocb->context2 = prspiocbq;
3929 	}
3930 
3931 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3932 	piocb->context_un.wait_queue = &done_q;
3933 	piocb->iocb_flag &= ~LPFC_IO_WAKE;
3934 
3935 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3936 		creg_val = readl(phba->HCregaddr);
3937 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3938 		writel(creg_val, phba->HCregaddr);
3939 		readl(phba->HCregaddr); /* flush */
3940 	}
3941 
3942 	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3943 	if (retval == IOCB_SUCCESS) {
3944 		timeout_req = timeout * HZ;
3945 		timeleft = wait_event_timeout(done_q,
3946 					      piocb->iocb_flag & LPFC_IO_WAKE,
3947 					      timeout_req);
3948 
3949 		if (piocb->iocb_flag & LPFC_IO_WAKE) {
3950 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3951 					"0331 IOCB wake signaled\n");
3952 		} else if (timeleft == 0) {
3953 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3954 					"0338 IOCB wait timeout error - no "
3955 					"wake response Data x%x\n", timeout);
3956 			retval = IOCB_TIMEDOUT;
3957 		} else {
3958 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3959 					"0330 IOCB wake NOT set, "
3960 					"Data x%x x%lx\n",
3961 					timeout, timeleft);
3962 			retval = IOCB_TIMEDOUT;
3963 		}
3964 	} else {
3965 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3966 				":0332 IOCB wait issue failed, Data x%x\n",
3967 				retval);
3968 		retval = IOCB_ERROR;
3969 	}
3970 
3971 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3972 		creg_val = readl(phba->HCregaddr);
3973 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3974 		writel(creg_val, phba->HCregaddr);
3975 		readl(phba->HCregaddr); /* flush */
3976 	}
3977 
3978 	if (prspiocbq)
3979 		piocb->context2 = NULL;
3980 
3981 	piocb->context_un.wait_queue = NULL;
3982 	piocb->iocb_cmpl = NULL;
3983 	return retval;
3984 }
3985 
3986 int
3987 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3988 			 uint32_t timeout)
3989 {
3990 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3991 	int retval;
3992 	unsigned long flag;
3993 
3994 	/* The caller must leave context1 empty. */
3995 	if (pmboxq->context1)
3996 		return MBX_NOT_FINISHED;
3997 
3998 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
3999 	/* setup wake call as IOCB callback */
4000 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
4001 	/* setup context field to pass wait_queue pointer to wake function */
4002 	pmboxq->context1 = &done_q;
4003 
4004 	/* now issue the command */
4005 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4006 
4007 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
4008 		wait_event_interruptible_timeout(done_q,
4009 				pmboxq->mbox_flag & LPFC_MBX_WAKE,
4010 				timeout * HZ);
4011 
4012 		spin_lock_irqsave(&phba->hbalock, flag);
4013 		pmboxq->context1 = NULL;
4014 		/*
4015 		 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
4016 		 * otherwise do not free the resources.
4017 		 */
4018 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
4019 			retval = MBX_SUCCESS;
4020 		else {
4021 			retval = MBX_TIMEOUT;
4022 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4023 		}
4024 		spin_unlock_irqrestore(&phba->hbalock, flag);
4025 	}
4026 
4027 	return retval;
4028 }
4029 
4030 int
4031 lpfc_sli_flush_mbox_queue(struct lpfc_hba *phba)
4032 {
4033 	struct lpfc_vport *vport = phba->pport;
4034 	int i = 0;
4035 	uint32_t ha_copy;
4036 
4037 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
4038 		if (i++ > LPFC_MBOX_TMO * 1000)
4039 			return 1;
4040 
4041 		/*
4042 		 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
4043 		 * did finish. This way we won't get the misleading
4044 		 * "Stray Mailbox Interrupt" message.
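		 * HA_MBATT is sampled and cleared under hbalock, so a
		 * completion racing with this flush still resets the
		 * retry counter below.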
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;
	unsigned long flag;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1)
		return MBX_NOT_FINISHED;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as the mailbox completion callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
					pmboxq->mbox_flag & LPFC_MBX_WAKE,
					timeout * HZ);

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context1 = NULL;
		/*
		 * If the LPFC_MBX_WAKE flag is set the mailbox completed;
		 * otherwise we timed out, so do not free the resources here.
		 * The deferred completion handler will release them if the
		 * command finishes late.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
			retval = MBX_SUCCESS;
		else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}

	return retval;
}

int
lpfc_sli_flush_mbox_queue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	int i = 0;
	uint32_t ha_copy;

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
		if (i++ > LPFC_MBOX_TMO * 1000)
			return 1;

		/*
		 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
		 * did finish. This way we won't get the misleading
		 * "Stray Mailbox Interrupt" message.
		 */
		spin_lock_irq(&phba->hbalock);
		ha_copy = phba->work_ha;
		phba->work_ha &= ~HA_MBATT;
		spin_unlock_irq(&phba->hbalock);

		if (ha_copy & HA_MBATT)
			if (lpfc_sli_handle_mb_event(phba) == 0)
				i = 0;

		msleep(1);
	}

	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}
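/*
 * Illustrative caller sketch (hypothetical, not compiled): synchronous
 * mailbox use pairs an allocation from the mailbox mempool with
 * lpfc_sli_issue_mbox_wait. lpfc_read_rev stands in here for any mailbox
 * setup helper. On MBX_TIMEOUT the buffer is deliberately not freed,
 * because lpfc_sli_def_mbox_cmpl (installed by the wait routine above)
 * will release it if the command completes late.
 */
#if 0
static int example_mbox_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_read_rev(phba, pmboxq);	/* prepare a mailbox command */
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
#endif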
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* If the pci channel is offline, ignore all the interrupts. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Ask the HBA whether it is interrupting. If not, don't claim
	 * the interrupt. Also ignore all interrupts during initialization.
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read the host attention register to determine the interrupt
	 * source. Clear the attention sources, except Error Attention
	 * (to preserve status) and Link Attention.
	 */
	spin_lock(&phba->hbalock);
	ha_copy = readl(phba->HAregaddr);
	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		ha_copy &= ~HA_ERATT;
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	if (unlikely(!ha_copy))
		return IRQ_NONE;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA is done.
				 */
				spin_lock(&phba->hbalock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(&phba->hbalock);
			} else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
			/*
			 * Turn off Slow Ring interrupts; LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				  (HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock(&phba->hbalock);
				control = readl(phba->HCregaddr);

				lpfc_debugfs_slow_ring_trc(phba,
					"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
					control, status,
					(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				} else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock(&phba->hbalock);
			}
		}

		if (work_ha_copy & HA_ERATT) {
			/*
			 * There was a link/board error. Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear the chip error bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
			phba->pport->stopped = 1;
		}

		spin_lock(&phba->hbalock);
		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->mb;
			mbox = &phba->slim2p->mbx;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock(&phba->hbalock);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock(&phba->hbalock);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							      MAILBOX_CMD_SIZE);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* The Reg_LOGIN of the default
						 * RPI was successful. Now let's
						 * get rid of the RPI using the
						 * same mailbox buffer.
						 */
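						/* Reusing the just-completed
						 * mailbox for the UNREG_LOGIN
						 * avoids an allocation in
						 * interrupt context; context1
						 * and context2 are restored
						 * below so the default-RPI
						 * completion handler can
						 * release them.
						 */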
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0306 rc should have "
							"been MBX_BUSY\n");
						goto send_current_mbox;
					}
				}
				spin_lock(&phba->pport->work_port_lock);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock(&phba->pport->work_port_lock);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock(&phba->hbalock);
		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process the next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock(&phba->hbalock);
		phba->work_ha |= work_ha_copy;
		spin_unlock(&phba->hbalock);
		lpfc_worker_wake_up(phba);
	}

	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on the FCP ring. Take the optimized path for
	 * FCP IO. Any other IO is slow path and is handled by
	 * the worker thread.
	 */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on the extra ring. Take the optimized
		 * path for extra ring IO. Any other IO is slow path and is
		 * handled by the worker thread.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;

} /* lpfc_intr_handler */
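/*
 * Illustrative sketch (hypothetical, not compiled): the interrupt handler
 * above is registered against the adapter's PCI interrupt at attach time,
 * with the phba passed as the dev_id cookie that lpfc_intr_handler recovers
 * on entry. The actual registration lives elsewhere in the driver; this
 * only shows the shape of the call.
 */
#if 0
static int example_setup_intr(struct lpfc_hba *phba)
{
	return request_irq(phba->pcidev->irq, lpfc_intr_handler,
			   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}
#endif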