/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"

/*
 * Define a macro to log: Mailbox command x%x cannot issue Data.
 * This allows multiple uses of message 0311 without perturbing
 * the log message utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
        lpfc_printf_log(phba, \
                KERN_INFO, \
                LOG_MBOX | LOG_SLI, \
                "(%d):0311 Mailbox command x%x cannot " \
                "issue Data: x%x x%x x%x\n", \
                pmbox->vport ? pmbox->vport->vpi : 0, \
                pmbox->mb.mbxCommand, \
                phba->pport->port_state, \
                psli->sli_flag, \
                flag)


/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
 * to the start of the ring, and the slot number of the
 * desired iocb entry, calculate a pointer to that entry.
 */
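/*
 * Example (illustrative only, values assumed): with an iocb_cmd_size
 * of 32 bytes and cmdidx == 5, lpfc_cmd_iocb() below resolves to
 * cmdringaddr + 5 * 32, i.e. the sixth entry of the command ring.
 * lpfc_resp_iocb() applies the same arithmetic to the response side
 * using iocb_rsp_size and rspidx.
 */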
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->cmdringaddr) +
                           pring->cmdidx * phba->iocb_cmd_size);
}

static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->rspringaddr) +
                           pring->rspidx * phba->iocb_rsp_size);
}

static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        return iocbq;
}

struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}
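/*
 * Usage sketch (illustrative only, not driver code): a caller that
 * needs a scratch iocbq typically pairs the two exported helpers above:
 *
 *      struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *      if (!iocbq)
 *              return -ENOMEM;
 *      ... fill in iocbq->iocb and issue it ...
 *      lpfc_sli_release_iocbq(phba, iocbq);
 *
 * Note that release memsets everything from the embedded iocb field
 * onward, so only the fields that precede it (list head, iotag, etc.)
 * survive recycling.
 */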
/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk(KERN_ERR "%s - Unhandled SLI-3 Command x%x\n",
                       __FUNCTION__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}

static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}
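/*
 * Queue a command iocb to the txcmplq to wait for its completion.  For
 * ELS commands (other than aborts/closes) the vport's els_tmofunc timer
 * is (re)armed at twice fc_ratov so a lost response is eventually
 * noticed; an ELS command without a vport is a driver bug, hence the
 * BUG() below.  The txcmplq is manipulated without internal locking, so
 * callers are presumably expected to hold hbalock.
 */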
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        list_add_tail(&piocb->list, &pring->txcmplq);
        pring->txcmplq_cnt++;
        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                if (!piocb->vport)
                        BUG();
                else
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies + HZ * (phba->fc_ratov << 1));
        }

        return 0;
}

static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        if (cmd_iocb != NULL)
                pring->txq_cnt--;
        return cmd_iocb;
}

static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        uint32_t max_cmd_idx = pring->numCiocb;

        if ((pring->next_cmdidx == pring->cmdidx) &&
            (++pring->next_cmdidx >= max_cmd_idx))
                pring->next_cmdidx = 0;

        if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->local_getidx, max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * the worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        /* hbalock should already be held */
                        if (phba->work_wait)
                                lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->local_getidx == pring->next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}
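/*
 * Allocate a new, unique iotag for the given iocbq and install the
 * iocbq in the iocbq_lookup array so completions can find it again.
 * When the array is exhausted it is grown by
 * LPFC_IOCBQ_LOOKUP_INCREMENT entries: the old contents are copied
 * over under hbalock and the old array is freed after the lock is
 * dropped.  Returns the new iotag, or 0 if no iotag could be allocated.
 */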
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_iocbq **new_arr;
        struct lpfc_iocbq **old_arr;
        size_t new_len;
        struct lpfc_sli *psli = &phba->sli;
        uint16_t iotag;

        spin_lock_irq(&phba->hbalock);
        iotag = psli->last_iotag;
        if (++iotag < psli->iocbq_lookup_len) {
                psli->last_iotag = iotag;
                psli->iocbq_lookup[iotag] = iocbq;
                spin_unlock_irq(&phba->hbalock);
                iocbq->iotag = iotag;
                return iotag;
        } else if (psli->iocbq_lookup_len < (0xffff
                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
                spin_unlock_irq(&phba->hbalock);
                new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
                                  GFP_KERNEL);
                if (new_arr) {
                        spin_lock_irq(&phba->hbalock);
                        old_arr = psli->iocbq_lookup;
                        if (new_len <= psli->iocbq_lookup_len) {
                                /* highly improbable case */
                                kfree(new_arr);
                                iotag = psli->last_iotag;
                                if (++iotag < psli->iocbq_lookup_len) {
                                        psli->last_iotag = iotag;
                                        psli->iocbq_lookup[iotag] = iocbq;
                                        spin_unlock_irq(&phba->hbalock);
                                        iocbq->iotag = iotag;
                                        return iotag;
                                }
                                spin_unlock_irq(&phba->hbalock);
                                return 0;
                        }
                        if (psli->iocbq_lookup)
                                memcpy(new_arr, old_arr,
                                       ((psli->last_iotag + 1) *
                                        sizeof(struct lpfc_iocbq *)));
                        psli->iocbq_lookup = new_arr;
                        psli->iocbq_lookup_len = new_len;
                        psli->last_iotag = iotag;
                        psli->iocbq_lookup[iotag] = iocbq;
                        spin_unlock_irq(&phba->hbalock);
                        iocbq->iotag = iotag;
                        kfree(old_arr);
                        return iotag;
                }
        } else
                spin_unlock_irq(&phba->hbalock);

        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
                        psli->last_iotag);

        return 0;
}

static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
        /*
         * Set up an iotag
         */
        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_debugfs_slow_ring_trc(phba,
                        "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
                        *(((uint32_t *) &nextiocb->iocb) + 4),
                        *(((uint32_t *) &nextiocb->iocb) + 6),
                        *(((uint32_t *) &nextiocb->iocb) + 7));
        }

        /*
         * Issue iocb command to adapter
         */
        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
        wmb();
        pring->stats.iocb_cmd++;

        /*
         * If there is no completion routine to call, we can release the
         * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
         */
        if (nextiocb->iocb_cmpl)
                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
        else
                __lpfc_sli_release_iocbq(phba, nextiocb);

        /*
         * Let the HBA know what IOCB slot will be the next one the
         * driver will put a command into.
         */
        pring->cmdidx = pring->next_cmdidx;
        writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        pring->flag |= LPFC_CALL_RING_AVAILABLE;

        wmb();

        /*
         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
         * The HBA will tell us when an IOCB entry is available.
         */
        writel((CA_R0ATT | CA_R0CE_REQ) << (ringno * 4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */

        pring->stats.iocb_cmd_full++;
}

static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        int ringno = pring->ringno;

        /*
         * Tell the HBA that there is work to do in this ring.
         */
        wmb();
        writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
        readl(phba->CAregaddr); /* flush */
}
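/*
 * Note on the (ringno * 4) shifts above: the Chip Attention register
 * appears to carve out one 4-bit field per ring, with ring 0's bits
 * (CA_R0ATT, CA_R0CE_REQ, ...) defined in lpfc_hw.h.  Shifting the
 * ring-0 masks left by 4 * ringno addresses the equivalent bits of the
 * other rings; e.g. ring 2's attention bit would be CA_R0ATT << 8.
 */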
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        IOCB_t *iocb;
        struct lpfc_iocbq *nextiocb;

        /*
         * Check to see if:
         *  (a) there is anything on the txq to send
         *  (b) link is up
         *  (c) link attention events can be processed (fcp ring only)
         *  (d) IOCB processing is not blocked by the outstanding mbox command.
         */
        if (pring->txq_cnt &&
            lpfc_is_link_up(phba) &&
            (pring->ringno != phba->sli.fcp_ring ||
             phba->sli.sli_flag & LPFC_PROCESS_LA)) {

                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

                if (iocb)
                        lpfc_sli_update_ring(phba, pring);
                else
                        lpfc_sli_update_full_ring(phba, pring);
        }

        return;
}

static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
        struct hbq_s *hbqp = &phba->hbqs[hbqno];

        if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
            ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
                hbqp->next_hbqPutIdx = 0;

        if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
                uint32_t raw_index = phba->hbq_get[hbqno];
                uint32_t getidx = le32_to_cpu(raw_index);

                hbqp->local_hbqGetIdx = getidx;

                if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
                        lpfc_printf_log(phba, KERN_ERR,
                                        LOG_SLI | LOG_VPORT,
                                        "1802 HBQ %d: local_hbqGetIdx %u "
                                        "is greater than entry_count %u\n",
                                        hbqno, hbqp->local_hbqGetIdx,
                                        hbqp->entry_count);

                        phba->link_state = LPFC_HBA_ERROR;
                        return NULL;
                }

                if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
                        return NULL;
        }

        return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
                        hbqp->hbqPutIdx;
}

void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
        struct lpfc_dmabuf *dmabuf, *next_dmabuf;
        struct hbq_dmabuf *hbq_buf;
        unsigned long flags;
        int i, hbq_count;
        uint32_t hbqno;

        hbq_count = lpfc_sli_hbq_count();
        /* Return all memory used by all HBQs */
        spin_lock_irqsave(&phba->hbalock, flags);
        for (i = 0; i < hbq_count; ++i) {
                list_for_each_entry_safe(dmabuf, next_dmabuf,
                                &phba->hbqs[i].hbq_buffer_list, list) {
                        hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
                        list_del(&hbq_buf->dbuf.list);
                        (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
                }
                phba->hbqs[i].buffer_count = 0;
        }
        /* Return all HBQ buffers that are in flight */
        list_for_each_entry_safe(dmabuf, next_dmabuf,
                        &phba->hbqbuf_in_list, list) {
                hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
                list_del(&hbq_buf->dbuf.list);
                if (hbq_buf->tag == -1) {
                        (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
                                (phba, hbq_buf);
                } else {
                        hbqno = hbq_buf->tag >> 16;
                        if (hbqno >= LPFC_MAX_HBQS)
                                (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
                                        (phba, hbq_buf);
                        else
                                (phba->hbqs[hbqno].hbq_free_buffer)(phba,
                                        hbq_buf);
                }
        }

        /* Mark the HBQs not in use */
        phba->hbq_in_use = 0;
        spin_unlock_irqrestore(&phba->hbalock, flags);
}
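/*
 * Post one host buffer queue (HBQ) buffer to the adapter: grab the next
 * free HBQ entry, fill in the little-endian buffer descriptor and tag,
 * then advance the put index and write it to the SLIM doorbell so the
 * firmware sees the new entry.  Returns the entry used, or NULL if the
 * queue is full, in which case the caller keeps ownership of hbq_buf.
 */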
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
                         struct hbq_dmabuf *hbq_buf)
{
        struct lpfc_hbq_entry *hbqe;
        dma_addr_t physaddr = hbq_buf->dbuf.phys;

        /* Get next HBQ entry slot to use */
        hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
        if (hbqe) {
                struct hbq_s *hbqp = &phba->hbqs[hbqno];

                hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
                hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
                hbqe->bde.tus.f.bdeSize = hbq_buf->size;
                hbqe->bde.tus.f.bdeFlags = 0;
                hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
                hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
                /* Sync SLIM */
                hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
                writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
                /* flush */
                readl(phba->hbq_put + hbqno);
                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
        }
        return hbqe;
}

static struct lpfc_hbq_init lpfc_els_hbq = {
        .rn = 1,
        .entry_count = 200,
        .mask_count = 0,
        .profile = 0,
        .ring_mask = (1 << LPFC_ELS_RING),
        .buffer_count = 0,
        .init_count = 20,
        .add_count = 5,
};

static struct lpfc_hbq_init lpfc_extra_hbq = {
        .rn = 1,
        .entry_count = 200,
        .mask_count = 0,
        .profile = 0,
        .ring_mask = (1 << LPFC_EXTRA_RING),
        .buffer_count = 0,
        .init_count = 0,
        .add_count = 5,
};

struct lpfc_hbq_init *lpfc_hbq_defs[] = {
        &lpfc_els_hbq,
        &lpfc_extra_hbq,
};

static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
        uint32_t i, start, end;
        unsigned long flags;
        struct hbq_dmabuf *hbq_buffer;

        if (!phba->hbqs[hbqno].hbq_alloc_buffer)
                return 0;

        start = phba->hbqs[hbqno].buffer_count;
        end = count + start;
        if (end > lpfc_hbq_defs[hbqno]->entry_count)
                end = lpfc_hbq_defs[hbqno]->entry_count;

        /* Check whether HBQ is still in use */
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!phba->hbq_in_use)
                goto out;

        /* Populate HBQ entries */
        for (i = start; i < end; i++) {
                hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
                if (!hbq_buffer)
                        goto err;
                hbq_buffer->tag = (i | (hbqno << 16));
                if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
                        phba->hbqs[hbqno].buffer_count++;
                else
                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
        }

 out:
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return 0;
 err:
        spin_unlock_irqrestore(&phba->hbalock, flags);
        return 1;
}

int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
        return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                         lpfc_hbq_defs[qno]->add_count);
}

static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
        return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
                                         lpfc_hbq_defs[qno]->init_count);
}
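/*
 * HBQ buffer tags, as assigned in lpfc_sli_hbqbuf_fill_hbqs() above,
 * pack the queue number into the upper half-word and the buffer index
 * into the lower: tag = index | (hbqno << 16).  For example, buffer 3
 * of HBQ 1 carries tag 0x00010003, and consumers recover the queue
 * with hbqno = tag >> 16.  A tag of -1 marks a replacement buffer that
 * belongs to no queue (see lpfc_sli_replace_hbqbuff() below).
 */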
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
        struct lpfc_dmabuf *d_buf;
        struct hbq_dmabuf *hbq_buf;
        uint32_t hbqno;

        hbqno = tag >> 16;
        if (hbqno >= LPFC_MAX_HBQS)
                return NULL;

        list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
                hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
                if (hbq_buf->tag == tag) {
                        return hbq_buf;
                }
        }
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
                        "1803 Bad hbq tag. Data: x%x x%x\n",
                        tag, phba->hbqs[tag >> 16].buffer_count);
        return NULL;
}

void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
        uint32_t hbqno;

        if (hbq_buffer) {
                hbqno = hbq_buffer->tag >> 16;
                if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
                }
        }
}

static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
        uint8_t ret;

        switch (mbxCommand) {
        case MBX_LOAD_SM:
        case MBX_READ_NV:
        case MBX_WRITE_NV:
        case MBX_WRITE_VPARMS:
        case MBX_RUN_BIU_DIAG:
        case MBX_INIT_LINK:
        case MBX_DOWN_LINK:
        case MBX_CONFIG_LINK:
        case MBX_CONFIG_RING:
        case MBX_RESET_RING:
        case MBX_READ_CONFIG:
        case MBX_READ_RCONFIG:
        case MBX_READ_SPARM:
        case MBX_READ_STATUS:
        case MBX_READ_RPI:
        case MBX_READ_XRI:
        case MBX_READ_REV:
        case MBX_READ_LNK_STAT:
        case MBX_REG_LOGIN:
        case MBX_UNREG_LOGIN:
        case MBX_READ_LA:
        case MBX_CLEAR_LA:
        case MBX_DUMP_MEMORY:
        case MBX_DUMP_CONTEXT:
        case MBX_RUN_DIAGS:
        case MBX_RESTART:
        case MBX_UPDATE_CFG:
        case MBX_DOWN_LOAD:
        case MBX_DEL_LD_ENTRY:
        case MBX_RUN_PROGRAM:
        case MBX_SET_MASK:
        case MBX_SET_VARIABLE:
        case MBX_UNREG_D_ID:
        case MBX_KILL_BOARD:
        case MBX_CONFIG_FARP:
        case MBX_BEACON:
        case MBX_LOAD_AREA:
        case MBX_RUN_BIU_DIAG64:
        case MBX_CONFIG_PORT:
        case MBX_READ_SPARM64:
        case MBX_READ_RPI64:
        case MBX_REG_LOGIN64:
        case MBX_READ_LA64:
        case MBX_WRITE_WWN:
        case MBX_SET_DEBUG:
        case MBX_LOAD_EXP_ROM:
        case MBX_ASYNCEVT_ENABLE:
        case MBX_REG_VPI:
        case MBX_UNREG_VPI:
        case MBX_HEARTBEAT:
                ret = mbxCommand;
                break;
        default:
                ret = MBX_SHUTDOWN;
                break;
        }
        return ret;
}

static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
        wait_queue_head_t *pdone_q;
        unsigned long drvr_flag;

        /*
         * If pdone_q is empty, the driver thread gave up waiting and
         * continued running.
         */
        pmboxq->mbox_flag |= LPFC_MBX_WAKE;
        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        pdone_q = (wait_queue_head_t *) pmboxq->context1;
        if (pdone_q)
                wake_up_interruptible(pdone_q);
        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
        return;
}
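/*
 * Default mailbox completion handler: free the DMA buffer hung off
 * context1 (if any) and return the mailbox to the pool.  One special
 * case is handled inline: if a REG_LOGIN64 completed successfully
 * after its node went away, the just-registered RPI would otherwise
 * leak, so the same mailbox is reused to issue an UNREG_LOGIN before
 * it is freed.
 */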
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
        struct lpfc_dmabuf *mp;
        uint16_t rpi;
        int rc;

        mp = (struct lpfc_dmabuf *) (pmb->context1);

        if (mp) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
        }

        /*
         * If a REG_LOGIN succeeded after the node was destroyed or the
         * node is in re-discovery, the driver needs to clean up the RPI.
         */
        if (!(phba->pport->load_flag & FC_UNLOADING) &&
            pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
            !pmb->mb.mbxStatus) {

                rpi = pmb->mb.un.varWords[0];
                lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if (rc != MBX_NOT_FINISHED)
                        return;
        }

        mempool_free(pmb, phba->mbox_mem_pool);
        return;
}

int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
        MAILBOX_t *pmbox;
        LPFC_MBOXQ_t *pmb;
        int rc;
        LIST_HEAD(cmplq);

        phba->sli.slistat.mbox_event++;

        /* Get all completed mailbox buffers into the cmplq */
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
        spin_unlock_irq(&phba->hbalock);

        /* Get a Mailbox buffer to setup mailbox commands for callback */
        do {
                list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
                if (pmb == NULL)
                        break;

                pmbox = &pmb->mb;

                if (pmbox->mbxCommand != MBX_HEARTBEAT) {
                        if (pmb->vport) {
                                lpfc_debugfs_disc_trc(pmb->vport,
                                        LPFC_DISC_TRC_MBOX_VPORT,
                                        "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
                                        (uint32_t)pmbox->mbxCommand,
                                        pmbox->un.varWords[0],
                                        pmbox->un.varWords[1]);
                        }
                        else {
                                lpfc_debugfs_disc_trc(phba->pport,
                                        LPFC_DISC_TRC_MBOX,
                                        "MBOX cmpl:       cmd:x%x mb:x%x x%x",
                                        (uint32_t)pmbox->mbxCommand,
                                        pmbox->un.varWords[0],
                                        pmbox->un.varWords[1]);
                        }
                }

                /*
                 * It is a fatal error if an unknown mailbox command
                 * completes.
                 */
                if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
                    MBX_SHUTDOWN) {
                        /* Unknown mailbox command compl */
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                                        "(%d):0323 Unknown Mailbox command "
                                        "%x Cmpl\n",
                                        pmb->vport ? pmb->vport->vpi : 0,
                                        pmbox->mbxCommand);
                        phba->link_state = LPFC_HBA_ERROR;
                        phba->work_hs = HS_FFER3;
                        lpfc_handle_eratt(phba);
                        continue;
                }

                if (pmbox->mbxStatus) {
                        phba->sli.slistat.mbox_stat_err++;
                        if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
                                /* Mbox cmd cmpl error - RETRYing */
                                lpfc_printf_log(phba, KERN_INFO,
                                        LOG_MBOX | LOG_SLI,
                                        "(%d):0305 Mbox cmd cmpl "
                                        "error - RETRYing Data: x%x "
                                        "x%x x%x x%x\n",
                                        pmb->vport ? pmb->vport->vpi : 0,
                                        pmbox->mbxCommand,
                                        pmbox->mbxStatus,
                                        pmbox->un.varWords[0],
                                        pmb->vport ?
                                            pmb->vport->port_state : 0);
                                pmbox->mbxStatus = 0;
                                pmbox->mbxOwner = OWN_HOST;
                                spin_lock_irq(&phba->hbalock);
                                phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                                spin_unlock_irq(&phba->hbalock);
                                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                                if (rc == MBX_SUCCESS)
                                        continue;
                        }
                }

                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
                                "(%d):0307 Mailbox cmd x%x Cmpl x%p "
                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
                                pmb->mbox_cmpl,
                                *((uint32_t *) pmbox),
                                pmbox->un.varWords[0],
                                pmbox->un.varWords[1],
                                pmbox->un.varWords[2],
                                pmbox->un.varWords[3],
                                pmbox->un.varWords[4],
                                pmbox->un.varWords[5],
                                pmbox->un.varWords[6],
                                pmbox->un.varWords[7]);

                if (pmb->mbox_cmpl)
                        pmb->mbox_cmpl(phba, pmb);
        } while (1);
        return 0;
}

static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
        struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
        uint32_t hbqno;
        void *virt;             /* virtual address ptr */
        dma_addr_t phys;        /* mapped address */
        unsigned long flags;

        /* Check whether HBQ is still in use */
        spin_lock_irqsave(&phba->hbalock, flags);
        if (!phba->hbq_in_use) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return NULL;
        }

        hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
        if (hbq_entry == NULL) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return NULL;
        }
        list_del(&hbq_entry->dbuf.list);

        hbqno = tag >> 16;
        new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
        if (new_hbq_entry == NULL) {
                list_add_tail(&hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
                spin_unlock_irqrestore(&phba->hbalock, flags);
                return &hbq_entry->dbuf;
        }
        new_hbq_entry->tag = -1;
        phys = new_hbq_entry->dbuf.phys;
        virt = new_hbq_entry->dbuf.virt;
        new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
        new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
        hbq_entry->dbuf.phys = phys;
        hbq_entry->dbuf.virt = virt;
        lpfc_sli_free_hbq(phba, hbq_entry);
        list_add_tail(&new_hbq_entry->dbuf.list, &phba->hbqbuf_in_list);
        spin_unlock_irqrestore(&phba->hbalock, flags);

        return &new_hbq_entry->dbuf;
}

static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
                  struct lpfc_sli_ring *pring,
                  uint32_t tag)
{
        if (tag & QUE_BUFTAG_BIT)
                return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
        else
                return lpfc_sli_replace_hbqbuff(phba, tag);
}
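/*
 * Handle one unsolicited iocb (and any continuation entries chained to
 * it): reclaim the receive buffers named in the entry, stitch
 * multi-sequence CMD_IOCB_RCV_CONT64_CX pieces together on the ring's
 * iocb_continue_saveq until the final piece arrives, then dispatch the
 * whole sequence to the unsolicited-event handler registered for its
 * R_CTL/TYPE pair.  Returns 1 when the caller may free saveq, 0 when
 * the iocb was queued here awaiting further continuations.
 */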
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                            struct lpfc_iocbq *saveq)
{
        IOCB_t *irsp;
        WORD5 *w5p;
        uint32_t Rctl, Type;
        uint32_t match, i;
        struct lpfc_iocbq *iocbq;
        struct lpfc_dmabuf *dmzbuf;

        match = 0;
        irsp = &(saveq->iocb);

        if (irsp->ulpStatus == IOSTAT_NEED_BUFFER)
                return 1;
        if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
                if (pring->lpfc_sli_rcv_async_status)
                        pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
                else
                        lpfc_printf_log(phba,
                                        KERN_WARNING,
                                        LOG_SLI,
                                        "0316 Ring %d handler: unexpected "
                                        "ASYNC_STATUS iocb received evt_code "
                                        "0x%x\n",
                                        pring->ringno,
                                        irsp->un.asyncstat.evt_code);
                return 1;
        }

        if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
            (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
                if (irsp->ulpBdeCount > 0) {
                        dmzbuf = lpfc_sli_get_buff(phba, pring,
                                                   irsp->un.ulpWord[3]);
                        lpfc_in_buf_free(phba, dmzbuf);
                }

                if (irsp->ulpBdeCount > 1) {
                        dmzbuf = lpfc_sli_get_buff(phba, pring,
                                                   irsp->unsli3.sli3Words[3]);
                        lpfc_in_buf_free(phba, dmzbuf);
                }

                if (irsp->ulpBdeCount > 2) {
                        dmzbuf = lpfc_sli_get_buff(phba, pring,
                                                   irsp->unsli3.sli3Words[7]);
                        lpfc_in_buf_free(phba, dmzbuf);
                }

                return 1;
        }

        if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
                if (irsp->ulpBdeCount != 0) {
                        saveq->context2 = lpfc_sli_get_buff(phba, pring,
                                                irsp->un.ulpWord[3]);
                        if (!saveq->context2)
                                lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_SLI,
                                        "0341 Ring %d Cannot find buffer for "
                                        "an unsolicited iocb. tag 0x%x\n",
                                        pring->ringno,
                                        irsp->un.ulpWord[3]);
                }
                if (irsp->ulpBdeCount == 2) {
                        saveq->context3 = lpfc_sli_get_buff(phba, pring,
                                                irsp->unsli3.sli3Words[7]);
                        if (!saveq->context3)
                                lpfc_printf_log(phba,
                                        KERN_ERR,
                                        LOG_SLI,
                                        "0342 Ring %d Cannot find buffer for an"
                                        " unsolicited iocb. tag 0x%x\n",
                                        pring->ringno,
                                        irsp->unsli3.sli3Words[7]);
                }
                list_for_each_entry(iocbq, &saveq->list, list) {
                        irsp = &(iocbq->iocb);
                        if (irsp->ulpBdeCount != 0) {
                                iocbq->context2 = lpfc_sli_get_buff(phba, pring,
                                                        irsp->un.ulpWord[3]);
                                if (!iocbq->context2)
                                        lpfc_printf_log(phba,
                                                KERN_ERR,
                                                LOG_SLI,
                                                "0343 Ring %d Cannot find "
                                                "buffer for an unsolicited iocb"
                                                ". tag 0x%x\n", pring->ringno,
                                                irsp->un.ulpWord[3]);
                        }
                        if (irsp->ulpBdeCount == 2) {
                                iocbq->context3 = lpfc_sli_get_buff(phba, pring,
                                                irsp->unsli3.sli3Words[7]);
                                if (!iocbq->context3)
                                        lpfc_printf_log(phba,
                                                KERN_ERR,
                                                LOG_SLI,
                                                "0344 Ring %d Cannot find "
                                                "buffer for an unsolicited "
                                                "iocb. tag 0x%x\n",
                                                pring->ringno,
                                                irsp->unsli3.sli3Words[7]);
                        }
                }
        }
        if (irsp->ulpBdeCount != 0 &&
            (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
             irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
                int found = 0;

                /* search continue save q for same XRI */
                list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
                        if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
                                list_add_tail(&saveq->list, &iocbq->list);
                                found = 1;
                                break;
                        }
                }
                if (!found)
                        list_add_tail(&saveq->clist,
                                      &pring->iocb_continue_saveq);
                if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
                        list_del_init(&iocbq->clist);
                        saveq = iocbq;
                        irsp = &(saveq->iocb);
                } else
                        return 0;
        }
        if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
            (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
            (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
                Rctl = FC_ELS_REQ;
                Type = FC_ELS_DATA;
        } else {
                w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
                Rctl = w5p->hcsw.Rctl;
                Type = w5p->hcsw.Type;

                /* Firmware Workaround */
                if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
                    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
                     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
                        Rctl = FC_ELS_REQ;
                        Type = FC_ELS_DATA;
                        w5p->hcsw.Rctl = Rctl;
                        w5p->hcsw.Type = Type;
                }
        }

        /* unSolicited Responses */
        if (pring->prt[0].profile) {
                if (pring->prt[0].lpfc_sli_rcv_unsol_event)
                        (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
                                                                  saveq);
                match = 1;
        } else {
                /* We must search, based on rctl / type
                   for the right routine */
                for (i = 0; i < pring->num_mask; i++) {
                        if ((pring->prt[i].rctl == Rctl)
                            && (pring->prt[i].type == Type)) {
                                if (pring->prt[i].lpfc_sli_rcv_unsol_event)
                                        (pring->prt[i].lpfc_sli_rcv_unsol_event)
                                                        (phba, pring, saveq);
                                match = 1;
                                break;
                        }
                }
        }
        if (match == 0) {
                /* Unexpected Rctl / Type received */
                /* Ring <ringno> handler: unexpected
                   Rctl <Rctl> Type <Type> received */
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0313 Ring %d handler: unexpected Rctl x%x "
                                "Type x%x received\n",
                                pring->ringno, Rctl, Type);
        }
        return 1;
}

static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
                      struct lpfc_sli_ring *pring,
                      struct lpfc_iocbq *prspiocb)
{
        struct lpfc_iocbq *cmd_iocb = NULL;
        uint16_t iotag;

        iotag = prspiocb->iocb.ulpIoTag;

        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
                cmd_iocb = phba->sli.iocbq_lookup[iotag];
                list_del_init(&cmd_iocb->list);
                pring->txcmplq_cnt--;
                return cmd_iocb;
        }

        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0317 iotag x%x is out of "
                        "range: max iotag x%x wd0 x%x\n",
                        iotag, phba->sli.last_iotag,
                        *(((uint32_t *) &prspiocb->iocb) + 7));
        return NULL;
}

static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                          struct lpfc_iocbq *saveq)
{
        struct lpfc_iocbq *cmdiocbp;
        int rc = 1;
        unsigned long iflag;

        /* Based on the iotag field, get the cmd IOCB from the txcmplq */
        spin_lock_irqsave(&phba->hbalock, iflag);
        cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
        spin_unlock_irqrestore(&phba->hbalock, iflag);

        if (cmdiocbp) {
                if (cmdiocbp->iocb_cmpl) {
                        /*
                         * Post all ELS completions to the worker thread.
                         * All others are passed to the completion callback.
                         */
                        if (pring->ringno == LPFC_ELS_RING) {
                                if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
                                        cmdiocbp->iocb_flag &=
                                                ~LPFC_DRIVER_ABORTED;
                                        saveq->iocb.ulpStatus =
                                                IOSTAT_LOCAL_REJECT;
                                        saveq->iocb.un.ulpWord[4] =
                                                IOERR_SLI_ABORTED;

                                        /* Firmware could still be in progress
                                         * of DMAing payload, so don't free data
                                         * buffer till after a hbeat.
                                         */
                                        saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
                                }
                        }
                        (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
                } else
                        lpfc_sli_release_iocbq(phba, cmdiocbp);
        } else {
                /*
                 * Unknown initiating command based on the response iotag.
                 * This could be the case on the ELS ring because of
                 * lpfc_els_abort().  Note: cmdiocbp is NULL here, so log
                 * against the hba rather than a vport.
                 */
                if (pring->ringno != LPFC_ELS_RING) {
                        /*
                         * Ring <ringno> handler: unexpected completion IoTag
                         * <IoTag>
                         */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "0322 Ring %d handler: "
                                        "unexpected completion IoTag x%x "
                                        "Data: x%x x%x x%x x%x\n",
                                        pring->ringno,
                                        saveq->iocb.ulpIoTag,
                                        saveq->iocb.ulpStatus,
                                        saveq->iocb.un.ulpWord[4],
                                        saveq->iocb.ulpCommand,
                                        saveq->iocb.ulpContext);
                }
        }

        return rc;
}
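/*
 * The port reported a response put index beyond the end of the
 * response ring, which can only mean the HBA state is corrupt.  Log
 * it, mark the HBA errored, and hand the error attention to the
 * worker thread.  The caller is expected to hold hbalock (see the
 * work_wait check below).
 */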
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        /*
         * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
         * rsp ring <portRspMax>
         */
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "0312 Ring %d handler: portRspPut %d "
                        "is bigger than rsp ring %d\n",
                        pring->ringno, le32_to_cpu(pgp->rspPutInx),
                        pring->numRiocb);

        phba->link_state = LPFC_HBA_ERROR;

        /*
         * All error attention handlers are posted to
         * the worker thread
         */
        phba->work_ha |= HA_ERATT;
        phba->work_hs = HS_FFER3;

        /* hbalock should already be held */
        if (phba->work_wait)
                lpfc_worker_wake_up(phba);

        return;
}

void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
        IOCB_t *irsp = NULL;
        IOCB_t *entry = NULL;
        struct lpfc_iocbq *cmdiocbq = NULL;
        struct lpfc_iocbq rspiocbq;
        struct lpfc_pgp *pgp;
        uint32_t status;
        uint32_t portRspPut, portRspMax;
        int type;
        uint32_t rsp_cmpl = 0;
        uint32_t ha_copy;
        unsigned long iflags;

        pring->stats.iocb_event++;

        pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];

        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (unlikely(portRspPut >= portRspMax)) {
                lpfc_sli_rsp_pointers_error(phba, pring);
                return;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                entry = lpfc_resp_iocb(phba, pring);
                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
                                      (uint32_t *) &rspiocbq.iocb,
                                      phba->iocb_rsp_size);
                irsp = &rspiocbq.iocb;
                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
                pring->stats.iocb_rsp++;
                rsp_cmpl++;

                if (unlikely(irsp->ulpStatus)) {
                        /* Rsp ring <ringno> error: IOCB */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "0326 Rsp Ring %d error: IOCB Data: "
                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                        pring->ringno,
                                        irsp->un.ulpWord[0],
                                        irsp->un.ulpWord[1],
                                        irsp->un.ulpWord[2],
                                        irsp->un.ulpWord[3],
                                        irsp->un.ulpWord[4],
                                        irsp->un.ulpWord[5],
                                        *(((uint32_t *) irsp) + 6),
                                        *(((uint32_t *) irsp) + 7));
                }

                switch (type) {
                case LPFC_ABORT_IOCB:
                case LPFC_SOL_IOCB:
                        /*
                         * Idle exchange closed via ABTS from port.  No iocb
                         * resources need to be recovered.
                         */
                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                                "0314 IOCB cmd 0x%x "
                                                "processed. Skipping "
Skipping " 1394 "completion", 1395 irsp->ulpCommand); 1396 break; 1397 } 1398 1399 spin_lock_irqsave(&phba->hbalock, iflags); 1400 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1401 &rspiocbq); 1402 spin_unlock_irqrestore(&phba->hbalock, iflags); 1403 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1404 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1405 &rspiocbq); 1406 } 1407 break; 1408 default: 1409 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1410 char adaptermsg[LPFC_MAX_ADPTMSG]; 1411 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1412 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1413 MAX_MSG_DATA); 1414 dev_warn(&((phba->pcidev)->dev), 1415 "lpfc%d: %s\n", 1416 phba->brd_no, adaptermsg); 1417 } else { 1418 /* Unknown IOCB command */ 1419 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1420 "0321 Unknown IOCB command " 1421 "Data: x%x, x%x x%x x%x x%x\n", 1422 type, irsp->ulpCommand, 1423 irsp->ulpStatus, 1424 irsp->ulpIoTag, 1425 irsp->ulpContext); 1426 } 1427 break; 1428 } 1429 1430 /* 1431 * The response IOCB has been processed. Update the ring 1432 * pointer in SLIM. If the port response put pointer has not 1433 * been updated, sync the pgp->rspPutInx and fetch the new port 1434 * response put pointer. 1435 */ 1436 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1437 1438 if (pring->rspidx == portRspPut) 1439 portRspPut = le32_to_cpu(pgp->rspPutInx); 1440 } 1441 1442 ha_copy = readl(phba->HAregaddr); 1443 ha_copy >>= (LPFC_FCP_RING * 4); 1444 1445 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1446 spin_lock_irqsave(&phba->hbalock, iflags); 1447 pring->stats.iocb_rsp_full++; 1448 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1449 writel(status, phba->CAregaddr); 1450 readl(phba->CAregaddr); 1451 spin_unlock_irqrestore(&phba->hbalock, iflags); 1452 } 1453 if ((ha_copy & HA_R0CE_RSP) && 1454 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1455 spin_lock_irqsave(&phba->hbalock, iflags); 1456 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1457 pring->stats.iocb_cmd_empty++; 1458 1459 /* Force update of the local copy of cmdGetInx */ 1460 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1461 lpfc_sli_resume_iocb(phba, pring); 1462 1463 if ((pring->lpfc_sli_cmd_available)) 1464 (pring->lpfc_sli_cmd_available) (phba, pring); 1465 1466 spin_unlock_irqrestore(&phba->hbalock, iflags); 1467 } 1468 1469 return; 1470 } 1471 1472 /* 1473 * This routine presumes LPFC_FCP_RING handling and doesn't bother 1474 * to check it explicitly. 1475 */ 1476 static int 1477 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 1478 struct lpfc_sli_ring *pring, uint32_t mask) 1479 { 1480 struct lpfc_pgp *pgp = (phba->sli_rev == 3) ? 1481 &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] : 1482 &phba->slim2p->mbx.us.s2.port[pring->ringno]; 1483 IOCB_t *irsp = NULL; 1484 IOCB_t *entry = NULL; 1485 struct lpfc_iocbq *cmdiocbq = NULL; 1486 struct lpfc_iocbq rspiocbq; 1487 uint32_t status; 1488 uint32_t portRspPut, portRspMax; 1489 int rc = 1; 1490 lpfc_iocb_type type; 1491 unsigned long iflag; 1492 uint32_t rsp_cmpl = 0; 1493 1494 spin_lock_irqsave(&phba->hbalock, iflag); 1495 pring->stats.iocb_event++; 1496 1497 /* 1498 * The next available response entry should never exceed the maximum 1499 * entries. If it does, treat it as an adapter hardware error. 
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (unlikely(portRspPut >= portRspMax)) {
                lpfc_sli_rsp_pointers_error(phba, pring);
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                return 1;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                /*
                 * Fetch an entry off the ring and copy it into a local data
                 * structure.  The copy involves a byte-swap since the
                 * network byte order and pci byte orders are different.
                 */
                entry = lpfc_resp_iocb(phba, pring);
                phba->last_completion_time = jiffies;

                if (++pring->rspidx >= portRspMax)
                        pring->rspidx = 0;

                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
                                      (uint32_t *) &rspiocbq.iocb,
                                      phba->iocb_rsp_size);
                INIT_LIST_HEAD(&(rspiocbq.list));
                irsp = &rspiocbq.iocb;

                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
                pring->stats.iocb_rsp++;
                rsp_cmpl++;

                if (unlikely(irsp->ulpStatus)) {
                        /*
                         * If resource errors reported from HBA, reduce
                         * queuedepths of the SCSI device.
                         */
                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
                            (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                lpfc_adjust_queue_depth(phba);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        }

                        /* Rsp ring <ringno> error: IOCB */
                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                        "0336 Rsp Ring %d error: IOCB Data: "
                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                        pring->ringno,
                                        irsp->un.ulpWord[0],
                                        irsp->un.ulpWord[1],
                                        irsp->un.ulpWord[2],
                                        irsp->un.ulpWord[3],
                                        irsp->un.ulpWord[4],
                                        irsp->un.ulpWord[5],
                                        *(((uint32_t *) irsp) + 6),
                                        *(((uint32_t *) irsp) + 7));
                }

                switch (type) {
                case LPFC_ABORT_IOCB:
                case LPFC_SOL_IOCB:
                        /*
                         * Idle exchange closed via ABTS from port.  No iocb
                         * resources need to be recovered.
                         */
                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                                "0333 IOCB cmd 0x%x"
                                                " processed. Skipping"
                                                " completion\n",
                                                irsp->ulpCommand);
                                break;
                        }

                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
                                                         &rspiocbq);
                        if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
                                if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
                                                              &rspiocbq);
                                } else {
                                        spin_unlock_irqrestore(&phba->hbalock,
                                                               iflag);
                                        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
                                                              &rspiocbq);
                                        spin_lock_irqsave(&phba->hbalock,
                                                          iflag);
                                }
                        }
                        break;
                case LPFC_UNSOL_IOCB:
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
                        spin_lock_irqsave(&phba->hbalock, iflag);
                        break;
                default:
                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
                                char adaptermsg[LPFC_MAX_ADPTMSG];
                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                       MAX_MSG_DATA);
                                dev_warn(&((phba->pcidev)->dev),
                                         "lpfc%d: %s\n",
                                         phba->brd_no, adaptermsg);
                        } else {
                                /* Unknown IOCB command */
                                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                "0334 Unknown IOCB command "
                                                "Data: x%x, x%x x%x x%x x%x\n",
                                                type, irsp->ulpCommand,
                                                irsp->ulpStatus,
                                                irsp->ulpIoTag,
                                                irsp->ulpContext);
                        }
                        break;
                }

                /*
                 * The response IOCB has been processed.  Update the ring
                 * pointer in SLIM.  If the port response put pointer has not
                 * been updated, sync the pgp->rspPutInx and fetch the new port
                 * response put pointer.
                 */
                writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

                if (pring->rspidx == portRspPut)
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
        }

        if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
                pring->stats.iocb_rsp_full++;
                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr);
        }
        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

        }

        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return rc;
}

int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
                                struct lpfc_sli_ring *pring, uint32_t mask)
{
        struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
                &phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
                &phba->slim2p->mbx.us.s2.port[pring->ringno];
        IOCB_t *entry;
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *rspiocbp = NULL;
        struct lpfc_iocbq *next_iocb;
        struct lpfc_iocbq *cmdiocbp;
        struct lpfc_iocbq *saveq;
        uint8_t iocb_cmd_type;
        lpfc_iocb_type type;
        uint32_t status, free_saveq;
        uint32_t portRspPut, portRspMax;
        int rc = 1;
        unsigned long iflag;

        spin_lock_irqsave(&phba->hbalock, iflag);
        pring->stats.iocb_event++;

        /*
         * The next available response entry should never exceed the maximum
         * entries.  If it does, treat it as an adapter hardware error.
         */
        portRspMax = pring->numRiocb;
        portRspPut = le32_to_cpu(pgp->rspPutInx);
        if (portRspPut >= portRspMax) {
                /*
                 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
                 * than rsp ring <portRspMax>
                 */
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0303 Ring %d handler: portRspPut %d "
                                "is bigger than rsp ring %d\n",
                                pring->ringno, portRspPut, portRspMax);

                phba->link_state = LPFC_HBA_ERROR;
                spin_unlock_irqrestore(&phba->hbalock, iflag);

                phba->work_hs = HS_FFER3;
                lpfc_handle_eratt(phba);

                return 1;
        }

        rmb();
        while (pring->rspidx != portRspPut) {
                /*
                 * Build a completion list and call the appropriate handler.
                 * The process is to get the next available response iocb, get
                 * a free iocb from the list, copy the response data into the
                 * free iocb, insert to the continuation list, and update the
                 * next response index to slim.  This process makes response
                 * iocb's in the ring available to DMA as fast as possible but
                 * pays a penalty for a copy operation.  Since the iocb is
                 * only 32 bytes, this penalty is considered small relative to
                 * the PCI reads for register values and a slim write.  When
                 * the ulpLe field is set, the entire Command has been
                 * received.
                 */
                entry = lpfc_resp_iocb(phba, pring);

                phba->last_completion_time = jiffies;
                rspiocbp = __lpfc_sli_get_iocbq(phba);
                if (rspiocbp == NULL) {
                        printk(KERN_ERR "%s: out of buffers! Failing "
Failing " 1722 "completion.\n", __FUNCTION__); 1723 break; 1724 } 1725 1726 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 1727 phba->iocb_rsp_size); 1728 irsp = &rspiocbp->iocb; 1729 1730 if (++pring->rspidx >= portRspMax) 1731 pring->rspidx = 0; 1732 1733 if (pring->ringno == LPFC_ELS_RING) { 1734 lpfc_debugfs_slow_ring_trc(phba, 1735 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1736 *(((uint32_t *) irsp) + 4), 1737 *(((uint32_t *) irsp) + 6), 1738 *(((uint32_t *) irsp) + 7)); 1739 } 1740 1741 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1742 1743 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 1744 1745 pring->iocb_continueq_cnt++; 1746 if (irsp->ulpLe) { 1747 /* 1748 * By default, the driver expects to free all resources 1749 * associated with this iocb completion. 1750 */ 1751 free_saveq = 1; 1752 saveq = list_get_first(&pring->iocb_continueq, 1753 struct lpfc_iocbq, list); 1754 irsp = &(saveq->iocb); 1755 list_del_init(&pring->iocb_continueq); 1756 pring->iocb_continueq_cnt = 0; 1757 1758 pring->stats.iocb_rsp++; 1759 1760 /* 1761 * If resource errors reported from HBA, reduce 1762 * queuedepths of the SCSI device. 1763 */ 1764 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1765 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 1766 spin_unlock_irqrestore(&phba->hbalock, iflag); 1767 lpfc_adjust_queue_depth(phba); 1768 spin_lock_irqsave(&phba->hbalock, iflag); 1769 } 1770 1771 if (irsp->ulpStatus) { 1772 /* Rsp ring <ringno> error: IOCB */ 1773 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1774 "0328 Rsp Ring %d error: " 1775 "IOCB Data: " 1776 "x%x x%x x%x x%x " 1777 "x%x x%x x%x x%x " 1778 "x%x x%x x%x x%x " 1779 "x%x x%x x%x x%x\n", 1780 pring->ringno, 1781 irsp->un.ulpWord[0], 1782 irsp->un.ulpWord[1], 1783 irsp->un.ulpWord[2], 1784 irsp->un.ulpWord[3], 1785 irsp->un.ulpWord[4], 1786 irsp->un.ulpWord[5], 1787 *(((uint32_t *) irsp) + 6), 1788 *(((uint32_t *) irsp) + 7), 1789 *(((uint32_t *) irsp) + 8), 1790 *(((uint32_t *) irsp) + 9), 1791 *(((uint32_t *) irsp) + 10), 1792 *(((uint32_t *) irsp) + 11), 1793 *(((uint32_t *) irsp) + 12), 1794 *(((uint32_t *) irsp) + 13), 1795 *(((uint32_t *) irsp) + 14), 1796 *(((uint32_t *) irsp) + 15)); 1797 } 1798 1799 /* 1800 * Fetch the IOCB command type and call the correct 1801 * completion routine. Solicited and Unsolicited 1802 * IOCBs on the ELS ring get freed back to the 1803 * lpfc_iocb_list by the discovery kernel thread. 
                         */
                        iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
                        type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
                        if (type == LPFC_SOL_IOCB) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                rc = lpfc_sli_process_sol_iocb(phba, pring,
                                                               saveq);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        } else if (type == LPFC_UNSOL_IOCB) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                rc = lpfc_sli_process_unsol_iocb(phba, pring,
                                                                 saveq);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                                if (!rc)
                                        free_saveq = 0;
                        } else if (type == LPFC_ABORT_IOCB) {
                                if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
                                    ((cmdiocbp =
                                      lpfc_sli_iocbq_lookup(phba, pring,
                                                            saveq)))) {
                                        /* Call the specified completion
                                           routine */
                                        if (cmdiocbp->iocb_cmpl) {
                                                spin_unlock_irqrestore(
                                                        &phba->hbalock,
                                                        iflag);
                                                (cmdiocbp->iocb_cmpl) (phba,
                                                        cmdiocbp, saveq);
                                                spin_lock_irqsave(
                                                        &phba->hbalock,
                                                        iflag);
                                        } else
                                                __lpfc_sli_release_iocbq(phba,
                                                        cmdiocbp);
                                }
                        } else if (type == LPFC_UNKNOWN_IOCB) {
                                if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

                                        char adaptermsg[LPFC_MAX_ADPTMSG];

                                        memset(adaptermsg, 0,
                                               LPFC_MAX_ADPTMSG);
                                        memcpy(&adaptermsg[0], (uint8_t *) irsp,
                                               MAX_MSG_DATA);
                                        dev_warn(&((phba->pcidev)->dev),
                                                 "lpfc%d: %s\n",
                                                 phba->brd_no, adaptermsg);
                                } else {
                                        /* Unknown IOCB command */
                                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                                        "0335 Unknown IOCB "
                                                        "command Data: x%x "
                                                        "x%x x%x x%x\n",
                                                        irsp->ulpCommand,
                                                        irsp->ulpStatus,
                                                        irsp->ulpIoTag,
                                                        irsp->ulpContext);
                                }
                        }

                        if (free_saveq) {
                                list_for_each_entry_safe(rspiocbp, next_iocb,
                                                         &saveq->list, list) {
                                        list_del(&rspiocbp->list);
                                        __lpfc_sli_release_iocbq(phba,
                                                                 rspiocbp);
                                }
                                __lpfc_sli_release_iocbq(phba, saveq);
                        }
                        rspiocbp = NULL;
                }

                /*
                 * If the port response put pointer has not been updated, sync
                 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
                 * response put pointer.
                 */
                if (pring->rspidx == portRspPut) {
                        portRspPut = le32_to_cpu(pgp->rspPutInx);
                }
        } /* while (pring->rspidx != portRspPut) */

        if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
                /* At least one response entry has been freed */
                pring->stats.iocb_rsp_full++;
                /* SET RxRE_RSP in Chip Att register */
                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
                writel(status, phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
        }
        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
                pring->stats.iocb_cmd_empty++;

                /* Force update of the local copy of cmdGetInx */
                pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
                lpfc_sli_resume_iocb(phba, pring);

                if ((pring->lpfc_sli_cmd_available))
                        (pring->lpfc_sli_cmd_available) (phba, pring);

        }

        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return rc;
}
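/*
 * Fail every iocb queued to the given ring: entries still on the txq
 * are completed locally with IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED,
 * while entries already handed to the port (on the txcmplq) get an
 * abort iotag issued so the adapter closes them.  On the ELS ring the
 * fabric iocb state is torn down first via lpfc_fabric_abort_hba().
 */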
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);
        struct lpfc_iocbq *iocb, *next_iocb;
        IOCB_t *cmd = NULL;

        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_fabric_abort_hba(phba);
        }

        /* Error everything on txq and txcmplq
         * First do the txq.
         */
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&pring->txq, &completions);
        pring->txq_cnt = 0;

        /* Next issue ABTS for everything on the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
                lpfc_sli_issue_abort_iotag(phba, pring, iocb);

        spin_unlock_irq(&phba->hbalock);

        while (!list_empty(&completions)) {
                iocb = list_get_first(&completions, struct lpfc_iocbq, list);
                cmd = &iocb->iocb;
                list_del_init(&iocb->list);

                if (!iocb->iocb_cmpl)
                        lpfc_sli_release_iocbq(phba, iocb);
                else {
                        cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
                        cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
                        (iocb->iocb_cmpl) (phba, iocb, iocb);
                }
        }
}

int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
        uint32_t status;
        int i = 0;
        int retval = 0;

        /* Read the HBA Host Status Register */
        status = readl(phba->HSregaddr);

        /*
         * Check the status register every 10ms for 5 retries, then every
         * 500ms for 5, then every 2.5 sec for 5, then reset the board and
         * check every 2.5 sec for 4 more.
         * Break out of the loop if errors occurred during init.
         */
        while (((status & mask) != mask) &&
               !(status & HS_FFERM) &&
               i++ < 20) {

                if (i <= 5)
                        msleep(10);
                else if (i <= 10)
                        msleep(500);
                else
                        msleep(2500);

                if (i == 15) {
                        /* Do post */
                        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
                        lpfc_sli_brdrestart(phba);
                }
                /* Read the HBA Host Status Register */
                status = readl(phba->HSregaddr);
        }

        /* Check to see if any errors occurred during init */
        if ((status & HS_FFERM) || (i >= 20)) {
                phba->link_state = LPFC_HBA_ERROR;
                retval = 1;
        }

        return retval;
}

#define BARRIER_TEST_PATTERN (0xdeadbeef)
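/*
 * Quiesce the chip before a reset.  This only applies to boards with a
 * 0x80 PCI header type and a Helios or Thor BIU: a MBX_KILL_BOARD
 * mailbox is written directly into SLIM, with error attention masked
 * and BARRIER_TEST_PATTERN scribbled into the response area so the
 * handshake can be observed, to make the other part of the chip
 * suspend its DMA before the host toggles INITFF.
 */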
2015 */ 2016 resp_buf = phba->MBslimaddr; 2017 2018 /* Disable the error attention */ 2019 hc_copy = readl(phba->HCregaddr); 2020 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 2021 readl(phba->HCregaddr); /* flush */ 2022 phba->link_flag |= LS_IGNORE_ERATT; 2023 2024 if (readl(phba->HAregaddr) & HA_ERATT) { 2025 /* Clear Chip error bit */ 2026 writel(HA_ERATT, phba->HAregaddr); 2027 phba->pport->stopped = 1; 2028 } 2029 2030 mbox = 0; 2031 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 2032 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 2033 2034 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 2035 mbox_buf = phba->MBslimaddr; 2036 writel(mbox, mbox_buf); 2037 2038 for (i = 0; 2039 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 2040 mdelay(1); 2041 2042 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 2043 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 2044 phba->pport->stopped) 2045 goto restore_hc; 2046 else 2047 goto clear_errat; 2048 } 2049 2050 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 2051 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 2052 mdelay(1); 2053 2054 clear_errat: 2055 2056 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 2057 mdelay(1); 2058 2059 if (readl(phba->HAregaddr) & HA_ERATT) { 2060 writel(HA_ERATT, phba->HAregaddr); 2061 phba->pport->stopped = 1; 2062 } 2063 2064 restore_hc: 2065 phba->link_flag &= ~LS_IGNORE_ERATT; 2066 writel(hc_copy, phba->HCregaddr); 2067 readl(phba->HCregaddr); /* flush */ 2068 } 2069 2070 int 2071 lpfc_sli_brdkill(struct lpfc_hba *phba) 2072 { 2073 struct lpfc_sli *psli; 2074 LPFC_MBOXQ_t *pmb; 2075 uint32_t status; 2076 uint32_t ha_copy; 2077 int retval; 2078 int i = 0; 2079 2080 psli = &phba->sli; 2081 2082 /* Kill HBA */ 2083 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2084 "0329 Kill HBA Data: x%x x%x\n", 2085 phba->pport->port_state, psli->sli_flag); 2086 2087 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2088 if (!pmb) 2089 return 1; 2090 2091 /* Disable the error attention */ 2092 spin_lock_irq(&phba->hbalock); 2093 status = readl(phba->HCregaddr); 2094 status &= ~HC_ERINT_ENA; 2095 writel(status, phba->HCregaddr); 2096 readl(phba->HCregaddr); /* flush */ 2097 phba->link_flag |= LS_IGNORE_ERATT; 2098 spin_unlock_irq(&phba->hbalock); 2099 2100 lpfc_kill_board(phba, pmb); 2101 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2102 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2103 2104 if (retval != MBX_SUCCESS) { 2105 if (retval != MBX_BUSY) 2106 mempool_free(pmb, phba->mbox_mem_pool); 2107 spin_lock_irq(&phba->hbalock); 2108 phba->link_flag &= ~LS_IGNORE_ERATT; 2109 spin_unlock_irq(&phba->hbalock); 2110 return 1; 2111 } 2112 2113 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2114 2115 mempool_free(pmb, phba->mbox_mem_pool); 2116 2117 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 2118 * attention every 100ms for 3 seconds. If we don't get ERATT after 2119 * 3 seconds we still set HBA_ERROR state because the status of the 2120 * board is now undefined. 
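	 * That is, poll HA_ERATT at a 100ms period for at most 30
	 * iterations (3 seconds total); condensed from the loop that
	 * follows:
	 *
	 *	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
	 *		mdelay(100);
	 *		ha_copy = readl(phba->HAregaddr);
	 *	}
	 *
	 * The routine then returns 0 only when ERATT was actually seen
	 * (the kill is confirmed) and 1 otherwise.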
2121 */ 2122 ha_copy = readl(phba->HAregaddr); 2123 2124 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 2125 mdelay(100); 2126 ha_copy = readl(phba->HAregaddr); 2127 } 2128 2129 del_timer_sync(&psli->mbox_tmo); 2130 if (ha_copy & HA_ERATT) { 2131 writel(HA_ERATT, phba->HAregaddr); 2132 phba->pport->stopped = 1; 2133 } 2134 spin_lock_irq(&phba->hbalock); 2135 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2136 phba->link_flag &= ~LS_IGNORE_ERATT; 2137 spin_unlock_irq(&phba->hbalock); 2138 2139 psli->mbox_active = NULL; 2140 lpfc_hba_down_post(phba); 2141 phba->link_state = LPFC_HBA_ERROR; 2142 2143 return ha_copy & HA_ERATT ? 0 : 1; 2144 } 2145 2146 int 2147 lpfc_sli_brdreset(struct lpfc_hba *phba) 2148 { 2149 struct lpfc_sli *psli; 2150 struct lpfc_sli_ring *pring; 2151 uint16_t cfg_value; 2152 int i; 2153 2154 psli = &phba->sli; 2155 2156 /* Reset HBA */ 2157 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2158 "0325 Reset HBA Data: x%x x%x\n", 2159 phba->pport->port_state, psli->sli_flag); 2160 2161 /* perform board reset */ 2162 phba->fc_eventTag = 0; 2163 phba->pport->fc_myDID = 0; 2164 phba->pport->fc_prevDID = 0; 2165 2166 /* Turn off parity checking and serr during the physical reset */ 2167 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 2168 pci_write_config_word(phba->pcidev, PCI_COMMAND, 2169 (cfg_value & 2170 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 2171 2172 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 2173 /* Now toggle INITFF bit in the Host Control Register */ 2174 writel(HC_INITFF, phba->HCregaddr); 2175 mdelay(1); 2176 readl(phba->HCregaddr); /* flush */ 2177 writel(0, phba->HCregaddr); 2178 readl(phba->HCregaddr); /* flush */ 2179 2180 /* Restore PCI cmd register */ 2181 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 2182 2183 /* Initialize relevant SLI info */ 2184 for (i = 0; i < psli->num_rings; i++) { 2185 pring = &psli->ring[i]; 2186 pring->flag = 0; 2187 pring->rspidx = 0; 2188 pring->next_cmdidx = 0; 2189 pring->local_getidx = 0; 2190 pring->cmdidx = 0; 2191 pring->missbufcnt = 0; 2192 } 2193 2194 phba->link_state = LPFC_WARM_START; 2195 return 0; 2196 } 2197 2198 int 2199 lpfc_sli_brdrestart(struct lpfc_hba *phba) 2200 { 2201 MAILBOX_t *mb; 2202 struct lpfc_sli *psli; 2203 uint16_t skip_post; 2204 volatile uint32_t word0; 2205 void __iomem *to_slim; 2206 2207 spin_lock_irq(&phba->hbalock); 2208 2209 psli = &phba->sli; 2210 2211 /* Restart HBA */ 2212 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2213 "0337 Restart HBA Data: x%x x%x\n", 2214 phba->pport->port_state, psli->sli_flag); 2215 2216 word0 = 0; 2217 mb = (MAILBOX_t *) &word0; 2218 mb->mbxCommand = MBX_RESTART; 2219 mb->mbxHc = 1; 2220 2221 lpfc_reset_barrier(phba); 2222 2223 to_slim = phba->MBslimaddr; 2224 writel(*(uint32_t *) mb, to_slim); 2225 readl(to_slim); /* flush */ 2226 2227 /* Only skip post after fc_ffinit is completed */ 2228 if (phba->pport->port_state) { 2229 skip_post = 1; 2230 word0 = 1; /* This is really setting up word1 */ 2231 } else { 2232 skip_post = 0; 2233 word0 = 0; /* This is really setting up word1 */ 2234 } 2235 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2236 writel(*(uint32_t *) mb, to_slim); 2237 readl(to_slim); /* flush */ 2238 2239 lpfc_sli_brdreset(phba); 2240 phba->pport->stopped = 0; 2241 phba->link_state = LPFC_INIT_START; 2242 2243 spin_unlock_irq(&phba->hbalock); 2244 2245 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 2246 psli->stats_start = get_seconds(); 2247 2248 if (skip_post) 2249 mdelay(100); 2250 else 2251 
mdelay(2000); 2252 2253 lpfc_hba_down_post(phba); 2254 2255 return 0; 2256 } 2257 2258 static int 2259 lpfc_sli_chipset_init(struct lpfc_hba *phba) 2260 { 2261 uint32_t status, i = 0; 2262 2263 /* Read the HBA Host Status Register */ 2264 status = readl(phba->HSregaddr); 2265 2266 /* Check status register to see what current state is */ 2267 i = 0; 2268 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 2269 2270 /* Check every 100ms for 5 retries, then every 500ms for 5, then 2271 * every 2.5 sec for 5, then reset board and every 2.5 sec for 2272 * 4. 2273 */ 2274 if (i++ >= 20) { 2275 /* Adapter failed to init, timeout, status reg 2276 <status> */ 2277 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2278 "0436 Adapter failed to init, " 2279 "timeout, status reg x%x, " 2280 "FW Data: A8 x%x AC x%x\n", status, 2281 readl(phba->MBslimaddr + 0xa8), 2282 readl(phba->MBslimaddr + 0xac)); 2283 phba->link_state = LPFC_HBA_ERROR; 2284 return -ETIMEDOUT; 2285 } 2286 2287 /* Check to see if any errors occurred during init */ 2288 if (status & HS_FFERM) { 2289 /* ERROR: During chipset initialization */ 2290 /* Adapter failed to init, chipset, status reg 2291 <status> */ 2292 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2293 "0437 Adapter failed to init, " 2294 "chipset, status reg x%x, " 2295 "FW Data: A8 x%x AC x%x\n", status, 2296 readl(phba->MBslimaddr + 0xa8), 2297 readl(phba->MBslimaddr + 0xac)); 2298 phba->link_state = LPFC_HBA_ERROR; 2299 return -EIO; 2300 } 2301 2302 if (i <= 5) { 2303 msleep(10); 2304 } else if (i <= 10) { 2305 msleep(500); 2306 } else { 2307 msleep(2500); 2308 } 2309 2310 if (i == 15) { 2311 /* Do post */ 2312 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2313 lpfc_sli_brdrestart(phba); 2314 } 2315 /* Read the HBA Host Status Register */ 2316 status = readl(phba->HSregaddr); 2317 } 2318 2319 /* Check to see if any errors occurred during init */ 2320 if (status & HS_FFERM) { 2321 /* ERROR: During chipset initialization */ 2322 /* Adapter failed to init, chipset, status reg <status> */ 2323 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2324 "0438 Adapter failed to init, chipset, " 2325 "status reg x%x, " 2326 "FW Data: A8 x%x AC x%x\n", status, 2327 readl(phba->MBslimaddr + 0xa8), 2328 readl(phba->MBslimaddr + 0xac)); 2329 phba->link_state = LPFC_HBA_ERROR; 2330 return -EIO; 2331 } 2332 2333 /* Clear all interrupt enable conditions */ 2334 writel(0, phba->HCregaddr); 2335 readl(phba->HCregaddr); /* flush */ 2336 2337 /* setup host attn register */ 2338 writel(0xffffffff, phba->HAregaddr); 2339 readl(phba->HAregaddr); /* flush */ 2340 return 0; 2341 } 2342 2343 int 2344 lpfc_sli_hbq_count(void) 2345 { 2346 return ARRAY_SIZE(lpfc_hbq_defs); 2347 } 2348 2349 static int 2350 lpfc_sli_hbq_entry_count(void) 2351 { 2352 int hbq_count = lpfc_sli_hbq_count(); 2353 int count = 0; 2354 int i; 2355 2356 for (i = 0; i < hbq_count; ++i) 2357 count += lpfc_hbq_defs[i]->entry_count; 2358 return count; 2359 } 2360 2361 int 2362 lpfc_sli_hbq_size(void) 2363 { 2364 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 2365 } 2366 2367 static int 2368 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 2369 { 2370 int hbq_count = lpfc_sli_hbq_count(); 2371 LPFC_MBOXQ_t *pmb; 2372 MAILBOX_t *pmbox; 2373 uint32_t hbqno; 2374 uint32_t hbq_entry_index; 2375 2376 /* Get a Mailbox buffer to setup mailbox 2377 * commands for HBA initialization 2378 */ 2379 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2380 2381 if (!pmb) 2382 return -ENOMEM; 2383 2384 pmbox = &pmb->mb; 
2385 2386 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 2387 phba->link_state = LPFC_INIT_MBX_CMDS; 2388 phba->hbq_in_use = 1; 2389 2390 hbq_entry_index = 0; 2391 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 2392 phba->hbqs[hbqno].next_hbqPutIdx = 0; 2393 phba->hbqs[hbqno].hbqPutIdx = 0; 2394 phba->hbqs[hbqno].local_hbqGetIdx = 0; 2395 phba->hbqs[hbqno].entry_count = 2396 lpfc_hbq_defs[hbqno]->entry_count; 2397 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 2398 hbq_entry_index, pmb); 2399 hbq_entry_index += phba->hbqs[hbqno].entry_count; 2400 2401 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 2402 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 2403 mbxStatus <status>, ring <num> */ 2404 2405 lpfc_printf_log(phba, KERN_ERR, 2406 LOG_SLI | LOG_VPORT, 2407 "1805 Adapter failed to init. " 2408 "Data: x%x x%x x%x\n", 2409 pmbox->mbxCommand, 2410 pmbox->mbxStatus, hbqno); 2411 2412 phba->link_state = LPFC_HBA_ERROR; 2413 mempool_free(pmb, phba->mbox_mem_pool); 2414 return ENXIO; 2415 } 2416 } 2417 phba->hbq_count = hbq_count; 2418 2419 mempool_free(pmb, phba->mbox_mem_pool); 2420 2421 /* Initially populate or replenish the HBQs */ 2422 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 2423 if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno)) 2424 return -ENOMEM; 2425 } 2426 return 0; 2427 } 2428 2429 static int 2430 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode) 2431 { 2432 LPFC_MBOXQ_t *pmb; 2433 uint32_t resetcount = 0, rc = 0, done = 0; 2434 2435 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2436 if (!pmb) { 2437 phba->link_state = LPFC_HBA_ERROR; 2438 return -ENOMEM; 2439 } 2440 2441 phba->sli_rev = sli_mode; 2442 while (resetcount < 2 && !done) { 2443 spin_lock_irq(&phba->hbalock); 2444 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2445 spin_unlock_irq(&phba->hbalock); 2446 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2447 lpfc_sli_brdrestart(phba); 2448 msleep(2500); 2449 rc = lpfc_sli_chipset_init(phba); 2450 if (rc) 2451 break; 2452 2453 spin_lock_irq(&phba->hbalock); 2454 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2455 spin_unlock_irq(&phba->hbalock); 2456 resetcount++; 2457 2458 /* Call pre CONFIG_PORT mailbox command initialization. A 2459 * value of 0 means the call was successful. Any other 2460 * nonzero value is a failure, but if ERESTART is returned, 2461 * the driver may reset the HBA and try again. 2462 */ 2463 rc = lpfc_config_port_prep(phba); 2464 if (rc == -ERESTART) { 2465 phba->link_state = LPFC_LINK_UNKNOWN; 2466 continue; 2467 } else if (rc) { 2468 break; 2469 } 2470 2471 phba->link_state = LPFC_INIT_MBX_CMDS; 2472 lpfc_config_port(phba, pmb); 2473 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 2474 if (rc != MBX_SUCCESS) { 2475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2476 "0442 Adapter failed to init, mbxCmd x%x " 2477 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 2478 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); 2479 spin_lock_irq(&phba->hbalock); 2480 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; 2481 spin_unlock_irq(&phba->hbalock); 2482 rc = -ENXIO; 2483 } else { 2484 done = 1; 2485 phba->max_vpi = (phba->max_vpi && 2486 pmb->mb.un.varCfgPort.gmv) != 0 2487 ? 
pmb->mb.un.varCfgPort.max_vpi 2488 : 0; 2489 } 2490 } 2491 2492 if (!done) { 2493 rc = -EINVAL; 2494 goto do_prep_failed; 2495 } 2496 2497 if ((pmb->mb.un.varCfgPort.sli_mode == 3) && 2498 (!pmb->mb.un.varCfgPort.cMA)) { 2499 rc = -ENXIO; 2500 } 2501 2502 do_prep_failed: 2503 mempool_free(pmb, phba->mbox_mem_pool); 2504 return rc; 2505 } 2506 2507 int 2508 lpfc_sli_hba_setup(struct lpfc_hba *phba) 2509 { 2510 uint32_t rc; 2511 int mode = 3; 2512 2513 switch (lpfc_sli_mode) { 2514 case 2: 2515 if (phba->cfg_enable_npiv) { 2516 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 2517 "1824 NPIV enabled: Override lpfc_sli_mode " 2518 "parameter (%d) to auto (0).\n", 2519 lpfc_sli_mode); 2520 break; 2521 } 2522 mode = 2; 2523 break; 2524 case 0: 2525 case 3: 2526 break; 2527 default: 2528 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 2529 "1819 Unrecognized lpfc_sli_mode " 2530 "parameter: %d.\n", lpfc_sli_mode); 2531 2532 break; 2533 } 2534 2535 rc = lpfc_do_config_port(phba, mode); 2536 if (rc && lpfc_sli_mode == 3) 2537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 2538 "1820 Unable to select SLI-3. " 2539 "Not supported by adapter.\n"); 2540 if (rc && mode != 2) 2541 rc = lpfc_do_config_port(phba, 2); 2542 if (rc) 2543 goto lpfc_sli_hba_setup_error; 2544 2545 if (phba->sli_rev == 3) { 2546 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 2547 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 2548 phba->sli3_options |= LPFC_SLI3_ENABLED; 2549 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 2550 2551 } else { 2552 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 2553 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 2554 phba->sli3_options = 0; 2555 } 2556 2557 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2558 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 2559 phba->sli_rev, phba->max_vpi); 2560 rc = lpfc_sli_ring_map(phba); 2561 2562 if (rc) 2563 goto lpfc_sli_hba_setup_error; 2564 2565 /* Init HBQs */ 2566 2567 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2568 rc = lpfc_sli_hbq_setup(phba); 2569 if (rc) 2570 goto lpfc_sli_hba_setup_error; 2571 } 2572 2573 phba->sli.sli_flag |= LPFC_PROCESS_LA; 2574 2575 rc = lpfc_config_port_post(phba); 2576 if (rc) 2577 goto lpfc_sli_hba_setup_error; 2578 2579 return rc; 2580 2581 lpfc_sli_hba_setup_error: 2582 phba->link_state = LPFC_HBA_ERROR; 2583 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 2584 "0445 Firmware initialization failed\n"); 2585 return rc; 2586 } 2587 2588 /*! lpfc_mbox_timeout 2589 * 2590 * \pre 2591 * \post 2592 * \param hba Pointer to per struct lpfc_hba structure 2593 * \param l1 Pointer to the driver's mailbox queue. 2594 * \return 2595 * void 2596 * 2597 * \b Description: 2598 * 2599 * This routine handles mailbox timeout events at timer interrupt context. 
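 *
 * The timer itself is armed only for MBX_NOWAIT issues; the arming
 * site later in this file reads:
 *
 *	mod_timer(&psli->mbox_tmo, (jiffies +
 *		(HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
 *
 * Since this routine runs at timer context it only posts
 * WORKER_MBOX_TMO and wakes the worker thread; the real recovery work
 * is done by lpfc_mbox_timeout_handler() at process context.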
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted) {
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	}
}

void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = &pmbox->mb;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");
	/*
	 * lpfc_offline calls lpfc_sli_hba_down which will clean up
	 * all outstanding mailbox commands.
	 */
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	lpfc_offline_prep(phba);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
	return;
}

int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		/* processing mbox queue from intr_handler */
		processing_queue = 1;
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. 
No vport\n", 2709 pmbox->mb.mbxCommand); 2710 dump_stack(); 2711 goto out_not_finished; 2712 } 2713 } 2714 2715 /* If the PCI channel is in offline state, do not post mbox. */ 2716 if (unlikely(pci_channel_offline(phba->pcidev))) { 2717 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2718 goto out_not_finished; 2719 } 2720 2721 psli = &phba->sli; 2722 2723 mb = &pmbox->mb; 2724 status = MBX_SUCCESS; 2725 2726 if (phba->link_state == LPFC_HBA_ERROR) { 2727 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2728 2729 /* Mbox command <mbxCommand> cannot issue */ 2730 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2731 goto out_not_finished; 2732 } 2733 2734 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 2735 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 2736 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2737 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2738 goto out_not_finished; 2739 } 2740 2741 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 2742 /* Polling for a mbox command when another one is already active 2743 * is not allowed in SLI. Also, the driver must have established 2744 * SLI2 mode to queue and process multiple mbox commands. 2745 */ 2746 2747 if (flag & MBX_POLL) { 2748 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2749 2750 /* Mbox command <mbxCommand> cannot issue */ 2751 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2752 goto out_not_finished; 2753 } 2754 2755 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 2756 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2757 /* Mbox command <mbxCommand> cannot issue */ 2758 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2759 goto out_not_finished; 2760 } 2761 2762 /* Another mailbox command is still being processed, queue this 2763 * command to be processed later. 2764 */ 2765 lpfc_mbox_put(phba, pmbox); 2766 2767 /* Mbox cmd issue - BUSY */ 2768 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2769 "(%d):0308 Mbox cmd issue - BUSY Data: " 2770 "x%x x%x x%x x%x\n", 2771 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 2772 mb->mbxCommand, phba->pport->port_state, 2773 psli->sli_flag, flag); 2774 2775 psli->slistat.mbox_busy++; 2776 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2777 2778 if (pmbox->vport) { 2779 lpfc_debugfs_disc_trc(pmbox->vport, 2780 LPFC_DISC_TRC_MBOX_VPORT, 2781 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 2782 (uint32_t)mb->mbxCommand, 2783 mb->un.varWords[0], mb->un.varWords[1]); 2784 } 2785 else { 2786 lpfc_debugfs_disc_trc(phba->pport, 2787 LPFC_DISC_TRC_MBOX, 2788 "MBOX Bsy: cmd:x%x mb:x%x x%x", 2789 (uint32_t)mb->mbxCommand, 2790 mb->un.varWords[0], mb->un.varWords[1]); 2791 } 2792 2793 return MBX_BUSY; 2794 } 2795 2796 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 2797 2798 /* If we are not polling, we MUST be in SLI2 mode */ 2799 if (flag != MBX_POLL) { 2800 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 2801 (mb->mbxCommand != MBX_KILL_BOARD)) { 2802 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2803 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2804 /* Mbox command <mbxCommand> cannot issue */ 2805 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 2806 goto out_not_finished; 2807 } 2808 /* timeout active mbox command */ 2809 mod_timer(&psli->mbox_tmo, (jiffies + 2810 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 2811 } 2812 2813 /* Mailbox cmd <cmd> issue */ 2814 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2815 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 2816 "x%x\n", 2817 pmbox->vport ? 
pmbox->vport->vpi : 0, 2818 mb->mbxCommand, phba->pport->port_state, 2819 psli->sli_flag, flag); 2820 2821 if (mb->mbxCommand != MBX_HEARTBEAT) { 2822 if (pmbox->vport) { 2823 lpfc_debugfs_disc_trc(pmbox->vport, 2824 LPFC_DISC_TRC_MBOX_VPORT, 2825 "MBOX Send vport: cmd:x%x mb:x%x x%x", 2826 (uint32_t)mb->mbxCommand, 2827 mb->un.varWords[0], mb->un.varWords[1]); 2828 } 2829 else { 2830 lpfc_debugfs_disc_trc(phba->pport, 2831 LPFC_DISC_TRC_MBOX, 2832 "MBOX Send: cmd:x%x mb:x%x x%x", 2833 (uint32_t)mb->mbxCommand, 2834 mb->un.varWords[0], mb->un.varWords[1]); 2835 } 2836 } 2837 2838 psli->slistat.mbox_cmd++; 2839 evtctr = psli->slistat.mbox_event; 2840 2841 /* next set own bit for the adapter and copy over command word */ 2842 mb->mbxOwner = OWN_CHIP; 2843 2844 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2845 /* First copy command data to host SLIM area */ 2846 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE); 2847 } else { 2848 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2849 /* copy command data into host mbox for cmpl */ 2850 lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, 2851 MAILBOX_CMD_SIZE); 2852 } 2853 2854 /* First copy mbox command data to HBA SLIM, skip past first 2855 word */ 2856 to_slim = phba->MBslimaddr + sizeof (uint32_t); 2857 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 2858 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 2859 2860 /* Next copy over first word, with mbxOwner set */ 2861 ldata = *((volatile uint32_t *)mb); 2862 to_slim = phba->MBslimaddr; 2863 writel(ldata, to_slim); 2864 readl(to_slim); /* flush */ 2865 2866 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2867 /* switch over to host mailbox */ 2868 psli->sli_flag |= LPFC_SLI2_ACTIVE; 2869 } 2870 } 2871 2872 wmb(); 2873 2874 switch (flag) { 2875 case MBX_NOWAIT: 2876 /* Set up reference to mailbox command */ 2877 psli->mbox_active = pmbox; 2878 /* Interrupt board to do it */ 2879 writel(CA_MBATT, phba->CAregaddr); 2880 readl(phba->CAregaddr); /* flush */ 2881 /* Don't wait for it to finish, just return */ 2882 break; 2883 2884 case MBX_POLL: 2885 /* Set up null reference to mailbox command */ 2886 psli->mbox_active = NULL; 2887 /* Interrupt board to do it */ 2888 writel(CA_MBATT, phba->CAregaddr); 2889 readl(phba->CAregaddr); /* flush */ 2890 2891 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2892 /* First read mbox status word */ 2893 word0 = *((volatile uint32_t *)&phba->slim2p->mbx); 2894 word0 = le32_to_cpu(word0); 2895 } else { 2896 /* First read mbox status word */ 2897 word0 = readl(phba->MBslimaddr); 2898 } 2899 2900 /* Read the HBA Host Attention Register */ 2901 ha_copy = readl(phba->HAregaddr); 2902 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 2903 mb->mbxCommand) * 2904 1000) + jiffies; 2905 i = 0; 2906 /* Wait for command to complete */ 2907 while (((word0 & OWN_CHIP) == OWN_CHIP) || 2908 (!(ha_copy & HA_MBATT) && 2909 (phba->link_state > LPFC_WARM_START))) { 2910 if (time_after(jiffies, timeout)) { 2911 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2912 spin_unlock_irqrestore(&phba->hbalock, 2913 drvr_flag); 2914 goto out_not_finished; 2915 } 2916 2917 /* Check if we took a mbox interrupt while we were 2918 polling */ 2919 if (((word0 & OWN_CHIP) != OWN_CHIP) 2920 && (evtctr != psli->slistat.mbox_event)) 2921 break; 2922 2923 if (i++ > 10) { 2924 spin_unlock_irqrestore(&phba->hbalock, 2925 drvr_flag); 2926 msleep(1); 2927 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2928 } 2929 2930 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2931 /* First copy command data */ 2932 word0 = *((volatile uint32_t *) 2933 
&phba->slim2p->mbx); 2934 word0 = le32_to_cpu(word0); 2935 if (mb->mbxCommand == MBX_CONFIG_PORT) { 2936 MAILBOX_t *slimmb; 2937 volatile uint32_t slimword0; 2938 /* Check real SLIM for any errors */ 2939 slimword0 = readl(phba->MBslimaddr); 2940 slimmb = (MAILBOX_t *) & slimword0; 2941 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 2942 && slimmb->mbxStatus) { 2943 psli->sli_flag &= 2944 ~LPFC_SLI2_ACTIVE; 2945 word0 = slimword0; 2946 } 2947 } 2948 } else { 2949 /* First copy command data */ 2950 word0 = readl(phba->MBslimaddr); 2951 } 2952 /* Read the HBA Host Attention Register */ 2953 ha_copy = readl(phba->HAregaddr); 2954 } 2955 2956 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 2957 /* copy results back to user */ 2958 lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, 2959 MAILBOX_CMD_SIZE); 2960 } else { 2961 /* First copy command data */ 2962 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 2963 MAILBOX_CMD_SIZE); 2964 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 2965 pmbox->context2) { 2966 lpfc_memcpy_from_slim((void *)pmbox->context2, 2967 phba->MBslimaddr + DMP_RSP_OFFSET, 2968 mb->un.varDmp.word_cnt); 2969 } 2970 } 2971 2972 writel(HA_MBATT, phba->HAregaddr); 2973 readl(phba->HAregaddr); /* flush */ 2974 2975 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2976 status = mb->mbxStatus; 2977 } 2978 2979 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2980 return status; 2981 2982 out_not_finished: 2983 if (processing_queue) { 2984 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 2985 lpfc_mbox_cmpl_put(phba, pmbox); 2986 } 2987 return MBX_NOT_FINISHED; 2988 } 2989 2990 /* 2991 * Caller needs to hold lock. 2992 */ 2993 static void 2994 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2995 struct lpfc_iocbq *piocb) 2996 { 2997 /* Insert the caller's iocb in the txq tail for later processing. */ 2998 list_add_tail(&piocb->list, &pring->txq); 2999 pring->txq_cnt++; 3000 } 3001 3002 static struct lpfc_iocbq * 3003 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3004 struct lpfc_iocbq **piocb) 3005 { 3006 struct lpfc_iocbq * nextiocb; 3007 3008 nextiocb = lpfc_sli_ringtx_get(phba, pring); 3009 if (!nextiocb) { 3010 nextiocb = *piocb; 3011 *piocb = NULL; 3012 } 3013 3014 return nextiocb; 3015 } 3016 3017 /* 3018 * Lockless version of lpfc_sli_issue_iocb. 3019 */ 3020 static int 3021 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3022 struct lpfc_iocbq *piocb, uint32_t flag) 3023 { 3024 struct lpfc_iocbq *nextiocb; 3025 IOCB_t *iocb; 3026 3027 if (piocb->iocb_cmpl && (!piocb->vport) && 3028 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 3029 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 3030 lpfc_printf_log(phba, KERN_ERR, 3031 LOG_SLI | LOG_VPORT, 3032 "1807 IOCB x%x failed. No vport\n", 3033 piocb->iocb.ulpCommand); 3034 dump_stack(); 3035 return IOCB_ERROR; 3036 } 3037 3038 3039 /* If the PCI channel is in offline state, do not post iocbs. */ 3040 if (unlikely(pci_channel_offline(phba->pcidev))) 3041 return IOCB_ERROR; 3042 3043 /* 3044 * We should never get an IOCB if we are in a < LINK_DOWN state 3045 */ 3046 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 3047 return IOCB_ERROR; 3048 3049 /* 3050 * Check to see if we are blocking IOCB processing because of a 3051 * outstanding event. 3052 */ 3053 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 3054 goto iocb_busy; 3055 3056 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 3057 /* 3058 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 3059 * can be issued if the link is not up. 
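		 * The switch below implements that whitelist; anything
		 * else is parked on the txq through the iocb_busy path.
		 * A caller-side sketch of the resulting contract
		 * (illustrative only; SLI_IOCB_RET_IOCB asks that a busy
		 * iocb be handed back instead of queued):
		 *
		 *	rc = lpfc_sli_issue_iocb(phba, pring, piocb,
		 *				 SLI_IOCB_RET_IOCB);
		 *	if (rc == IOCB_BUSY)
		 *		... the iocb was returned, the caller
		 *		    still owns piocb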
3060 */ 3061 switch (piocb->iocb.ulpCommand) { 3062 case CMD_QUE_RING_BUF_CN: 3063 case CMD_QUE_RING_BUF64_CN: 3064 /* 3065 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 3066 * completion, iocb_cmpl MUST be 0. 3067 */ 3068 if (piocb->iocb_cmpl) 3069 piocb->iocb_cmpl = NULL; 3070 /*FALLTHROUGH*/ 3071 case CMD_CREATE_XRI_CR: 3072 case CMD_CLOSE_XRI_CN: 3073 case CMD_CLOSE_XRI_CX: 3074 break; 3075 default: 3076 goto iocb_busy; 3077 } 3078 3079 /* 3080 * For FCP commands, we must be in a state where we can process link 3081 * attention events. 3082 */ 3083 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 3084 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 3085 goto iocb_busy; 3086 } 3087 3088 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 3089 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 3090 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 3091 3092 if (iocb) 3093 lpfc_sli_update_ring(phba, pring); 3094 else 3095 lpfc_sli_update_full_ring(phba, pring); 3096 3097 if (!piocb) 3098 return IOCB_SUCCESS; 3099 3100 goto out_busy; 3101 3102 iocb_busy: 3103 pring->stats.iocb_cmd_delay++; 3104 3105 out_busy: 3106 3107 if (!(flag & SLI_IOCB_RET_IOCB)) { 3108 __lpfc_sli_ringtx_put(phba, pring, piocb); 3109 return IOCB_SUCCESS; 3110 } 3111 3112 return IOCB_BUSY; 3113 } 3114 3115 3116 int 3117 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3118 struct lpfc_iocbq *piocb, uint32_t flag) 3119 { 3120 unsigned long iflags; 3121 int rc; 3122 3123 spin_lock_irqsave(&phba->hbalock, iflags); 3124 rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); 3125 spin_unlock_irqrestore(&phba->hbalock, iflags); 3126 3127 return rc; 3128 } 3129 3130 static int 3131 lpfc_extra_ring_setup( struct lpfc_hba *phba) 3132 { 3133 struct lpfc_sli *psli; 3134 struct lpfc_sli_ring *pring; 3135 3136 psli = &phba->sli; 3137 3138 /* Adjust cmd/rsp ring iocb entries more evenly */ 3139 3140 /* Take some away from the FCP ring */ 3141 pring = &psli->ring[psli->fcp_ring]; 3142 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3143 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3144 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3145 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3146 3147 /* and give them to the extra ring */ 3148 pring = &psli->ring[psli->extra_ring]; 3149 3150 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3151 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3152 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3153 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3154 3155 /* Setup default profile for this ring */ 3156 pring->iotag_max = 4096; 3157 pring->num_mask = 1; 3158 pring->prt[0].profile = 0; /* Mask 0 */ 3159 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 3160 pring->prt[0].type = phba->cfg_multi_ring_type; 3161 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 3162 return 0; 3163 } 3164 3165 static void 3166 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 3167 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 3168 { 3169 IOCB_t *icmd; 3170 uint16_t evt_code; 3171 uint16_t temp; 3172 struct temp_event temp_event_data; 3173 struct Scsi_Host *shost; 3174 3175 icmd = &iocbq->iocb; 3176 evt_code = icmd->un.asyncstat.evt_code; 3177 temp = icmd->ulpContext; 3178 3179 if ((evt_code != ASYNC_TEMP_WARN) && 3180 (evt_code != ASYNC_TEMP_SAFE)) { 3181 lpfc_printf_log(phba, 3182 KERN_ERR, 3183 LOG_SLI, 3184 "0346 Ring %d handler: unexpected ASYNC_STATUS" 3185 " evt_code 0x%x\n", 3186 pring->ringno, 3187 icmd->un.asyncstat.evt_code); 3188 return; 
3189 } 3190 temp_event_data.data = (uint32_t)temp; 3191 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 3192 if (evt_code == ASYNC_TEMP_WARN) { 3193 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 3194 lpfc_printf_log(phba, 3195 KERN_ERR, 3196 LOG_TEMP, 3197 "0347 Adapter is very hot, please take " 3198 "corrective action. temperature : %d Celsius\n", 3199 temp); 3200 } 3201 if (evt_code == ASYNC_TEMP_SAFE) { 3202 temp_event_data.event_code = LPFC_NORMAL_TEMP; 3203 lpfc_printf_log(phba, 3204 KERN_ERR, 3205 LOG_TEMP, 3206 "0340 Adapter temperature is OK now. " 3207 "temperature : %d Celsius\n", 3208 temp); 3209 } 3210 3211 /* Send temperature change event to applications */ 3212 shost = lpfc_shost_from_vport(phba->pport); 3213 fc_host_post_vendor_event(shost, fc_get_event_number(), 3214 sizeof(temp_event_data), (char *) &temp_event_data, 3215 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); 3216 3217 } 3218 3219 3220 int 3221 lpfc_sli_setup(struct lpfc_hba *phba) 3222 { 3223 int i, totiocbsize = 0; 3224 struct lpfc_sli *psli = &phba->sli; 3225 struct lpfc_sli_ring *pring; 3226 3227 psli->num_rings = MAX_CONFIGURED_RINGS; 3228 psli->sli_flag = 0; 3229 psli->fcp_ring = LPFC_FCP_RING; 3230 psli->next_ring = LPFC_FCP_NEXT_RING; 3231 psli->extra_ring = LPFC_EXTRA_RING; 3232 3233 psli->iocbq_lookup = NULL; 3234 psli->iocbq_lookup_len = 0; 3235 psli->last_iotag = 0; 3236 3237 for (i = 0; i < psli->num_rings; i++) { 3238 pring = &psli->ring[i]; 3239 switch (i) { 3240 case LPFC_FCP_RING: /* ring 0 - FCP */ 3241 /* numCiocb and numRiocb are used in config_port */ 3242 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 3243 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 3244 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 3245 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 3246 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 3247 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 3248 pring->sizeCiocb = (phba->sli_rev == 3) ? 3249 SLI3_IOCB_CMD_SIZE : 3250 SLI2_IOCB_CMD_SIZE; 3251 pring->sizeRiocb = (phba->sli_rev == 3) ? 3252 SLI3_IOCB_RSP_SIZE : 3253 SLI2_IOCB_RSP_SIZE; 3254 pring->iotag_ctr = 0; 3255 pring->iotag_max = 3256 (phba->cfg_hba_queue_depth * 2); 3257 pring->fast_iotag = pring->iotag_max; 3258 pring->num_mask = 0; 3259 break; 3260 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 3261 /* numCiocb and numRiocb are used in config_port */ 3262 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 3263 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 3264 pring->sizeCiocb = (phba->sli_rev == 3) ? 3265 SLI3_IOCB_CMD_SIZE : 3266 SLI2_IOCB_CMD_SIZE; 3267 pring->sizeRiocb = (phba->sli_rev == 3) ? 3268 SLI3_IOCB_RSP_SIZE : 3269 SLI2_IOCB_RSP_SIZE; 3270 pring->iotag_max = phba->cfg_hba_queue_depth; 3271 pring->num_mask = 0; 3272 break; 3273 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 3274 /* numCiocb and numRiocb are used in config_port */ 3275 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 3276 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 3277 pring->sizeCiocb = (phba->sli_rev == 3) ? 3278 SLI3_IOCB_CMD_SIZE : 3279 SLI2_IOCB_CMD_SIZE; 3280 pring->sizeRiocb = (phba->sli_rev == 3) ? 
3281 SLI3_IOCB_RSP_SIZE : 3282 SLI2_IOCB_RSP_SIZE; 3283 pring->fast_iotag = 0; 3284 pring->iotag_ctr = 0; 3285 pring->iotag_max = 4096; 3286 pring->lpfc_sli_rcv_async_status = 3287 lpfc_sli_async_event_handler; 3288 pring->num_mask = 4; 3289 pring->prt[0].profile = 0; /* Mask 0 */ 3290 pring->prt[0].rctl = FC_ELS_REQ; 3291 pring->prt[0].type = FC_ELS_DATA; 3292 pring->prt[0].lpfc_sli_rcv_unsol_event = 3293 lpfc_els_unsol_event; 3294 pring->prt[1].profile = 0; /* Mask 1 */ 3295 pring->prt[1].rctl = FC_ELS_RSP; 3296 pring->prt[1].type = FC_ELS_DATA; 3297 pring->prt[1].lpfc_sli_rcv_unsol_event = 3298 lpfc_els_unsol_event; 3299 pring->prt[2].profile = 0; /* Mask 2 */ 3300 /* NameServer Inquiry */ 3301 pring->prt[2].rctl = FC_UNSOL_CTL; 3302 /* NameServer */ 3303 pring->prt[2].type = FC_COMMON_TRANSPORT_ULP; 3304 pring->prt[2].lpfc_sli_rcv_unsol_event = 3305 lpfc_ct_unsol_event; 3306 pring->prt[3].profile = 0; /* Mask 3 */ 3307 /* NameServer response */ 3308 pring->prt[3].rctl = FC_SOL_CTL; 3309 /* NameServer */ 3310 pring->prt[3].type = FC_COMMON_TRANSPORT_ULP; 3311 pring->prt[3].lpfc_sli_rcv_unsol_event = 3312 lpfc_ct_unsol_event; 3313 break; 3314 } 3315 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 3316 (pring->numRiocb * pring->sizeRiocb); 3317 } 3318 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 3319 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 3320 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 3321 "SLI2 SLIM Data: x%x x%lx\n", 3322 phba->brd_no, totiocbsize, 3323 (unsigned long) MAX_SLIM_IOCB_SIZE); 3324 } 3325 if (phba->cfg_multi_ring_support == 2) 3326 lpfc_extra_ring_setup(phba); 3327 3328 return 0; 3329 } 3330 3331 int 3332 lpfc_sli_queue_setup(struct lpfc_hba *phba) 3333 { 3334 struct lpfc_sli *psli; 3335 struct lpfc_sli_ring *pring; 3336 int i; 3337 3338 psli = &phba->sli; 3339 spin_lock_irq(&phba->hbalock); 3340 INIT_LIST_HEAD(&psli->mboxq); 3341 INIT_LIST_HEAD(&psli->mboxq_cmpl); 3342 /* Initialize list headers for txq and txcmplq as double linked lists */ 3343 for (i = 0; i < psli->num_rings; i++) { 3344 pring = &psli->ring[i]; 3345 pring->ringno = i; 3346 pring->next_cmdidx = 0; 3347 pring->local_getidx = 0; 3348 pring->cmdidx = 0; 3349 INIT_LIST_HEAD(&pring->txq); 3350 INIT_LIST_HEAD(&pring->txcmplq); 3351 INIT_LIST_HEAD(&pring->iocb_continueq); 3352 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 3353 INIT_LIST_HEAD(&pring->postbufq); 3354 } 3355 spin_unlock_irq(&phba->hbalock); 3356 return 1; 3357 } 3358 3359 int 3360 lpfc_sli_host_down(struct lpfc_vport *vport) 3361 { 3362 LIST_HEAD(completions); 3363 struct lpfc_hba *phba = vport->phba; 3364 struct lpfc_sli *psli = &phba->sli; 3365 struct lpfc_sli_ring *pring; 3366 struct lpfc_iocbq *iocb, *next_iocb; 3367 int i; 3368 unsigned long flags = 0; 3369 uint16_t prev_pring_flag; 3370 3371 lpfc_cleanup_discovery_resources(vport); 3372 3373 spin_lock_irqsave(&phba->hbalock, flags); 3374 for (i = 0; i < psli->num_rings; i++) { 3375 pring = &psli->ring[i]; 3376 prev_pring_flag = pring->flag; 3377 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3378 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3379 /* 3380 * Error everything on the txq since these iocbs have not been 3381 * given to the FW yet. 
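		 * Unlike lpfc_sli_hba_down() below, which splices whole
		 * rings, this per-vport teardown must filter by owner,
		 * so the loop that follows only moves entries belonging
		 * to this vport:
		 *
		 *	if (iocb->vport != vport)
		 *		continue;
		 *	list_move_tail(&iocb->list, &completions);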
3382 */ 3383 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 3384 if (iocb->vport != vport) 3385 continue; 3386 list_move_tail(&iocb->list, &completions); 3387 pring->txq_cnt--; 3388 } 3389 3390 /* Next issue ABTS for everything on the txcmplq */ 3391 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 3392 list) { 3393 if (iocb->vport != vport) 3394 continue; 3395 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3396 } 3397 3398 pring->flag = prev_pring_flag; 3399 } 3400 3401 spin_unlock_irqrestore(&phba->hbalock, flags); 3402 3403 while (!list_empty(&completions)) { 3404 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 3405 3406 if (!iocb->iocb_cmpl) 3407 lpfc_sli_release_iocbq(phba, iocb); 3408 else { 3409 iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3410 iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN; 3411 (iocb->iocb_cmpl) (phba, iocb, iocb); 3412 } 3413 } 3414 return 1; 3415 } 3416 3417 int 3418 lpfc_sli_hba_down(struct lpfc_hba *phba) 3419 { 3420 LIST_HEAD(completions); 3421 struct lpfc_sli *psli = &phba->sli; 3422 struct lpfc_sli_ring *pring; 3423 struct lpfc_dmabuf *buf_ptr; 3424 LPFC_MBOXQ_t *pmb; 3425 struct lpfc_iocbq *iocb; 3426 IOCB_t *cmd = NULL; 3427 int i; 3428 unsigned long flags = 0; 3429 3430 lpfc_hba_down_prep(phba); 3431 3432 lpfc_fabric_abort_hba(phba); 3433 3434 spin_lock_irqsave(&phba->hbalock, flags); 3435 for (i = 0; i < psli->num_rings; i++) { 3436 pring = &psli->ring[i]; 3437 if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */ 3438 pring->flag |= LPFC_DEFERRED_RING_EVENT; 3439 3440 /* 3441 * Error everything on the txq since these iocbs have not been 3442 * given to the FW yet. 3443 */ 3444 list_splice_init(&pring->txq, &completions); 3445 pring->txq_cnt = 0; 3446 3447 } 3448 spin_unlock_irqrestore(&phba->hbalock, flags); 3449 3450 while (!list_empty(&completions)) { 3451 list_remove_head(&completions, iocb, struct lpfc_iocbq, list); 3452 cmd = &iocb->iocb; 3453 3454 if (!iocb->iocb_cmpl) 3455 lpfc_sli_release_iocbq(phba, iocb); 3456 else { 3457 cmd->ulpStatus = IOSTAT_LOCAL_REJECT; 3458 cmd->un.ulpWord[4] = IOERR_SLI_DOWN; 3459 (iocb->iocb_cmpl) (phba, iocb, iocb); 3460 } 3461 } 3462 3463 spin_lock_irqsave(&phba->hbalock, flags); 3464 list_splice_init(&phba->elsbuf, &completions); 3465 phba->elsbuf_cnt = 0; 3466 phba->elsbuf_prev_cnt = 0; 3467 spin_unlock_irqrestore(&phba->hbalock, flags); 3468 3469 while (!list_empty(&completions)) { 3470 list_remove_head(&completions, buf_ptr, 3471 struct lpfc_dmabuf, list); 3472 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 3473 kfree(buf_ptr); 3474 } 3475 3476 /* Return any active mbox cmds */ 3477 del_timer_sync(&psli->mbox_tmo); 3478 spin_lock_irqsave(&phba->hbalock, flags); 3479 3480 spin_lock(&phba->pport->work_port_lock); 3481 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 3482 spin_unlock(&phba->pport->work_port_lock); 3483 3484 /* Return any pending or completed mbox cmds */ 3485 list_splice_init(&phba->sli.mboxq, &completions); 3486 if (psli->mbox_active) { 3487 list_add_tail(&psli->mbox_active->list, &completions); 3488 psli->mbox_active = NULL; 3489 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3490 } 3491 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 3492 spin_unlock_irqrestore(&phba->hbalock, flags); 3493 3494 while (!list_empty(&completions)) { 3495 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 3496 pmb->mb.mbxStatus = MBX_NOT_FINISHED; 3497 if (pmb->mbox_cmpl) 3498 pmb->mbox_cmpl(phba,pmb); 3499 } 3500 return 1; 3501 } 3502 3503 void 3504 
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * one assigned by the HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	abort_iocb = NULL;

	if (irsp->ulpStatus) {
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
			abort_iocb = phba->sli.iocbq_lookup[abort_iotag];

		lpfc_printf_log(phba, 
KERN_INFO, LOG_ELS | LOG_SLI, 3622 "0327 Cannot abort els iocb %p " 3623 "with tag %x context %x, abort status %x, " 3624 "abort code %x\n", 3625 abort_iocb, abort_iotag, abort_context, 3626 irsp->ulpStatus, irsp->un.ulpWord[4]); 3627 3628 /* 3629 * If the iocb is not found in Firmware queue the iocb 3630 * might have completed already. Do not free it again. 3631 */ 3632 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 3633 spin_unlock_irq(&phba->hbalock); 3634 lpfc_sli_release_iocbq(phba, cmdiocb); 3635 return; 3636 } 3637 /* 3638 * make sure we have the right iocbq before taking it 3639 * off the txcmplq and try to call completion routine. 3640 */ 3641 if (!abort_iocb || 3642 abort_iocb->iocb.ulpContext != abort_context || 3643 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 3644 spin_unlock_irq(&phba->hbalock); 3645 else { 3646 list_del_init(&abort_iocb->list); 3647 pring->txcmplq_cnt--; 3648 spin_unlock_irq(&phba->hbalock); 3649 3650 /* Firmware could still be in progress of DMAing 3651 * payload, so don't free data buffer till after 3652 * a hbeat. 3653 */ 3654 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 3655 3656 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3657 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 3658 abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED; 3659 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 3660 } 3661 } 3662 3663 lpfc_sli_release_iocbq(phba, cmdiocb); 3664 return; 3665 } 3666 3667 static void 3668 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3669 struct lpfc_iocbq *rspiocb) 3670 { 3671 IOCB_t *irsp = &rspiocb->iocb; 3672 3673 /* ELS cmd tag <ulpIoTag> completes */ 3674 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 3675 "0133 Ignoring ELS cmd tag x%x completion Data: " 3676 "x%x x%x x%x\n", 3677 irsp->ulpIoTag, irsp->ulpStatus, 3678 irsp->un.ulpWord[4], irsp->ulpTimeout); 3679 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 3680 lpfc_ct_free_iocb(phba, cmdiocb); 3681 else 3682 lpfc_els_free_iocb(phba, cmdiocb); 3683 return; 3684 } 3685 3686 int 3687 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3688 struct lpfc_iocbq *cmdiocb) 3689 { 3690 struct lpfc_vport *vport = cmdiocb->vport; 3691 struct lpfc_iocbq *abtsiocbp; 3692 IOCB_t *icmd = NULL; 3693 IOCB_t *iabt = NULL; 3694 int retval = IOCB_ERROR; 3695 3696 /* 3697 * There are certain command types we don't want to abort. And we 3698 * don't want to abort commands that are already in the process of 3699 * being aborted. 3700 */ 3701 icmd = &cmdiocb->iocb; 3702 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 3703 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 3704 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 3705 return 0; 3706 3707 /* If we're unloading, don't abort iocb on the ELS ring, but change the 3708 * callback so that nothing happens when it finishes. 3709 */ 3710 if ((vport->load_flag & FC_UNLOADING) && 3711 (pring->ringno == LPFC_ELS_RING)) { 3712 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 3713 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 3714 else 3715 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 3716 goto abort_iotag_exit; 3717 } 3718 3719 /* issue ABTS for this IOCB based on iotag */ 3720 abtsiocbp = __lpfc_sli_get_iocbq(phba); 3721 if (abtsiocbp == NULL) 3722 return 0; 3723 3724 /* This signals the response to set the correct status 3725 * before calling the completion handler. 
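	 * As the note at abort_iotag_exit below states, this routine no
	 * longer removes the aborted command from the txcmplq or calls
	 * its completion on failure, so an illustrative caller-side
	 * check is:
	 *
	 *	retval = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	 *	if (retval == IOCB_ERROR)
	 *		... the ABTS was not issued; cmdiocb is still
	 *		    outstanding and remains the caller's problem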
3726 */ 3727 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 3728 3729 iabt = &abtsiocbp->iocb; 3730 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 3731 iabt->un.acxri.abortContextTag = icmd->ulpContext; 3732 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 3733 iabt->ulpLe = 1; 3734 iabt->ulpClass = icmd->ulpClass; 3735 3736 if (phba->link_state >= LPFC_LINK_UP) 3737 iabt->ulpCommand = CMD_ABORT_XRI_CN; 3738 else 3739 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 3740 3741 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 3742 3743 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 3744 "0339 Abort xri x%x, original iotag x%x, " 3745 "abort cmd iotag x%x\n", 3746 iabt->un.acxri.abortContextTag, 3747 iabt->un.acxri.abortIoTag, abtsiocbp->iotag); 3748 retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); 3749 3750 abort_iotag_exit: 3751 /* 3752 * Caller to this routine should check for IOCB_ERROR 3753 * and handle it properly. This routine no longer removes 3754 * iocb off txcmplq and call compl in case of IOCB_ERROR. 3755 */ 3756 return retval; 3757 } 3758 3759 static int 3760 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 3761 uint16_t tgt_id, uint64_t lun_id, 3762 lpfc_ctx_cmd ctx_cmd) 3763 { 3764 struct lpfc_scsi_buf *lpfc_cmd; 3765 struct scsi_cmnd *cmnd; 3766 int rc = 1; 3767 3768 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 3769 return rc; 3770 3771 if (iocbq->vport != vport) 3772 return rc; 3773 3774 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 3775 cmnd = lpfc_cmd->pCmd; 3776 3777 if (cmnd == NULL) 3778 return rc; 3779 3780 switch (ctx_cmd) { 3781 case LPFC_CTX_LUN: 3782 if ((cmnd->device->id == tgt_id) && 3783 (cmnd->device->lun == lun_id)) 3784 rc = 0; 3785 break; 3786 case LPFC_CTX_TGT: 3787 if (cmnd->device->id == tgt_id) 3788 rc = 0; 3789 break; 3790 case LPFC_CTX_HOST: 3791 rc = 0; 3792 break; 3793 default: 3794 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 3795 __FUNCTION__, ctx_cmd); 3796 break; 3797 } 3798 3799 return rc; 3800 } 3801 3802 int 3803 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 3804 lpfc_ctx_cmd ctx_cmd) 3805 { 3806 struct lpfc_hba *phba = vport->phba; 3807 struct lpfc_iocbq *iocbq; 3808 int sum, i; 3809 3810 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 3811 iocbq = phba->sli.iocbq_lookup[i]; 3812 3813 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 3814 ctx_cmd) == 0) 3815 sum++; 3816 } 3817 3818 return sum; 3819 } 3820 3821 void 3822 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 3823 struct lpfc_iocbq *rspiocb) 3824 { 3825 lpfc_sli_release_iocbq(phba, cmdiocb); 3826 return; 3827 } 3828 3829 int 3830 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 3831 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 3832 { 3833 struct lpfc_hba *phba = vport->phba; 3834 struct lpfc_iocbq *iocbq; 3835 struct lpfc_iocbq *abtsiocb; 3836 IOCB_t *cmd = NULL; 3837 int errcnt = 0, ret_val = 0; 3838 int i; 3839 3840 for (i = 1; i <= phba->sli.last_iotag; i++) { 3841 iocbq = phba->sli.iocbq_lookup[i]; 3842 3843 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 3844 abort_cmd) != 0) 3845 continue; 3846 3847 /* issue ABTS for this IOCB based on iotag */ 3848 abtsiocb = lpfc_sli_get_iocbq(phba); 3849 if (abtsiocb == NULL) { 3850 errcnt++; 3851 continue; 3852 } 3853 3854 cmd = &iocbq->iocb; 3855 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 3856 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 3857 
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = phba->pport;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}

static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/*
 * Issue the caller's iocb and wait for its completion, but no longer than the
 * caller's timeout. Note that LPFC_IO_WAKE is cleared from iocb_flag before
 * the lpfc_sli_issue_iocb call since the wake routine sets it and by
 * definition this is a wait function.
 */

int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;

	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or it's an error.
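	 * Typical synchronous usage, with the completion returned
	 * through prspiocbq (an illustrative sketch; allocation failure
	 * handling and iocbq cleanup are elided):
	 *
	 *	rspiocbq = lpfc_sli_get_iocbq(phba);
	 *	status = lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq,
	 *					  rspiocbq, timeout);
	 *	if (status == IOCB_SUCCESS)
	 *		... rspiocbq->iocb now holds the response IOCB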
3923 */ 3924 if (prspiocbq) { 3925 if (piocb->context2) 3926 return IOCB_ERROR; 3927 piocb->context2 = prspiocbq; 3928 } 3929 3930 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 3931 piocb->context_un.wait_queue = &done_q; 3932 piocb->iocb_flag &= ~LPFC_IO_WAKE; 3933 3934 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3935 creg_val = readl(phba->HCregaddr); 3936 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 3937 writel(creg_val, phba->HCregaddr); 3938 readl(phba->HCregaddr); /* flush */ 3939 } 3940 3941 retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); 3942 if (retval == IOCB_SUCCESS) { 3943 timeout_req = timeout * HZ; 3944 timeleft = wait_event_timeout(done_q, 3945 piocb->iocb_flag & LPFC_IO_WAKE, 3946 timeout_req); 3947 3948 if (piocb->iocb_flag & LPFC_IO_WAKE) { 3949 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3950 "0331 IOCB wake signaled\n"); 3951 } else if (timeleft == 0) { 3952 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3953 "0338 IOCB wait timeout error - no " 3954 "wake response Data x%x\n", timeout); 3955 retval = IOCB_TIMEDOUT; 3956 } else { 3957 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3958 "0330 IOCB wake NOT set, " 3959 "Data x%x x%lx\n", 3960 timeout, (timeleft / jiffies)); 3961 retval = IOCB_TIMEDOUT; 3962 } 3963 } else { 3964 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3965 ":0332 IOCB wait issue failed, Data x%x\n", 3966 retval); 3967 retval = IOCB_ERROR; 3968 } 3969 3970 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 3971 creg_val = readl(phba->HCregaddr); 3972 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 3973 writel(creg_val, phba->HCregaddr); 3974 readl(phba->HCregaddr); /* flush */ 3975 } 3976 3977 if (prspiocbq) 3978 piocb->context2 = NULL; 3979 3980 piocb->context_un.wait_queue = NULL; 3981 piocb->iocb_cmpl = NULL; 3982 return retval; 3983 } 3984 3985 int 3986 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 3987 uint32_t timeout) 3988 { 3989 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 3990 int retval; 3991 unsigned long flag; 3992 3993 /* The caller must leave context1 empty. */ 3994 if (pmboxq->context1) 3995 return MBX_NOT_FINISHED; 3996 3997 /* setup wake call as IOCB callback */ 3998 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 3999 /* setup context field to pass wait_queue pointer to wake function */ 4000 pmboxq->context1 = &done_q; 4001 4002 /* now issue the command */ 4003 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 4004 4005 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 4006 wait_event_interruptible_timeout(done_q, 4007 pmboxq->mbox_flag & LPFC_MBX_WAKE, 4008 timeout * HZ); 4009 4010 spin_lock_irqsave(&phba->hbalock, flag); 4011 pmboxq->context1 = NULL; 4012 /* 4013 * if LPFC_MBX_WAKE flag is set the mailbox is completed 4014 * else do not free the resources. 4015 */ 4016 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) 4017 retval = MBX_SUCCESS; 4018 else { 4019 retval = MBX_TIMEOUT; 4020 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4021 } 4022 spin_unlock_irqrestore(&phba->hbalock, flag); 4023 } 4024 4025 return retval; 4026 } 4027 4028 int 4029 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) 4030 { 4031 struct lpfc_vport *vport = phba->pport; 4032 int i = 0; 4033 uint32_t ha_copy; 4034 4035 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { 4036 if (i++ > LPFC_MBOX_TMO * 1000) 4037 return 1; 4038 4039 /* 4040 * Call lpfc_sli_handle_mb_event only if a mailbox cmd 4041 * did finish. This way we won't get the misleading 4042 * "Stray Mailbox Interrupt" message. 
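		 * Hence HA_MBATT is claimed from work_ha under hbalock
		 * first, and the retry counter is rearmed only when a
		 * mailbox event was genuinely consumed; condensed:
		 *
		 *	if (ha_copy & HA_MBATT)
		 *		if (lpfc_sli_handle_mb_event(phba) == 0)
		 *			i = 0;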
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	int i = 0;
	uint32_t ha_copy;

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
		if (i++ > LPFC_MBOX_TMO * 1000)
			return 1;

		/*
		 * Call lpfc_sli_handle_mb_event only if a mailbox command
		 * did finish.  This way we won't get the misleading
		 * "Stray Mailbox Interrupt" message.
		 */
		spin_lock_irq(&phba->hbalock);
		ha_copy = phba->work_ha;
		phba->work_ha &= ~HA_MBATT;
		spin_unlock_irq(&phba->hbalock);

		if (ha_copy & HA_MBATT)
			if (lpfc_sli_handle_mb_event(phba) == 0)
				i = 0;

		msleep(1);
	}

	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}
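/*
 * Top-level interrupt service routine for the HBA.  The handler reads
 * the host attention (HA) register to identify the interrupt source,
 * defers mailbox, link/error attention and slow (ELS) ring work to the
 * worker thread, and services fast-path FCP ring events (plus the extra
 * ring, when configured) directly in interrupt context.
 */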
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* If the pci channel is offline, ignore all the interrupts. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Ask the HBA whether it is interrupting.  If not, don't claim
	 * the interrupt.
	 */

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read the host attention register to determine the interrupt
	 * source.  Clear the attention sources, except Error Attention
	 * (to preserve status) and Link Attention.
	 */
	spin_lock(&phba->hbalock);
	ha_copy = readl(phba->HAregaddr);
	/* If somebody is waiting to handle an eratt, don't process it
	 * here.  The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		ha_copy &= ~HA_ERATT;
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	if (unlikely(!ha_copy))
		return IRQ_NONE;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA is done.
				 */
				spin_lock(&phba->hbalock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(&phba->hbalock);
			} else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
			/*
			 * Turn off Slow Ring interrupts; LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				  (HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock(&phba->hbalock);
				control = readl(phba->HCregaddr);

				lpfc_debugfs_slow_ring_trc(phba,
					"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
					control, status,
					(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						phba->work_wait));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				} else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						phba->work_wait));
				}
				spin_unlock(&phba->hbalock);
			}
		}

		if (work_ha_copy & HA_ERATT) {
			/*
			 * There was a link/board error.  Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear the chip Error Attention bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
			phba->pport->stopped = 1;
		}

		spin_lock(&phba->hbalock);
		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->mb;
			mbox = &phba->slim2p->mbx;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock(&phba->hbalock);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* Clear the mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock(&phba->hbalock);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							      MAILBOX_CMD_SIZE);
				}
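				/*
				 * If this REG_LOGIN was issued only to get a
				 * default RPI, immediately recycle the same
				 * mailbox buffer to UNREG_LOGIN that RPI
				 * instead of deferring to the worker thread.
				 */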
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of the default
						 * RPI was successful; now
						 * get rid of the RPI using
						 * the same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0306 rc should have "
							"been MBX_BUSY\n");
						goto send_current_mbox;
					}
				}
				spin_lock(&phba->pport->work_port_lock);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock(&phba->pport->work_port_lock);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock(&phba->hbalock);
		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process the next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock(&phba->hbalock);
		phba->work_ha |= work_ha_copy;
		if (phba->work_wait)
			lpfc_worker_wake_up(phba);
		spin_unlock(&phba->hbalock);
	}

	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on the FCP ring.  Take the optimized path for
	 * FCP IO.  Any other IO is slow path and is handled by the worker
	 * thread.
	 */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on the extra ring.  Take the optimized
		 * path for extra ring IO.  Any other IO is slow path and is
		 * handled by the worker thread.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;

} /* lpfc_intr_handler */