/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"

/*
 * Define macro to log: Mailbox command x%x cannot issue Data.
 * This allows multiple uses of message 0311 (lpfc_msgBlk0311)
 * without perturbing the log message utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
	lpfc_printf_log(phba, \
			KERN_INFO, \
			LOG_MBOX | LOG_SLI, \
			"(%d):0311 Mailbox command x%x cannot " \
			"issue Data: x%x x%x x%x\n", \
			pmbox->vport ? pmbox->vport->vpi : 0, \
			pmbox->mb.mbxCommand, \
			phba->pport->port_state, \
			psli->sli_flag, \
			flag)
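
/*
 * A minimal usage sketch for the macro above (hypothetical call site;
 * the real callers live in the mailbox issue path, e.g.
 * lpfc_sli_issue_mbox()):
 *
 *	if (unlikely(phba->link_state == LPFC_HBA_ERROR)) {
 *		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
 *		return MBX_NOT_FINISHED;
 *	}
 */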

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/**
 * lpfc_cmd_iocb: Get next command iocb entry in the ring.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb: Get next response iocb entry in the ring.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}

/**
 * lpfc_sli_get_iocbq: Allocates an iocb object from iocb pool.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object; else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq: Release iocb to the iocb pool.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release the driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * lpfc_sli_release_iocbq: Release iocb to the iocb pool.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
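
/*
 * Allocation and release pair up as sketched below (hypothetical
 * caller, no lock held; IOCB_ERROR is one possible failure code a
 * caller might return):
 *
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *	if (!iocbq)
 *		return IOCB_ERROR;
 *	... build iocbq->iocb, optionally set iocbq->iocb_cmpl ...
 *	lpfc_sli_release_iocbq(phba, iocbq);
 */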

/**
 * lpfc_sli_iocb_cmd_type: Get the iocb type.
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by the ring event handler function to get the iocb
 * type. This function translates the iocb command to an iocb command type
 * used to decide the final disposition of each completed IOCB.
 * The function returns
 *	LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 *	LPFC_SOL_IOCB     if it is a solicited iocb completion
 *	LPFC_ABORT_IOCB   if it is an abort iocb
 *	LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
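
/*
 * The ring event handlers below use this classification roughly as
 * follows (sketch; see lpfc_sli_poll_fcp_ring() further down for the
 * real loop):
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	if (type == LPFC_SOL_IOCB || type == LPFC_ABORT_IOCB)
 *		... look up and complete the originating command iocb ...
 */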

/**
 * lpfc_sli_ring_map: Issue config_ring mbox for all rings.
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful, else it returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put: Adds new iocb to the txcmplq.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to the txcmplq of the given ring. This function always returns
 * 0. If this function is called for the ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts the els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get: Get first element of the txq.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}
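
/*
 * The txq holds iocbs the driver could not post immediately; it is
 * drained once ring space opens up, roughly the way
 * lpfc_sli_resume_iocb() does further down:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 */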

/**
 * lpfc_sli_next_iocb_slot: Get next iocb slot in the ring.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock, the
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag: Get an iotag for the iocb.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
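
/*
 * Common-path sketch: while iocbq_lookup[] still has room, the new
 * iotag is simply last_iotag + 1; only on exhaustion is the lookup
 * array grown by LPFC_IOCBQ_LOOKUP_INCREMENT entries. A hypothetical
 * caller checks for the zero (invalid) tag:
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		... no iotag available, fail the submission ...
 */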

/**
 * lpfc_sli_submit_iocb: Submit an iocb to the firmware.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to the ring iocb slot
 * and updates the ring pointers. It adds the new iocb to the txcmplq if
 * there is a completion callback for this iocb; else the function will
 * free the iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring: Update the chip attention register.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
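
/*
 * Each ring owns a 4-bit field in the Chip Attention register, hence
 * the (ringno * 4) shift above; e.g. for ring 2 the attention and
 * interrupt-request bits land at bits 8-11 (sketch):
 *
 *	writel((CA_R0ATT | CA_R0CE_REQ) << (2 * 4), phba->CAregaddr);
 */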

/**
 * lpfc_sli_update_ring: Update chip attention register.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform the HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb: Process iocbs in the txq.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/**
 * lpfc_sli_next_hbq_slot: Get next hbq entry for the HBQ.
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ, it will return a pointer to the next
 * available HBQ entry; else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all: Free all the hbq buffers.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in-flight */
	list_for_each_entry_safe(dmabuf, next_dmabuf,
			&phba->hbqbuf_in_list, list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware: Post the hbq buffer to firmware.
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * a pointer to the hbq entry if it successfully posts the buffer;
 * else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
	}
	return hbqe;
}

/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 20,
	.add_count = 5,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
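
/*
 * HBQ buffer tags encode the queue number in the upper 16 bits and a
 * per-queue sequence number in the lower 16, so the owning queue can
 * be recovered from the tag alone (see lpfc_sli_hbqbuf_fill_hbqs()
 * below and the tag >> 16 uses above):
 *
 *	tag   = (hbqno << 16) | buffer_count;
 *	hbqno = tag >> 16;
 */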

/**
 * lpfc_sli_hbqbuf_fill_hbqs: Post more hbq buffers to HBQ.
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				   (hbqno << 16));
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs: Post more HBQ buffers to firmware.
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count));
}

/**
 * lpfc_sli_hbqbuf_init_hbqs: Post initial buffers to the HBQ.
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from the SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count));
}
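
/*
 * A hypothetical top-up of the ELS HBQ therefore posts add_count
 * (5, per lpfc_els_hbq above) more buffers, capped at the HBQ's
 * entry_count:
 *
 *	posted = lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 */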

/**
 * lpfc_sli_hbqbuf_find: Find the hbq buffer associated with a tag.
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function is called with hbalock held. This function searches
 * for the hbq buffer associated with the given tag in the hbq buffer
 * list. If it finds the hbq buffer, it returns the hbq_buffer; otherwise
 * it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			return hbq_buf;
		}
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq: Give back the hbq buffer to firmware.
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
		}
	}
}

/**
 * lpfc_sli_chk_mbx_command: Check if the mailbox is a legitimate mailbox.
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

/**
 * lpfc_sli_wake_mbox_wait: Completion handler for mbox issued from
 *                          lpfc_sli_issue_mbox_wait.
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}


/**
 * lpfc_sli_def_mbox_cmpl: Default mailbox completion handler.
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp;
	uint16_t rpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->mb.mbxStatus) {

		rpi = pmb->mb.un.varWords[0];
		lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
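
/*
 * A typical fire-and-forget mailbox command uses the default
 * completion handler (hypothetical sketch of a caller):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	... build the mailbox command in pmb->mb ...
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */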

/**
 * lpfc_sli_handle_mb_event: Handle mailbox completions from firmware.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes the mailbox completion interrupt,
 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
 * will return the completed mailbox commands in the mboxq_cmpl queue to
 * the upper layers. This function returns the mailbox commands to the
 * upper layer by calling the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * An unknown mailbox command completion is a fatal error.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command completion */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"%x Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand);
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(&phba->hbalock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(&phba->hbalock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
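
/*
 * For the synchronous variant, lpfc_sli_issue_mbox_wait() parks the
 * caller on a wait queue hung off pmboxq->context1 and installs
 * lpfc_sli_wake_mbox_wait (above) as mbox_cmpl, so the completion
 * path here is what eventually wakes the sleeper (sketch, assuming
 * that helper's behavior as described in its kernel-doc; done_q is
 * a hypothetical wait_queue_head_t):
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context1  = &done_q;
 */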

/**
 * lpfc_sli_get_buff: Get the buffer associated with the buffer tag.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT
 * is set in the tag, the buffer was posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
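
/*
 * Retrieval sketch, as used by the unsolicited-iocb path below: the
 * buffer tag arrives in a response-iocb word, and a NULL return is
 * logged and the frame dropped:
 *
 *	dmabuf = lpfc_sli_get_buff(phba, pring, irsp->un.ulpWord[3]);
 *	if (!dmabuf)
 *		... log a "Cannot find buffer" message and drop ...
 */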

/**
 * lpfc_sli_process_unsol_iocb: Unsolicited iocb handler.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object;
 * otherwise upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t * irsp;
	WORD5 * w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	match = 0;
	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
					(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
				match = 1;
				break;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);
	}
	return 1;
}
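
/*
 * Dispatch, in short: a received frame is matched on its R_CTL and
 * TYPE words against the ring's pring->prt[] mask table, so an
 * inbound ELS request (Rctl FC_ELS_REQ, Type FC_ELS_DATA) lands in
 * the ELS ring's registered lpfc_sli_rcv_unsol_event handler.
 */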

/**
 * lpfc_sli_iocbq_lookup: Find command iocb for the given response iocb.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, else it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
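
/*
 * Lookup sketch (hbalock held): the response's ulpIoTag indexes
 * straight into iocbq_lookup[], so matching a response to its
 * originating command iocb is O(1):
 *
 *	cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq);
 */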

/**
 * lpfc_sli_process_sol_iocb: process solicited iocb completion.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with the command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}

/**
 * lpfc_sli_rsp_pointers_error: Response ring pointer error handler.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function
 * signals an error attention condition to the worker thread and the
 * worker thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_poll_eratt: Error attention polling timer timeout handler.
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up the worker thread to process it. Otherwise, it will set up
 * the Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;

	phba = (struct lpfc_hba *)ptr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll, jiffies +
					HZ * LPFC_ERATT_POLL_INTERVAL);
	return;
}

/**
 * lpfc_sli_poll_fcp_ring: Handle FCP ring completion in polling mode.
 * @phba: Pointer to HBA context object.
 *
 * This function is called from lpfc_queuecommand, lpfc_poll_timeout,
 * lpfc_abort_handler and lpfc_slave_configure when FCP_RING_POLLING
 * is enabled.
 *
 * The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with the LE bit set and chains all the iocbs up to the iocb
 * with the LE bit set. The function will call the completion handler of the
 * command iocb if the response iocb indicates a completion for a command
 * iocb or it is an abort completion.
 **/
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int type;
	uint32_t rsp_cmpl = 0;
	uint32_t ha_copy;
	unsigned long iflags;

	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries. If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		return;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		entry = lpfc_resp_iocb(phba, pring);
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbq.iocb;
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port. No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0314 IOCB cmd 0x%x "
						"processed. Skipping "
						"completion",
						irsp->ulpCommand);
				break;
			}

			spin_lock_irqsave(&phba->hbalock, iflags);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed. Update the ring
		 * pointer in SLIM. If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
1810 */ 1811 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 1812 1813 if (pring->rspidx == portRspPut) 1814 portRspPut = le32_to_cpu(pgp->rspPutInx); 1815 } 1816 1817 ha_copy = readl(phba->HAregaddr); 1818 ha_copy >>= (LPFC_FCP_RING * 4); 1819 1820 if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) { 1821 spin_lock_irqsave(&phba->hbalock, iflags); 1822 pring->stats.iocb_rsp_full++; 1823 status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4)); 1824 writel(status, phba->CAregaddr); 1825 readl(phba->CAregaddr); 1826 spin_unlock_irqrestore(&phba->hbalock, iflags); 1827 } 1828 if ((ha_copy & HA_R0CE_RSP) && 1829 (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 1830 spin_lock_irqsave(&phba->hbalock, iflags); 1831 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 1832 pring->stats.iocb_cmd_empty++; 1833 1834 /* Force update of the local copy of cmdGetInx */ 1835 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1836 lpfc_sli_resume_iocb(phba, pring); 1837 1838 if ((pring->lpfc_sli_cmd_available)) 1839 (pring->lpfc_sli_cmd_available) (phba, pring); 1840 1841 spin_unlock_irqrestore(&phba->hbalock, iflags); 1842 } 1843 1844 return; 1845 } 1846 1847 /** 1848 * lpfc_sli_handle_fast_ring_event: Handle ring events on FCP ring. 1849 * @phba: Pointer to HBA context object. 1850 * @pring: Pointer to driver SLI ring object. 1851 * @mask: Host attention register mask for this ring. 1852 * 1853 * This function is called from the interrupt context when there is a ring 1854 * event for the fcp ring. The caller does not hold any lock. 1855 * The function processes each response iocb in the response ring until it 1856 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with 1857 * LE bit set. The function will call the completion handler of the command iocb 1858 * if the response iocb indicates a completion for a command iocb or it is 1859 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 1860 * function if this is an unsolicited iocb. 1861 * This routine presumes LPFC_FCP_RING handling and doesn't bother 1862 * to check it explicitly. This function always returns 1. 1863 **/ 1864 static int 1865 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 1866 struct lpfc_sli_ring *pring, uint32_t mask) 1867 { 1868 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1869 IOCB_t *irsp = NULL; 1870 IOCB_t *entry = NULL; 1871 struct lpfc_iocbq *cmdiocbq = NULL; 1872 struct lpfc_iocbq rspiocbq; 1873 uint32_t status; 1874 uint32_t portRspPut, portRspMax; 1875 int rc = 1; 1876 lpfc_iocb_type type; 1877 unsigned long iflag; 1878 uint32_t rsp_cmpl = 0; 1879 1880 spin_lock_irqsave(&phba->hbalock, iflag); 1881 pring->stats.iocb_event++; 1882 1883 /* 1884 * The next available response entry should never exceed the maximum 1885 * entries. If it does, treat it as an adapter hardware error. 1886 */ 1887 portRspMax = pring->numRiocb; 1888 portRspPut = le32_to_cpu(pgp->rspPutInx); 1889 if (unlikely(portRspPut >= portRspMax)) { 1890 lpfc_sli_rsp_pointers_error(phba, pring); 1891 spin_unlock_irqrestore(&phba->hbalock, iflag); 1892 return 1; 1893 } 1894 1895 rmb(); 1896 while (pring->rspidx != portRspPut) { 1897 /* 1898 * Fetch an entry off the ring and copy it into a local data 1899 * structure. The copy involves a byte-swap since the 1900 * network byte order and pci byte orders are different. 
1901 */ 1902 entry = lpfc_resp_iocb(phba, pring); 1903 phba->last_completion_time = jiffies; 1904 1905 if (++pring->rspidx >= portRspMax) 1906 pring->rspidx = 0; 1907 1908 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 1909 (uint32_t *) &rspiocbq.iocb, 1910 phba->iocb_rsp_size); 1911 INIT_LIST_HEAD(&(rspiocbq.list)); 1912 irsp = &rspiocbq.iocb; 1913 1914 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 1915 pring->stats.iocb_rsp++; 1916 rsp_cmpl++; 1917 1918 if (unlikely(irsp->ulpStatus)) { 1919 /* 1920 * If resource errors reported from HBA, reduce 1921 * queuedepths of the SCSI device. 1922 */ 1923 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 1924 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 1925 spin_unlock_irqrestore(&phba->hbalock, iflag); 1926 lpfc_rampdown_queue_depth(phba); 1927 spin_lock_irqsave(&phba->hbalock, iflag); 1928 } 1929 1930 /* Rsp ring <ringno> error: IOCB */ 1931 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1932 "0336 Rsp Ring %d error: IOCB Data: " 1933 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 1934 pring->ringno, 1935 irsp->un.ulpWord[0], 1936 irsp->un.ulpWord[1], 1937 irsp->un.ulpWord[2], 1938 irsp->un.ulpWord[3], 1939 irsp->un.ulpWord[4], 1940 irsp->un.ulpWord[5], 1941 *(uint32_t *)&irsp->un1, 1942 *((uint32_t *)&irsp->un1 + 1)); 1943 } 1944 1945 switch (type) { 1946 case LPFC_ABORT_IOCB: 1947 case LPFC_SOL_IOCB: 1948 /* 1949 * Idle exchange closed via ABTS from port. No iocb 1950 * resources need to be recovered. 1951 */ 1952 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 1953 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1954 "0333 IOCB cmd 0x%x" 1955 " processed. Skipping" 1956 " completion\n", 1957 irsp->ulpCommand); 1958 break; 1959 } 1960 1961 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 1962 &rspiocbq); 1963 if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) { 1964 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { 1965 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1966 &rspiocbq); 1967 } else { 1968 spin_unlock_irqrestore(&phba->hbalock, 1969 iflag); 1970 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 1971 &rspiocbq); 1972 spin_lock_irqsave(&phba->hbalock, 1973 iflag); 1974 } 1975 } 1976 break; 1977 case LPFC_UNSOL_IOCB: 1978 spin_unlock_irqrestore(&phba->hbalock, iflag); 1979 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 1980 spin_lock_irqsave(&phba->hbalock, iflag); 1981 break; 1982 default: 1983 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 1984 char adaptermsg[LPFC_MAX_ADPTMSG]; 1985 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 1986 memcpy(&adaptermsg[0], (uint8_t *) irsp, 1987 MAX_MSG_DATA); 1988 dev_warn(&((phba->pcidev)->dev), 1989 "lpfc%d: %s\n", 1990 phba->brd_no, adaptermsg); 1991 } else { 1992 /* Unknown IOCB command */ 1993 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1994 "0334 Unknown IOCB command " 1995 "Data: x%x, x%x x%x x%x x%x\n", 1996 type, irsp->ulpCommand, 1997 irsp->ulpStatus, 1998 irsp->ulpIoTag, 1999 irsp->ulpContext); 2000 } 2001 break; 2002 } 2003 2004 /* 2005 * The response IOCB has been processed. Update the ring 2006 * pointer in SLIM. If the port response put pointer has not 2007 * been updated, sync the pgp->rspPutInx and fetch the new port 2008 * response put pointer. 
2009 */
2010 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
2011
2012 if (pring->rspidx == portRspPut)
2013 portRspPut = le32_to_cpu(pgp->rspPutInx);
2014 }
2015
2016 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
2017 pring->stats.iocb_rsp_full++;
2018 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2019 writel(status, phba->CAregaddr);
2020 readl(phba->CAregaddr);
2021 }
2022 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2023 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2024 pring->stats.iocb_cmd_empty++;
2025
2026 /* Force update of the local copy of cmdGetInx */
2027 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2028 lpfc_sli_resume_iocb(phba, pring);
2029
2030 if ((pring->lpfc_sli_cmd_available))
2031 (pring->lpfc_sli_cmd_available) (phba, pring);
2032
2033 }
2034
2035 spin_unlock_irqrestore(&phba->hbalock, iflag);
2036 return rc;
2037 }
2038
2039 /**
2040 * lpfc_sli_handle_slow_ring_event: Handle ring events for non-FCP rings.
2041 * @phba: Pointer to HBA context object.
2042 * @pring: Pointer to driver SLI ring object.
2043 * @mask: Host attention register mask for this ring.
2044 *
2045 * This function is called from the worker thread when there is a ring
2046 * event for non-fcp rings. The caller does not hold any lock.
2047 * The function processes each response iocb in the response ring until it
2048 * finds an iocb with the LE bit set, chaining all the iocbs up to the iocb
2049 * with the LE bit set. The function will call lpfc_sli_process_sol_iocb
2050 * if the response iocb indicates a completion of a command iocb. The
2051 * function will call lpfc_sli_process_unsol_iocb if this is an unsolicited
2052 * iocb. The function frees the resources or calls the completion handler if
2053 * this iocb is an abort completion. The function returns 0 when the allocated
2054 * iocbs are not freed, otherwise returns 1.
2055 **/
2056 int
2057 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
2058 struct lpfc_sli_ring *pring, uint32_t mask)
2059 {
2060 struct lpfc_pgp *pgp;
2061 IOCB_t *entry;
2062 IOCB_t *irsp = NULL;
2063 struct lpfc_iocbq *rspiocbp = NULL;
2064 struct lpfc_iocbq *next_iocb;
2065 struct lpfc_iocbq *cmdiocbp;
2066 struct lpfc_iocbq *saveq;
2067 uint8_t iocb_cmd_type;
2068 lpfc_iocb_type type;
2069 uint32_t status, free_saveq;
2070 uint32_t portRspPut, portRspMax;
2071 int rc = 1;
2072 unsigned long iflag;
2073
2074 pgp = &phba->port_gp[pring->ringno];
2075 spin_lock_irqsave(&phba->hbalock, iflag);
2076 pring->stats.iocb_event++;
2077
2078 /*
2079 * The next available response entry should never exceed the maximum
2080 * entries. If it does, treat it as an adapter hardware error.
2081 */
2082 portRspMax = pring->numRiocb;
2083 portRspPut = le32_to_cpu(pgp->rspPutInx);
2084 if (portRspPut >= portRspMax) {
2085 /*
2086 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2087 * rsp ring <portRspMax>
2088 */
2089 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2090 "0303 Ring %d handler: portRspPut %d "
2091 "is bigger than rsp ring %d\n",
2092 pring->ringno, portRspPut, portRspMax);
2093
2094 phba->link_state = LPFC_HBA_ERROR;
2095 spin_unlock_irqrestore(&phba->hbalock, iflag);
2096
2097 phba->work_hs = HS_FFER3;
2098 lpfc_handle_eratt(phba);
2099
2100 return 1;
2101 }
2102
2103 rmb();
2104 while (pring->rspidx != portRspPut) {
2105 /*
2106 * Build a completion list and call the appropriate handler.
2107 * The process is to get the next available response iocb, get 2108 * a free iocb from the list, copy the response data into the 2109 * free iocb, insert to the continuation list, and update the 2110 * next response index to slim. This process makes response 2111 * iocb's in the ring available to DMA as fast as possible but 2112 * pays a penalty for a copy operation. Since the iocb is 2113 * only 32 bytes, this penalty is considered small relative to 2114 * the PCI reads for register values and a slim write. When 2115 * the ulpLe field is set, the entire Command has been 2116 * received. 2117 */ 2118 entry = lpfc_resp_iocb(phba, pring); 2119 2120 phba->last_completion_time = jiffies; 2121 rspiocbp = __lpfc_sli_get_iocbq(phba); 2122 if (rspiocbp == NULL) { 2123 printk(KERN_ERR "%s: out of buffers! Failing " 2124 "completion.\n", __func__); 2125 break; 2126 } 2127 2128 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 2129 phba->iocb_rsp_size); 2130 irsp = &rspiocbp->iocb; 2131 2132 if (++pring->rspidx >= portRspMax) 2133 pring->rspidx = 0; 2134 2135 if (pring->ringno == LPFC_ELS_RING) { 2136 lpfc_debugfs_slow_ring_trc(phba, 2137 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 2138 *(((uint32_t *) irsp) + 4), 2139 *(((uint32_t *) irsp) + 6), 2140 *(((uint32_t *) irsp) + 7)); 2141 } 2142 2143 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2144 2145 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 2146 2147 pring->iocb_continueq_cnt++; 2148 if (irsp->ulpLe) { 2149 /* 2150 * By default, the driver expects to free all resources 2151 * associated with this iocb completion. 2152 */ 2153 free_saveq = 1; 2154 saveq = list_get_first(&pring->iocb_continueq, 2155 struct lpfc_iocbq, list); 2156 irsp = &(saveq->iocb); 2157 list_del_init(&pring->iocb_continueq); 2158 pring->iocb_continueq_cnt = 0; 2159 2160 pring->stats.iocb_rsp++; 2161 2162 /* 2163 * If resource errors reported from HBA, reduce 2164 * queuedepths of the SCSI device. 2165 */ 2166 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2167 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2168 spin_unlock_irqrestore(&phba->hbalock, iflag); 2169 lpfc_rampdown_queue_depth(phba); 2170 spin_lock_irqsave(&phba->hbalock, iflag); 2171 } 2172 2173 if (irsp->ulpStatus) { 2174 /* Rsp ring <ringno> error: IOCB */ 2175 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2176 "0328 Rsp Ring %d error: " 2177 "IOCB Data: " 2178 "x%x x%x x%x x%x " 2179 "x%x x%x x%x x%x " 2180 "x%x x%x x%x x%x " 2181 "x%x x%x x%x x%x\n", 2182 pring->ringno, 2183 irsp->un.ulpWord[0], 2184 irsp->un.ulpWord[1], 2185 irsp->un.ulpWord[2], 2186 irsp->un.ulpWord[3], 2187 irsp->un.ulpWord[4], 2188 irsp->un.ulpWord[5], 2189 *(((uint32_t *) irsp) + 6), 2190 *(((uint32_t *) irsp) + 7), 2191 *(((uint32_t *) irsp) + 8), 2192 *(((uint32_t *) irsp) + 9), 2193 *(((uint32_t *) irsp) + 10), 2194 *(((uint32_t *) irsp) + 11), 2195 *(((uint32_t *) irsp) + 12), 2196 *(((uint32_t *) irsp) + 13), 2197 *(((uint32_t *) irsp) + 14), 2198 *(((uint32_t *) irsp) + 15)); 2199 } 2200 2201 /* 2202 * Fetch the IOCB command type and call the correct 2203 * completion routine. Solicited and Unsolicited 2204 * IOCBs on the ELS ring get freed back to the 2205 * lpfc_iocb_list by the discovery kernel thread. 
2206 */
2207 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
2208 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
2209 if (type == LPFC_SOL_IOCB) {
2210 spin_unlock_irqrestore(&phba->hbalock, iflag);
2211 rc = lpfc_sli_process_sol_iocb(phba, pring,
2212 saveq);
2213 spin_lock_irqsave(&phba->hbalock, iflag);
2214 } else if (type == LPFC_UNSOL_IOCB) {
2215 spin_unlock_irqrestore(&phba->hbalock, iflag);
2216 rc = lpfc_sli_process_unsol_iocb(phba, pring,
2217 saveq);
2218 spin_lock_irqsave(&phba->hbalock, iflag);
2219 if (!rc)
2220 free_saveq = 0;
2221 } else if (type == LPFC_ABORT_IOCB) {
2222 if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
2223 ((cmdiocbp =
2224 lpfc_sli_iocbq_lookup(phba, pring,
2225 saveq)))) {
2226 /* Call the specified completion
2227 routine */
2228 if (cmdiocbp->iocb_cmpl) {
2229 spin_unlock_irqrestore(
2230 &phba->hbalock,
2231 iflag);
2232 (cmdiocbp->iocb_cmpl) (phba,
2233 cmdiocbp, saveq);
2234 spin_lock_irqsave(
2235 &phba->hbalock,
2236 iflag);
2237 } else
2238 __lpfc_sli_release_iocbq(phba,
2239 cmdiocbp);
2240 }
2241 } else if (type == LPFC_UNKNOWN_IOCB) {
2242 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
2243
2244 char adaptermsg[LPFC_MAX_ADPTMSG];
2245
2246 memset(adaptermsg, 0,
2247 LPFC_MAX_ADPTMSG);
2248 memcpy(&adaptermsg[0], (uint8_t *) irsp,
2249 MAX_MSG_DATA);
2250 dev_warn(&((phba->pcidev)->dev),
2251 "lpfc%d: %s\n",
2252 phba->brd_no, adaptermsg);
2253 } else {
2254 /* Unknown IOCB command */
2255 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2256 "0335 Unknown IOCB "
2257 "command Data: x%x "
2258 "x%x x%x x%x\n",
2259 irsp->ulpCommand,
2260 irsp->ulpStatus,
2261 irsp->ulpIoTag,
2262 irsp->ulpContext);
2263 }
2264 }
2265
2266 if (free_saveq) {
2267 list_for_each_entry_safe(rspiocbp, next_iocb,
2268 &saveq->list, list) {
2269 list_del(&rspiocbp->list);
2270 __lpfc_sli_release_iocbq(phba,
2271 rspiocbp);
2272 }
2273 __lpfc_sli_release_iocbq(phba, saveq);
2274 }
2275 rspiocbp = NULL;
2276 }
2277
2278 /*
2279 * If the port response put pointer has not been updated, sync
2280 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
2281 * response put pointer.
2282 */
2283 if (pring->rspidx == portRspPut) {
2284 portRspPut = le32_to_cpu(pgp->rspPutInx);
2285 }
2286 } /* while (pring->rspidx != portRspPut) */
2287
2288 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
2289 /* At least one response entry has been freed */
2290 pring->stats.iocb_rsp_full++;
2291 /* SET RxRE_RSP in Chip Att register */
2292 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
2293 writel(status, phba->CAregaddr);
2294 readl(phba->CAregaddr); /* flush */
2295 }
2296 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
2297 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
2298 pring->stats.iocb_cmd_empty++;
2299
2300 /* Force update of the local copy of cmdGetInx */
2301 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
2302 lpfc_sli_resume_iocb(phba, pring);
2303
2304 if ((pring->lpfc_sli_cmd_available))
2305 (pring->lpfc_sli_cmd_available) (phba, pring);
2306
2307 }
2308
2309 spin_unlock_irqrestore(&phba->hbalock, iflag);
2310 return rc;
2311 }
2312
2313 /**
2314 * lpfc_sli_abort_iocb_ring: Abort all iocbs in the ring.
2315 * @phba: Pointer to HBA context object.
2316 * @pring: Pointer to driver SLI ring object.
2317 *
2318 * This function aborts all iocbs in the given ring and frees all the iocb
2319 * objects in txq. This function issues an abort iocb for all the iocb commands
2320 * in txcmplq.
The iocbs in the txcmplq are not guaranteed to complete before
2321 * the return of this function. The caller is not required to hold any locks.
2322 **/
2323 void
2324 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2325 {
2326 LIST_HEAD(completions);
2327 struct lpfc_iocbq *iocb, *next_iocb;
2328 IOCB_t *cmd = NULL;
2329
2330 if (pring->ringno == LPFC_ELS_RING) {
2331 lpfc_fabric_abort_hba(phba);
2332 }
2333
2334 /* Error everything on txq and txcmplq
2335 * First do the txq.
2336 */
2337 spin_lock_irq(&phba->hbalock);
2338 list_splice_init(&pring->txq, &completions);
2339 pring->txq_cnt = 0;
2340
2341 /* Next issue ABTS for everything on the txcmplq */
2342 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
2343 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
2344
2345 spin_unlock_irq(&phba->hbalock);
2346
2347 while (!list_empty(&completions)) {
2348 iocb = list_get_first(&completions, struct lpfc_iocbq, list);
2349 cmd = &iocb->iocb;
2350 list_del_init(&iocb->list);
2351
2352 if (!iocb->iocb_cmpl)
2353 lpfc_sli_release_iocbq(phba, iocb);
2354 else {
2355 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2356 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
2357 (iocb->iocb_cmpl) (phba, iocb, iocb);
2358 }
2359 }
2360 }
2361
2362 /**
2363 * lpfc_sli_flush_fcp_rings: flush all iocbs in the fcp ring.
2364 * @phba: Pointer to HBA context object.
2365 *
2366 * This function flushes all iocbs in the fcp ring and frees all the iocb
2367 * objects in txq and txcmplq. This function will not issue abort iocbs
2368 * for the iocb commands in txcmplq; they will simply be returned with
2369 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
2370 * slot has been permanently disabled.
2371 **/
2372 void
2373 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
2374 {
2375 LIST_HEAD(txq);
2376 LIST_HEAD(txcmplq);
2377 struct lpfc_iocbq *iocb;
2378 IOCB_t *cmd = NULL;
2379 struct lpfc_sli *psli = &phba->sli;
2380 struct lpfc_sli_ring *pring;
2381
2382 /* Currently, only one fcp ring */
2383 pring = &psli->ring[psli->fcp_ring];
2384
2385 spin_lock_irq(&phba->hbalock);
2386 /* Retrieve everything on txq */
2387 list_splice_init(&pring->txq, &txq);
2388 pring->txq_cnt = 0;
2389
2390 /* Retrieve everything on the txcmplq */
2391 list_splice_init(&pring->txcmplq, &txcmplq);
2392 pring->txcmplq_cnt = 0;
2393 spin_unlock_irq(&phba->hbalock);
2394
2395 /* Flush the txq */
2396 while (!list_empty(&txq)) {
2397 iocb = list_get_first(&txq, struct lpfc_iocbq, list);
2398 cmd = &iocb->iocb;
2399 list_del_init(&iocb->list);
2400
2401 if (!iocb->iocb_cmpl)
2402 lpfc_sli_release_iocbq(phba, iocb);
2403 else {
2404 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2405 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2406 (iocb->iocb_cmpl) (phba, iocb, iocb);
2407 }
2408 }
2409
2410 /* Flush the txcmplq */
2411 while (!list_empty(&txcmplq)) {
2412 iocb = list_get_first(&txcmplq, struct lpfc_iocbq, list);
2413 cmd = &iocb->iocb;
2414 list_del_init(&iocb->list);
2415
2416 if (!iocb->iocb_cmpl)
2417 lpfc_sli_release_iocbq(phba, iocb);
2418 else {
2419 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2420 cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2421 (iocb->iocb_cmpl) (phba, iocb, iocb);
2422 }
2423 }
2424 }
2425
2426 /**
2427 * lpfc_sli_brdready: Check for host status bits.
2428 * @phba: Pointer to HBA context object.
2429 * @mask: Bit mask to be checked.
2430 *
2431 * This function reads the host status register and compares it
2432 * with the provided bit mask to check if the HBA completed
2433 * the restart.
This function will wait in a loop for the
2434 * HBA to complete restart. If the HBA does not restart within
2435 * 15 iterations, the function will reset the HBA again. The
2436 * function returns 1 when the HBA fails to restart; otherwise it
2437 * returns zero.
2438 **/
2439 int
2440 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
2441 {
2442 uint32_t status;
2443 int i = 0;
2444 int retval = 0;
2445
2446 /* Read the HBA Host Status Register */
2447 status = readl(phba->HSregaddr);
2448
2449 /*
2450 * Check the status register every 10ms for 5 retries, then every
2451 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
2452 * check every 2.5 sec for 4 more.
2453 * Break out of the loop if errors occurred during init.
2454 */
2455 while (((status & mask) != mask) &&
2456 !(status & HS_FFERM) &&
2457 i++ < 20) {
2458
2459 if (i <= 5)
2460 msleep(10);
2461 else if (i <= 10)
2462 msleep(500);
2463 else
2464 msleep(2500);
2465
2466 if (i == 15) {
2467 /* Do post */
2468 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2469 lpfc_sli_brdrestart(phba);
2470 }
2471 /* Read the HBA Host Status Register */
2472 status = readl(phba->HSregaddr);
2473 }
2474
2475 /* Check to see if any errors occurred during init */
2476 if ((status & HS_FFERM) || (i >= 20)) {
2477 phba->link_state = LPFC_HBA_ERROR;
2478 retval = 1;
2479 }
2480
2481 return retval;
2482 }
2483
2484 #define BARRIER_TEST_PATTERN (0xdeadbeef)
2485
2486 /**
2487 * lpfc_reset_barrier: Make HBA ready for HBA reset.
2488 * @phba: Pointer to HBA context object.
2489 *
2490 * This function is called before resetting an HBA. This
2491 * function requests HBA to quiesce DMAs before a reset.
2492 **/
2493 void lpfc_reset_barrier(struct lpfc_hba *phba)
2494 {
2495 uint32_t __iomem *resp_buf;
2496 uint32_t __iomem *mbox_buf;
2497 volatile uint32_t mbox;
2498 uint32_t hc_copy;
2499 int i;
2500 uint8_t hdrtype;
2501
2502 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
2503 if (hdrtype != 0x80 ||
2504 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
2505 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
2506 return;
2507
2508 /*
2509 * Tell the other part of the chip to suspend temporarily all
2510 * its DMA activity.
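*
* The mechanism, as implemented below: post a MBX_KILL_BOARD mailbox
* word owned by OWN_CHIP directly to SLIM, write BARRIER_TEST_PATTERN
* just past the mailbox, and wait for the chip to store the complement
* of that pattern (or raise an error attention) before restoring the
* host control register.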
2511 */ 2512 resp_buf = phba->MBslimaddr; 2513 2514 /* Disable the error attention */ 2515 hc_copy = readl(phba->HCregaddr); 2516 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 2517 readl(phba->HCregaddr); /* flush */ 2518 phba->link_flag |= LS_IGNORE_ERATT; 2519 2520 if (readl(phba->HAregaddr) & HA_ERATT) { 2521 /* Clear Chip error bit */ 2522 writel(HA_ERATT, phba->HAregaddr); 2523 phba->pport->stopped = 1; 2524 } 2525 2526 mbox = 0; 2527 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 2528 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 2529 2530 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 2531 mbox_buf = phba->MBslimaddr; 2532 writel(mbox, mbox_buf); 2533 2534 for (i = 0; 2535 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 2536 mdelay(1); 2537 2538 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 2539 if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || 2540 phba->pport->stopped) 2541 goto restore_hc; 2542 else 2543 goto clear_errat; 2544 } 2545 2546 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 2547 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 2548 mdelay(1); 2549 2550 clear_errat: 2551 2552 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 2553 mdelay(1); 2554 2555 if (readl(phba->HAregaddr) & HA_ERATT) { 2556 writel(HA_ERATT, phba->HAregaddr); 2557 phba->pport->stopped = 1; 2558 } 2559 2560 restore_hc: 2561 phba->link_flag &= ~LS_IGNORE_ERATT; 2562 writel(hc_copy, phba->HCregaddr); 2563 readl(phba->HCregaddr); /* flush */ 2564 } 2565 2566 /** 2567 * lpfc_sli_brdkill: Issue a kill_board mailbox command. 2568 * @phba: Pointer to HBA context object. 2569 * 2570 * This function issues a kill_board mailbox command and waits for 2571 * the error attention interrupt. This function is called for stopping 2572 * the firmware processing. The caller is not required to hold any 2573 * locks. This function calls lpfc_hba_down_post function to free 2574 * any pending commands after the kill. The function will return 1 when it 2575 * fails to kill the board else will return 0. 2576 **/ 2577 int 2578 lpfc_sli_brdkill(struct lpfc_hba *phba) 2579 { 2580 struct lpfc_sli *psli; 2581 LPFC_MBOXQ_t *pmb; 2582 uint32_t status; 2583 uint32_t ha_copy; 2584 int retval; 2585 int i = 0; 2586 2587 psli = &phba->sli; 2588 2589 /* Kill HBA */ 2590 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2591 "0329 Kill HBA Data: x%x x%x\n", 2592 phba->pport->port_state, psli->sli_flag); 2593 2594 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2595 if (!pmb) 2596 return 1; 2597 2598 /* Disable the error attention */ 2599 spin_lock_irq(&phba->hbalock); 2600 status = readl(phba->HCregaddr); 2601 status &= ~HC_ERINT_ENA; 2602 writel(status, phba->HCregaddr); 2603 readl(phba->HCregaddr); /* flush */ 2604 phba->link_flag |= LS_IGNORE_ERATT; 2605 spin_unlock_irq(&phba->hbalock); 2606 2607 lpfc_kill_board(phba, pmb); 2608 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2609 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2610 2611 if (retval != MBX_SUCCESS) { 2612 if (retval != MBX_BUSY) 2613 mempool_free(pmb, phba->mbox_mem_pool); 2614 spin_lock_irq(&phba->hbalock); 2615 phba->link_flag &= ~LS_IGNORE_ERATT; 2616 spin_unlock_irq(&phba->hbalock); 2617 return 1; 2618 } 2619 2620 psli->sli_flag &= ~LPFC_SLI2_ACTIVE; 2621 2622 mempool_free(pmb, phba->mbox_mem_pool); 2623 2624 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 2625 * attention every 100ms for 3 seconds. 
If we don't get ERATT after 2626 * 3 seconds we still set HBA_ERROR state because the status of the 2627 * board is now undefined. 2628 */ 2629 ha_copy = readl(phba->HAregaddr); 2630 2631 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 2632 mdelay(100); 2633 ha_copy = readl(phba->HAregaddr); 2634 } 2635 2636 del_timer_sync(&psli->mbox_tmo); 2637 if (ha_copy & HA_ERATT) { 2638 writel(HA_ERATT, phba->HAregaddr); 2639 phba->pport->stopped = 1; 2640 } 2641 spin_lock_irq(&phba->hbalock); 2642 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 2643 phba->link_flag &= ~LS_IGNORE_ERATT; 2644 spin_unlock_irq(&phba->hbalock); 2645 2646 psli->mbox_active = NULL; 2647 lpfc_hba_down_post(phba); 2648 phba->link_state = LPFC_HBA_ERROR; 2649 2650 return ha_copy & HA_ERATT ? 0 : 1; 2651 } 2652 2653 /** 2654 * lpfc_sli_brdreset: Reset the HBA. 2655 * @phba: Pointer to HBA context object. 2656 * 2657 * This function resets the HBA by writing HC_INITFF to the control 2658 * register. After the HBA resets, this function resets all the iocb ring 2659 * indices. This function disables PCI layer parity checking during 2660 * the reset. 2661 * This function returns 0 always. 2662 * The caller is not required to hold any locks. 2663 **/ 2664 int 2665 lpfc_sli_brdreset(struct lpfc_hba *phba) 2666 { 2667 struct lpfc_sli *psli; 2668 struct lpfc_sli_ring *pring; 2669 uint16_t cfg_value; 2670 int i; 2671 2672 psli = &phba->sli; 2673 2674 /* Reset HBA */ 2675 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2676 "0325 Reset HBA Data: x%x x%x\n", 2677 phba->pport->port_state, psli->sli_flag); 2678 2679 /* perform board reset */ 2680 phba->fc_eventTag = 0; 2681 phba->pport->fc_myDID = 0; 2682 phba->pport->fc_prevDID = 0; 2683 2684 /* Turn off parity checking and serr during the physical reset */ 2685 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 2686 pci_write_config_word(phba->pcidev, PCI_COMMAND, 2687 (cfg_value & 2688 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 2689 2690 psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); 2691 /* Now toggle INITFF bit in the Host Control Register */ 2692 writel(HC_INITFF, phba->HCregaddr); 2693 mdelay(1); 2694 readl(phba->HCregaddr); /* flush */ 2695 writel(0, phba->HCregaddr); 2696 readl(phba->HCregaddr); /* flush */ 2697 2698 /* Restore PCI cmd register */ 2699 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 2700 2701 /* Initialize relevant SLI info */ 2702 for (i = 0; i < psli->num_rings; i++) { 2703 pring = &psli->ring[i]; 2704 pring->flag = 0; 2705 pring->rspidx = 0; 2706 pring->next_cmdidx = 0; 2707 pring->local_getidx = 0; 2708 pring->cmdidx = 0; 2709 pring->missbufcnt = 0; 2710 } 2711 2712 phba->link_state = LPFC_WARM_START; 2713 return 0; 2714 } 2715 2716 /** 2717 * lpfc_sli_brdrestart: Restart the HBA. 2718 * @phba: Pointer to HBA context object. 2719 * 2720 * This function is called in the SLI initialization code path to 2721 * restart the HBA. The caller is not required to hold any lock. 2722 * This function writes MBX_RESTART mailbox command to the SLIM and 2723 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 2724 * function to free any pending commands. The function enables 2725 * POST only during the first initialization. The function returns zero. 2726 * The function does not guarantee completion of MBX_RESTART mailbox 2727 * command before the return of this function. 
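*
* A hedged usage sketch (assumed caller context; this mirrors the
* restart path in lpfc_sli_config_port below):
*
*	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
*	lpfc_sli_brdrestart(phba);
*	rc = lpfc_sli_chipset_init(phba);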
2728 **/
2729 int
2730 lpfc_sli_brdrestart(struct lpfc_hba *phba)
2731 {
2732 MAILBOX_t *mb;
2733 struct lpfc_sli *psli;
2734 volatile uint32_t word0;
2735 void __iomem *to_slim;
2736
2737 spin_lock_irq(&phba->hbalock);
2738
2739 psli = &phba->sli;
2740
2741 /* Restart HBA */
2742 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2743 "0337 Restart HBA Data: x%x x%x\n",
2744 phba->pport->port_state, psli->sli_flag);
2745
2746 word0 = 0;
2747 mb = (MAILBOX_t *) &word0;
2748 mb->mbxCommand = MBX_RESTART;
2749 mb->mbxHc = 1;
2750
2751 lpfc_reset_barrier(phba);
2752
2753 to_slim = phba->MBslimaddr;
2754 writel(*(uint32_t *) mb, to_slim);
2755 readl(to_slim); /* flush */
2756
2757 /* Only skip post after fc_ffinit is completed */
2758 if (phba->pport->port_state)
2759 word0 = 1; /* This is really setting up word1 */
2760 else
2761 word0 = 0; /* This is really setting up word1 */
2762 to_slim = phba->MBslimaddr + sizeof (uint32_t);
2763 writel(*(uint32_t *) mb, to_slim);
2764 readl(to_slim); /* flush */
2765
2766 lpfc_sli_brdreset(phba);
2767 phba->pport->stopped = 0;
2768 phba->link_state = LPFC_INIT_START;
2769
2770 spin_unlock_irq(&phba->hbalock);
2771
2772 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2773 psli->stats_start = get_seconds();
2774
2775 /* Give the INITFF and Post time to settle. */
2776 mdelay(100);
2777
2778 lpfc_hba_down_post(phba);
2779
2780 return 0;
2781 }
2782
2783 /**
2784 * lpfc_sli_chipset_init: Wait for the HBA to restart.
2785 * @phba: Pointer to HBA context object.
2786 *
2787 * This function is called after an HBA restart to wait for successful
2788 * restart of the HBA. Successful restart of the HBA is indicated by
2789 * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after
2790 * 15 iterations, the function will restart the HBA again. The function
2791 * returns zero if the HBA successfully restarts, else it returns a
2792 * negative error code.
2793 **/
2794 static int
2795 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2796 {
2797 uint32_t status, i = 0;
2798
2799 /* Read the HBA Host Status Register */
2800 status = readl(phba->HSregaddr);
2801
2802 /* Check status register to see what current state is */
2803 i = 0;
2804 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2805
2806 /* Check every 10ms for 5 retries, then every 500ms for 5, then
2807 * every 2.5 sec for 5, then reset board and every 2.5 sec for
2808 * 4.
2808 */ 2809 if (i++ >= 20) { 2810 /* Adapter failed to init, timeout, status reg 2811 <status> */ 2812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2813 "0436 Adapter failed to init, " 2814 "timeout, status reg x%x, " 2815 "FW Data: A8 x%x AC x%x\n", status, 2816 readl(phba->MBslimaddr + 0xa8), 2817 readl(phba->MBslimaddr + 0xac)); 2818 phba->link_state = LPFC_HBA_ERROR; 2819 return -ETIMEDOUT; 2820 } 2821 2822 /* Check to see if any errors occurred during init */ 2823 if (status & HS_FFERM) { 2824 /* ERROR: During chipset initialization */ 2825 /* Adapter failed to init, chipset, status reg 2826 <status> */ 2827 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2828 "0437 Adapter failed to init, " 2829 "chipset, status reg x%x, " 2830 "FW Data: A8 x%x AC x%x\n", status, 2831 readl(phba->MBslimaddr + 0xa8), 2832 readl(phba->MBslimaddr + 0xac)); 2833 phba->link_state = LPFC_HBA_ERROR; 2834 return -EIO; 2835 } 2836 2837 if (i <= 5) { 2838 msleep(10); 2839 } else if (i <= 10) { 2840 msleep(500); 2841 } else { 2842 msleep(2500); 2843 } 2844 2845 if (i == 15) { 2846 /* Do post */ 2847 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 2848 lpfc_sli_brdrestart(phba); 2849 } 2850 /* Read the HBA Host Status Register */ 2851 status = readl(phba->HSregaddr); 2852 } 2853 2854 /* Check to see if any errors occurred during init */ 2855 if (status & HS_FFERM) { 2856 /* ERROR: During chipset initialization */ 2857 /* Adapter failed to init, chipset, status reg <status> */ 2858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 2859 "0438 Adapter failed to init, chipset, " 2860 "status reg x%x, " 2861 "FW Data: A8 x%x AC x%x\n", status, 2862 readl(phba->MBslimaddr + 0xa8), 2863 readl(phba->MBslimaddr + 0xac)); 2864 phba->link_state = LPFC_HBA_ERROR; 2865 return -EIO; 2866 } 2867 2868 /* Clear all interrupt enable conditions */ 2869 writel(0, phba->HCregaddr); 2870 readl(phba->HCregaddr); /* flush */ 2871 2872 /* setup host attn register */ 2873 writel(0xffffffff, phba->HAregaddr); 2874 readl(phba->HAregaddr); /* flush */ 2875 return 0; 2876 } 2877 2878 /** 2879 * lpfc_sli_hbq_count: Get the number of HBQs to be configured. 2880 * 2881 * This function calculates and returns the number of HBQs required to be 2882 * configured. 2883 **/ 2884 int 2885 lpfc_sli_hbq_count(void) 2886 { 2887 return ARRAY_SIZE(lpfc_hbq_defs); 2888 } 2889 2890 /** 2891 * lpfc_sli_hbq_entry_count: Calculate total number of hbq entries. 2892 * 2893 * This function adds the number of hbq entries in every HBQ to get 2894 * the total number of hbq entries required for the HBA and returns 2895 * the total count. 2896 **/ 2897 static int 2898 lpfc_sli_hbq_entry_count(void) 2899 { 2900 int hbq_count = lpfc_sli_hbq_count(); 2901 int count = 0; 2902 int i; 2903 2904 for (i = 0; i < hbq_count; ++i) 2905 count += lpfc_hbq_defs[i]->entry_count; 2906 return count; 2907 } 2908 2909 /** 2910 * lpfc_sli_hbq_size: Calculate memory required for all hbq entries. 2911 * 2912 * This function calculates amount of memory required for all hbq entries 2913 * to be configured and returns the total memory required. 2914 **/ 2915 int 2916 lpfc_sli_hbq_size(void) 2917 { 2918 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 2919 } 2920 2921 /** 2922 * lpfc_sli_hbq_setup: configure and initialize HBQs. 2923 * @phba: Pointer to HBA context object. 2924 * 2925 * This function is called during the SLI initialization to configure 2926 * all the HBQs and post buffers to the HBQ. The caller is not 2927 * required to hold any locks. 
This function will return zero if successful,
2928 * else it will return a negative error code.
2929 **/
2930 static int
2931 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2932 {
2933 int hbq_count = lpfc_sli_hbq_count();
2934 LPFC_MBOXQ_t *pmb;
2935 MAILBOX_t *pmbox;
2936 uint32_t hbqno;
2937 uint32_t hbq_entry_index;
2938
2939 /* Get a Mailbox buffer to setup mailbox
2940 * commands for HBA initialization
2941 */
2942 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2943
2944 if (!pmb)
2945 return -ENOMEM;
2946
2947 pmbox = &pmb->mb;
2948
2949 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
2950 phba->link_state = LPFC_INIT_MBX_CMDS;
2951 phba->hbq_in_use = 1;
2952
2953 hbq_entry_index = 0;
2954 for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2955 phba->hbqs[hbqno].next_hbqPutIdx = 0;
2956 phba->hbqs[hbqno].hbqPutIdx = 0;
2957 phba->hbqs[hbqno].local_hbqGetIdx = 0;
2958 phba->hbqs[hbqno].entry_count =
2959 lpfc_hbq_defs[hbqno]->entry_count;
2960 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2961 hbq_entry_index, pmb);
2962 hbq_entry_index += phba->hbqs[hbqno].entry_count;
2963
2964 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2965 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2966 mbxStatus <status>, ring <num> */
2967
2968 lpfc_printf_log(phba, KERN_ERR,
2969 LOG_SLI | LOG_VPORT,
2970 "1805 Adapter failed to init. "
2971 "Data: x%x x%x x%x\n",
2972 pmbox->mbxCommand,
2973 pmbox->mbxStatus, hbqno);
2974
2975 phba->link_state = LPFC_HBA_ERROR;
2976 mempool_free(pmb, phba->mbox_mem_pool);
2977 return -ENXIO;
2978 }
2979 }
2980 phba->hbq_count = hbq_count;
2981
2982 mempool_free(pmb, phba->mbox_mem_pool);
2983
2984 /* Initially populate or replenish the HBQs */
2985 for (hbqno = 0; hbqno < hbq_count; ++hbqno)
2986 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
2987 return 0;
2988 }
2989
2990 /**
2991 * lpfc_sli_config_port: Issue config port mailbox command.
2992 * @phba: Pointer to HBA context object.
2993 * @sli_mode: sli mode - 2/3
2994 *
2995 * This function is called by the SLI initialization code path
2996 * to issue the config_port mailbox command. This function restarts the
2997 * HBA firmware and issues a config_port mailbox command to configure
2998 * the SLI interface in the sli mode specified by the sli_mode
2999 * variable. The caller is not required to hold any locks.
3000 * The function returns 0 if successful, else returns negative error
3001 * code.
3002 **/
3003 int
3004 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
3005 {
3006 LPFC_MBOXQ_t *pmb;
3007 uint32_t resetcount = 0, rc = 0, done = 0;
3008
3009 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3010 if (!pmb) {
3011 phba->link_state = LPFC_HBA_ERROR;
3012 return -ENOMEM;
3013 }
3014
3015 phba->sli_rev = sli_mode;
3016 while (resetcount < 2 && !done) {
3017 spin_lock_irq(&phba->hbalock);
3018 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
3019 spin_unlock_irq(&phba->hbalock);
3020 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3021 lpfc_sli_brdrestart(phba);
3022 rc = lpfc_sli_chipset_init(phba);
3023 if (rc)
3024 break;
3025
3026 spin_lock_irq(&phba->hbalock);
3027 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3028 spin_unlock_irq(&phba->hbalock);
3029 resetcount++;
3030
3031 /* Call pre CONFIG_PORT mailbox command initialization. A
3032 * value of 0 means the call was successful. Any other
3033 * nonzero value is a failure, but if ERESTART is returned,
3034 * the driver may reset the HBA and try again.
3035 */
3036 rc = lpfc_config_port_prep(phba);
3037 if (rc == -ERESTART) {
3038 phba->link_state = LPFC_LINK_UNKNOWN;
3039 continue;
3040 } else if (rc)
3041 break;
3042 phba->link_state = LPFC_INIT_MBX_CMDS;
3043 lpfc_config_port(phba, pmb);
3044 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
3045 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
3046 LPFC_SLI3_HBQ_ENABLED |
3047 LPFC_SLI3_CRP_ENABLED |
3048 LPFC_SLI3_INB_ENABLED |
3049 LPFC_SLI3_BG_ENABLED);
3050 if (rc != MBX_SUCCESS) {
3051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3052 "0442 Adapter failed to init, mbxCmd x%x "
3053 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
3054 pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
3055 spin_lock_irq(&phba->hbalock);
3056 phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
3057 spin_unlock_irq(&phba->hbalock);
3058 rc = -ENXIO;
3059 } else
3060 done = 1;
3061 }
3062 if (!done) {
3063 rc = -EINVAL;
3064 goto do_prep_failed;
3065 }
3066 if (pmb->mb.un.varCfgPort.sli_mode == 3) {
3067 if (!pmb->mb.un.varCfgPort.cMA) {
3068 rc = -ENXIO;
3069 goto do_prep_failed;
3070 }
3071 if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
3072 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3073 phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
3074 } else
3075 phba->max_vpi = 0;
3076 if (pmb->mb.un.varCfgPort.gerbm)
3077 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
3078 if (pmb->mb.un.varCfgPort.gcrp)
3079 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
3080 if (pmb->mb.un.varCfgPort.ginb) {
3081 phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
3082 phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
3083 phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
3084 phba->inb_ha_copy = &phba->mbox->us.s3_inb_pgp.ha_copy;
3085 phba->inb_counter = &phba->mbox->us.s3_inb_pgp.counter;
3086 phba->inb_last_counter =
3087 phba->mbox->us.s3_inb_pgp.counter;
3088 } else {
3089 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
3090 phba->port_gp = phba->mbox->us.s3_pgp.port;
3091 phba->inb_ha_copy = NULL;
3092 phba->inb_counter = NULL;
3093 }
3094
3095 if (phba->cfg_enable_bg) {
3096 if (pmb->mb.un.varCfgPort.gbg)
3097 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
3098 else
3099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3100 "0443 Adapter did not grant "
3101 "BlockGuard\n");
3102 }
3103 } else {
3104 phba->hbq_get = NULL;
3105 phba->port_gp = phba->mbox->us.s2.port;
3106 phba->inb_ha_copy = NULL;
3107 phba->inb_counter = NULL;
3108 phba->max_vpi = 0;
3109 }
3110 do_prep_failed:
3111 mempool_free(pmb, phba->mbox_mem_pool);
3112 return rc;
3113 }
3114
3115
3116 /**
3117 * lpfc_sli_hba_setup: SLI initialization function.
3118 * @phba: Pointer to HBA context object.
3119 *
3120 * This function is the main SLI initialization function. This function
3121 * is called by the HBA initialization code, the HBA reset code and the HBA
3122 * error attention handler code. The caller is not required to hold any
3123 * locks. This function issues a config_port mailbox command to configure
3124 * the SLI, sets up the iocb rings and the HBQ rings. In the end the function
3125 * calls the config_port_post function to issue the init_link mailbox
3126 * command and to start discovery. The function will return zero
3127 * if successful, else it will return a negative error code.
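*
* A minimal calling sketch (hypothetical caller; on failure
* phba->link_state has already been set to LPFC_HBA_ERROR by this
* function):
*
*	rc = lpfc_sli_hba_setup(phba);
*	if (rc)
*		return rc;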
3128 **/ 3129 int 3130 lpfc_sli_hba_setup(struct lpfc_hba *phba) 3131 { 3132 uint32_t rc; 3133 int mode = 3; 3134 3135 switch (lpfc_sli_mode) { 3136 case 2: 3137 if (phba->cfg_enable_npiv) { 3138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 3139 "1824 NPIV enabled: Override lpfc_sli_mode " 3140 "parameter (%d) to auto (0).\n", 3141 lpfc_sli_mode); 3142 break; 3143 } 3144 mode = 2; 3145 break; 3146 case 0: 3147 case 3: 3148 break; 3149 default: 3150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 3151 "1819 Unrecognized lpfc_sli_mode " 3152 "parameter: %d.\n", lpfc_sli_mode); 3153 3154 break; 3155 } 3156 3157 rc = lpfc_sli_config_port(phba, mode); 3158 3159 if (rc && lpfc_sli_mode == 3) 3160 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 3161 "1820 Unable to select SLI-3. " 3162 "Not supported by adapter.\n"); 3163 if (rc && mode != 2) 3164 rc = lpfc_sli_config_port(phba, 2); 3165 if (rc) 3166 goto lpfc_sli_hba_setup_error; 3167 3168 if (phba->sli_rev == 3) { 3169 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 3170 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 3171 } else { 3172 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 3173 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 3174 phba->sli3_options = 0; 3175 } 3176 3177 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3178 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 3179 phba->sli_rev, phba->max_vpi); 3180 rc = lpfc_sli_ring_map(phba); 3181 3182 if (rc) 3183 goto lpfc_sli_hba_setup_error; 3184 3185 /* Init HBQs */ 3186 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 3187 rc = lpfc_sli_hbq_setup(phba); 3188 if (rc) 3189 goto lpfc_sli_hba_setup_error; 3190 } 3191 3192 phba->sli.sli_flag |= LPFC_PROCESS_LA; 3193 3194 rc = lpfc_config_port_post(phba); 3195 if (rc) 3196 goto lpfc_sli_hba_setup_error; 3197 3198 return rc; 3199 3200 lpfc_sli_hba_setup_error: 3201 phba->link_state = LPFC_HBA_ERROR; 3202 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3203 "0445 Firmware initialization failed\n"); 3204 return rc; 3205 } 3206 3207 3208 /** 3209 * lpfc_mbox_timeout: Timeout call back function for mbox timer. 3210 * @ptr: context object - pointer to hba structure. 3211 * 3212 * This is the callback function for mailbox timer. The mailbox 3213 * timer is armed when a new mailbox command is issued and the timer 3214 * is deleted when the mailbox complete. The function is called by 3215 * the kernel timer code when a mailbox does not complete within 3216 * expected time. This function wakes up the worker thread to 3217 * process the mailbox timeout and returns. All the processing is 3218 * done by the worker thread function lpfc_mbox_timeout_handler. 3219 **/ 3220 void 3221 lpfc_mbox_timeout(unsigned long ptr) 3222 { 3223 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 3224 unsigned long iflag; 3225 uint32_t tmo_posted; 3226 3227 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 3228 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 3229 if (!tmo_posted) 3230 phba->pport->work_port_events |= WORKER_MBOX_TMO; 3231 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 3232 3233 if (!tmo_posted) 3234 lpfc_worker_wake_up(phba); 3235 return; 3236 } 3237 3238 3239 /** 3240 * lpfc_mbox_timeout_handler: Worker thread function to handle mailbox timeout. 3241 * @phba: Pointer to HBA context object. 3242 * 3243 * This function is called from worker thread when a mailbox command times out. 3244 * The caller is not required to hold any locks. This function will reset the 3245 * HBA and recover all the pending commands. 
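*
* The recovery sequence, exactly as implemented below, aborts the
* outstanding FCP iocbs first and then, when HBA resets are enabled,
* takes the port through offline, restart and online:
*
*	lpfc_offline_prep(phba);
*	lpfc_offline(phba);
*	lpfc_sli_brdrestart(phba);
*	lpfc_online(phba);
*	lpfc_unblock_mgmt_io(phba);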
3246 **/
3247 void
3248 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
3249 {
3250 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
3251 MAILBOX_t *mb = &pmbox->mb;
3252 struct lpfc_sli *psli = &phba->sli;
3253 struct lpfc_sli_ring *pring;
3254
3255 /* Mbox cmd <mbxCommand> timeout */
3256 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3257 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
3258 mb->mbxCommand,
3259 phba->pport->port_state,
3260 phba->sli.sli_flag,
3261 phba->sli.mbox_active);
3262
3263 /* Setting state unknown so lpfc_sli_abort_iocb_ring
3264 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
3265 * it to fail all outstanding SCSI IO.
3266 */
3267 spin_lock_irq(&phba->pport->work_port_lock);
3268 phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3269 spin_unlock_irq(&phba->pport->work_port_lock);
3270 spin_lock_irq(&phba->hbalock);
3271 phba->link_state = LPFC_LINK_UNKNOWN;
3272 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
3273 spin_unlock_irq(&phba->hbalock);
3274
3275 pring = &psli->ring[psli->fcp_ring];
3276 lpfc_sli_abort_iocb_ring(phba, pring);
3277
3278 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
3279 "0345 Resetting board due to mailbox timeout\n");
3280 /*
3281 * lpfc_offline calls lpfc_sli_hba_down which will clean up
3282 * any outstanding mailbox commands.
3283 */
3284 /* If resets are disabled then set error state and return. */
3285 if (!phba->cfg_enable_hba_reset) {
3286 phba->link_state = LPFC_HBA_ERROR;
3287 return;
3288 }
3289 lpfc_offline_prep(phba);
3290 lpfc_offline(phba);
3291 lpfc_sli_brdrestart(phba);
3292 lpfc_online(phba);
3293 lpfc_unblock_mgmt_io(phba);
3294 return;
3295 }
3296
3297 /**
3298 * lpfc_sli_issue_mbox: Issue a mailbox command to firmware.
3299 * @phba: Pointer to HBA context object.
3300 * @pmbox: Pointer to mailbox object.
3301 * @flag: Flag indicating how the mailbox needs to be processed.
3302 *
3303 * This function is called by discovery code and HBA management code
3304 * to submit a mailbox command to firmware. This function gets the
3305 * hbalock to protect the data structures.
3306 * The mailbox command can be submitted in polling mode, in which case
3307 * this function will wait in a polling loop for the completion of the
3308 * mailbox.
3309 * If the mailbox is submitted in no_wait mode (not polling) the
3310 * function will submit the command and return immediately without waiting
3311 * for the mailbox completion. The no_wait mode is supported only when the
3312 * HBA is in SLI2/SLI3 mode and interrupts are enabled.
3313 * The SLI interface allows only one mailbox pending at a time. If the
3314 * mailbox is issued in polling mode and there is already a mailbox
3315 * pending, then the function will return an error. If the mailbox is issued
3316 * in NO_WAIT mode and there is a mailbox pending already, the function
3317 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
3318 * The SLI layer owns the mailbox object until the completion of the mailbox
3319 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
3320 * return codes the caller owns the mailbox command after the return of
3321 * the function.
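*
* A hedged usage sketch in polling mode (assumed caller context; the
* command-prep step mirrors lpfc_sli_config_port above):
*
*	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
*	if (!pmb)
*		return -ENOMEM;
*	lpfc_config_port(phba, pmb);
*	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
*	... examine rc and pmb->mb.mbxStatus ...
*	mempool_free(pmb, phba->mbox_mem_pool);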
3322 **/ 3323 int 3324 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 3325 { 3326 MAILBOX_t *mb; 3327 struct lpfc_sli *psli = &phba->sli; 3328 uint32_t status, evtctr; 3329 uint32_t ha_copy; 3330 int i; 3331 unsigned long timeout; 3332 unsigned long drvr_flag = 0; 3333 uint32_t word0, ldata; 3334 void __iomem *to_slim; 3335 int processing_queue = 0; 3336 3337 spin_lock_irqsave(&phba->hbalock, drvr_flag); 3338 if (!pmbox) { 3339 /* processing mbox queue from intr_handler */ 3340 processing_queue = 1; 3341 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3342 pmbox = lpfc_mbox_get(phba); 3343 if (!pmbox) { 3344 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3345 return MBX_SUCCESS; 3346 } 3347 } 3348 3349 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 3350 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 3351 if(!pmbox->vport) { 3352 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3353 lpfc_printf_log(phba, KERN_ERR, 3354 LOG_MBOX | LOG_VPORT, 3355 "1806 Mbox x%x failed. No vport\n", 3356 pmbox->mb.mbxCommand); 3357 dump_stack(); 3358 goto out_not_finished; 3359 } 3360 } 3361 3362 /* If the PCI channel is in offline state, do not post mbox. */ 3363 if (unlikely(pci_channel_offline(phba->pcidev))) { 3364 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3365 goto out_not_finished; 3366 } 3367 3368 psli = &phba->sli; 3369 3370 mb = &pmbox->mb; 3371 status = MBX_SUCCESS; 3372 3373 if (phba->link_state == LPFC_HBA_ERROR) { 3374 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3375 3376 /* Mbox command <mbxCommand> cannot issue */ 3377 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3378 goto out_not_finished; 3379 } 3380 3381 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 3382 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 3383 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3384 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3385 goto out_not_finished; 3386 } 3387 3388 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 3389 /* Polling for a mbox command when another one is already active 3390 * is not allowed in SLI. Also, the driver must have established 3391 * SLI2 mode to queue and process multiple mbox commands. 3392 */ 3393 3394 if (flag & MBX_POLL) { 3395 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3396 3397 /* Mbox command <mbxCommand> cannot issue */ 3398 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3399 goto out_not_finished; 3400 } 3401 3402 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { 3403 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3404 /* Mbox command <mbxCommand> cannot issue */ 3405 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3406 goto out_not_finished; 3407 } 3408 3409 /* Another mailbox command is still being processed, queue this 3410 * command to be processed later. 3411 */ 3412 lpfc_mbox_put(phba, pmbox); 3413 3414 /* Mbox cmd issue - BUSY */ 3415 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 3416 "(%d):0308 Mbox cmd issue - BUSY Data: " 3417 "x%x x%x x%x x%x\n", 3418 pmbox->vport ? 
pmbox->vport->vpi : 0xffffff, 3419 mb->mbxCommand, phba->pport->port_state, 3420 psli->sli_flag, flag); 3421 3422 psli->slistat.mbox_busy++; 3423 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3424 3425 if (pmbox->vport) { 3426 lpfc_debugfs_disc_trc(pmbox->vport, 3427 LPFC_DISC_TRC_MBOX_VPORT, 3428 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 3429 (uint32_t)mb->mbxCommand, 3430 mb->un.varWords[0], mb->un.varWords[1]); 3431 } 3432 else { 3433 lpfc_debugfs_disc_trc(phba->pport, 3434 LPFC_DISC_TRC_MBOX, 3435 "MBOX Bsy: cmd:x%x mb:x%x x%x", 3436 (uint32_t)mb->mbxCommand, 3437 mb->un.varWords[0], mb->un.varWords[1]); 3438 } 3439 3440 return MBX_BUSY; 3441 } 3442 3443 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 3444 3445 /* If we are not polling, we MUST be in SLI2 mode */ 3446 if (flag != MBX_POLL) { 3447 if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && 3448 (mb->mbxCommand != MBX_KILL_BOARD)) { 3449 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3450 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3451 /* Mbox command <mbxCommand> cannot issue */ 3452 LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); 3453 goto out_not_finished; 3454 } 3455 /* timeout active mbox command */ 3456 mod_timer(&psli->mbox_tmo, (jiffies + 3457 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 3458 } 3459 3460 /* Mailbox cmd <cmd> issue */ 3461 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 3462 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 3463 "x%x\n", 3464 pmbox->vport ? pmbox->vport->vpi : 0, 3465 mb->mbxCommand, phba->pport->port_state, 3466 psli->sli_flag, flag); 3467 3468 if (mb->mbxCommand != MBX_HEARTBEAT) { 3469 if (pmbox->vport) { 3470 lpfc_debugfs_disc_trc(pmbox->vport, 3471 LPFC_DISC_TRC_MBOX_VPORT, 3472 "MBOX Send vport: cmd:x%x mb:x%x x%x", 3473 (uint32_t)mb->mbxCommand, 3474 mb->un.varWords[0], mb->un.varWords[1]); 3475 } 3476 else { 3477 lpfc_debugfs_disc_trc(phba->pport, 3478 LPFC_DISC_TRC_MBOX, 3479 "MBOX Send: cmd:x%x mb:x%x x%x", 3480 (uint32_t)mb->mbxCommand, 3481 mb->un.varWords[0], mb->un.varWords[1]); 3482 } 3483 } 3484 3485 psli->slistat.mbox_cmd++; 3486 evtctr = psli->slistat.mbox_event; 3487 3488 /* next set own bit for the adapter and copy over command word */ 3489 mb->mbxOwner = OWN_CHIP; 3490 3491 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3492 /* First copy command data to host SLIM area */ 3493 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 3494 } else { 3495 if (mb->mbxCommand == MBX_CONFIG_PORT) { 3496 /* copy command data into host mbox for cmpl */ 3497 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 3498 } 3499 3500 /* First copy mbox command data to HBA SLIM, skip past first 3501 word */ 3502 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3503 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 3504 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 3505 3506 /* Next copy over first word, with mbxOwner set */ 3507 ldata = *((uint32_t *)mb); 3508 to_slim = phba->MBslimaddr; 3509 writel(ldata, to_slim); 3510 readl(to_slim); /* flush */ 3511 3512 if (mb->mbxCommand == MBX_CONFIG_PORT) { 3513 /* switch over to host mailbox */ 3514 psli->sli_flag |= LPFC_SLI2_ACTIVE; 3515 } 3516 } 3517 3518 wmb(); 3519 3520 switch (flag) { 3521 case MBX_NOWAIT: 3522 /* Set up reference to mailbox command */ 3523 psli->mbox_active = pmbox; 3524 /* Interrupt board to do it */ 3525 writel(CA_MBATT, phba->CAregaddr); 3526 readl(phba->CAregaddr); /* flush */ 3527 /* Don't wait for it to finish, just return */ 3528 break; 3529 3530 case MBX_POLL: 3531 /* Set up null reference to mailbox command */ 3532 
psli->mbox_active = NULL; 3533 /* Interrupt board to do it */ 3534 writel(CA_MBATT, phba->CAregaddr); 3535 readl(phba->CAregaddr); /* flush */ 3536 3537 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3538 /* First read mbox status word */ 3539 word0 = *((uint32_t *)phba->mbox); 3540 word0 = le32_to_cpu(word0); 3541 } else { 3542 /* First read mbox status word */ 3543 word0 = readl(phba->MBslimaddr); 3544 } 3545 3546 /* Read the HBA Host Attention Register */ 3547 ha_copy = readl(phba->HAregaddr); 3548 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 3549 mb->mbxCommand) * 3550 1000) + jiffies; 3551 i = 0; 3552 /* Wait for command to complete */ 3553 while (((word0 & OWN_CHIP) == OWN_CHIP) || 3554 (!(ha_copy & HA_MBATT) && 3555 (phba->link_state > LPFC_WARM_START))) { 3556 if (time_after(jiffies, timeout)) { 3557 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3558 spin_unlock_irqrestore(&phba->hbalock, 3559 drvr_flag); 3560 goto out_not_finished; 3561 } 3562 3563 /* Check if we took a mbox interrupt while we were 3564 polling */ 3565 if (((word0 & OWN_CHIP) != OWN_CHIP) 3566 && (evtctr != psli->slistat.mbox_event)) 3567 break; 3568 3569 if (i++ > 10) { 3570 spin_unlock_irqrestore(&phba->hbalock, 3571 drvr_flag); 3572 msleep(1); 3573 spin_lock_irqsave(&phba->hbalock, drvr_flag); 3574 } 3575 3576 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3577 /* First copy command data */ 3578 word0 = *((uint32_t *)phba->mbox); 3579 word0 = le32_to_cpu(word0); 3580 if (mb->mbxCommand == MBX_CONFIG_PORT) { 3581 MAILBOX_t *slimmb; 3582 uint32_t slimword0; 3583 /* Check real SLIM for any errors */ 3584 slimword0 = readl(phba->MBslimaddr); 3585 slimmb = (MAILBOX_t *) & slimword0; 3586 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 3587 && slimmb->mbxStatus) { 3588 psli->sli_flag &= 3589 ~LPFC_SLI2_ACTIVE; 3590 word0 = slimword0; 3591 } 3592 } 3593 } else { 3594 /* First copy command data */ 3595 word0 = readl(phba->MBslimaddr); 3596 } 3597 /* Read the HBA Host Attention Register */ 3598 ha_copy = readl(phba->HAregaddr); 3599 } 3600 3601 if (psli->sli_flag & LPFC_SLI2_ACTIVE) { 3602 /* copy results back to user */ 3603 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 3604 } else { 3605 /* First copy command data */ 3606 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 3607 MAILBOX_CMD_SIZE); 3608 if ((mb->mbxCommand == MBX_DUMP_MEMORY) && 3609 pmbox->context2) { 3610 lpfc_memcpy_from_slim((void *)pmbox->context2, 3611 phba->MBslimaddr + DMP_RSP_OFFSET, 3612 mb->un.varDmp.word_cnt); 3613 } 3614 } 3615 3616 writel(HA_MBATT, phba->HAregaddr); 3617 readl(phba->HAregaddr); /* flush */ 3618 3619 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3620 status = mb->mbxStatus; 3621 } 3622 3623 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 3624 return status; 3625 3626 out_not_finished: 3627 if (processing_queue) { 3628 pmbox->mb.mbxStatus = MBX_NOT_FINISHED; 3629 lpfc_mbox_cmpl_put(phba, pmbox); 3630 } 3631 return MBX_NOT_FINISHED; 3632 } 3633 3634 /** 3635 * __lpfc_sli_ringtx_put: Add an iocb to the txq. 3636 * @phba: Pointer to HBA context object. 3637 * @pring: Pointer to driver SLI ring object. 3638 * @piocb: Pointer to address of newly added command iocb. 3639 * 3640 * This function is called with hbalock held to add a command 3641 * iocb to the txq when SLI layer cannot submit the command iocb 3642 * to the ring. 3643 **/ 3644 static void 3645 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3646 struct lpfc_iocbq *piocb) 3647 { 3648 /* Insert the caller's iocb in the txq tail for later processing. 

/**
 * __lpfc_sli_ringtx_put: Add an iocb to the txq.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
static void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
}

/**
 * lpfc_sli_next_iocb: Get the next iocb in the txq.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. It checks the txq so that
 * iocbs queued there are flushed to the firmware before any new
 * iocb is submitted.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq * nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}

/**
 * __lpfc_sli_issue_iocb: Lockless version of lpfc_sli_issue_iocb.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb is used by other functions in the driver to
 * issue an iocb command to the HBA. If the PCI slot is recovering from
 * an error state, the HBA is resetting, or the LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR.
 * When the link is down, this function allows only iocbs for
 * posting buffers.
 * This function finds the next available slot in the command ring,
 * posts the command to that slot, and writes the port attention
 * register to request that the HBA start processing the new iocb.
 * If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
 * txq, otherwise the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held.
 * The function returns success after it successfully submits the
 * iocb to firmware or after adding it to the txq.
 **/
static int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
			     FC_FCP_CMND) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
			     MENLO_TRANSPORT_TYPE))
				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

iocb_busy:
	pring->stats.iocb_cmd_delay++;

out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}


/**
 * lpfc_sli_issue_iocb: Wrapper function for __lpfc_sli_issue_iocb.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb: it
 * takes the hbalock, calls __lpfc_sli_issue_iocb, and returns whatever
 * error code that function returns. This wrapper is used by functions
 * which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}
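
/*
 * Illustrative sketch (not part of the original source): issuing a
 * command iocb on the ELS ring. The iocb preparation step is elided;
 * any routine that fills in iocbq->iocb and iocbq->iocb_cmpl works the
 * same way.
 *
 *	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 *	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);
 *
 *	if (iocbq) {
 *		... fill in iocbq->iocb, iocbq->vport, iocbq->iocb_cmpl ...
 *		if (lpfc_sli_issue_iocb(phba, pring, iocbq,
 *					SLI_IOCB_RET_IOCB) == IOCB_BUSY)
 *			lpfc_sli_release_iocbq(phba, iocbq);
 *	}
 *
 * Without SLI_IOCB_RET_IOCB a full ring is not an error: the iocb is
 * parked on the txq and resubmitted later, so the caller must not free
 * it in that case.
 */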

/**
 * lpfc_extra_ring_setup: Extra ring setup function.
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionality.
 *
 * This function is called with no lock held.
 **/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

/**
 * lpfc_sli_async_event_handler: ASYNC iocb handler function.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	uint16_t temp;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;
	temp = icmd->ulpContext;

	if ((evt_code != ASYNC_TEMP_WARN) &&
	    (evt_code != ASYNC_TEMP_SAFE)) {
		lpfc_printf_log(phba,
			KERN_ERR,
			LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n",
			pring->ringno,
			icmd->un.asyncstat.evt_code);
		return;
	}
	temp_event_data.data = (uint32_t)temp;
	temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
	if (evt_code == ASYNC_TEMP_WARN) {
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				temp);
	}
	if (evt_code == ASYNC_TEMP_SAFE) {
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				temp);
	}

	/* Send temperature change event to applications */
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(temp_event_data), (char *) &temp_event_data,
		LPFC_NL_VENDOR_ID);

}
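
/*
 * Illustrative note (not part of the original source): the extra ring
 * profile set up by lpfc_extra_ring_setup is driven entirely by module
 * parameters. A sketch of a modprobe invocation that enables it,
 * assuming the stock lpfc parameter names:
 *
 *	modprobe lpfc lpfc_multi_ring_support=2 \
 *		      lpfc_multi_ring_rctl=4 lpfc_multi_ring_type=5
 *
 * cfg_multi_ring_rctl and cfg_multi_ring_type then feed the unsolicited
 * event mask (prt[0]) of the extra ring; the numeric values above are
 * only placeholders for the desired FC R_CTL/TYPE codes.
 */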

/**
 * lpfc_sli_setup: SLI ring setup function.
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up the rings of the SLI interface with the
 * number of iocbs per ring and the iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
				(pring->numRiocb * pring->sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli_queue_setup: Queue initialization function.
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
 * ring. This function also initializes the ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and always returns
 * 1.
 **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}
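
/*
 * Illustrative ordering sketch (not part of the original source): during
 * attach the two setup routines above run before the port is brought
 * online, roughly:
 *
 *	lpfc_sli_setup(phba);		ring geometry, iotag limits, masks
 *	lpfc_sli_queue_setup(phba);	list heads and ring indices
 *	... issue the CONFIG_PORT mailbox, then enable interrupts ...
 *
 * The exact call site lives in the probe/attach path (lpfc_init.c); this
 * is only the dependency order implied by the comments above.
 */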

/**
 * lpfc_sli_host_down: Vport cleanup function.
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
					 list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, iocb, struct lpfc_iocbq, list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
			iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
	return 1;
}

/**
 * lpfc_sli_hba_down: Resource cleanup function for the HBA.
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers, and mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to clean up driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb;
	IOCB_t *cmd = NULL;
	int i;
	unsigned long flags = 0;

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
				 struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(&phba->hbalock, flags);

	spin_lock(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock(&phba->pport->work_port_lock);

	/* Return any pending or completed mbox cmds */
	list_splice_init(&phba->sli.mboxq, &completions);
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy: SLI memory copy function.
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy; must be a multiple of the 32-bit
 *       word size, since the copy proceeds one word at a time.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
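
/*
 * Illustrative sketch (not part of the original source): copying one
 * status word out of the host mailbox area. MAILBOX_t and phba->mbox
 * are the real driver types; the destination variable is local to the
 * example.
 *
 *	MAILBOX_t hdr;
 *
 *	// Pull just the first 32-bit word (mbxCommand/mbxStatus/mbxOwner)
 *	// across, byte-swapping on big-endian hosts.
 *	lpfc_sli_pcimem_bcopy(phba->mbox, &hdr, sizeof(uint32_t));
 *
 * Passing MAILBOX_CMD_SIZE instead copies the whole mailbox, which is
 * exactly what lpfc_sli_issue_mbox does above.
 */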

/**
 * lpfc_sli_ringpostbuf_put: Function to add a buffer to the postbufq.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	   up later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag: Tag allocation function for a buffer posted
 * using CMD_QUE_XRI64_CX iocb.
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bit-wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get: Search for a buffer posted using the
 * CMD_QUE_XRI64_CX iocb.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the
 * pring->postbufq list. After the HBA DMAs data to these buffers, a
 * CMD_IOCB_RET_XRI64_CX iocb is posted to the response ring with the tag
 * of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then the lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
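
/*
 * Illustrative pairing sketch (not part of the original source): the
 * tag allocated at post time is the lookup key at completion time.
 *
 *	// post side
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	... place mp->buffer_tag in the CMD_QUE_XRI64_CX iocb ...
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *
 *	// completion side, in the CMD_IOCB_RET_XRI64_CX handler
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, returned_tag);
 *
 * "returned_tag" stands for the tag field recovered from the response
 * iocb; the exact union member depends on the command format.
 */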

/**
 * lpfc_sli_ringpostbuf_get: SLI2 buffer search function for
 * unsolicited CT and ELS events.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the CT and ELS unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
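
/*
 * Illustrative sketch (not part of the original source): an unsolicited
 * ELS handler recovering the virtual address for a DMA address reported
 * in the response iocb. "un.cont64[0]" mirrors the ULP_BDE64 layout;
 * the exact field names depend on the iocb format in use.
 *
 *	dma_addr_t pbde = getPaddr(icmd->un.cont64[0].addrHigh,
 *				   icmd->un.cont64[0].addrLow);
 *	struct lpfc_dmabuf *mp =
 *		lpfc_sli_ringpostbuf_get(phba, pring, pbde);
 *
 *	if (mp) {
 *		... process mp->virt, then repost or free the buffer ...
 *	}
 */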

/**
 * lpfc_sli_abort_els_cmpl: Completion handler for the els abort iocbs.
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	abort_iocb = NULL;

	if (irsp->ulpStatus) {
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
			abort_iocb = phba->sli.iocbq_lookup[abort_iotag];

		lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		/*
		 * If the iocb is not found in the firmware queue, the iocb
		 * might have completed already. Do not free it again.
		 */
		if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_release_iocbq(phba, cmdiocb);
			return;
		}
		/*
		 * Make sure we have the right iocbq before taking it
		 * off the txcmplq and trying to call the completion routine.
		 */
		if (!abort_iocb ||
		    abort_iocb->iocb.ulpContext != abort_context ||
		    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
			spin_unlock_irq(&phba->hbalock);
		else {
			list_del_init(&abort_iocb->list);
			pring->txcmplq_cnt--;
			spin_unlock_irq(&phba->hbalock);

			/* Firmware could still be in progress of DMAing
			 * payload, so don't free data buffer till after
			 * a hbeat.
			 */
			abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;

			abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
			abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
			(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
		}
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_ignore_els_cmpl: Completion handler for aborted ELS command.
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_issue_abort_iotag: Abort function for a command iocb.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command
 * iocb. This function is called with hbalock held.
 * The function returns 0 when it fails due to memory allocation
 * failure or when the command iocb is an abort request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval = IOCB_ERROR;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler.
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortContextTag,
			 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
	retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);
abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * the iocb from the txcmplq or calls the completion handler
	 * in the IOCB_ERROR case.
	 */
	return retval;
}
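
/*
 * Illustrative sketch (not part of the original source): aborting every
 * command a vport still has on a ring's txcmplq, as the vport teardown
 * path above does. hbalock must be held across the walk and the call.
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 *		if (iocb->vport != vport)
 *			continue;
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	}
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *
 * The original iocb completes later through its own handler with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED, as set up by
 * lpfc_sli_abort_els_cmpl.
 */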

/**
 * lpfc_sli_validate_fcp_iocb: Filtering function, used to find commands
 * associated with a vport/SCSI target/lun.
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by the vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by the vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
			__func__, ctx_cmd);
		break;
	}

	return rc;
}

/**
 * lpfc_sli_sum_iocb: Function to count the number of FCP iocbs pending.
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, it counts the FCP commands pending on the
 * vport for the SCSI device specified by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, it counts the FCP commands pending on the
 * vport for the SCSI target specified by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, it counts all FCP commands pending on the
 * vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
						ctx_cmd) == 0)
			sum++;
	}

	return sum;
}
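
/*
 * Illustrative sketch (not part of the original source): the SCSI error
 * handlers typically poll this counter after sending aborts, waiting
 * for the outstanding commands on a LUN to drain. Retry limits and
 * logging are elided.
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN)) {
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 *		... give up after a bounded number of rounds ...
 *	}
 */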

/**
 * lpfc_sli_abort_fcp_cmpl: Completion handler function for an aborted
 * FCP iocb.
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iocb: This function issues an abort for all SCSI
 * commands pending on a SCSI host(vport)/target/lun.
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends aborts only for
 * the FCP iocbs associated with the lun specified by the tgt_id and
 * lun_id parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends aborts only for
 * the FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends aborts for all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = phba->pport;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}

/**
 * lpfc_sli_wake_iocb_wait: iocb completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait.
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. It is called by the
 * ring event handler function without any lock held. It can be
 * called from both worker thread context and interrupt context,
 * as well as from other threads which clean up the SLI layer
 * objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_sli_issue_iocb_wait: Synchronous function to issue iocb commands.
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to sli ring.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. If the iocb command is not
 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
 * The caller should not free the iocb resources if this function
 * returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion,
 * so it should not be called from any context which does not
 * allow sleeping; for the same reason, it cannot be called
 * with interrupts disabled.
 * This function assumes that iocb completions occur while
 * this function sleeps, so it cannot be called from the
 * thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb; the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;

	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~LPFC_IO_WAKE;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
	if (retval == IOCB_SUCCESS) {
		timeout_req = timeout * HZ;
		timeleft = wait_event_timeout(done_q,
				piocb->iocb_flag & LPFC_IO_WAKE,
				timeout_req);

		if (piocb->iocb_flag & LPFC_IO_WAKE) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
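
/*
 * Illustrative sketch (not part of the original source): synchronous
 * issue with a caller-owned response iocb. Both iocbqs come from the
 * driver pool; the 30-second timeout is an arbitrary example value.
 *
 *	struct lpfc_iocbq *cmdiocbq = lpfc_sli_get_iocbq(phba);
 *	struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
 *
 *	... fill in cmdiocbq->iocb and cmdiocbq->vport ...
 *	if (lpfc_sli_issue_iocb_wait(phba, pring, cmdiocbq, rspiocbq, 30)
 *	    == IOCB_SUCCESS) {
 *		... inspect rspiocbq->iocb.ulpStatus ...
 *		lpfc_sli_release_iocbq(phba, cmdiocbq);
 *	}
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 *
 * On IOCB_TIMEDOUT the command iocb must be left to the completion path
 * rather than freed here, per the warning in the header above.
 */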

/**
 * lpfc_sli_issue_mbox_wait: Synchronous function to issue a mailbox.
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. The caller
 * should not free the mailbox resources if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion,
 * so it should not be called from any context which does not
 * allow sleeping; for the same reason, it cannot be called
 * with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps, so it cannot be called from the worker
 * thread which processes mailbox completions.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	int retval;
	unsigned long flag;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1)
		return MBX_NOT_FINISHED;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				timeout * HZ);

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context1 = NULL;
		/*
		 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
		 * otherwise do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
			retval = MBX_SUCCESS;
		else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}

	return retval;
}
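
/*
 * Illustrative sketch (not part of the original source): a management
 * path issuing a mailbox synchronously. LPFC_MBOX_TMO is the driver's
 * stock mailbox timeout; the preparation helper here stands in for any
 * lpfc_mbox.c routine.
 *
 *	LPFC_MBOXQ_t *pmboxq = mempool_alloc(phba->mbox_mem_pool,
 *					     GFP_KERNEL);
 *	if (pmboxq) {
 *		lpfc_read_lnk_stat(phba, pmboxq);
 *		if (lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO)
 *		    == MBX_SUCCESS) {
 *			... read the results from pmboxq->mb.un ...
 *			mempool_free(pmboxq, phba->mbox_mem_pool);
 *		}
 *		// on MBX_TIMEOUT the mailbox is owned by the completion
 *		// path and must not be freed here
 *	}
 */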

/**
 * lpfc_sli_flush_mbox_queue: mailbox queue cleanup function.
 * @phba: Pointer to HBA context.
 *
 * This function is called to clean up any pending mailbox
 * objects in the driver queue before bringing the HBA offline.
 * This function is called while resetting the HBA.
 * The function is called without any lock held. The function
 * takes hbalock to update the SLI data structure.
 * This function returns 1 when there is an active mailbox
 * command pending else returns 0.
 **/
int
lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
{
	struct lpfc_vport *vport = phba->pport;
	int i = 0;
	uint32_t ha_copy;

	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
		if (i++ > LPFC_MBOX_TMO * 1000)
			return 1;

		/*
		 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
		 * did finish. This way we won't get the misleading
		 * "Stray Mailbox Interrupt" message.
		 */
		spin_lock_irq(&phba->hbalock);
		ha_copy = phba->work_ha;
		phba->work_ha &= ~HA_MBATT;
		spin_unlock_irq(&phba->hbalock);

		if (ha_copy & HA_MBATT)
			if (lpfc_sli_handle_mb_event(phba) == 0)
				i = 0;

		msleep(1);
	}

	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
}

/**
 * lpfc_sli_check_eratt: check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check the
 * HBA's error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host
 * Attention Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return 0;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* Read chip Host Attention (HA) register */
	ha_copy = readl(phba->HAregaddr);
	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		lpfc_sli_read_hs(phba);
		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
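
/*
 * Illustrative sketch (not part of the original source): how a periodic
 * timer handler might use the poller above. The handler name and the
 * re-arm interval are placeholders; the real driver wires the timer up
 * in lpfc_init.c. The worker wake-up is the same mechanism the
 * interrupt handlers use.
 *
 *	static void example_eratt_poll(unsigned long ptr)
 *	{
 *		struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
 *
 *		if (lpfc_sli_check_eratt(phba))
 *			// phba->work_ha already carries HA_ERATT
 *			lpfc_worker_wake_up(phba);
 *
 *		mod_timer(&some_timer, jiffies + HZ);
 *	}
 */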

/**
 * lpfc_sp_intr_handler: The slow-path interrupt handler of lpfc driver.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device is enabled with MSI-X multi-message
 * interrupt mode and there are slow-path events in the HBA. However,
 * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
 * this function is called as part of the device-level interrupt handler.
 * When the PCI slot is in error recovery or the HBA is undergoing
 * initialization, the interrupt handler will not process the interrupt.
 * The link attention and ELS ring attention events are handled by the
 * worker thread. The interrupt handler signals the worker thread and
 * returns for these events. This function is called without any
 * lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled else
 * it returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Extra care is needed when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* If the pci channel is offline, ignore all the interrupts */
		if (unlikely(pci_channel_offline(phba->pcidev)))
			return IRQ_NONE;
		/* Update device-level interrupt statistics */
		phba->sli.slistat.sli_intr++;
		/* Ignore all interrupts during initialization. */
		if (unlikely(phba->link_state < LPFC_LINK_DOWN))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		ha_copy = readl(phba->HAregaddr);
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}
		/* Clear up only attention source related to slow-path */
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				  (HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				control = readl(phba->HCregaddr);

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT)
			lpfc_sli_read_hs(phba);
		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;

} /* lpfc_sp_intr_handler */
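/*
 * Usage sketch (illustrative only, not part of the handler logic): in
 * MSI-X multi-message mode the slow-path handler above and the fast-path
 * handler below are attached to separate message vectors, roughly:
 *
 *	rc = request_irq(phba->msix_entries[0].vector,
 *			 &lpfc_sp_intr_handler, IRQF_SHARED,
 *			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
 *	if (!rc)
 *		rc = request_irq(phba->msix_entries[1].vector,
 *			 	 &lpfc_fp_intr_handler, IRQF_SHARED,
 *				 LPFC_FP_DRIVER_HANDLER_NAME, phba);
 *
 * The msix_entries[] layout and the handler-name macros are assumptions
 * here; the actual MSI-X setup lives in the driver's init code.
 */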
/**
 * lpfc_fp_intr_handler: The fast-path interrupt handler of lpfc driver.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when the device is enabled with MSI-X multi-message
 * interrupt mode and there is a fast-path FCP IOCB ring event in the
 * HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. SCSI FCP fast-path ring events are handled in the
 * interrupt context. This function is called without any lock held. It
 * gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else
 * it returns IRQ_NONE.
 **/
irqreturn_t
lpfc_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode.
	 */
	if (phba->intr_type == MSIX) {
		/* If pci channel is offline, ignore all the interrupts */
		if (unlikely(pci_channel_offline(phba->pcidev)))
			return IRQ_NONE;
		/* Update device-level interrupt statistics */
		phba->sli.slistat.sli_intr++;
		/* Ignore all interrupts during initialization. */
		if (unlikely(phba->link_state < LPFC_LINK_DOWN))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		ha_copy = readl(phba->HAregaddr);
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_fp_intr_handler */
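/*
 * Usage sketch (illustrative only): with MSI or Pin-IRQ interrupts the
 * driver registers the single device-level handler below on the PCI
 * device's IRQ and lets it dispatch to the two handlers above:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * LPFC_DRIVER_NAME as the registration name is an assumption here.
 */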
/**
 * lpfc_intr_handler: The device-level interrupt handler of lpfc driver.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler called from the PCI
 * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
 * an event in the HBA which requires driver attention. This function
 * invokes the slow-path interrupt attention handling function and the
 * fast-path interrupt attention handling function in turn to process the
 * relevant HBA attention events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* If the pci channel is offline, ignore all the interrupts. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IRQ_NONE;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	phba->ha_copy = readl(phba->HAregaddr);
	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/* Clear attention sources except link and error attentions */
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_intr_handler */