1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 #include <linux/aer.h> 35 36 #include "lpfc_hw4.h" 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_sli4.h" 40 #include "lpfc_nl.h" 41 #include "lpfc_disc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_compat.h" 47 #include "lpfc_debugfs.h" 48 #include "lpfc_vport.h" 49 50 /* There are only four IOCB completion types. */ 51 typedef enum _lpfc_iocb_type { 52 LPFC_UNKNOWN_IOCB, 53 LPFC_UNSOL_IOCB, 54 LPFC_SOL_IOCB, 55 LPFC_ABORT_IOCB 56 } lpfc_iocb_type; 57 58 59 /* Provide function prototypes local to this module. */ 60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 61 uint32_t); 62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 63 uint8_t *, uint32_t *); 64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 65 struct lpfc_iocbq *); 66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 67 struct hbq_dmabuf *); 68 static IOCB_t * 69 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 70 { 71 return &iocbq->iocb; 72 } 73 74 /** 75 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 76 * @q: The Work Queue to operate on. 77 * @wqe: The work Queue Entry to put on the Work queue. 78 * 79 * This routine will copy the contents of @wqe to the next available entry on 80 * the @q. This function will then ring the Work Queue Doorbell to signal the 81 * HBA to start processing the Work Queue Entry. This function returns 0 if 82 * successful. If no entries are available on @q then this function will return 83 * -ENOMEM. 84 * The caller is expected to hold the hbalock when calling this routine. 
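 *
 * A minimal caller sketch (illustrative only; the work queue and WQE shown
 * here are placeholders, not a fixed part of this interface):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		... the queue was full; back off and retry later ...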
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);

	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
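 *
 * Note that the address of the queue entry actually used is cached in
 * phba->mbox so that the mailbox completion path can find it; it is
 * cleared again by lpfc_sli4_mq_release().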
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
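 *
 * A typical processing loop (illustrative sketch of how the interrupt
 * handlers in this driver use the EQ accessors):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL)
 *		... handle the event queue entry ...
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);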
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Header Receive Queue.
 * @drqe: The Data Receive Queue Entry to put on the Data Receive Queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
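 *
 * Note that the header and data receive queues are advanced in lockstep:
 * lpfc_sli4_rq_put() refuses to post when their host indexes disagree, and
 * this routine bumps both hba indexes together.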
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);

	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglqs. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglqs. The xritag that is passed in is used to index into the
 * array.
 * Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}

/**
 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function is called with hbalock held.
 * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
 * rrq struct and adds it to the active_rrq_list.
 *
 * returns 0 rrq slot set up for this xri
 *         < 0 could not get rrq memory or invalid parameter.
 **/
static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		      uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	uint16_t adj_xri;
	struct lpfc_node_rrq *rrq;
	int empty;

	/*
	 * set the active bit even if there is no mem available.
	 */
	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (!ndlp)
		return -EINVAL;
	if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
		return -EINVAL;
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (rrq) {
		rrq->send_rrq = send_rrq;
		rrq->xritag = xritag;
		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
		rrq->ndlp = ndlp;
		rrq->nlp_DID = ndlp->nlp_DID;
		rrq->vport = ndlp->vport;
		rrq->rxid = rxid;
		empty = list_empty(&phba->active_rrq_list);
		if (phba->cfg_enable_rrq && send_rrq)
			/*
			 * We need the xri before we can add this to the
			 * phba active rrq list.
			 */
			rrq->send_rrq = send_rrq;
		else
			rrq->send_rrq = 0;
		list_add_tail(&rrq->list, &phba->active_rrq_list);
		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
			phba->hba_flag |= HBA_RRQ_ACTIVE;
			if (empty)
				lpfc_worker_wake_up(phba);
		}
		return 0;
	}
	return -ENOMEM;
}

/**
 * __lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * This function is called with hbalock held. It clears the RRQ active
 * bit for @xritag in the node's xri_bitmap and frees the rrq structure.
 **/
static void
__lpfc_clr_rrq_active(struct lpfc_hba *phba,
		      uint16_t xritag,
		      struct lpfc_node_rrq *rrq)
{
	uint16_t adj_xri;
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if (!ndlp)
		ndlp = rrq->ndlp;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock. It checks whether stop_time (ratov
 * from setting the rrq active) has been reached; if it has and the
 * send_rrq flag is set then it calls lpfc_send_rrq. If the send_rrq
 * flag is not set then it just calls the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time)) {
			list_del(&rrq->list);
			if (!rrq->send_rrq)
				/* this call will free the rrq */
				__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
			else {
				/* if we send the rrq then the completion
				 * handler will clear the bit in the xribitmap.
				 */
				spin_unlock_irqrestore(&phba->hbalock, iflags);
				if (lpfc_send_rrq(phba, rrq)) {
					lpfc_clr_rrq_active(phba, rrq->xritag,
							    rrq);
				}
				spin_lock_irqsave(&phba->hbalock, iflags);
			}
		} else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQs for this vport.
 * @vport: Pointer to vport context object.
 *
 * Remove all active RRQs for this vport from the phba->active_rrq_list and
 * clear the rrq.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport) {
			list_del(&rrq->list);
			__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling __lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		list_del(&rrq->list);
		__lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}


/**
 * __lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
static int
__lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		       uint16_t xritag)
{
	uint16_t adj_xri;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (!ndlp)
		return 0;
	if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	int ret;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 * This function takes the hbalock.
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_clr_rrq_active(phba, xritag, rrq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}


/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function takes the hbalock.
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
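 *
 * For example, __lpfc_sli_get_sglq() below uses the lock-held variant of
 * this test to skip any sglq whose XRI still has an RRQ outstanding for
 * the target DID before handing the sglq out for a new command.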
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	int ret;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = __lpfc_test_rrq_active(phba, ndlp, xritag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * __lpfc_sli_get_sglq - Allocates a sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty it is successful, and the function returns a pointer
 * to the newly allocated sglq object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	uint16_t adj_xri;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		adj_xri = sglq->sli4_xritag -
				phba->sli4_hba.max_cfg_param.xri_base;
		if (__lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq.
 * The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}


	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
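 *
 * For example, CMD_FCP_IREAD64_CR is classified as LPFC_SOL_IOCB,
 * CMD_ABORT_XRI_CN as LPFC_ABORT_IOCB and CMD_RCV_ELS_REQ64_CX as
 * LPFC_UNSOL_IOCB, per the switch statement below.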
1097 **/ 1098 static lpfc_iocb_type 1099 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1100 { 1101 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1102 1103 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1104 return 0; 1105 1106 switch (iocb_cmnd) { 1107 case CMD_XMIT_SEQUENCE_CR: 1108 case CMD_XMIT_SEQUENCE_CX: 1109 case CMD_XMIT_BCAST_CN: 1110 case CMD_XMIT_BCAST_CX: 1111 case CMD_ELS_REQUEST_CR: 1112 case CMD_ELS_REQUEST_CX: 1113 case CMD_CREATE_XRI_CR: 1114 case CMD_CREATE_XRI_CX: 1115 case CMD_GET_RPI_CN: 1116 case CMD_XMIT_ELS_RSP_CX: 1117 case CMD_GET_RPI_CR: 1118 case CMD_FCP_IWRITE_CR: 1119 case CMD_FCP_IWRITE_CX: 1120 case CMD_FCP_IREAD_CR: 1121 case CMD_FCP_IREAD_CX: 1122 case CMD_FCP_ICMND_CR: 1123 case CMD_FCP_ICMND_CX: 1124 case CMD_FCP_TSEND_CX: 1125 case CMD_FCP_TRSP_CX: 1126 case CMD_FCP_TRECEIVE_CX: 1127 case CMD_FCP_AUTO_TRSP_CX: 1128 case CMD_ADAPTER_MSG: 1129 case CMD_ADAPTER_DUMP: 1130 case CMD_XMIT_SEQUENCE64_CR: 1131 case CMD_XMIT_SEQUENCE64_CX: 1132 case CMD_XMIT_BCAST64_CN: 1133 case CMD_XMIT_BCAST64_CX: 1134 case CMD_ELS_REQUEST64_CR: 1135 case CMD_ELS_REQUEST64_CX: 1136 case CMD_FCP_IWRITE64_CR: 1137 case CMD_FCP_IWRITE64_CX: 1138 case CMD_FCP_IREAD64_CR: 1139 case CMD_FCP_IREAD64_CX: 1140 case CMD_FCP_ICMND64_CR: 1141 case CMD_FCP_ICMND64_CX: 1142 case CMD_FCP_TSEND64_CX: 1143 case CMD_FCP_TRSP64_CX: 1144 case CMD_FCP_TRECEIVE64_CX: 1145 case CMD_GEN_REQUEST64_CR: 1146 case CMD_GEN_REQUEST64_CX: 1147 case CMD_XMIT_ELS_RSP64_CX: 1148 case DSSCMD_IWRITE64_CR: 1149 case DSSCMD_IWRITE64_CX: 1150 case DSSCMD_IREAD64_CR: 1151 case DSSCMD_IREAD64_CX: 1152 type = LPFC_SOL_IOCB; 1153 break; 1154 case CMD_ABORT_XRI_CN: 1155 case CMD_ABORT_XRI_CX: 1156 case CMD_CLOSE_XRI_CN: 1157 case CMD_CLOSE_XRI_CX: 1158 case CMD_XRI_ABORTED_CX: 1159 case CMD_ABORT_MXRI64_CN: 1160 case CMD_XMIT_BLS_RSP64_CX: 1161 type = LPFC_ABORT_IOCB; 1162 break; 1163 case CMD_RCV_SEQUENCE_CX: 1164 case CMD_RCV_ELS_REQ_CX: 1165 case CMD_RCV_SEQUENCE64_CX: 1166 case CMD_RCV_ELS_REQ64_CX: 1167 case CMD_ASYNC_STATUS: 1168 case CMD_IOCB_RCV_SEQ64_CX: 1169 case CMD_IOCB_RCV_ELS64_CX: 1170 case CMD_IOCB_RCV_CONT64_CX: 1171 case CMD_IOCB_RET_XRI64_CX: 1172 type = LPFC_UNSOL_IOCB; 1173 break; 1174 case CMD_IOCB_XMIT_MSEQ64_CR: 1175 case CMD_IOCB_XMIT_MSEQ64_CX: 1176 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1177 case CMD_IOCB_RCV_ELS_LIST64_CX: 1178 case CMD_IOCB_CLOSE_EXTENDED_CN: 1179 case CMD_IOCB_ABORT_EXTENDED_CN: 1180 case CMD_IOCB_RET_HBQE64_CN: 1181 case CMD_IOCB_FCP_IBIDIR64_CR: 1182 case CMD_IOCB_FCP_IBIDIR64_CX: 1183 case CMD_IOCB_FCP_ITASKMGT64_CX: 1184 case CMD_IOCB_LOGENTRY_CN: 1185 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1186 printk("%s - Unhandled SLI-3 Command x%x\n", 1187 __func__, iocb_cmnd); 1188 type = LPFC_UNKNOWN_IOCB; 1189 break; 1190 default: 1191 type = LPFC_UNKNOWN_IOCB; 1192 break; 1193 } 1194 1195 return type; 1196 } 1197 1198 /** 1199 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1200 * @phba: Pointer to HBA context object. 1201 * 1202 * This function is called from SLI initialization code 1203 * to configure every ring of the HBA's SLI interface. The 1204 * caller is not required to hold any lock. This function issues 1205 * a config_ring mailbox command for each ring. 1206 * This function returns zero if successful else returns a negative 1207 * error code. 
1208 **/ 1209 static int 1210 lpfc_sli_ring_map(struct lpfc_hba *phba) 1211 { 1212 struct lpfc_sli *psli = &phba->sli; 1213 LPFC_MBOXQ_t *pmb; 1214 MAILBOX_t *pmbox; 1215 int i, rc, ret = 0; 1216 1217 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1218 if (!pmb) 1219 return -ENOMEM; 1220 pmbox = &pmb->u.mb; 1221 phba->link_state = LPFC_INIT_MBX_CMDS; 1222 for (i = 0; i < psli->num_rings; i++) { 1223 lpfc_config_ring(phba, i, pmb); 1224 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1225 if (rc != MBX_SUCCESS) { 1226 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1227 "0446 Adapter failed to init (%d), " 1228 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1229 "ring %d\n", 1230 rc, pmbox->mbxCommand, 1231 pmbox->mbxStatus, i); 1232 phba->link_state = LPFC_HBA_ERROR; 1233 ret = -ENXIO; 1234 break; 1235 } 1236 } 1237 mempool_free(pmb, phba->mbox_mem_pool); 1238 return ret; 1239 } 1240 1241 /** 1242 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1243 * @phba: Pointer to HBA context object. 1244 * @pring: Pointer to driver SLI ring object. 1245 * @piocb: Pointer to the driver iocb object. 1246 * 1247 * This function is called with hbalock held. The function adds the 1248 * new iocb to txcmplq of the given ring. This function always returns 1249 * 0. If this function is called for ELS ring, this function checks if 1250 * there is a vport associated with the ELS command. This function also 1251 * starts els_tmofunc timer if this is an ELS command. 1252 **/ 1253 static int 1254 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1255 struct lpfc_iocbq *piocb) 1256 { 1257 list_add_tail(&piocb->list, &pring->txcmplq); 1258 piocb->iocb_flag |= LPFC_IO_ON_Q; 1259 pring->txcmplq_cnt++; 1260 if (pring->txcmplq_cnt > pring->txcmplq_max) 1261 pring->txcmplq_max = pring->txcmplq_cnt; 1262 1263 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1264 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1265 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1266 if (!piocb->vport) 1267 BUG(); 1268 else 1269 mod_timer(&piocb->vport->els_tmofunc, 1270 jiffies + HZ * (phba->fc_ratov << 1)); 1271 } 1272 1273 1274 return 0; 1275 } 1276 1277 /** 1278 * lpfc_sli_ringtx_get - Get first element of the txq 1279 * @phba: Pointer to HBA context object. 1280 * @pring: Pointer to driver SLI ring object. 1281 * 1282 * This function is called with hbalock held to get next 1283 * iocb in txq of the given ring. If there is any iocb in 1284 * the txq, the function returns first iocb in the list after 1285 * removing the iocb from the list, else it returns NULL. 1286 **/ 1287 struct lpfc_iocbq * 1288 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1289 { 1290 struct lpfc_iocbq *cmd_iocb; 1291 1292 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1293 if (cmd_iocb != NULL) 1294 pring->txq_cnt--; 1295 return cmd_iocb; 1296 } 1297 1298 /** 1299 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1300 * @phba: Pointer to HBA context object. 1301 * @pring: Pointer to driver SLI ring object. 1302 * 1303 * This function is called with hbalock held and the caller must post the 1304 * iocb without releasing the lock. If the caller releases the lock, 1305 * iocb slot returned by the function is not guaranteed to be available. 1306 * The function returns pointer to the next available iocb slot if there 1307 * is available slot in the ring, else it returns NULL. 
1308 * If the get index of the ring is ahead of the put index, the function 1309 * will post an error attention event to the worker thread to take the 1310 * HBA to offline state. 1311 **/ 1312 static IOCB_t * 1313 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1314 { 1315 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1316 uint32_t max_cmd_idx = pring->numCiocb; 1317 if ((pring->next_cmdidx == pring->cmdidx) && 1318 (++pring->next_cmdidx >= max_cmd_idx)) 1319 pring->next_cmdidx = 0; 1320 1321 if (unlikely(pring->local_getidx == pring->next_cmdidx)) { 1322 1323 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 1324 1325 if (unlikely(pring->local_getidx >= max_cmd_idx)) { 1326 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1327 "0315 Ring %d issue: portCmdGet %d " 1328 "is bigger than cmd ring %d\n", 1329 pring->ringno, 1330 pring->local_getidx, max_cmd_idx); 1331 1332 phba->link_state = LPFC_HBA_ERROR; 1333 /* 1334 * All error attention handlers are posted to 1335 * worker thread 1336 */ 1337 phba->work_ha |= HA_ERATT; 1338 phba->work_hs = HS_FFER3; 1339 1340 lpfc_worker_wake_up(phba); 1341 1342 return NULL; 1343 } 1344 1345 if (pring->local_getidx == pring->next_cmdidx) 1346 return NULL; 1347 } 1348 1349 return lpfc_cmd_iocb(phba, pring); 1350 } 1351 1352 /** 1353 * lpfc_sli_next_iotag - Get an iotag for the iocb 1354 * @phba: Pointer to HBA context object. 1355 * @iocbq: Pointer to driver iocb object. 1356 * 1357 * This function gets an iotag for the iocb. If there is no unused iotag and 1358 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1359 * array and assigns a new iotag. 1360 * The function returns the allocated iotag if successful, else returns zero. 1361 * Zero is not a valid iotag. 1362 * The caller is not required to hold any lock. 
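 *
 * A minimal usage sketch (illustrative only):
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		... allocation failed, do not issue this iocb ...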
1363 **/ 1364 uint16_t 1365 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1366 { 1367 struct lpfc_iocbq **new_arr; 1368 struct lpfc_iocbq **old_arr; 1369 size_t new_len; 1370 struct lpfc_sli *psli = &phba->sli; 1371 uint16_t iotag; 1372 1373 spin_lock_irq(&phba->hbalock); 1374 iotag = psli->last_iotag; 1375 if(++iotag < psli->iocbq_lookup_len) { 1376 psli->last_iotag = iotag; 1377 psli->iocbq_lookup[iotag] = iocbq; 1378 spin_unlock_irq(&phba->hbalock); 1379 iocbq->iotag = iotag; 1380 return iotag; 1381 } else if (psli->iocbq_lookup_len < (0xffff 1382 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1383 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1384 spin_unlock_irq(&phba->hbalock); 1385 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1386 GFP_KERNEL); 1387 if (new_arr) { 1388 spin_lock_irq(&phba->hbalock); 1389 old_arr = psli->iocbq_lookup; 1390 if (new_len <= psli->iocbq_lookup_len) { 1391 /* highly unprobable case */ 1392 kfree(new_arr); 1393 iotag = psli->last_iotag; 1394 if(++iotag < psli->iocbq_lookup_len) { 1395 psli->last_iotag = iotag; 1396 psli->iocbq_lookup[iotag] = iocbq; 1397 spin_unlock_irq(&phba->hbalock); 1398 iocbq->iotag = iotag; 1399 return iotag; 1400 } 1401 spin_unlock_irq(&phba->hbalock); 1402 return 0; 1403 } 1404 if (psli->iocbq_lookup) 1405 memcpy(new_arr, old_arr, 1406 ((psli->last_iotag + 1) * 1407 sizeof (struct lpfc_iocbq *))); 1408 psli->iocbq_lookup = new_arr; 1409 psli->iocbq_lookup_len = new_len; 1410 psli->last_iotag = iotag; 1411 psli->iocbq_lookup[iotag] = iocbq; 1412 spin_unlock_irq(&phba->hbalock); 1413 iocbq->iotag = iotag; 1414 kfree(old_arr); 1415 return iotag; 1416 } 1417 } else 1418 spin_unlock_irq(&phba->hbalock); 1419 1420 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1421 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1422 psli->last_iotag); 1423 1424 return 0; 1425 } 1426 1427 /** 1428 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1429 * @phba: Pointer to HBA context object. 1430 * @pring: Pointer to driver SLI ring object. 1431 * @iocb: Pointer to iocb slot in the ring. 1432 * @nextiocb: Pointer to driver iocb object which need to be 1433 * posted to firmware. 1434 * 1435 * This function is called with hbalock held to post a new iocb to 1436 * the firmware. This function copies the new iocb to ring iocb slot and 1437 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1438 * a completion call back for this iocb else the function will free the 1439 * iocb object. 1440 **/ 1441 static void 1442 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1443 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1444 { 1445 /* 1446 * Set up an iotag 1447 */ 1448 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1449 1450 1451 if (pring->ringno == LPFC_ELS_RING) { 1452 lpfc_debugfs_slow_ring_trc(phba, 1453 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1454 *(((uint32_t *) &nextiocb->iocb) + 4), 1455 *(((uint32_t *) &nextiocb->iocb) + 6), 1456 *(((uint32_t *) &nextiocb->iocb) + 7)); 1457 } 1458 1459 /* 1460 * Issue iocb command to adapter 1461 */ 1462 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1463 wmb(); 1464 pring->stats.iocb_cmd++; 1465 1466 /* 1467 * If there is no completion routine to call, we can release the 1468 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1469 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1470 */ 1471 if (nextiocb->iocb_cmpl) 1472 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1473 else 1474 __lpfc_sli_release_iocbq(phba, nextiocb); 1475 1476 /* 1477 * Let the HBA know what IOCB slot will be the next one the 1478 * driver will put a command into. 1479 */ 1480 pring->cmdidx = pring->next_cmdidx; 1481 writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1482 } 1483 1484 /** 1485 * lpfc_sli_update_full_ring - Update the chip attention register 1486 * @phba: Pointer to HBA context object. 1487 * @pring: Pointer to driver SLI ring object. 1488 * 1489 * The caller is not required to hold any lock for calling this function. 1490 * This function updates the chip attention bits for the ring to inform firmware 1491 * that there are pending work to be done for this ring and requests an 1492 * interrupt when there is space available in the ring. This function is 1493 * called when the driver is unable to post more iocbs to the ring due 1494 * to unavailability of space in the ring. 1495 **/ 1496 static void 1497 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1498 { 1499 int ringno = pring->ringno; 1500 1501 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1502 1503 wmb(); 1504 1505 /* 1506 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1507 * The HBA will tell us when an IOCB entry is available. 1508 */ 1509 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1510 readl(phba->CAregaddr); /* flush */ 1511 1512 pring->stats.iocb_cmd_full++; 1513 } 1514 1515 /** 1516 * lpfc_sli_update_ring - Update chip attention register 1517 * @phba: Pointer to HBA context object. 1518 * @pring: Pointer to driver SLI ring object. 1519 * 1520 * This function updates the chip attention register bit for the 1521 * given ring to inform HBA that there is more work to be done 1522 * in this ring. The caller is not required to hold any lock. 1523 **/ 1524 static void 1525 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1526 { 1527 int ringno = pring->ringno; 1528 1529 /* 1530 * Tell the HBA that there is work to do in this ring. 1531 */ 1532 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1533 wmb(); 1534 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1535 readl(phba->CAregaddr); /* flush */ 1536 } 1537 } 1538 1539 /** 1540 * lpfc_sli_resume_iocb - Process iocbs in the txq 1541 * @phba: Pointer to HBA context object. 1542 * @pring: Pointer to driver SLI ring object. 1543 * 1544 * This function is called with hbalock held to post pending iocbs 1545 * in the txq to the firmware. This function is called when driver 1546 * detects space available in the ring. 1547 **/ 1548 static void 1549 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1550 { 1551 IOCB_t *iocb; 1552 struct lpfc_iocbq *nextiocb; 1553 1554 /* 1555 * Check to see if: 1556 * (a) there is anything on the txq to send 1557 * (b) link is up 1558 * (c) link attention events can be processed (fcp ring only) 1559 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1560 */ 1561 if (pring->txq_cnt && 1562 lpfc_is_link_up(phba) && 1563 (pring->ringno != phba->sli.fcp_ring || 1564 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1565 1566 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1567 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1568 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1569 1570 if (iocb) 1571 lpfc_sli_update_ring(phba, pring); 1572 else 1573 lpfc_sli_update_full_ring(phba, pring); 1574 } 1575 1576 return; 1577 } 1578 1579 /** 1580 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1581 * @phba: Pointer to HBA context object. 1582 * @hbqno: HBQ number. 1583 * 1584 * This function is called with hbalock held to get the next 1585 * available slot for the given HBQ. If there is free slot 1586 * available for the HBQ it will return pointer to the next available 1587 * HBQ entry else it will return NULL. 1588 **/ 1589 static struct lpfc_hbq_entry * 1590 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1591 { 1592 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1593 1594 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1595 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1596 hbqp->next_hbqPutIdx = 0; 1597 1598 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1599 uint32_t raw_index = phba->hbq_get[hbqno]; 1600 uint32_t getidx = le32_to_cpu(raw_index); 1601 1602 hbqp->local_hbqGetIdx = getidx; 1603 1604 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1605 lpfc_printf_log(phba, KERN_ERR, 1606 LOG_SLI | LOG_VPORT, 1607 "1802 HBQ %d: local_hbqGetIdx " 1608 "%u is > than hbqp->entry_count %u\n", 1609 hbqno, hbqp->local_hbqGetIdx, 1610 hbqp->entry_count); 1611 1612 phba->link_state = LPFC_HBA_ERROR; 1613 return NULL; 1614 } 1615 1616 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1617 return NULL; 1618 } 1619 1620 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1621 hbqp->hbqPutIdx; 1622 } 1623 1624 /** 1625 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1626 * @phba: Pointer to HBA context object. 1627 * 1628 * This function is called with no lock held to free all the 1629 * hbq buffers while uninitializing the SLI interface. It also 1630 * frees the HBQ buffers returned by the firmware but not yet 1631 * processed by the upper layers. 
1632 **/ 1633 void 1634 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1635 { 1636 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1637 struct hbq_dmabuf *hbq_buf; 1638 unsigned long flags; 1639 int i, hbq_count; 1640 uint32_t hbqno; 1641 1642 hbq_count = lpfc_sli_hbq_count(); 1643 /* Return all memory used by all HBQs */ 1644 spin_lock_irqsave(&phba->hbalock, flags); 1645 for (i = 0; i < hbq_count; ++i) { 1646 list_for_each_entry_safe(dmabuf, next_dmabuf, 1647 &phba->hbqs[i].hbq_buffer_list, list) { 1648 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1649 list_del(&hbq_buf->dbuf.list); 1650 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1651 } 1652 phba->hbqs[i].buffer_count = 0; 1653 } 1654 /* Return all HBQ buffer that are in-fly */ 1655 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1656 list) { 1657 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1658 list_del(&hbq_buf->dbuf.list); 1659 if (hbq_buf->tag == -1) { 1660 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1661 (phba, hbq_buf); 1662 } else { 1663 hbqno = hbq_buf->tag >> 16; 1664 if (hbqno >= LPFC_MAX_HBQS) 1665 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1666 (phba, hbq_buf); 1667 else 1668 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1669 hbq_buf); 1670 } 1671 } 1672 1673 /* Mark the HBQs not in use */ 1674 phba->hbq_in_use = 0; 1675 spin_unlock_irqrestore(&phba->hbalock, flags); 1676 } 1677 1678 /** 1679 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1680 * @phba: Pointer to HBA context object. 1681 * @hbqno: HBQ number. 1682 * @hbq_buf: Pointer to HBQ buffer. 1683 * 1684 * This function is called with the hbalock held to post a 1685 * hbq buffer to the firmware. If the function finds an empty 1686 * slot in the HBQ, it will post the buffer. The function will return 1687 * pointer to the hbq entry if it successfully post the buffer 1688 * else it will return NULL. 1689 **/ 1690 static int 1691 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1692 struct hbq_dmabuf *hbq_buf) 1693 { 1694 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1695 } 1696 1697 /** 1698 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1699 * @phba: Pointer to HBA context object. 1700 * @hbqno: HBQ number. 1701 * @hbq_buf: Pointer to HBQ buffer. 1702 * 1703 * This function is called with the hbalock held to post a hbq buffer to the 1704 * firmware. If the function finds an empty slot in the HBQ, it will post the 1705 * buffer and place it on the hbq_buffer_list. The function will return zero if 1706 * it successfully post the buffer else it will return an error. 
1707 **/ 1708 static int 1709 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1710 struct hbq_dmabuf *hbq_buf) 1711 { 1712 struct lpfc_hbq_entry *hbqe; 1713 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1714 1715 /* Get next HBQ entry slot to use */ 1716 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1717 if (hbqe) { 1718 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1719 1720 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1721 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1722 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1723 hbqe->bde.tus.f.bdeFlags = 0; 1724 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1725 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1726 /* Sync SLIM */ 1727 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1728 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1729 /* flush */ 1730 readl(phba->hbq_put + hbqno); 1731 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1732 return 0; 1733 } else 1734 return -ENOMEM; 1735 } 1736 1737 /** 1738 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1739 * @phba: Pointer to HBA context object. 1740 * @hbqno: HBQ number. 1741 * @hbq_buf: Pointer to HBQ buffer. 1742 * 1743 * This function is called with the hbalock held to post an RQE to the SLI4 1744 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1745 * the hbq_buffer_list and return zero, otherwise it will return an error. 1746 **/ 1747 static int 1748 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1749 struct hbq_dmabuf *hbq_buf) 1750 { 1751 int rc; 1752 struct lpfc_rqe hrqe; 1753 struct lpfc_rqe drqe; 1754 1755 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1756 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1757 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1758 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1759 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1760 &hrqe, &drqe); 1761 if (rc < 0) 1762 return rc; 1763 hbq_buf->tag = rc; 1764 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1765 return 0; 1766 } 1767 1768 /* HBQ for ELS and CT traffic. */ 1769 static struct lpfc_hbq_init lpfc_els_hbq = { 1770 .rn = 1, 1771 .entry_count = 256, 1772 .mask_count = 0, 1773 .profile = 0, 1774 .ring_mask = (1 << LPFC_ELS_RING), 1775 .buffer_count = 0, 1776 .init_count = 40, 1777 .add_count = 40, 1778 }; 1779 1780 /* HBQ for the extra ring if needed */ 1781 static struct lpfc_hbq_init lpfc_extra_hbq = { 1782 .rn = 1, 1783 .entry_count = 200, 1784 .mask_count = 0, 1785 .profile = 0, 1786 .ring_mask = (1 << LPFC_EXTRA_RING), 1787 .buffer_count = 0, 1788 .init_count = 0, 1789 .add_count = 5, 1790 }; 1791 1792 /* Array of HBQs */ 1793 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1794 &lpfc_els_hbq, 1795 &lpfc_extra_hbq, 1796 }; 1797 1798 /** 1799 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1800 * @phba: Pointer to HBA context object. 1801 * @hbqno: HBQ number. 1802 * @count: Number of HBQ buffers to be posted. 1803 * 1804 * This function is called with no lock held to post more hbq buffers to the 1805 * given HBQ. The function returns the number of HBQ buffers successfully 1806 * posted. 
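 *
 * Worked example of the buffer tag layout used while posting: the tag is
 * built as (hbqno << 16) | buffer_count, so for HBQ 1 and the 40th buffer
 * the tag is 0x10028; lpfc_sli_hbqbuf_find() and lpfc_sli_hbqbuf_free_all()
 * later recover the queue number with (tag >> 16).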
1807 **/ 1808 static int 1809 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1810 { 1811 uint32_t i, posted = 0; 1812 unsigned long flags; 1813 struct hbq_dmabuf *hbq_buffer; 1814 LIST_HEAD(hbq_buf_list); 1815 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1816 return 0; 1817 1818 if ((phba->hbqs[hbqno].buffer_count + count) > 1819 lpfc_hbq_defs[hbqno]->entry_count) 1820 count = lpfc_hbq_defs[hbqno]->entry_count - 1821 phba->hbqs[hbqno].buffer_count; 1822 if (!count) 1823 return 0; 1824 /* Allocate HBQ entries */ 1825 for (i = 0; i < count; i++) { 1826 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1827 if (!hbq_buffer) 1828 break; 1829 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1830 } 1831 /* Check whether HBQ is still in use */ 1832 spin_lock_irqsave(&phba->hbalock, flags); 1833 if (!phba->hbq_in_use) 1834 goto err; 1835 while (!list_empty(&hbq_buf_list)) { 1836 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1837 dbuf.list); 1838 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1839 (hbqno << 16)); 1840 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1841 phba->hbqs[hbqno].buffer_count++; 1842 posted++; 1843 } else 1844 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1845 } 1846 spin_unlock_irqrestore(&phba->hbalock, flags); 1847 return posted; 1848 err: 1849 spin_unlock_irqrestore(&phba->hbalock, flags); 1850 while (!list_empty(&hbq_buf_list)) { 1851 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1852 dbuf.list); 1853 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1854 } 1855 return 0; 1856 } 1857 1858 /** 1859 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1860 * @phba: Pointer to HBA context object. 1861 * @qno: HBQ number. 1862 * 1863 * This function posts more buffers to the HBQ. This function 1864 * is called with no lock held. The function returns the number of HBQ entries 1865 * successfully allocated. 1866 **/ 1867 int 1868 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1869 { 1870 if (phba->sli_rev == LPFC_SLI_REV4) 1871 return 0; 1872 else 1873 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1874 lpfc_hbq_defs[qno]->add_count); 1875 } 1876 1877 /** 1878 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1879 * @phba: Pointer to HBA context object. 1880 * @qno: HBQ queue number. 1881 * 1882 * This function is called from SLI initialization code path with 1883 * no lock held to post initial HBQ buffers to firmware. The 1884 * function returns the number of HBQ entries successfully allocated. 1885 **/ 1886 static int 1887 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1888 { 1889 if (phba->sli_rev == LPFC_SLI_REV4) 1890 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1891 lpfc_hbq_defs[qno]->entry_count); 1892 else 1893 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1894 lpfc_hbq_defs[qno]->init_count); 1895 } 1896 1897 /** 1898 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1899 * @phba: Pointer to HBA context object. 1900 * @hbqno: HBQ number. 1901 * 1902 * This function removes the first hbq buffer on an hbq list and returns a 1903 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 
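 *
 * Minimal usage sketch (the list head is, for example, one of the per-HBQ
 * hbq_buffer_list heads or rb_pend_list):
 *
 *	hbq_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[hbqno].hbq_buffer_list);
 *	if (!hbq_buf)
 *		return;		(list was empty)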
1904 **/ 1905 static struct hbq_dmabuf * 1906 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1907 { 1908 struct lpfc_dmabuf *d_buf; 1909 1910 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1911 if (!d_buf) 1912 return NULL; 1913 return container_of(d_buf, struct hbq_dmabuf, dbuf); 1914 } 1915 1916 /** 1917 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 1918 * @phba: Pointer to HBA context object. 1919 * @tag: Tag of the hbq buffer. 1920 * 1921 * This function is called with hbalock held. This function searches 1922 * for the hbq buffer associated with the given tag in the hbq buffer 1923 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 1924 * it returns NULL. 1925 **/ 1926 static struct hbq_dmabuf * 1927 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 1928 { 1929 struct lpfc_dmabuf *d_buf; 1930 struct hbq_dmabuf *hbq_buf; 1931 uint32_t hbqno; 1932 1933 hbqno = tag >> 16; 1934 if (hbqno >= LPFC_MAX_HBQS) 1935 return NULL; 1936 1937 spin_lock_irq(&phba->hbalock); 1938 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 1939 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 1940 if (hbq_buf->tag == tag) { 1941 spin_unlock_irq(&phba->hbalock); 1942 return hbq_buf; 1943 } 1944 } 1945 spin_unlock_irq(&phba->hbalock); 1946 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 1947 "1803 Bad hbq tag. Data: x%x x%x\n", 1948 tag, phba->hbqs[tag >> 16].buffer_count); 1949 return NULL; 1950 } 1951 1952 /** 1953 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 1954 * @phba: Pointer to HBA context object. 1955 * @hbq_buffer: Pointer to HBQ buffer. 1956 * 1957 * This function is called with hbalock. This function gives back 1958 * the hbq buffer to firmware. If the HBQ does not have space to 1959 * post the buffer, it will free the buffer. 1960 **/ 1961 void 1962 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 1963 { 1964 uint32_t hbqno; 1965 1966 if (hbq_buffer) { 1967 hbqno = hbq_buffer->tag >> 16; 1968 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 1969 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1970 } 1971 } 1972 1973 /** 1974 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 1975 * @mbxCommand: mailbox command code. 1976 * 1977 * This function is called by the mailbox event handler function to verify 1978 * that the completed mailbox command is a legitimate mailbox command. If the 1979 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 1980 * and the mailbox event handler will take the HBA offline. 
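 *
 * Usage sketch (mirroring the mailbox event handler below): an unknown
 * completion is treated as fatal.
 *
 *	if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == MBX_SHUTDOWN) {
 *		phba->link_state = LPFC_HBA_ERROR;
 *		phba->work_hs = HS_FFER3;
 *		lpfc_handle_eratt(phba);
 *	}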
1981 **/ 1982 static int 1983 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 1984 { 1985 uint8_t ret; 1986 1987 switch (mbxCommand) { 1988 case MBX_LOAD_SM: 1989 case MBX_READ_NV: 1990 case MBX_WRITE_NV: 1991 case MBX_WRITE_VPARMS: 1992 case MBX_RUN_BIU_DIAG: 1993 case MBX_INIT_LINK: 1994 case MBX_DOWN_LINK: 1995 case MBX_CONFIG_LINK: 1996 case MBX_CONFIG_RING: 1997 case MBX_RESET_RING: 1998 case MBX_READ_CONFIG: 1999 case MBX_READ_RCONFIG: 2000 case MBX_READ_SPARM: 2001 case MBX_READ_STATUS: 2002 case MBX_READ_RPI: 2003 case MBX_READ_XRI: 2004 case MBX_READ_REV: 2005 case MBX_READ_LNK_STAT: 2006 case MBX_REG_LOGIN: 2007 case MBX_UNREG_LOGIN: 2008 case MBX_CLEAR_LA: 2009 case MBX_DUMP_MEMORY: 2010 case MBX_DUMP_CONTEXT: 2011 case MBX_RUN_DIAGS: 2012 case MBX_RESTART: 2013 case MBX_UPDATE_CFG: 2014 case MBX_DOWN_LOAD: 2015 case MBX_DEL_LD_ENTRY: 2016 case MBX_RUN_PROGRAM: 2017 case MBX_SET_MASK: 2018 case MBX_SET_VARIABLE: 2019 case MBX_UNREG_D_ID: 2020 case MBX_KILL_BOARD: 2021 case MBX_CONFIG_FARP: 2022 case MBX_BEACON: 2023 case MBX_LOAD_AREA: 2024 case MBX_RUN_BIU_DIAG64: 2025 case MBX_CONFIG_PORT: 2026 case MBX_READ_SPARM64: 2027 case MBX_READ_RPI64: 2028 case MBX_REG_LOGIN64: 2029 case MBX_READ_TOPOLOGY: 2030 case MBX_WRITE_WWN: 2031 case MBX_SET_DEBUG: 2032 case MBX_LOAD_EXP_ROM: 2033 case MBX_ASYNCEVT_ENABLE: 2034 case MBX_REG_VPI: 2035 case MBX_UNREG_VPI: 2036 case MBX_HEARTBEAT: 2037 case MBX_PORT_CAPABILITIES: 2038 case MBX_PORT_IOV_CONTROL: 2039 case MBX_SLI4_CONFIG: 2040 case MBX_SLI4_REQ_FTRS: 2041 case MBX_REG_FCFI: 2042 case MBX_UNREG_FCFI: 2043 case MBX_REG_VFI: 2044 case MBX_UNREG_VFI: 2045 case MBX_INIT_VPI: 2046 case MBX_INIT_VFI: 2047 case MBX_RESUME_RPI: 2048 case MBX_READ_EVENT_LOG_STATUS: 2049 case MBX_READ_EVENT_LOG: 2050 case MBX_SECURITY_MGMT: 2051 case MBX_AUTH_PORT: 2052 ret = mbxCommand; 2053 break; 2054 default: 2055 ret = MBX_SHUTDOWN; 2056 break; 2057 } 2058 return ret; 2059 } 2060 2061 /** 2062 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2063 * @phba: Pointer to HBA context object. 2064 * @pmboxq: Pointer to mailbox command. 2065 * 2066 * This is completion handler function for mailbox commands issued from 2067 * lpfc_sli_issue_mbox_wait function. This function is called by the 2068 * mailbox event handler function with no lock held. This function 2069 * will wake up thread waiting on the wait queue pointed by context1 2070 * of the mailbox. 2071 **/ 2072 void 2073 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2074 { 2075 wait_queue_head_t *pdone_q; 2076 unsigned long drvr_flag; 2077 2078 /* 2079 * If pdone_q is empty, the driver thread gave up waiting and 2080 * continued running. 2081 */ 2082 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2083 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2084 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2085 if (pdone_q) 2086 wake_up_interruptible(pdone_q); 2087 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2088 return; 2089 } 2090 2091 2092 /** 2093 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2094 * @phba: Pointer to HBA context object. 2095 * @pmb: Pointer to mailbox object. 2096 * 2097 * This function is the default mailbox completion handler. It 2098 * frees the memory resources associated with the completed mailbox 2099 * command. If the completed command is a REG_LOGIN mailbox command, 2100 * this function will issue a UREG_LOGIN to re-claim the RPI. 
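 *
 * RPI reclaim sketch (the path taken below when an unwanted REG_LOGIN64
 * completes successfully):
 *
 *	rpi = pmb->u.mb.un.varWords[0];
 *	vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
 *	lpfc_unreg_login(phba, vpi, rpi, pmb);    (reuse the same mailbox)
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);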
2101 **/ 2102 void 2103 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2104 { 2105 struct lpfc_vport *vport = pmb->vport; 2106 struct lpfc_dmabuf *mp; 2107 struct lpfc_nodelist *ndlp; 2108 struct Scsi_Host *shost; 2109 uint16_t rpi, vpi; 2110 int rc; 2111 2112 mp = (struct lpfc_dmabuf *) (pmb->context1); 2113 2114 if (mp) { 2115 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2116 kfree(mp); 2117 } 2118 2119 /* 2120 * If a REG_LOGIN succeeded after node is destroyed or node 2121 * is in re-discovery driver need to cleanup the RPI. 2122 */ 2123 if (!(phba->pport->load_flag & FC_UNLOADING) && 2124 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2125 !pmb->u.mb.mbxStatus) { 2126 rpi = pmb->u.mb.un.varWords[0]; 2127 vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; 2128 lpfc_unreg_login(phba, vpi, rpi, pmb); 2129 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2130 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2131 if (rc != MBX_NOT_FINISHED) 2132 return; 2133 } 2134 2135 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2136 !(phba->pport->load_flag & FC_UNLOADING) && 2137 !pmb->u.mb.mbxStatus) { 2138 shost = lpfc_shost_from_vport(vport); 2139 spin_lock_irq(shost->host_lock); 2140 vport->vpi_state |= LPFC_VPI_REGISTERED; 2141 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2142 spin_unlock_irq(shost->host_lock); 2143 } 2144 2145 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2146 ndlp = (struct lpfc_nodelist *)pmb->context2; 2147 lpfc_nlp_put(ndlp); 2148 pmb->context2 = NULL; 2149 } 2150 2151 /* Check security permission status on INIT_LINK mailbox command */ 2152 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2153 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2154 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2155 "2860 SLI authentication is required " 2156 "for INIT_LINK but has not done yet\n"); 2157 2158 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2159 lpfc_sli4_mbox_cmd_free(phba, pmb); 2160 else 2161 mempool_free(pmb, phba->mbox_mem_pool); 2162 } 2163 2164 /** 2165 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2166 * @phba: Pointer to HBA context object. 2167 * 2168 * This function is called with no lock held. This function processes all 2169 * the completed mailbox commands and gives it to upper layers. The interrupt 2170 * service routine processes mailbox completion interrupt and adds completed 2171 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2172 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2173 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2174 * function returns the mailbox commands to the upper layer by calling the 2175 * completion handler function of each mailbox. 
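 *
 * Processing sketch (an outline of the loop below):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
 *	spin_unlock_irq(&phba->hbalock);
 *	for each pmb removed from cmplq:
 *		treat an unknown command as fatal, retry completions that
 *		failed with MBXERR_NO_RESOURCES, otherwise invoke
 *		pmb->mbox_cmpl(phba, pmb) when a handler is set;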
2176 **/ 2177 int 2178 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2179 { 2180 MAILBOX_t *pmbox; 2181 LPFC_MBOXQ_t *pmb; 2182 int rc; 2183 LIST_HEAD(cmplq); 2184 2185 phba->sli.slistat.mbox_event++; 2186 2187 /* Get all completed mailboxe buffers into the cmplq */ 2188 spin_lock_irq(&phba->hbalock); 2189 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2190 spin_unlock_irq(&phba->hbalock); 2191 2192 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2193 do { 2194 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2195 if (pmb == NULL) 2196 break; 2197 2198 pmbox = &pmb->u.mb; 2199 2200 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2201 if (pmb->vport) { 2202 lpfc_debugfs_disc_trc(pmb->vport, 2203 LPFC_DISC_TRC_MBOX_VPORT, 2204 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2205 (uint32_t)pmbox->mbxCommand, 2206 pmbox->un.varWords[0], 2207 pmbox->un.varWords[1]); 2208 } 2209 else { 2210 lpfc_debugfs_disc_trc(phba->pport, 2211 LPFC_DISC_TRC_MBOX, 2212 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2213 (uint32_t)pmbox->mbxCommand, 2214 pmbox->un.varWords[0], 2215 pmbox->un.varWords[1]); 2216 } 2217 } 2218 2219 /* 2220 * It is a fatal error if unknown mbox command completion. 2221 */ 2222 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2223 MBX_SHUTDOWN) { 2224 /* Unknown mailbox command compl */ 2225 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2226 "(%d):0323 Unknown Mailbox command " 2227 "x%x (x%x) Cmpl\n", 2228 pmb->vport ? pmb->vport->vpi : 0, 2229 pmbox->mbxCommand, 2230 lpfc_sli4_mbox_opcode_get(phba, pmb)); 2231 phba->link_state = LPFC_HBA_ERROR; 2232 phba->work_hs = HS_FFER3; 2233 lpfc_handle_eratt(phba); 2234 continue; 2235 } 2236 2237 if (pmbox->mbxStatus) { 2238 phba->sli.slistat.mbox_stat_err++; 2239 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2240 /* Mbox cmd cmpl error - RETRYing */ 2241 lpfc_printf_log(phba, KERN_INFO, 2242 LOG_MBOX | LOG_SLI, 2243 "(%d):0305 Mbox cmd cmpl " 2244 "error - RETRYing Data: x%x " 2245 "(x%x) x%x x%x x%x\n", 2246 pmb->vport ? pmb->vport->vpi :0, 2247 pmbox->mbxCommand, 2248 lpfc_sli4_mbox_opcode_get(phba, 2249 pmb), 2250 pmbox->mbxStatus, 2251 pmbox->un.varWords[0], 2252 pmb->vport->port_state); 2253 pmbox->mbxStatus = 0; 2254 pmbox->mbxOwner = OWN_HOST; 2255 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2256 if (rc != MBX_NOT_FINISHED) 2257 continue; 2258 } 2259 } 2260 2261 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2262 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2263 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " 2264 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2265 pmb->vport ? pmb->vport->vpi : 0, 2266 pmbox->mbxCommand, 2267 lpfc_sli4_mbox_opcode_get(phba, pmb), 2268 pmb->mbox_cmpl, 2269 *((uint32_t *) pmbox), 2270 pmbox->un.varWords[0], 2271 pmbox->un.varWords[1], 2272 pmbox->un.varWords[2], 2273 pmbox->un.varWords[3], 2274 pmbox->un.varWords[4], 2275 pmbox->un.varWords[5], 2276 pmbox->un.varWords[6], 2277 pmbox->un.varWords[7]); 2278 2279 if (pmb->mbox_cmpl) 2280 pmb->mbox_cmpl(phba,pmb); 2281 } while (1); 2282 return 0; 2283 } 2284 2285 /** 2286 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2287 * @phba: Pointer to HBA context object. 2288 * @pring: Pointer to driver SLI ring object. 2289 * @tag: buffer tag. 2290 * 2291 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2292 * is set in the tag the buffer is posted for a particular exchange, 2293 * the function will return the buffer without replacing the buffer. 
2294 * If the buffer is for unsolicited ELS or CT traffic, this function 2295 * returns the buffer and also posts another buffer to the firmware. 2296 **/ 2297 static struct lpfc_dmabuf * 2298 lpfc_sli_get_buff(struct lpfc_hba *phba, 2299 struct lpfc_sli_ring *pring, 2300 uint32_t tag) 2301 { 2302 struct hbq_dmabuf *hbq_entry; 2303 2304 if (tag & QUE_BUFTAG_BIT) 2305 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2306 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2307 if (!hbq_entry) 2308 return NULL; 2309 return &hbq_entry->dbuf; 2310 } 2311 2312 /** 2313 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2314 * @phba: Pointer to HBA context object. 2315 * @pring: Pointer to driver SLI ring object. 2316 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2317 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2318 * @fch_type: the type for the first frame of the sequence. 2319 * 2320 * This function is called with no lock held. This function uses the r_ctl and 2321 * type of the received sequence to find the correct callback function to call 2322 * to process the sequence. 2323 **/ 2324 static int 2325 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2326 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2327 uint32_t fch_type) 2328 { 2329 int i; 2330 2331 /* unSolicited Responses */ 2332 if (pring->prt[0].profile) { 2333 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2334 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2335 saveq); 2336 return 1; 2337 } 2338 /* We must search, based on rctl / type 2339 for the right routine */ 2340 for (i = 0; i < pring->num_mask; i++) { 2341 if ((pring->prt[i].rctl == fch_r_ctl) && 2342 (pring->prt[i].type == fch_type)) { 2343 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2344 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2345 (phba, pring, saveq); 2346 return 1; 2347 } 2348 } 2349 return 0; 2350 } 2351 2352 /** 2353 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2354 * @phba: Pointer to HBA context object. 2355 * @pring: Pointer to driver SLI ring object. 2356 * @saveq: Pointer to the unsolicited iocb. 2357 * 2358 * This function is called with no lock held by the ring event handler 2359 * when there is an unsolicited iocb posted to the response ring by the 2360 * firmware. This function gets the buffer associated with the iocbs 2361 * and calls the event handler for the ring. This function handles both 2362 * qring buffers and hbq buffers. 2363 * When the function returns 1 the caller can free the iocb object otherwise 2364 * upper layer functions will free the iocb objects. 
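 *
 * Dispatch sketch (how the sequence is routed below): the R_CTL and TYPE
 * of the first frame are taken from IOCB word 5, or forced to
 * FC_RCTL_ELS_REQ/FC_TYPE_ELS for the ELS receive commands, and then
 * matched against the ring's pring->prt[] table:
 *
 *	if (prt[i].rctl == Rctl && prt[i].type == Type)
 *		prt[i].lpfc_sli_rcv_unsol_event(phba, pring, saveq);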
2365 **/ 2366 static int 2367 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2368 struct lpfc_iocbq *saveq) 2369 { 2370 IOCB_t * irsp; 2371 WORD5 * w5p; 2372 uint32_t Rctl, Type; 2373 uint32_t match; 2374 struct lpfc_iocbq *iocbq; 2375 struct lpfc_dmabuf *dmzbuf; 2376 2377 match = 0; 2378 irsp = &(saveq->iocb); 2379 2380 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2381 if (pring->lpfc_sli_rcv_async_status) 2382 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2383 else 2384 lpfc_printf_log(phba, 2385 KERN_WARNING, 2386 LOG_SLI, 2387 "0316 Ring %d handler: unexpected " 2388 "ASYNC_STATUS iocb received evt_code " 2389 "0x%x\n", 2390 pring->ringno, 2391 irsp->un.asyncstat.evt_code); 2392 return 1; 2393 } 2394 2395 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2396 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2397 if (irsp->ulpBdeCount > 0) { 2398 dmzbuf = lpfc_sli_get_buff(phba, pring, 2399 irsp->un.ulpWord[3]); 2400 lpfc_in_buf_free(phba, dmzbuf); 2401 } 2402 2403 if (irsp->ulpBdeCount > 1) { 2404 dmzbuf = lpfc_sli_get_buff(phba, pring, 2405 irsp->unsli3.sli3Words[3]); 2406 lpfc_in_buf_free(phba, dmzbuf); 2407 } 2408 2409 if (irsp->ulpBdeCount > 2) { 2410 dmzbuf = lpfc_sli_get_buff(phba, pring, 2411 irsp->unsli3.sli3Words[7]); 2412 lpfc_in_buf_free(phba, dmzbuf); 2413 } 2414 2415 return 1; 2416 } 2417 2418 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2419 if (irsp->ulpBdeCount != 0) { 2420 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2421 irsp->un.ulpWord[3]); 2422 if (!saveq->context2) 2423 lpfc_printf_log(phba, 2424 KERN_ERR, 2425 LOG_SLI, 2426 "0341 Ring %d Cannot find buffer for " 2427 "an unsolicited iocb. tag 0x%x\n", 2428 pring->ringno, 2429 irsp->un.ulpWord[3]); 2430 } 2431 if (irsp->ulpBdeCount == 2) { 2432 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2433 irsp->unsli3.sli3Words[7]); 2434 if (!saveq->context3) 2435 lpfc_printf_log(phba, 2436 KERN_ERR, 2437 LOG_SLI, 2438 "0342 Ring %d Cannot find buffer for an" 2439 " unsolicited iocb. tag 0x%x\n", 2440 pring->ringno, 2441 irsp->unsli3.sli3Words[7]); 2442 } 2443 list_for_each_entry(iocbq, &saveq->list, list) { 2444 irsp = &(iocbq->iocb); 2445 if (irsp->ulpBdeCount != 0) { 2446 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2447 irsp->un.ulpWord[3]); 2448 if (!iocbq->context2) 2449 lpfc_printf_log(phba, 2450 KERN_ERR, 2451 LOG_SLI, 2452 "0343 Ring %d Cannot find " 2453 "buffer for an unsolicited iocb" 2454 ". tag 0x%x\n", pring->ringno, 2455 irsp->un.ulpWord[3]); 2456 } 2457 if (irsp->ulpBdeCount == 2) { 2458 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2459 irsp->unsli3.sli3Words[7]); 2460 if (!iocbq->context3) 2461 lpfc_printf_log(phba, 2462 KERN_ERR, 2463 LOG_SLI, 2464 "0344 Ring %d Cannot find " 2465 "buffer for an unsolicited " 2466 "iocb. 
tag 0x%x\n", 2467 pring->ringno, 2468 irsp->unsli3.sli3Words[7]); 2469 } 2470 } 2471 } 2472 if (irsp->ulpBdeCount != 0 && 2473 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2474 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2475 int found = 0; 2476 2477 /* search continue save q for same XRI */ 2478 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2479 if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) { 2480 list_add_tail(&saveq->list, &iocbq->list); 2481 found = 1; 2482 break; 2483 } 2484 } 2485 if (!found) 2486 list_add_tail(&saveq->clist, 2487 &pring->iocb_continue_saveq); 2488 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2489 list_del_init(&iocbq->clist); 2490 saveq = iocbq; 2491 irsp = &(saveq->iocb); 2492 } else 2493 return 0; 2494 } 2495 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2496 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2497 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2498 Rctl = FC_RCTL_ELS_REQ; 2499 Type = FC_TYPE_ELS; 2500 } else { 2501 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2502 Rctl = w5p->hcsw.Rctl; 2503 Type = w5p->hcsw.Type; 2504 2505 /* Firmware Workaround */ 2506 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2507 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2508 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2509 Rctl = FC_RCTL_ELS_REQ; 2510 Type = FC_TYPE_ELS; 2511 w5p->hcsw.Rctl = Rctl; 2512 w5p->hcsw.Type = Type; 2513 } 2514 } 2515 2516 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2517 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2518 "0313 Ring %d handler: unexpected Rctl x%x " 2519 "Type x%x received\n", 2520 pring->ringno, Rctl, Type); 2521 2522 return 1; 2523 } 2524 2525 /** 2526 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2527 * @phba: Pointer to HBA context object. 2528 * @pring: Pointer to driver SLI ring object. 2529 * @prspiocb: Pointer to response iocb object. 2530 * 2531 * This function looks up the iocb_lookup table to get the command iocb 2532 * corresponding to the given response iocb using the iotag of the 2533 * response iocb. This function is called with the hbalock held. 2534 * This function returns the command iocb object if it finds the command 2535 * iocb else returns NULL. 2536 **/ 2537 static struct lpfc_iocbq * 2538 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2539 struct lpfc_sli_ring *pring, 2540 struct lpfc_iocbq *prspiocb) 2541 { 2542 struct lpfc_iocbq *cmd_iocb = NULL; 2543 uint16_t iotag; 2544 2545 iotag = prspiocb->iocb.ulpIoTag; 2546 2547 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2548 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2549 list_del_init(&cmd_iocb->list); 2550 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2551 pring->txcmplq_cnt--; 2552 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2553 } 2554 return cmd_iocb; 2555 } 2556 2557 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2558 "0317 iotag x%x is out off " 2559 "range: max iotag x%x wd0 x%x\n", 2560 iotag, phba->sli.last_iotag, 2561 *(((uint32_t *) &prspiocb->iocb) + 7)); 2562 return NULL; 2563 } 2564 2565 /** 2566 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2567 * @phba: Pointer to HBA context object. 2568 * @pring: Pointer to driver SLI ring object. 2569 * @iotag: IOCB tag. 2570 * 2571 * This function looks up the iocb_lookup table to get the command iocb 2572 * corresponding to the given iotag. This function is called with the 2573 * hbalock held. 2574 * This function returns the command iocb object if it finds the command 2575 * iocb else returns NULL. 
2576 **/ 2577 static struct lpfc_iocbq * 2578 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2579 struct lpfc_sli_ring *pring, uint16_t iotag) 2580 { 2581 struct lpfc_iocbq *cmd_iocb; 2582 2583 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2584 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2585 list_del_init(&cmd_iocb->list); 2586 if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) { 2587 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 2588 pring->txcmplq_cnt--; 2589 } 2590 return cmd_iocb; 2591 } 2592 2593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2594 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2595 iotag, phba->sli.last_iotag); 2596 return NULL; 2597 } 2598 2599 /** 2600 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2601 * @phba: Pointer to HBA context object. 2602 * @pring: Pointer to driver SLI ring object. 2603 * @saveq: Pointer to the response iocb to be processed. 2604 * 2605 * This function is called by the ring event handler for non-fcp 2606 * rings when there is a new response iocb in the response ring. 2607 * The caller is not required to hold any locks. This function 2608 * gets the command iocb associated with the response iocb and 2609 * calls the completion handler for the command iocb. If there 2610 * is no completion handler, the function will free the resources 2611 * associated with command iocb. If the response iocb is for 2612 * an already aborted command iocb, the status of the completion 2613 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2614 * This function always returns 1. 2615 **/ 2616 static int 2617 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2618 struct lpfc_iocbq *saveq) 2619 { 2620 struct lpfc_iocbq *cmdiocbp; 2621 int rc = 1; 2622 unsigned long iflag; 2623 2624 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2625 spin_lock_irqsave(&phba->hbalock, iflag); 2626 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2627 spin_unlock_irqrestore(&phba->hbalock, iflag); 2628 2629 if (cmdiocbp) { 2630 if (cmdiocbp->iocb_cmpl) { 2631 /* 2632 * If an ELS command failed send an event to mgmt 2633 * application. 2634 */ 2635 if (saveq->iocb.ulpStatus && 2636 (pring->ringno == LPFC_ELS_RING) && 2637 (cmdiocbp->iocb.ulpCommand == 2638 CMD_ELS_REQUEST64_CR)) 2639 lpfc_send_els_failure_event(phba, 2640 cmdiocbp, saveq); 2641 2642 /* 2643 * Post all ELS completions to the worker thread. 2644 * All other are passed to the completion callback. 2645 */ 2646 if (pring->ringno == LPFC_ELS_RING) { 2647 if ((phba->sli_rev < LPFC_SLI_REV4) && 2648 (cmdiocbp->iocb_flag & 2649 LPFC_DRIVER_ABORTED)) { 2650 spin_lock_irqsave(&phba->hbalock, 2651 iflag); 2652 cmdiocbp->iocb_flag &= 2653 ~LPFC_DRIVER_ABORTED; 2654 spin_unlock_irqrestore(&phba->hbalock, 2655 iflag); 2656 saveq->iocb.ulpStatus = 2657 IOSTAT_LOCAL_REJECT; 2658 saveq->iocb.un.ulpWord[4] = 2659 IOERR_SLI_ABORTED; 2660 2661 /* Firmware could still be in progress 2662 * of DMAing payload, so don't free data 2663 * buffer till after a hbeat. 2664 */ 2665 spin_lock_irqsave(&phba->hbalock, 2666 iflag); 2667 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2668 spin_unlock_irqrestore(&phba->hbalock, 2669 iflag); 2670 } 2671 if (phba->sli_rev == LPFC_SLI_REV4) { 2672 if (saveq->iocb_flag & 2673 LPFC_EXCHANGE_BUSY) { 2674 /* Set cmdiocb flag for the 2675 * exchange busy so sgl (xri) 2676 * will not be released until 2677 * the abort xri is received 2678 * from hba. 
2679 */ 2680 spin_lock_irqsave( 2681 &phba->hbalock, iflag); 2682 cmdiocbp->iocb_flag |= 2683 LPFC_EXCHANGE_BUSY; 2684 spin_unlock_irqrestore( 2685 &phba->hbalock, iflag); 2686 } 2687 if (cmdiocbp->iocb_flag & 2688 LPFC_DRIVER_ABORTED) { 2689 /* 2690 * Clear LPFC_DRIVER_ABORTED 2691 * bit in case it was driver 2692 * initiated abort. 2693 */ 2694 spin_lock_irqsave( 2695 &phba->hbalock, iflag); 2696 cmdiocbp->iocb_flag &= 2697 ~LPFC_DRIVER_ABORTED; 2698 spin_unlock_irqrestore( 2699 &phba->hbalock, iflag); 2700 cmdiocbp->iocb.ulpStatus = 2701 IOSTAT_LOCAL_REJECT; 2702 cmdiocbp->iocb.un.ulpWord[4] = 2703 IOERR_ABORT_REQUESTED; 2704 /* 2705 * For SLI4, irsiocb contains 2706 * NO_XRI in sli_xritag, it 2707 * shall not affect releasing 2708 * sgl (xri) process. 2709 */ 2710 saveq->iocb.ulpStatus = 2711 IOSTAT_LOCAL_REJECT; 2712 saveq->iocb.un.ulpWord[4] = 2713 IOERR_SLI_ABORTED; 2714 spin_lock_irqsave( 2715 &phba->hbalock, iflag); 2716 saveq->iocb_flag |= 2717 LPFC_DELAY_MEM_FREE; 2718 spin_unlock_irqrestore( 2719 &phba->hbalock, iflag); 2720 } 2721 } 2722 } 2723 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2724 } else 2725 lpfc_sli_release_iocbq(phba, cmdiocbp); 2726 } else { 2727 /* 2728 * Unknown initiating command based on the response iotag. 2729 * This could be the case on the ELS ring because of 2730 * lpfc_els_abort(). 2731 */ 2732 if (pring->ringno != LPFC_ELS_RING) { 2733 /* 2734 * Ring <ringno> handler: unexpected completion IoTag 2735 * <IoTag> 2736 */ 2737 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2738 "0322 Ring %d handler: " 2739 "unexpected completion IoTag x%x " 2740 "Data: x%x x%x x%x x%x\n", 2741 pring->ringno, 2742 saveq->iocb.ulpIoTag, 2743 saveq->iocb.ulpStatus, 2744 saveq->iocb.un.ulpWord[4], 2745 saveq->iocb.ulpCommand, 2746 saveq->iocb.ulpContext); 2747 } 2748 } 2749 2750 return rc; 2751 } 2752 2753 /** 2754 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2755 * @phba: Pointer to HBA context object. 2756 * @pring: Pointer to driver SLI ring object. 2757 * 2758 * This function is called from the iocb ring event handlers when 2759 * put pointer is ahead of the get pointer for a ring. This function signal 2760 * an error attention condition to the worker thread and the worker 2761 * thread will transition the HBA to offline state. 2762 **/ 2763 static void 2764 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2765 { 2766 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2767 /* 2768 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2769 * rsp ring <portRspMax> 2770 */ 2771 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2772 "0312 Ring %d handler: portRspPut %d " 2773 "is bigger than rsp ring %d\n", 2774 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2775 pring->numRiocb); 2776 2777 phba->link_state = LPFC_HBA_ERROR; 2778 2779 /* 2780 * All error attention handlers are posted to 2781 * worker thread 2782 */ 2783 phba->work_ha |= HA_ERATT; 2784 phba->work_hs = HS_FFER3; 2785 2786 lpfc_worker_wake_up(phba); 2787 2788 return; 2789 } 2790 2791 /** 2792 * lpfc_poll_eratt - Error attention polling timer timeout handler 2793 * @ptr: Pointer to address of HBA context object. 2794 * 2795 * This function is invoked by the Error Attention polling timer when the 2796 * timer times out. It will check the SLI Error Attention register for 2797 * possible attention events. If so, it will post an Error Attention event 2798 * and wake up worker thread to process it. 
Otherwise, it will set up the 2799 * Error Attention polling timer for the next poll. 2800 **/ 2801 void lpfc_poll_eratt(unsigned long ptr) 2802 { 2803 struct lpfc_hba *phba; 2804 uint32_t eratt = 0; 2805 2806 phba = (struct lpfc_hba *)ptr; 2807 2808 /* Check chip HA register for error event */ 2809 eratt = lpfc_sli_check_eratt(phba); 2810 2811 if (eratt) 2812 /* Tell the worker thread there is work to do */ 2813 lpfc_worker_wake_up(phba); 2814 else 2815 /* Restart the timer for next eratt poll */ 2816 mod_timer(&phba->eratt_poll, jiffies + 2817 HZ * LPFC_ERATT_POLL_INTERVAL); 2818 return; 2819 } 2820 2821 2822 /** 2823 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2824 * @phba: Pointer to HBA context object. 2825 * @pring: Pointer to driver SLI ring object. 2826 * @mask: Host attention register mask for this ring. 2827 * 2828 * This function is called from the interrupt context when there is a ring 2829 * event for the fcp ring. The caller does not hold any lock. 2830 * The function processes each response iocb in the response ring until it 2831 * finds an iocb with LE bit set and chains all the iocbs upto the iocb with 2832 * LE bit set. The function will call the completion handler of the command iocb 2833 * if the response iocb indicates a completion for a command iocb or it is 2834 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2835 * function if this is an unsolicited iocb. 2836 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2837 * to check it explicitly. 2838 */ 2839 int 2840 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2841 struct lpfc_sli_ring *pring, uint32_t mask) 2842 { 2843 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2844 IOCB_t *irsp = NULL; 2845 IOCB_t *entry = NULL; 2846 struct lpfc_iocbq *cmdiocbq = NULL; 2847 struct lpfc_iocbq rspiocbq; 2848 uint32_t status; 2849 uint32_t portRspPut, portRspMax; 2850 int rc = 1; 2851 lpfc_iocb_type type; 2852 unsigned long iflag; 2853 uint32_t rsp_cmpl = 0; 2854 2855 spin_lock_irqsave(&phba->hbalock, iflag); 2856 pring->stats.iocb_event++; 2857 2858 /* 2859 * The next available response entry should never exceed the maximum 2860 * entries. If it does, treat it as an adapter hardware error. 2861 */ 2862 portRspMax = pring->numRiocb; 2863 portRspPut = le32_to_cpu(pgp->rspPutInx); 2864 if (unlikely(portRspPut >= portRspMax)) { 2865 lpfc_sli_rsp_pointers_error(phba, pring); 2866 spin_unlock_irqrestore(&phba->hbalock, iflag); 2867 return 1; 2868 } 2869 if (phba->fcp_ring_in_use) { 2870 spin_unlock_irqrestore(&phba->hbalock, iflag); 2871 return 1; 2872 } else 2873 phba->fcp_ring_in_use = 1; 2874 2875 rmb(); 2876 while (pring->rspidx != portRspPut) { 2877 /* 2878 * Fetch an entry off the ring and copy it into a local data 2879 * structure. The copy involves a byte-swap since the 2880 * network byte order and pci byte orders are different. 2881 */ 2882 entry = lpfc_resp_iocb(phba, pring); 2883 phba->last_completion_time = jiffies; 2884 2885 if (++pring->rspidx >= portRspMax) 2886 pring->rspidx = 0; 2887 2888 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 2889 (uint32_t *) &rspiocbq.iocb, 2890 phba->iocb_rsp_size); 2891 INIT_LIST_HEAD(&(rspiocbq.list)); 2892 irsp = &rspiocbq.iocb; 2893 2894 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 2895 pring->stats.iocb_rsp++; 2896 rsp_cmpl++; 2897 2898 if (unlikely(irsp->ulpStatus)) { 2899 /* 2900 * If resource errors reported from HBA, reduce 2901 * queuedepths of the SCSI device. 
2902 */ 2903 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 2904 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 2905 spin_unlock_irqrestore(&phba->hbalock, iflag); 2906 phba->lpfc_rampdown_queue_depth(phba); 2907 spin_lock_irqsave(&phba->hbalock, iflag); 2908 } 2909 2910 /* Rsp ring <ringno> error: IOCB */ 2911 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2912 "0336 Rsp Ring %d error: IOCB Data: " 2913 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 2914 pring->ringno, 2915 irsp->un.ulpWord[0], 2916 irsp->un.ulpWord[1], 2917 irsp->un.ulpWord[2], 2918 irsp->un.ulpWord[3], 2919 irsp->un.ulpWord[4], 2920 irsp->un.ulpWord[5], 2921 *(uint32_t *)&irsp->un1, 2922 *((uint32_t *)&irsp->un1 + 1)); 2923 } 2924 2925 switch (type) { 2926 case LPFC_ABORT_IOCB: 2927 case LPFC_SOL_IOCB: 2928 /* 2929 * Idle exchange closed via ABTS from port. No iocb 2930 * resources need to be recovered. 2931 */ 2932 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 2933 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 2934 "0333 IOCB cmd 0x%x" 2935 " processed. Skipping" 2936 " completion\n", 2937 irsp->ulpCommand); 2938 break; 2939 } 2940 2941 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 2942 &rspiocbq); 2943 if (unlikely(!cmdiocbq)) 2944 break; 2945 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 2946 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 2947 if (cmdiocbq->iocb_cmpl) { 2948 spin_unlock_irqrestore(&phba->hbalock, iflag); 2949 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 2950 &rspiocbq); 2951 spin_lock_irqsave(&phba->hbalock, iflag); 2952 } 2953 break; 2954 case LPFC_UNSOL_IOCB: 2955 spin_unlock_irqrestore(&phba->hbalock, iflag); 2956 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 2957 spin_lock_irqsave(&phba->hbalock, iflag); 2958 break; 2959 default: 2960 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 2961 char adaptermsg[LPFC_MAX_ADPTMSG]; 2962 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 2963 memcpy(&adaptermsg[0], (uint8_t *) irsp, 2964 MAX_MSG_DATA); 2965 dev_warn(&((phba->pcidev)->dev), 2966 "lpfc%d: %s\n", 2967 phba->brd_no, adaptermsg); 2968 } else { 2969 /* Unknown IOCB command */ 2970 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2971 "0334 Unknown IOCB command " 2972 "Data: x%x, x%x x%x x%x x%x\n", 2973 type, irsp->ulpCommand, 2974 irsp->ulpStatus, 2975 irsp->ulpIoTag, 2976 irsp->ulpContext); 2977 } 2978 break; 2979 } 2980 2981 /* 2982 * The response IOCB has been processed. Update the ring 2983 * pointer in SLIM. If the port response put pointer has not 2984 * been updated, sync the pgp->rspPutInx and fetch the new port 2985 * response put pointer. 
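		 *
		 * Example with a 64-entry ring (illustrative sizing only):
		 * after consuming entry 63 rspidx wraps to 0, the new value
		 * is written to host_gp[ringno].rspGetInx, and if rspidx has
		 * caught up with the cached portRspPut the put index is
		 * re-read from pgp->rspPutInx before the loop continues.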
2986 */ 2987 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 2988 2989 if (pring->rspidx == portRspPut) 2990 portRspPut = le32_to_cpu(pgp->rspPutInx); 2991 } 2992 2993 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 2994 pring->stats.iocb_rsp_full++; 2995 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 2996 writel(status, phba->CAregaddr); 2997 readl(phba->CAregaddr); 2998 } 2999 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3000 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3001 pring->stats.iocb_cmd_empty++; 3002 3003 /* Force update of the local copy of cmdGetInx */ 3004 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3005 lpfc_sli_resume_iocb(phba, pring); 3006 3007 if ((pring->lpfc_sli_cmd_available)) 3008 (pring->lpfc_sli_cmd_available) (phba, pring); 3009 3010 } 3011 3012 phba->fcp_ring_in_use = 0; 3013 spin_unlock_irqrestore(&phba->hbalock, iflag); 3014 return rc; 3015 } 3016 3017 /** 3018 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3019 * @phba: Pointer to HBA context object. 3020 * @pring: Pointer to driver SLI ring object. 3021 * @rspiocbp: Pointer to driver response IOCB object. 3022 * 3023 * This function is called from the worker thread when there is a slow-path 3024 * response IOCB to process. This function chains all the response iocbs until 3025 * seeing the iocb with the LE bit set. The function will call 3026 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3027 * completion of a command iocb. The function will call the 3028 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3029 * The function frees the resources or calls the completion handler if this 3030 * iocb is an abort completion. The function returns NULL when the response 3031 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3032 * this function shall chain the iocb on to the iocb_continueq and return the 3033 * response iocb passed in. 3034 **/ 3035 static struct lpfc_iocbq * 3036 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3037 struct lpfc_iocbq *rspiocbp) 3038 { 3039 struct lpfc_iocbq *saveq; 3040 struct lpfc_iocbq *cmdiocbp; 3041 struct lpfc_iocbq *next_iocb; 3042 IOCB_t *irsp = NULL; 3043 uint32_t free_saveq; 3044 uint8_t iocb_cmd_type; 3045 lpfc_iocb_type type; 3046 unsigned long iflag; 3047 int rc; 3048 3049 spin_lock_irqsave(&phba->hbalock, iflag); 3050 /* First add the response iocb to the countinueq list */ 3051 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3052 pring->iocb_continueq_cnt++; 3053 3054 /* Now, determine whetehr the list is completed for processing */ 3055 irsp = &rspiocbp->iocb; 3056 if (irsp->ulpLe) { 3057 /* 3058 * By default, the driver expects to free all resources 3059 * associated with this iocb completion. 3060 */ 3061 free_saveq = 1; 3062 saveq = list_get_first(&pring->iocb_continueq, 3063 struct lpfc_iocbq, list); 3064 irsp = &(saveq->iocb); 3065 list_del_init(&pring->iocb_continueq); 3066 pring->iocb_continueq_cnt = 0; 3067 3068 pring->stats.iocb_rsp++; 3069 3070 /* 3071 * If resource errors reported from HBA, reduce 3072 * queuedepths of the SCSI device. 
3073 */ 3074 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3075 (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { 3076 spin_unlock_irqrestore(&phba->hbalock, iflag); 3077 phba->lpfc_rampdown_queue_depth(phba); 3078 spin_lock_irqsave(&phba->hbalock, iflag); 3079 } 3080 3081 if (irsp->ulpStatus) { 3082 /* Rsp ring <ringno> error: IOCB */ 3083 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3084 "0328 Rsp Ring %d error: " 3085 "IOCB Data: " 3086 "x%x x%x x%x x%x " 3087 "x%x x%x x%x x%x " 3088 "x%x x%x x%x x%x " 3089 "x%x x%x x%x x%x\n", 3090 pring->ringno, 3091 irsp->un.ulpWord[0], 3092 irsp->un.ulpWord[1], 3093 irsp->un.ulpWord[2], 3094 irsp->un.ulpWord[3], 3095 irsp->un.ulpWord[4], 3096 irsp->un.ulpWord[5], 3097 *(((uint32_t *) irsp) + 6), 3098 *(((uint32_t *) irsp) + 7), 3099 *(((uint32_t *) irsp) + 8), 3100 *(((uint32_t *) irsp) + 9), 3101 *(((uint32_t *) irsp) + 10), 3102 *(((uint32_t *) irsp) + 11), 3103 *(((uint32_t *) irsp) + 12), 3104 *(((uint32_t *) irsp) + 13), 3105 *(((uint32_t *) irsp) + 14), 3106 *(((uint32_t *) irsp) + 15)); 3107 } 3108 3109 /* 3110 * Fetch the IOCB command type and call the correct completion 3111 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3112 * get freed back to the lpfc_iocb_list by the discovery 3113 * kernel thread. 3114 */ 3115 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3116 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3117 switch (type) { 3118 case LPFC_SOL_IOCB: 3119 spin_unlock_irqrestore(&phba->hbalock, iflag); 3120 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3121 spin_lock_irqsave(&phba->hbalock, iflag); 3122 break; 3123 3124 case LPFC_UNSOL_IOCB: 3125 spin_unlock_irqrestore(&phba->hbalock, iflag); 3126 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3127 spin_lock_irqsave(&phba->hbalock, iflag); 3128 if (!rc) 3129 free_saveq = 0; 3130 break; 3131 3132 case LPFC_ABORT_IOCB: 3133 cmdiocbp = NULL; 3134 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3135 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3136 saveq); 3137 if (cmdiocbp) { 3138 /* Call the specified completion routine */ 3139 if (cmdiocbp->iocb_cmpl) { 3140 spin_unlock_irqrestore(&phba->hbalock, 3141 iflag); 3142 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3143 saveq); 3144 spin_lock_irqsave(&phba->hbalock, 3145 iflag); 3146 } else 3147 __lpfc_sli_release_iocbq(phba, 3148 cmdiocbp); 3149 } 3150 break; 3151 3152 case LPFC_UNKNOWN_IOCB: 3153 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3154 char adaptermsg[LPFC_MAX_ADPTMSG]; 3155 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3156 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3157 MAX_MSG_DATA); 3158 dev_warn(&((phba->pcidev)->dev), 3159 "lpfc%d: %s\n", 3160 phba->brd_no, adaptermsg); 3161 } else { 3162 /* Unknown IOCB command */ 3163 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3164 "0335 Unknown IOCB " 3165 "command Data: x%x " 3166 "x%x x%x x%x\n", 3167 irsp->ulpCommand, 3168 irsp->ulpStatus, 3169 irsp->ulpIoTag, 3170 irsp->ulpContext); 3171 } 3172 break; 3173 } 3174 3175 if (free_saveq) { 3176 list_for_each_entry_safe(rspiocbp, next_iocb, 3177 &saveq->list, list) { 3178 list_del(&rspiocbp->list); 3179 __lpfc_sli_release_iocbq(phba, rspiocbp); 3180 } 3181 __lpfc_sli_release_iocbq(phba, saveq); 3182 } 3183 rspiocbp = NULL; 3184 } 3185 spin_unlock_irqrestore(&phba->hbalock, iflag); 3186 return rspiocbp; 3187 } 3188 3189 /** 3190 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3191 * @phba: Pointer to HBA context object. 3192 * @pring: Pointer to driver SLI ring object. 
3193 * @mask: Host attention register mask for this ring. 3194 * 3195 * This routine wraps the actual slow_ring event process routine from the 3196 * API jump table function pointer from the lpfc_hba struct. 3197 **/ 3198 void 3199 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3200 struct lpfc_sli_ring *pring, uint32_t mask) 3201 { 3202 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3203 } 3204 3205 /** 3206 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3207 * @phba: Pointer to HBA context object. 3208 * @pring: Pointer to driver SLI ring object. 3209 * @mask: Host attention register mask for this ring. 3210 * 3211 * This function is called from the worker thread when there is a ring event 3212 * for non-fcp rings. The caller does not hold any lock. The function will 3213 * remove each response iocb in the response ring and calls the handle 3214 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3215 **/ 3216 static void 3217 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3218 struct lpfc_sli_ring *pring, uint32_t mask) 3219 { 3220 struct lpfc_pgp *pgp; 3221 IOCB_t *entry; 3222 IOCB_t *irsp = NULL; 3223 struct lpfc_iocbq *rspiocbp = NULL; 3224 uint32_t portRspPut, portRspMax; 3225 unsigned long iflag; 3226 uint32_t status; 3227 3228 pgp = &phba->port_gp[pring->ringno]; 3229 spin_lock_irqsave(&phba->hbalock, iflag); 3230 pring->stats.iocb_event++; 3231 3232 /* 3233 * The next available response entry should never exceed the maximum 3234 * entries. If it does, treat it as an adapter hardware error. 3235 */ 3236 portRspMax = pring->numRiocb; 3237 portRspPut = le32_to_cpu(pgp->rspPutInx); 3238 if (portRspPut >= portRspMax) { 3239 /* 3240 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3241 * rsp ring <portRspMax> 3242 */ 3243 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3244 "0303 Ring %d handler: portRspPut %d " 3245 "is bigger than rsp ring %d\n", 3246 pring->ringno, portRspPut, portRspMax); 3247 3248 phba->link_state = LPFC_HBA_ERROR; 3249 spin_unlock_irqrestore(&phba->hbalock, iflag); 3250 3251 phba->work_hs = HS_FFER3; 3252 lpfc_handle_eratt(phba); 3253 3254 return; 3255 } 3256 3257 rmb(); 3258 while (pring->rspidx != portRspPut) { 3259 /* 3260 * Build a completion list and call the appropriate handler. 3261 * The process is to get the next available response iocb, get 3262 * a free iocb from the list, copy the response data into the 3263 * free iocb, insert to the continuation list, and update the 3264 * next response index to slim. This process makes response 3265 * iocb's in the ring available to DMA as fast as possible but 3266 * pays a penalty for a copy operation. Since the iocb is 3267 * only 32 bytes, this penalty is considered small relative to 3268 * the PCI reads for register values and a slim write. When 3269 * the ulpLe field is set, the entire Command has been 3270 * received. 3271 */ 3272 entry = lpfc_resp_iocb(phba, pring); 3273 3274 phba->last_completion_time = jiffies; 3275 rspiocbp = __lpfc_sli_get_iocbq(phba); 3276 if (rspiocbp == NULL) { 3277 printk(KERN_ERR "%s: out of buffers! 
Failing " 3278 "completion.\n", __func__); 3279 break; 3280 } 3281 3282 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3283 phba->iocb_rsp_size); 3284 irsp = &rspiocbp->iocb; 3285 3286 if (++pring->rspidx >= portRspMax) 3287 pring->rspidx = 0; 3288 3289 if (pring->ringno == LPFC_ELS_RING) { 3290 lpfc_debugfs_slow_ring_trc(phba, 3291 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3292 *(((uint32_t *) irsp) + 4), 3293 *(((uint32_t *) irsp) + 6), 3294 *(((uint32_t *) irsp) + 7)); 3295 } 3296 3297 writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); 3298 3299 spin_unlock_irqrestore(&phba->hbalock, iflag); 3300 /* Handle the response IOCB */ 3301 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3302 spin_lock_irqsave(&phba->hbalock, iflag); 3303 3304 /* 3305 * If the port response put pointer has not been updated, sync 3306 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3307 * response put pointer. 3308 */ 3309 if (pring->rspidx == portRspPut) { 3310 portRspPut = le32_to_cpu(pgp->rspPutInx); 3311 } 3312 } /* while (pring->rspidx != portRspPut) */ 3313 3314 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3315 /* At least one response entry has been freed */ 3316 pring->stats.iocb_rsp_full++; 3317 /* SET RxRE_RSP in Chip Att register */ 3318 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3319 writel(status, phba->CAregaddr); 3320 readl(phba->CAregaddr); /* flush */ 3321 } 3322 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3323 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3324 pring->stats.iocb_cmd_empty++; 3325 3326 /* Force update of the local copy of cmdGetInx */ 3327 pring->local_getidx = le32_to_cpu(pgp->cmdGetInx); 3328 lpfc_sli_resume_iocb(phba, pring); 3329 3330 if ((pring->lpfc_sli_cmd_available)) 3331 (pring->lpfc_sli_cmd_available) (phba, pring); 3332 3333 } 3334 3335 spin_unlock_irqrestore(&phba->hbalock, iflag); 3336 return; 3337 } 3338 3339 /** 3340 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3341 * @phba: Pointer to HBA context object. 3342 * @pring: Pointer to driver SLI ring object. 3343 * @mask: Host attention register mask for this ring. 3344 * 3345 * This function is called from the worker thread when there is a pending 3346 * ELS response iocb on the driver internal slow-path response iocb worker 3347 * queue. The caller does not hold any lock. The function will remove each 3348 * response iocb from the response worker queue and calls the handle 3349 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
3350 **/ 3351 static void 3352 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3353 struct lpfc_sli_ring *pring, uint32_t mask) 3354 { 3355 struct lpfc_iocbq *irspiocbq; 3356 struct hbq_dmabuf *dmabuf; 3357 struct lpfc_cq_event *cq_event; 3358 unsigned long iflag; 3359 3360 spin_lock_irqsave(&phba->hbalock, iflag); 3361 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3362 spin_unlock_irqrestore(&phba->hbalock, iflag); 3363 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3364 /* Get the response iocb from the head of work queue */ 3365 spin_lock_irqsave(&phba->hbalock, iflag); 3366 list_remove_head(&phba->sli4_hba.sp_queue_event, 3367 cq_event, struct lpfc_cq_event, list); 3368 spin_unlock_irqrestore(&phba->hbalock, iflag); 3369 3370 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3371 case CQE_CODE_COMPL_WQE: 3372 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3373 cq_event); 3374 /* Translate ELS WCQE to response IOCBQ */ 3375 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3376 irspiocbq); 3377 if (irspiocbq) 3378 lpfc_sli_sp_handle_rspiocb(phba, pring, 3379 irspiocbq); 3380 break; 3381 case CQE_CODE_RECEIVE: 3382 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3383 cq_event); 3384 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3385 break; 3386 default: 3387 break; 3388 } 3389 } 3390 } 3391 3392 /** 3393 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3394 * @phba: Pointer to HBA context object. 3395 * @pring: Pointer to driver SLI ring object. 3396 * 3397 * This function aborts all iocbs in the given ring and frees all the iocb 3398 * objects in txq. This function issues an abort iocb for all the iocb commands 3399 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3400 * the return of this function. The caller is not required to hold any locks. 3401 **/ 3402 void 3403 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3404 { 3405 LIST_HEAD(completions); 3406 struct lpfc_iocbq *iocb, *next_iocb; 3407 3408 if (pring->ringno == LPFC_ELS_RING) { 3409 lpfc_fabric_abort_hba(phba); 3410 } 3411 3412 /* Error everything on txq and txcmplq 3413 * First do the txq. 3414 */ 3415 spin_lock_irq(&phba->hbalock); 3416 list_splice_init(&pring->txq, &completions); 3417 pring->txq_cnt = 0; 3418 3419 /* Next issue ABTS for everything on the txcmplq */ 3420 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3421 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3422 3423 spin_unlock_irq(&phba->hbalock); 3424 3425 /* Cancel all the IOCBs from the completions list */ 3426 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3427 IOERR_SLI_ABORTED); 3428 } 3429 3430 /** 3431 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3432 * @phba: Pointer to HBA context object. 3433 * 3434 * This function flushes all iocbs in the fcp ring and frees all the iocb 3435 * objects in txq and txcmplq. This function will not issue abort iocbs 3436 * for all the iocb commands in txcmplq, they will just be returned with 3437 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3438 * slot has been permanently disabled. 
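 *
 * Flush sketch (what the body below does):
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &txq);         txq_cnt = 0;
 *	list_splice_init(&pring->txcmplq, &txcmplq); txcmplq_cnt = 0;
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, IOERR_SLI_DOWN);
 *	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_DOWN);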
3439 **/ 3440 void 3441 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3442 { 3443 LIST_HEAD(txq); 3444 LIST_HEAD(txcmplq); 3445 struct lpfc_sli *psli = &phba->sli; 3446 struct lpfc_sli_ring *pring; 3447 3448 /* Currently, only one fcp ring */ 3449 pring = &psli->ring[psli->fcp_ring]; 3450 3451 spin_lock_irq(&phba->hbalock); 3452 /* Retrieve everything on txq */ 3453 list_splice_init(&pring->txq, &txq); 3454 pring->txq_cnt = 0; 3455 3456 /* Retrieve everything on the txcmplq */ 3457 list_splice_init(&pring->txcmplq, &txcmplq); 3458 pring->txcmplq_cnt = 0; 3459 spin_unlock_irq(&phba->hbalock); 3460 3461 /* Flush the txq */ 3462 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3463 IOERR_SLI_DOWN); 3464 3465 /* Flush the txcmpq */ 3466 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3467 IOERR_SLI_DOWN); 3468 } 3469 3470 /** 3471 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3472 * @phba: Pointer to HBA context object. 3473 * @mask: Bit mask to be checked. 3474 * 3475 * This function reads the host status register and compares 3476 * with the provided bit mask to check if HBA completed 3477 * the restart. This function will wait in a loop for the 3478 * HBA to complete restart. If the HBA does not restart within 3479 * 15 iterations, the function will reset the HBA again. The 3480 * function returns 1 when HBA fail to restart otherwise returns 3481 * zero. 3482 **/ 3483 static int 3484 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3485 { 3486 uint32_t status; 3487 int i = 0; 3488 int retval = 0; 3489 3490 /* Read the HBA Host Status Register */ 3491 status = readl(phba->HSregaddr); 3492 3493 /* 3494 * Check status register every 100ms for 5 retries, then every 3495 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3496 * every 2.5 sec for 4. 3497 * Break our of the loop if errors occurred during init. 3498 */ 3499 while (((status & mask) != mask) && 3500 !(status & HS_FFERM) && 3501 i++ < 20) { 3502 3503 if (i <= 5) 3504 msleep(10); 3505 else if (i <= 10) 3506 msleep(500); 3507 else 3508 msleep(2500); 3509 3510 if (i == 15) { 3511 /* Do post */ 3512 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3513 lpfc_sli_brdrestart(phba); 3514 } 3515 /* Read the HBA Host Status Register */ 3516 status = readl(phba->HSregaddr); 3517 } 3518 3519 /* Check to see if any errors occurred during init */ 3520 if ((status & HS_FFERM) || (i >= 20)) { 3521 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3522 "2751 Adapter failed to restart, " 3523 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3524 status, 3525 readl(phba->MBslimaddr + 0xa8), 3526 readl(phba->MBslimaddr + 0xac)); 3527 phba->link_state = LPFC_HBA_ERROR; 3528 retval = 1; 3529 } 3530 3531 return retval; 3532 } 3533 3534 /** 3535 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3536 * @phba: Pointer to HBA context object. 3537 * @mask: Bit mask to be checked. 3538 * 3539 * This function checks the host status register to check if HBA is 3540 * ready. This function will wait in a loop for the HBA to be ready 3541 * If the HBA is not ready , the function will will reset the HBA PCI 3542 * function again. The function returns 1 when HBA fail to be ready 3543 * otherwise returns zero. 
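 *
 * Ready-check sketch: lpfc_sli4_post_status_check() is consulted once; on
 * failure the port state is set to LPFC_VPORT_UNKNOWN, the board is
 * restarted via lpfc_sli_brdrestart() and the check is repeated. A second
 * failure marks the HBA as LPFC_HBA_ERROR and returns 1.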
3544 **/ 3545 static int 3546 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3547 { 3548 uint32_t status; 3549 int retval = 0; 3550 3551 /* Read the HBA Host Status Register */ 3552 status = lpfc_sli4_post_status_check(phba); 3553 3554 if (status) { 3555 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3556 lpfc_sli_brdrestart(phba); 3557 status = lpfc_sli4_post_status_check(phba); 3558 } 3559 3560 /* Check to see if any errors occurred during init */ 3561 if (status) { 3562 phba->link_state = LPFC_HBA_ERROR; 3563 retval = 1; 3564 } else 3565 phba->sli4_hba.intr_enable = 0; 3566 3567 return retval; 3568 } 3569 3570 /** 3571 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3572 * @phba: Pointer to HBA context object. 3573 * @mask: Bit mask to be checked. 3574 * 3575 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3576 * from the API jump table function pointer from the lpfc_hba struct. 3577 **/ 3578 int 3579 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3580 { 3581 return phba->lpfc_sli_brdready(phba, mask); 3582 } 3583 3584 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3585 3586 /** 3587 * lpfc_reset_barrier - Make HBA ready for HBA reset 3588 * @phba: Pointer to HBA context object. 3589 * 3590 * This function is called before resetting an HBA. This 3591 * function requests HBA to quiesce DMAs before a reset. 3592 **/ 3593 void lpfc_reset_barrier(struct lpfc_hba *phba) 3594 { 3595 uint32_t __iomem *resp_buf; 3596 uint32_t __iomem *mbox_buf; 3597 volatile uint32_t mbox; 3598 uint32_t hc_copy; 3599 int i; 3600 uint8_t hdrtype; 3601 3602 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3603 if (hdrtype != 0x80 || 3604 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3605 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3606 return; 3607 3608 /* 3609 * Tell the other part of the chip to suspend temporarily all 3610 * its DMA activity. 
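 * The handshake below works as follows: post a chip-owned MBX_KILL_BOARD
 * word to the SLIM mailbox, seed resp_buf + 1 with BARRIER_TEST_PATTERN,
 * and then poll until the chip complements that pattern (or a timeout or
 * ERATT is seen), at which point DMA is taken to be quiesced.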
3611 */ 3612 resp_buf = phba->MBslimaddr; 3613 3614 /* Disable the error attention */ 3615 hc_copy = readl(phba->HCregaddr); 3616 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3617 readl(phba->HCregaddr); /* flush */ 3618 phba->link_flag |= LS_IGNORE_ERATT; 3619 3620 if (readl(phba->HAregaddr) & HA_ERATT) { 3621 /* Clear Chip error bit */ 3622 writel(HA_ERATT, phba->HAregaddr); 3623 phba->pport->stopped = 1; 3624 } 3625 3626 mbox = 0; 3627 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3628 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3629 3630 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3631 mbox_buf = phba->MBslimaddr; 3632 writel(mbox, mbox_buf); 3633 3634 for (i = 0; 3635 readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++) 3636 mdelay(1); 3637 3638 if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { 3639 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3640 phba->pport->stopped) 3641 goto restore_hc; 3642 else 3643 goto clear_errat; 3644 } 3645 3646 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3647 for (i = 0; readl(resp_buf) != mbox && i < 500; i++) 3648 mdelay(1); 3649 3650 clear_errat: 3651 3652 while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500) 3653 mdelay(1); 3654 3655 if (readl(phba->HAregaddr) & HA_ERATT) { 3656 writel(HA_ERATT, phba->HAregaddr); 3657 phba->pport->stopped = 1; 3658 } 3659 3660 restore_hc: 3661 phba->link_flag &= ~LS_IGNORE_ERATT; 3662 writel(hc_copy, phba->HCregaddr); 3663 readl(phba->HCregaddr); /* flush */ 3664 } 3665 3666 /** 3667 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3668 * @phba: Pointer to HBA context object. 3669 * 3670 * This function issues a kill_board mailbox command and waits for 3671 * the error attention interrupt. This function is called for stopping 3672 * the firmware processing. The caller is not required to hold any 3673 * locks. This function calls lpfc_hba_down_post function to free 3674 * any pending commands after the kill. The function will return 1 when it 3675 * fails to kill the board else will return 0. 
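 *
 * Hypothetical usage sketch (return check only; not quoted from a real
 * call site in this driver):
 *
 *	if (lpfc_sli_brdkill(phba))
 *		... board did not raise ERATT; its state is undefined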
3676 **/ 3677 int 3678 lpfc_sli_brdkill(struct lpfc_hba *phba) 3679 { 3680 struct lpfc_sli *psli; 3681 LPFC_MBOXQ_t *pmb; 3682 uint32_t status; 3683 uint32_t ha_copy; 3684 int retval; 3685 int i = 0; 3686 3687 psli = &phba->sli; 3688 3689 /* Kill HBA */ 3690 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3691 "0329 Kill HBA Data: x%x x%x\n", 3692 phba->pport->port_state, psli->sli_flag); 3693 3694 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3695 if (!pmb) 3696 return 1; 3697 3698 /* Disable the error attention */ 3699 spin_lock_irq(&phba->hbalock); 3700 status = readl(phba->HCregaddr); 3701 status &= ~HC_ERINT_ENA; 3702 writel(status, phba->HCregaddr); 3703 readl(phba->HCregaddr); /* flush */ 3704 phba->link_flag |= LS_IGNORE_ERATT; 3705 spin_unlock_irq(&phba->hbalock); 3706 3707 lpfc_kill_board(phba, pmb); 3708 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3709 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3710 3711 if (retval != MBX_SUCCESS) { 3712 if (retval != MBX_BUSY) 3713 mempool_free(pmb, phba->mbox_mem_pool); 3714 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3715 "2752 KILL_BOARD command failed retval %d\n", 3716 retval); 3717 spin_lock_irq(&phba->hbalock); 3718 phba->link_flag &= ~LS_IGNORE_ERATT; 3719 spin_unlock_irq(&phba->hbalock); 3720 return 1; 3721 } 3722 3723 spin_lock_irq(&phba->hbalock); 3724 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3725 spin_unlock_irq(&phba->hbalock); 3726 3727 mempool_free(pmb, phba->mbox_mem_pool); 3728 3729 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3730 * attention every 100ms for 3 seconds. If we don't get ERATT after 3731 * 3 seconds we still set HBA_ERROR state because the status of the 3732 * board is now undefined. 3733 */ 3734 ha_copy = readl(phba->HAregaddr); 3735 3736 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3737 mdelay(100); 3738 ha_copy = readl(phba->HAregaddr); 3739 } 3740 3741 del_timer_sync(&psli->mbox_tmo); 3742 if (ha_copy & HA_ERATT) { 3743 writel(HA_ERATT, phba->HAregaddr); 3744 phba->pport->stopped = 1; 3745 } 3746 spin_lock_irq(&phba->hbalock); 3747 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3748 psli->mbox_active = NULL; 3749 phba->link_flag &= ~LS_IGNORE_ERATT; 3750 spin_unlock_irq(&phba->hbalock); 3751 3752 lpfc_hba_down_post(phba); 3753 phba->link_state = LPFC_HBA_ERROR; 3754 3755 return ha_copy & HA_ERATT ? 0 : 1; 3756 } 3757 3758 /** 3759 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3760 * @phba: Pointer to HBA context object. 3761 * 3762 * This function resets the HBA by writing HC_INITFF to the control 3763 * register. After the HBA resets, this function resets all the iocb ring 3764 * indices. This function disables PCI layer parity checking during 3765 * the reset. 3766 * This function returns 0 always. 3767 * The caller is not required to hold any locks. 
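 *
 * The reset itself is the INITFF pulse performed in the body below:
 *
 *	writel(HC_INITFF, phba->HCregaddr);
 *	mdelay(1);
 *	readl(phba->HCregaddr);		(read back to flush the write)
 *	writel(0, phba->HCregaddr);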
3768 **/
3769 int
3770 lpfc_sli_brdreset(struct lpfc_hba *phba)
3771 {
3772 struct lpfc_sli *psli;
3773 struct lpfc_sli_ring *pring;
3774 uint16_t cfg_value;
3775 int i;
3776
3777 psli = &phba->sli;
3778
3779 /* Reset HBA */
3780 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3781 "0325 Reset HBA Data: x%x x%x\n",
3782 phba->pport->port_state, psli->sli_flag);
3783
3784 /* perform board reset */
3785 phba->fc_eventTag = 0;
3786 phba->link_events = 0;
3787 phba->pport->fc_myDID = 0;
3788 phba->pport->fc_prevDID = 0;
3789
3790 /* Turn off parity checking and serr during the physical reset */
3791 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
3792 pci_write_config_word(phba->pcidev, PCI_COMMAND,
3793 (cfg_value &
3794 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
3795
3796 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
3797
3798 /* Now toggle INITFF bit in the Host Control Register */
3799 writel(HC_INITFF, phba->HCregaddr);
3800 mdelay(1);
3801 readl(phba->HCregaddr); /* flush */
3802 writel(0, phba->HCregaddr);
3803 readl(phba->HCregaddr); /* flush */
3804
3805 /* Restore PCI cmd register */
3806 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
3807
3808 /* Initialize relevant SLI info */
3809 for (i = 0; i < psli->num_rings; i++) {
3810 pring = &psli->ring[i];
3811 pring->flag = 0;
3812 pring->rspidx = 0;
3813 pring->next_cmdidx = 0;
3814 pring->local_getidx = 0;
3815 pring->cmdidx = 0;
3816 pring->missbufcnt = 0;
3817 }
3818
3819 phba->link_state = LPFC_WARM_START;
3820 return 0;
3821 }
3822
3823 /**
3824 * lpfc_sli4_brdreset - Reset a sli-4 HBA
3825 * @phba: Pointer to HBA context object.
3826 *
3827 * This function resets a SLI4 HBA. This function disables PCI layer parity
3828 * checking while it resets the device. The caller is not required to hold
3829 * any locks.
3830 *
3831 * This function returns 0 always.
3832 **/ 3833 int 3834 lpfc_sli4_brdreset(struct lpfc_hba *phba) 3835 { 3836 struct lpfc_sli *psli = &phba->sli; 3837 uint16_t cfg_value; 3838 uint8_t qindx; 3839 3840 /* Reset HBA */ 3841 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3842 "0295 Reset HBA Data: x%x x%x\n", 3843 phba->pport->port_state, psli->sli_flag); 3844 3845 /* perform board reset */ 3846 phba->fc_eventTag = 0; 3847 phba->link_events = 0; 3848 phba->pport->fc_myDID = 0; 3849 phba->pport->fc_prevDID = 0; 3850 3851 spin_lock_irq(&phba->hbalock); 3852 psli->sli_flag &= ~(LPFC_PROCESS_LA); 3853 phba->fcf.fcf_flag = 0; 3854 /* Clean up the child queue list for the CQs */ 3855 list_del_init(&phba->sli4_hba.mbx_wq->list); 3856 list_del_init(&phba->sli4_hba.els_wq->list); 3857 list_del_init(&phba->sli4_hba.hdr_rq->list); 3858 list_del_init(&phba->sli4_hba.dat_rq->list); 3859 list_del_init(&phba->sli4_hba.mbx_cq->list); 3860 list_del_init(&phba->sli4_hba.els_cq->list); 3861 for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) 3862 list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); 3863 for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) 3864 list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); 3865 spin_unlock_irq(&phba->hbalock); 3866 3867 /* Now physically reset the device */ 3868 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 3869 "0389 Performing PCI function reset!\n"); 3870 3871 /* Turn off parity checking and serr during the physical reset */ 3872 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 3873 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 3874 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 3875 3876 /* Perform FCoE PCI function reset */ 3877 lpfc_pci_function_reset(phba); 3878 3879 /* Restore PCI cmd register */ 3880 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 3881 3882 return 0; 3883 } 3884 3885 /** 3886 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 3887 * @phba: Pointer to HBA context object. 3888 * 3889 * This function is called in the SLI initialization code path to 3890 * restart the HBA. The caller is not required to hold any lock. 3891 * This function writes MBX_RESTART mailbox command to the SLIM and 3892 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 3893 * function to free any pending commands. The function enables 3894 * POST only during the first initialization. The function returns zero. 3895 * The function does not guarantee completion of MBX_RESTART mailbox 3896 * command before the return of this function. 
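 *
 * For reference, the body below writes two words to SLIM (this summary is
 * derived from the code itself, not from an external spec):
 *
 *	word0: an MBX_RESTART command with the mbxHc bit set
 *	word1: non-zero once fc_ffinit has completed (port_state set),
 *	       which makes the adapter skip POST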
3897 **/ 3898 static int 3899 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 3900 { 3901 MAILBOX_t *mb; 3902 struct lpfc_sli *psli; 3903 volatile uint32_t word0; 3904 void __iomem *to_slim; 3905 uint32_t hba_aer_enabled; 3906 3907 spin_lock_irq(&phba->hbalock); 3908 3909 /* Take PCIe device Advanced Error Reporting (AER) state */ 3910 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3911 3912 psli = &phba->sli; 3913 3914 /* Restart HBA */ 3915 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3916 "0337 Restart HBA Data: x%x x%x\n", 3917 phba->pport->port_state, psli->sli_flag); 3918 3919 word0 = 0; 3920 mb = (MAILBOX_t *) &word0; 3921 mb->mbxCommand = MBX_RESTART; 3922 mb->mbxHc = 1; 3923 3924 lpfc_reset_barrier(phba); 3925 3926 to_slim = phba->MBslimaddr; 3927 writel(*(uint32_t *) mb, to_slim); 3928 readl(to_slim); /* flush */ 3929 3930 /* Only skip post after fc_ffinit is completed */ 3931 if (phba->pport->port_state) 3932 word0 = 1; /* This is really setting up word1 */ 3933 else 3934 word0 = 0; /* This is really setting up word1 */ 3935 to_slim = phba->MBslimaddr + sizeof (uint32_t); 3936 writel(*(uint32_t *) mb, to_slim); 3937 readl(to_slim); /* flush */ 3938 3939 lpfc_sli_brdreset(phba); 3940 phba->pport->stopped = 0; 3941 phba->link_state = LPFC_INIT_START; 3942 phba->hba_flag = 0; 3943 spin_unlock_irq(&phba->hbalock); 3944 3945 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3946 psli->stats_start = get_seconds(); 3947 3948 /* Give the INITFF and Post time to settle. */ 3949 mdelay(100); 3950 3951 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3952 if (hba_aer_enabled) 3953 pci_disable_pcie_error_reporting(phba->pcidev); 3954 3955 lpfc_hba_down_post(phba); 3956 3957 return 0; 3958 } 3959 3960 /** 3961 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 3962 * @phba: Pointer to HBA context object. 3963 * 3964 * This function is called in the SLI initialization code path to restart 3965 * a SLI4 HBA. The caller is not required to hold any lock. 3966 * At the end of the function, it calls lpfc_hba_down_post function to 3967 * free any pending commands. 3968 **/ 3969 static int 3970 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 3971 { 3972 struct lpfc_sli *psli = &phba->sli; 3973 uint32_t hba_aer_enabled; 3974 3975 /* Restart HBA */ 3976 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3977 "0296 Restart HBA Data: x%x x%x\n", 3978 phba->pport->port_state, psli->sli_flag); 3979 3980 /* Take PCIe device Advanced Error Reporting (AER) state */ 3981 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 3982 3983 lpfc_sli4_brdreset(phba); 3984 3985 spin_lock_irq(&phba->hbalock); 3986 phba->pport->stopped = 0; 3987 phba->link_state = LPFC_INIT_START; 3988 phba->hba_flag = 0; 3989 spin_unlock_irq(&phba->hbalock); 3990 3991 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 3992 psli->stats_start = get_seconds(); 3993 3994 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 3995 if (hba_aer_enabled) 3996 pci_disable_pcie_error_reporting(phba->pcidev); 3997 3998 lpfc_hba_down_post(phba); 3999 4000 return 0; 4001 } 4002 4003 /** 4004 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4005 * @phba: Pointer to HBA context object. 4006 * 4007 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4008 * API jump table function pointer from the lpfc_hba struct. 
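 *
 * Typical usage, as in lpfc_sli_config_port() later in this file:
 *
 *	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
 *	lpfc_sli_brdrestart(phba);
 *	rc = lpfc_sli_chipset_init(phba);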
4009 **/ 4010 int 4011 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4012 { 4013 return phba->lpfc_sli_brdrestart(phba); 4014 } 4015 4016 /** 4017 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4018 * @phba: Pointer to HBA context object. 4019 * 4020 * This function is called after a HBA restart to wait for successful 4021 * restart of the HBA. Successful restart of the HBA is indicated by 4022 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4023 * iteration, the function will restart the HBA again. The function returns 4024 * zero if HBA successfully restarted else returns negative error code. 4025 **/ 4026 static int 4027 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4028 { 4029 uint32_t status, i = 0; 4030 4031 /* Read the HBA Host Status Register */ 4032 status = readl(phba->HSregaddr); 4033 4034 /* Check status register to see what current state is */ 4035 i = 0; 4036 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4037 4038 /* Check every 10ms for 10 retries, then every 100ms for 90 4039 * retries, then every 1 sec for 50 retires for a total of 4040 * ~60 seconds before reset the board again and check every 4041 * 1 sec for 50 retries. The up to 60 seconds before the 4042 * board ready is required by the Falcon FIPS zeroization 4043 * complete, and any reset the board in between shall cause 4044 * restart of zeroization, further delay the board ready. 4045 */ 4046 if (i++ >= 200) { 4047 /* Adapter failed to init, timeout, status reg 4048 <status> */ 4049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4050 "0436 Adapter failed to init, " 4051 "timeout, status reg x%x, " 4052 "FW Data: A8 x%x AC x%x\n", status, 4053 readl(phba->MBslimaddr + 0xa8), 4054 readl(phba->MBslimaddr + 0xac)); 4055 phba->link_state = LPFC_HBA_ERROR; 4056 return -ETIMEDOUT; 4057 } 4058 4059 /* Check to see if any errors occurred during init */ 4060 if (status & HS_FFERM) { 4061 /* ERROR: During chipset initialization */ 4062 /* Adapter failed to init, chipset, status reg 4063 <status> */ 4064 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4065 "0437 Adapter failed to init, " 4066 "chipset, status reg x%x, " 4067 "FW Data: A8 x%x AC x%x\n", status, 4068 readl(phba->MBslimaddr + 0xa8), 4069 readl(phba->MBslimaddr + 0xac)); 4070 phba->link_state = LPFC_HBA_ERROR; 4071 return -EIO; 4072 } 4073 4074 if (i <= 10) 4075 msleep(10); 4076 else if (i <= 100) 4077 msleep(100); 4078 else 4079 msleep(1000); 4080 4081 if (i == 150) { 4082 /* Do post */ 4083 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4084 lpfc_sli_brdrestart(phba); 4085 } 4086 /* Read the HBA Host Status Register */ 4087 status = readl(phba->HSregaddr); 4088 } 4089 4090 /* Check to see if any errors occurred during init */ 4091 if (status & HS_FFERM) { 4092 /* ERROR: During chipset initialization */ 4093 /* Adapter failed to init, chipset, status reg <status> */ 4094 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4095 "0438 Adapter failed to init, chipset, " 4096 "status reg x%x, " 4097 "FW Data: A8 x%x AC x%x\n", status, 4098 readl(phba->MBslimaddr + 0xa8), 4099 readl(phba->MBslimaddr + 0xac)); 4100 phba->link_state = LPFC_HBA_ERROR; 4101 return -EIO; 4102 } 4103 4104 /* Clear all interrupt enable conditions */ 4105 writel(0, phba->HCregaddr); 4106 readl(phba->HCregaddr); /* flush */ 4107 4108 /* setup host attn register */ 4109 writel(0xffffffff, phba->HAregaddr); 4110 readl(phba->HAregaddr); /* flush */ 4111 return 0; 4112 } 4113 4114 /** 4115 * lpfc_sli_hbq_count - Get the number of HBQs to be 
configured 4116 * 4117 * This function calculates and returns the number of HBQs required to be 4118 * configured. 4119 **/ 4120 int 4121 lpfc_sli_hbq_count(void) 4122 { 4123 return ARRAY_SIZE(lpfc_hbq_defs); 4124 } 4125 4126 /** 4127 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4128 * 4129 * This function adds the number of hbq entries in every HBQ to get 4130 * the total number of hbq entries required for the HBA and returns 4131 * the total count. 4132 **/ 4133 static int 4134 lpfc_sli_hbq_entry_count(void) 4135 { 4136 int hbq_count = lpfc_sli_hbq_count(); 4137 int count = 0; 4138 int i; 4139 4140 for (i = 0; i < hbq_count; ++i) 4141 count += lpfc_hbq_defs[i]->entry_count; 4142 return count; 4143 } 4144 4145 /** 4146 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4147 * 4148 * This function calculates amount of memory required for all hbq entries 4149 * to be configured and returns the total memory required. 4150 **/ 4151 int 4152 lpfc_sli_hbq_size(void) 4153 { 4154 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4155 } 4156 4157 /** 4158 * lpfc_sli_hbq_setup - configure and initialize HBQs 4159 * @phba: Pointer to HBA context object. 4160 * 4161 * This function is called during the SLI initialization to configure 4162 * all the HBQs and post buffers to the HBQ. The caller is not 4163 * required to hold any locks. This function will return zero if successful 4164 * else it will return negative error code. 4165 **/ 4166 static int 4167 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4168 { 4169 int hbq_count = lpfc_sli_hbq_count(); 4170 LPFC_MBOXQ_t *pmb; 4171 MAILBOX_t *pmbox; 4172 uint32_t hbqno; 4173 uint32_t hbq_entry_index; 4174 4175 /* Get a Mailbox buffer to setup mailbox 4176 * commands for HBA initialization 4177 */ 4178 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4179 4180 if (!pmb) 4181 return -ENOMEM; 4182 4183 pmbox = &pmb->u.mb; 4184 4185 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4186 phba->link_state = LPFC_INIT_MBX_CMDS; 4187 phba->hbq_in_use = 1; 4188 4189 hbq_entry_index = 0; 4190 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4191 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4192 phba->hbqs[hbqno].hbqPutIdx = 0; 4193 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4194 phba->hbqs[hbqno].entry_count = 4195 lpfc_hbq_defs[hbqno]->entry_count; 4196 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4197 hbq_entry_index, pmb); 4198 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4199 4200 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4201 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4202 mbxStatus <status>, ring <num> */ 4203 4204 lpfc_printf_log(phba, KERN_ERR, 4205 LOG_SLI | LOG_VPORT, 4206 "1805 Adapter failed to init. " 4207 "Data: x%x x%x x%x\n", 4208 pmbox->mbxCommand, 4209 pmbox->mbxStatus, hbqno); 4210 4211 phba->link_state = LPFC_HBA_ERROR; 4212 mempool_free(pmb, phba->mbox_mem_pool); 4213 return -ENXIO; 4214 } 4215 } 4216 phba->hbq_count = hbq_count; 4217 4218 mempool_free(pmb, phba->mbox_mem_pool); 4219 4220 /* Initially populate or replenish the HBQs */ 4221 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4222 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4223 return 0; 4224 } 4225 4226 /** 4227 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4228 * @phba: Pointer to HBA context object. 4229 * 4230 * This function is called during the SLI initialization to configure 4231 * all the HBQs and post buffers to the HBQ. 
The caller is not 4232 * required to hold any locks. This function will return zero if successful 4233 * else it will return negative error code. 4234 **/ 4235 static int 4236 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4237 { 4238 phba->hbq_in_use = 1; 4239 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4240 phba->hbq_count = 1; 4241 /* Initially populate or replenish the HBQs */ 4242 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4243 return 0; 4244 } 4245 4246 /** 4247 * lpfc_sli_config_port - Issue config port mailbox command 4248 * @phba: Pointer to HBA context object. 4249 * @sli_mode: sli mode - 2/3 4250 * 4251 * This function is called by the sli intialization code path 4252 * to issue config_port mailbox command. This function restarts the 4253 * HBA firmware and issues a config_port mailbox command to configure 4254 * the SLI interface in the sli mode specified by sli_mode 4255 * variable. The caller is not required to hold any locks. 4256 * The function returns 0 if successful, else returns negative error 4257 * code. 4258 **/ 4259 int 4260 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4261 { 4262 LPFC_MBOXQ_t *pmb; 4263 uint32_t resetcount = 0, rc = 0, done = 0; 4264 4265 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4266 if (!pmb) { 4267 phba->link_state = LPFC_HBA_ERROR; 4268 return -ENOMEM; 4269 } 4270 4271 phba->sli_rev = sli_mode; 4272 while (resetcount < 2 && !done) { 4273 spin_lock_irq(&phba->hbalock); 4274 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4275 spin_unlock_irq(&phba->hbalock); 4276 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4277 lpfc_sli_brdrestart(phba); 4278 rc = lpfc_sli_chipset_init(phba); 4279 if (rc) 4280 break; 4281 4282 spin_lock_irq(&phba->hbalock); 4283 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4284 spin_unlock_irq(&phba->hbalock); 4285 resetcount++; 4286 4287 /* Call pre CONFIG_PORT mailbox command initialization. A 4288 * value of 0 means the call was successful. Any other 4289 * nonzero value is a failure, but if ERESTART is returned, 4290 * the driver may reset the HBA and try again. 
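 * The -ERESTART case below marks the link state LPFC_LINK_UNKNOWN and
 * loops back for another restart attempt; any other non-zero return
 * aborts the CONFIG_PORT setup.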
4291 */ 4292 rc = lpfc_config_port_prep(phba); 4293 if (rc == -ERESTART) { 4294 phba->link_state = LPFC_LINK_UNKNOWN; 4295 continue; 4296 } else if (rc) 4297 break; 4298 phba->link_state = LPFC_INIT_MBX_CMDS; 4299 lpfc_config_port(phba, pmb); 4300 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4301 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4302 LPFC_SLI3_HBQ_ENABLED | 4303 LPFC_SLI3_CRP_ENABLED | 4304 LPFC_SLI3_BG_ENABLED | 4305 LPFC_SLI3_DSS_ENABLED); 4306 if (rc != MBX_SUCCESS) { 4307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4308 "0442 Adapter failed to init, mbxCmd x%x " 4309 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4310 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4311 spin_lock_irq(&phba->hbalock); 4312 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4313 spin_unlock_irq(&phba->hbalock); 4314 rc = -ENXIO; 4315 } else { 4316 /* Allow asynchronous mailbox command to go through */ 4317 spin_lock_irq(&phba->hbalock); 4318 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4319 spin_unlock_irq(&phba->hbalock); 4320 done = 1; 4321 } 4322 } 4323 if (!done) { 4324 rc = -EINVAL; 4325 goto do_prep_failed; 4326 } 4327 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4328 if (!pmb->u.mb.un.varCfgPort.cMA) { 4329 rc = -ENXIO; 4330 goto do_prep_failed; 4331 } 4332 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4333 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4334 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4335 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4336 phba->max_vpi : phba->max_vports; 4337 4338 } else 4339 phba->max_vpi = 0; 4340 phba->fips_level = 0; 4341 phba->fips_spec_rev = 0; 4342 if (pmb->u.mb.un.varCfgPort.gdss) { 4343 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4344 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4345 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4346 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4347 "2850 Security Crypto Active. FIPS x%d " 4348 "(Spec Rev: x%d)", 4349 phba->fips_level, phba->fips_spec_rev); 4350 } 4351 if (pmb->u.mb.un.varCfgPort.sec_err) { 4352 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4353 "2856 Config Port Security Crypto " 4354 "Error: x%x ", 4355 pmb->u.mb.un.varCfgPort.sec_err); 4356 } 4357 if (pmb->u.mb.un.varCfgPort.gerbm) 4358 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4359 if (pmb->u.mb.un.varCfgPort.gcrp) 4360 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4361 4362 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4363 phba->port_gp = phba->mbox->us.s3_pgp.port; 4364 4365 if (phba->cfg_enable_bg) { 4366 if (pmb->u.mb.un.varCfgPort.gbg) 4367 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4368 else 4369 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4370 "0443 Adapter did not grant " 4371 "BlockGuard\n"); 4372 } 4373 } else { 4374 phba->hbq_get = NULL; 4375 phba->port_gp = phba->mbox->us.s2.port; 4376 phba->max_vpi = 0; 4377 } 4378 do_prep_failed: 4379 mempool_free(pmb, phba->mbox_mem_pool); 4380 return rc; 4381 } 4382 4383 4384 /** 4385 * lpfc_sli_hba_setup - SLI intialization function 4386 * @phba: Pointer to HBA context object. 4387 * 4388 * This function is the main SLI intialization function. This function 4389 * is called by the HBA intialization code, HBA reset code and HBA 4390 * error attention handler code. Caller is not required to hold any 4391 * locks. This function issues config_port mailbox command to configure 4392 * the SLI, setup iocb rings and HBQ rings. In the end the function 4393 * calls the config_port_post function to issue init_link mailbox 4394 * command and to start the discovery. 
The function will return zero 4395 * if successful, else it will return negative error code. 4396 **/ 4397 int 4398 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4399 { 4400 uint32_t rc; 4401 int mode = 3; 4402 4403 switch (lpfc_sli_mode) { 4404 case 2: 4405 if (phba->cfg_enable_npiv) { 4406 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4407 "1824 NPIV enabled: Override lpfc_sli_mode " 4408 "parameter (%d) to auto (0).\n", 4409 lpfc_sli_mode); 4410 break; 4411 } 4412 mode = 2; 4413 break; 4414 case 0: 4415 case 3: 4416 break; 4417 default: 4418 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4419 "1819 Unrecognized lpfc_sli_mode " 4420 "parameter: %d.\n", lpfc_sli_mode); 4421 4422 break; 4423 } 4424 4425 rc = lpfc_sli_config_port(phba, mode); 4426 4427 if (rc && lpfc_sli_mode == 3) 4428 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4429 "1820 Unable to select SLI-3. " 4430 "Not supported by adapter.\n"); 4431 if (rc && mode != 2) 4432 rc = lpfc_sli_config_port(phba, 2); 4433 if (rc) 4434 goto lpfc_sli_hba_setup_error; 4435 4436 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4437 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4438 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4439 if (!rc) { 4440 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4441 "2709 This device supports " 4442 "Advanced Error Reporting (AER)\n"); 4443 spin_lock_irq(&phba->hbalock); 4444 phba->hba_flag |= HBA_AER_ENABLED; 4445 spin_unlock_irq(&phba->hbalock); 4446 } else { 4447 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4448 "2708 This device does not support " 4449 "Advanced Error Reporting (AER)\n"); 4450 phba->cfg_aer_support = 0; 4451 } 4452 } 4453 4454 if (phba->sli_rev == 3) { 4455 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4456 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4457 } else { 4458 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4459 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4460 phba->sli3_options = 0; 4461 } 4462 4463 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4464 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4465 phba->sli_rev, phba->max_vpi); 4466 rc = lpfc_sli_ring_map(phba); 4467 4468 if (rc) 4469 goto lpfc_sli_hba_setup_error; 4470 4471 /* Init HBQs */ 4472 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4473 rc = lpfc_sli_hbq_setup(phba); 4474 if (rc) 4475 goto lpfc_sli_hba_setup_error; 4476 } 4477 spin_lock_irq(&phba->hbalock); 4478 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4479 spin_unlock_irq(&phba->hbalock); 4480 4481 rc = lpfc_config_port_post(phba); 4482 if (rc) 4483 goto lpfc_sli_hba_setup_error; 4484 4485 return rc; 4486 4487 lpfc_sli_hba_setup_error: 4488 phba->link_state = LPFC_HBA_ERROR; 4489 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4490 "0445 Firmware initialization failed\n"); 4491 return rc; 4492 } 4493 4494 /** 4495 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4496 * @phba: Pointer to HBA context object. 4497 * @mboxq: mailbox pointer. 4498 * This function issue a dump mailbox command to read config region 4499 * 23 and parse the records in the region and populate driver 4500 * data structure. 
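 *
 * Return codes (as implemented in the body that follows):
 *	0	- config region 23 read and parsed successfully
 *	-ENOMEM	- the dump mailbox command could not be set up
 *	-EIO	- the mailbox failed or reported an oversized data length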
4501 **/ 4502 static int 4503 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, 4504 LPFC_MBOXQ_t *mboxq) 4505 { 4506 struct lpfc_dmabuf *mp; 4507 struct lpfc_mqe *mqe; 4508 uint32_t data_length; 4509 int rc; 4510 4511 /* Program the default value of vlan_id and fc_map */ 4512 phba->valid_vlan = 0; 4513 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4514 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4515 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4516 4517 mqe = &mboxq->u.mqe; 4518 if (lpfc_dump_fcoe_param(phba, mboxq)) 4519 return -ENOMEM; 4520 4521 mp = (struct lpfc_dmabuf *) mboxq->context1; 4522 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4523 4524 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4525 "(%d):2571 Mailbox cmd x%x Status x%x " 4526 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4527 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4528 "CQ: x%x x%x x%x x%x\n", 4529 mboxq->vport ? mboxq->vport->vpi : 0, 4530 bf_get(lpfc_mqe_command, mqe), 4531 bf_get(lpfc_mqe_status, mqe), 4532 mqe->un.mb_words[0], mqe->un.mb_words[1], 4533 mqe->un.mb_words[2], mqe->un.mb_words[3], 4534 mqe->un.mb_words[4], mqe->un.mb_words[5], 4535 mqe->un.mb_words[6], mqe->un.mb_words[7], 4536 mqe->un.mb_words[8], mqe->un.mb_words[9], 4537 mqe->un.mb_words[10], mqe->un.mb_words[11], 4538 mqe->un.mb_words[12], mqe->un.mb_words[13], 4539 mqe->un.mb_words[14], mqe->un.mb_words[15], 4540 mqe->un.mb_words[16], mqe->un.mb_words[50], 4541 mboxq->mcqe.word0, 4542 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4543 mboxq->mcqe.trailer); 4544 4545 if (rc) { 4546 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4547 kfree(mp); 4548 return -EIO; 4549 } 4550 data_length = mqe->un.mb_words[5]; 4551 if (data_length > DMP_RGN23_SIZE) { 4552 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4553 kfree(mp); 4554 return -EIO; 4555 } 4556 4557 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4558 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4559 kfree(mp); 4560 return 0; 4561 } 4562 4563 /** 4564 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4565 * @phba: pointer to lpfc hba data structure. 4566 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4567 * @vpd: pointer to the memory to hold resulting port vpd data. 4568 * @vpd_size: On input, the number of bytes allocated to @vpd. 4569 * On output, the number of data bytes in @vpd. 4570 * 4571 * This routine executes a READ_REV SLI4 mailbox command. In 4572 * addition, this routine gets the port vpd data. 4573 * 4574 * Return codes 4575 * 0 - successful 4576 * -ENOMEM - could not allocated memory. 4577 **/ 4578 static int 4579 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4580 uint8_t *vpd, uint32_t *vpd_size) 4581 { 4582 int rc = 0; 4583 uint32_t dma_size; 4584 struct lpfc_dmabuf *dmabuf; 4585 struct lpfc_mqe *mqe; 4586 4587 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4588 if (!dmabuf) 4589 return -ENOMEM; 4590 4591 /* 4592 * Get a DMA buffer for the vpd data resulting from the READ_REV 4593 * mailbox command. 4594 */ 4595 dma_size = *vpd_size; 4596 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4597 dma_size, 4598 &dmabuf->phys, 4599 GFP_KERNEL); 4600 if (!dmabuf->virt) { 4601 kfree(dmabuf); 4602 return -ENOMEM; 4603 } 4604 memset(dmabuf->virt, 0, dma_size); 4605 4606 /* 4607 * The SLI4 implementation of READ_REV conflicts at word1, 4608 * bits 31:16 and SLI4 adds vpd functionality not present 4609 * in SLI3. This code corrects the conflicts. 
4610 */ 4611 lpfc_read_rev(phba, mboxq); 4612 mqe = &mboxq->u.mqe; 4613 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4614 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4615 mqe->un.read_rev.word1 &= 0x0000FFFF; 4616 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4617 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4618 4619 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4620 if (rc) { 4621 dma_free_coherent(&phba->pcidev->dev, dma_size, 4622 dmabuf->virt, dmabuf->phys); 4623 kfree(dmabuf); 4624 return -EIO; 4625 } 4626 4627 /* 4628 * The available vpd length cannot be bigger than the 4629 * DMA buffer passed to the port. Catch the less than 4630 * case and update the caller's size. 4631 */ 4632 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4633 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4634 4635 memcpy(vpd, dmabuf->virt, *vpd_size); 4636 4637 dma_free_coherent(&phba->pcidev->dev, dma_size, 4638 dmabuf->virt, dmabuf->phys); 4639 kfree(dmabuf); 4640 return 0; 4641 } 4642 4643 /** 4644 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4645 * @phba: pointer to lpfc hba data structure. 4646 * 4647 * This routine is called to explicitly arm the SLI4 device's completion and 4648 * event queues 4649 **/ 4650 static void 4651 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 4652 { 4653 uint8_t fcp_eqidx; 4654 4655 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 4656 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 4657 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4658 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 4659 LPFC_QUEUE_REARM); 4660 lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); 4661 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) 4662 lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], 4663 LPFC_QUEUE_REARM); 4664 } 4665 4666 /** 4667 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 4668 * @phba: Pointer to HBA context object. 4669 * 4670 * This function is the main SLI4 device intialization PCI function. This 4671 * function is called by the HBA intialization code, HBA reset code and 4672 * HBA error attention handler code. Caller is not required to hold any 4673 * locks. 4674 **/ 4675 int 4676 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 4677 { 4678 int rc; 4679 LPFC_MBOXQ_t *mboxq; 4680 struct lpfc_mqe *mqe; 4681 uint8_t *vpd; 4682 uint32_t vpd_size; 4683 uint32_t ftr_rsp = 0; 4684 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 4685 struct lpfc_vport *vport = phba->pport; 4686 struct lpfc_dmabuf *mp; 4687 4688 /* 4689 * TODO: Why does this routine execute these task in a different 4690 * order from probe? 4691 */ 4692 /* Perform a PCI function reset to start from clean */ 4693 rc = lpfc_pci_function_reset(phba); 4694 if (unlikely(rc)) 4695 return -ENODEV; 4696 4697 /* Check the HBA Host Status Register for readyness */ 4698 rc = lpfc_sli4_post_status_check(phba); 4699 if (unlikely(rc)) 4700 return -ENODEV; 4701 else { 4702 spin_lock_irq(&phba->hbalock); 4703 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 4704 spin_unlock_irq(&phba->hbalock); 4705 } 4706 4707 /* 4708 * Allocate a single mailbox container for initializing the 4709 * port. 
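 * This one mailbox is reused for every MBX_POLL command issued during
 * this setup routine and is released at the out_free_mbox label on all
 * exit paths.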
4710 */ 4711 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4712 if (!mboxq) 4713 return -ENOMEM; 4714 4715 /* 4716 * Continue initialization with default values even if driver failed 4717 * to read FCoE param config regions 4718 */ 4719 if (lpfc_sli4_read_fcoe_params(phba, mboxq)) 4720 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 4721 "2570 Failed to read FCoE parameters\n"); 4722 4723 /* Issue READ_REV to collect vpd and FW information. */ 4724 vpd_size = SLI4_PAGE_SIZE; 4725 vpd = kzalloc(vpd_size, GFP_KERNEL); 4726 if (!vpd) { 4727 rc = -ENOMEM; 4728 goto out_free_mbox; 4729 } 4730 4731 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 4732 if (unlikely(rc)) { 4733 kfree(vpd); 4734 goto out_free_mbox; 4735 } 4736 mqe = &mboxq->u.mqe; 4737 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 4738 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 4739 phba->hba_flag |= HBA_FCOE_MODE; 4740 else 4741 phba->hba_flag &= ~HBA_FCOE_MODE; 4742 4743 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 4744 LPFC_DCBX_CEE_MODE) 4745 phba->hba_flag |= HBA_FIP_SUPPORT; 4746 else 4747 phba->hba_flag &= ~HBA_FIP_SUPPORT; 4748 4749 if (phba->sli_rev != LPFC_SLI_REV4 || 4750 !(phba->hba_flag & HBA_FCOE_MODE)) { 4751 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4752 "0376 READ_REV Error. SLI Level %d " 4753 "FCoE enabled %d\n", 4754 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 4755 rc = -EIO; 4756 kfree(vpd); 4757 goto out_free_mbox; 4758 } 4759 /* 4760 * Evaluate the read rev and vpd data. Populate the driver 4761 * state with the results. If this routine fails, the failure 4762 * is not fatal as the driver will use generic values. 4763 */ 4764 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 4765 if (unlikely(!rc)) { 4766 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4767 "0377 Error %d parsing vpd. " 4768 "Using defaults.\n", rc); 4769 rc = 0; 4770 } 4771 kfree(vpd); 4772 4773 /* Save information as VPD data */ 4774 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 4775 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 4776 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 4777 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 4778 &mqe->un.read_rev); 4779 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 4780 &mqe->un.read_rev); 4781 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 4782 &mqe->un.read_rev); 4783 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 4784 &mqe->un.read_rev); 4785 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 4786 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 4787 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 4788 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 4789 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 4790 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 4791 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4792 "(%d):0380 READ_REV Status x%x " 4793 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 4794 mboxq->vport ? mboxq->vport->vpi : 0, 4795 bf_get(lpfc_mqe_status, mqe), 4796 phba->vpd.rev.opFwName, 4797 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 4798 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 4799 4800 /* 4801 * Discover the port's supported feature set and match it against the 4802 * hosts requests. 
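 * The checks below only log a feature mismatch; for BlockGuard and NPIV
 * the corresponding cfg_enable_* parameter is cleared rather than
 * failing the whole setup.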
4803 */ 4804 lpfc_request_features(phba, mboxq); 4805 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4806 if (unlikely(rc)) { 4807 rc = -EIO; 4808 goto out_free_mbox; 4809 } 4810 4811 /* 4812 * The port must support FCP initiator mode as this is the 4813 * only mode running in the host. 4814 */ 4815 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 4816 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4817 "0378 No support for fcpi mode.\n"); 4818 ftr_rsp++; 4819 } 4820 4821 /* 4822 * If the port cannot support the host's requested features 4823 * then turn off the global config parameters to disable the 4824 * feature in the driver. This is not a fatal error. 4825 */ 4826 if ((phba->cfg_enable_bg) && 4827 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 4828 ftr_rsp++; 4829 4830 if (phba->max_vpi && phba->cfg_enable_npiv && 4831 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 4832 ftr_rsp++; 4833 4834 if (ftr_rsp) { 4835 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4836 "0379 Feature Mismatch Data: x%08x %08x " 4837 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 4838 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 4839 phba->cfg_enable_npiv, phba->max_vpi); 4840 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 4841 phba->cfg_enable_bg = 0; 4842 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 4843 phba->cfg_enable_npiv = 0; 4844 } 4845 4846 /* These SLI3 features are assumed in SLI4 */ 4847 spin_lock_irq(&phba->hbalock); 4848 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 4849 spin_unlock_irq(&phba->hbalock); 4850 4851 /* Read the port's service parameters. */ 4852 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 4853 if (rc) { 4854 phba->link_state = LPFC_HBA_ERROR; 4855 rc = -ENOMEM; 4856 goto out_free_mbox; 4857 } 4858 4859 mboxq->vport = vport; 4860 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4861 mp = (struct lpfc_dmabuf *) mboxq->context1; 4862 if (rc == MBX_SUCCESS) { 4863 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 4864 rc = 0; 4865 } 4866 4867 /* 4868 * This memory was allocated by the lpfc_read_sparam routine. Release 4869 * it to the mbuf pool. 4870 */ 4871 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4872 kfree(mp); 4873 mboxq->context1 = NULL; 4874 if (unlikely(rc)) { 4875 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4876 "0382 READ_SPARAM command failed " 4877 "status %d, mbxStatus x%x\n", 4878 rc, bf_get(lpfc_mqe_status, mqe)); 4879 phba->link_state = LPFC_HBA_ERROR; 4880 rc = -EIO; 4881 goto out_free_mbox; 4882 } 4883 4884 if (phba->cfg_soft_wwnn) 4885 u64_to_wwn(phba->cfg_soft_wwnn, 4886 vport->fc_sparam.nodeName.u.wwn); 4887 if (phba->cfg_soft_wwpn) 4888 u64_to_wwn(phba->cfg_soft_wwpn, 4889 vport->fc_sparam.portName.u.wwn); 4890 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, 4891 sizeof(struct lpfc_name)); 4892 memcpy(&vport->fc_portname, &vport->fc_sparam.portName, 4893 sizeof(struct lpfc_name)); 4894 4895 /* Update the fc_host data structures with new wwn. 
*/ 4896 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 4897 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 4898 4899 /* Register SGL pool to the device using non-embedded mailbox command */ 4900 rc = lpfc_sli4_post_sgl_list(phba); 4901 if (unlikely(rc)) { 4902 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4903 "0582 Error %d during sgl post operation\n", 4904 rc); 4905 rc = -ENODEV; 4906 goto out_free_mbox; 4907 } 4908 4909 /* Register SCSI SGL pool to the device */ 4910 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 4911 if (unlikely(rc)) { 4912 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 4913 "0383 Error %d during scsi sgl post " 4914 "operation\n", rc); 4915 /* Some Scsi buffers were moved to the abort scsi list */ 4916 /* A pci function reset will repost them */ 4917 rc = -ENODEV; 4918 goto out_free_mbox; 4919 } 4920 4921 /* Post the rpi header region to the device. */ 4922 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 4923 if (unlikely(rc)) { 4924 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4925 "0393 Error %d during rpi post operation\n", 4926 rc); 4927 rc = -ENODEV; 4928 goto out_free_mbox; 4929 } 4930 4931 /* Set up all the queues to the device */ 4932 rc = lpfc_sli4_queue_setup(phba); 4933 if (unlikely(rc)) { 4934 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 4935 "0381 Error %d during queue setup.\n ", rc); 4936 goto out_stop_timers; 4937 } 4938 4939 /* Arm the CQs and then EQs on device */ 4940 lpfc_sli4_arm_cqeq_intr(phba); 4941 4942 /* Indicate device interrupt mode */ 4943 phba->sli4_hba.intr_enable = 1; 4944 4945 /* Allow asynchronous mailbox command to go through */ 4946 spin_lock_irq(&phba->hbalock); 4947 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4948 spin_unlock_irq(&phba->hbalock); 4949 4950 /* Post receive buffers to the device */ 4951 lpfc_sli4_rb_setup(phba); 4952 4953 /* Reset HBA FCF states after HBA reset */ 4954 phba->fcf.fcf_flag = 0; 4955 phba->fcf.current_rec.flag = 0; 4956 4957 /* Start the ELS watchdog timer */ 4958 mod_timer(&vport->els_tmofunc, 4959 jiffies + HZ * (phba->fc_ratov * 2)); 4960 4961 /* Start heart beat timer */ 4962 mod_timer(&phba->hb_tmofunc, 4963 jiffies + HZ * LPFC_HB_MBOX_INTERVAL); 4964 phba->hb_outstanding = 0; 4965 phba->last_completion_time = jiffies; 4966 4967 /* Start error attention (ERATT) polling timer */ 4968 mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); 4969 4970 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4971 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4972 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4973 if (!rc) { 4974 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4975 "2829 This device supports " 4976 "Advanced Error Reporting (AER)\n"); 4977 spin_lock_irq(&phba->hbalock); 4978 phba->hba_flag |= HBA_AER_ENABLED; 4979 spin_unlock_irq(&phba->hbalock); 4980 } else { 4981 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4982 "2830 This device does not support " 4983 "Advanced Error Reporting (AER)\n"); 4984 phba->cfg_aer_support = 0; 4985 } 4986 } 4987 4988 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 4989 /* 4990 * The FC Port needs to register FCFI (index 0) 4991 */ 4992 lpfc_reg_fcfi(phba, mboxq); 4993 mboxq->vport = phba->pport; 4994 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4995 if (rc == MBX_SUCCESS) 4996 rc = 0; 4997 else 4998 goto out_unset_queue; 4999 } 5000 /* 5001 * The port is ready, set the host's link state to LINK_DOWN 5002 * in preparation for link interrupts. 
5003 */ 5004 spin_lock_irq(&phba->hbalock); 5005 phba->link_state = LPFC_LINK_DOWN; 5006 spin_unlock_irq(&phba->hbalock); 5007 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 5008 out_unset_queue: 5009 /* Unset all the queues set up in this routine when error out */ 5010 if (rc) 5011 lpfc_sli4_queue_unset(phba); 5012 out_stop_timers: 5013 if (rc) 5014 lpfc_stop_hba_timers(phba); 5015 out_free_mbox: 5016 mempool_free(mboxq, phba->mbox_mem_pool); 5017 return rc; 5018 } 5019 5020 /** 5021 * lpfc_mbox_timeout - Timeout call back function for mbox timer 5022 * @ptr: context object - pointer to hba structure. 5023 * 5024 * This is the callback function for mailbox timer. The mailbox 5025 * timer is armed when a new mailbox command is issued and the timer 5026 * is deleted when the mailbox complete. The function is called by 5027 * the kernel timer code when a mailbox does not complete within 5028 * expected time. This function wakes up the worker thread to 5029 * process the mailbox timeout and returns. All the processing is 5030 * done by the worker thread function lpfc_mbox_timeout_handler. 5031 **/ 5032 void 5033 lpfc_mbox_timeout(unsigned long ptr) 5034 { 5035 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 5036 unsigned long iflag; 5037 uint32_t tmo_posted; 5038 5039 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 5040 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 5041 if (!tmo_posted) 5042 phba->pport->work_port_events |= WORKER_MBOX_TMO; 5043 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 5044 5045 if (!tmo_posted) 5046 lpfc_worker_wake_up(phba); 5047 return; 5048 } 5049 5050 5051 /** 5052 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 5053 * @phba: Pointer to HBA context object. 5054 * 5055 * This function is called from worker thread when a mailbox command times out. 5056 * The caller is not required to hold any locks. This function will reset the 5057 * HBA and recover all the pending commands. 5058 **/ 5059 void 5060 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 5061 { 5062 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 5063 MAILBOX_t *mb = &pmbox->u.mb; 5064 struct lpfc_sli *psli = &phba->sli; 5065 struct lpfc_sli_ring *pring; 5066 5067 /* Check the pmbox pointer first. There is a race condition 5068 * between the mbox timeout handler getting executed in the 5069 * worklist and the mailbox actually completing. When this 5070 * race condition occurs, the mbox_active will be NULL. 5071 */ 5072 spin_lock_irq(&phba->hbalock); 5073 if (pmbox == NULL) { 5074 lpfc_printf_log(phba, KERN_WARNING, 5075 LOG_MBOX | LOG_SLI, 5076 "0353 Active Mailbox cleared - mailbox timeout " 5077 "exiting\n"); 5078 spin_unlock_irq(&phba->hbalock); 5079 return; 5080 } 5081 5082 /* Mbox cmd <mbxCommand> timeout */ 5083 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5084 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 5085 mb->mbxCommand, 5086 phba->pport->port_state, 5087 phba->sli.sli_flag, 5088 phba->sli.mbox_active); 5089 spin_unlock_irq(&phba->hbalock); 5090 5091 /* Setting state unknown so lpfc_sli_abort_iocb_ring 5092 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 5093 * it to fail all oustanding SCSI IO. 
5094 */ 5095 spin_lock_irq(&phba->pport->work_port_lock); 5096 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 5097 spin_unlock_irq(&phba->pport->work_port_lock); 5098 spin_lock_irq(&phba->hbalock); 5099 phba->link_state = LPFC_LINK_UNKNOWN; 5100 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 5101 spin_unlock_irq(&phba->hbalock); 5102 5103 pring = &psli->ring[psli->fcp_ring]; 5104 lpfc_sli_abort_iocb_ring(phba, pring); 5105 5106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5107 "0345 Resetting board due to mailbox timeout\n"); 5108 5109 /* Reset the HBA device */ 5110 lpfc_reset_hba(phba); 5111 } 5112 5113 /** 5114 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 5115 * @phba: Pointer to HBA context object. 5116 * @pmbox: Pointer to mailbox object. 5117 * @flag: Flag indicating how the mailbox need to be processed. 5118 * 5119 * This function is called by discovery code and HBA management code 5120 * to submit a mailbox command to firmware with SLI-3 interface spec. This 5121 * function gets the hbalock to protect the data structures. 5122 * The mailbox command can be submitted in polling mode, in which case 5123 * this function will wait in a polling loop for the completion of the 5124 * mailbox. 5125 * If the mailbox is submitted in no_wait mode (not polling) the 5126 * function will submit the command and returns immediately without waiting 5127 * for the mailbox completion. The no_wait is supported only when HBA 5128 * is in SLI2/SLI3 mode - interrupts are enabled. 5129 * The SLI interface allows only one mailbox pending at a time. If the 5130 * mailbox is issued in polling mode and there is already a mailbox 5131 * pending, then the function will return an error. If the mailbox is issued 5132 * in NO_WAIT mode and there is a mailbox pending already, the function 5133 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 5134 * The sli layer owns the mailbox object until the completion of mailbox 5135 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 5136 * return codes the caller owns the mailbox command after the return of 5137 * the function. 5138 **/ 5139 static int 5140 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 5141 uint32_t flag) 5142 { 5143 MAILBOX_t *mb; 5144 struct lpfc_sli *psli = &phba->sli; 5145 uint32_t status, evtctr; 5146 uint32_t ha_copy; 5147 int i; 5148 unsigned long timeout; 5149 unsigned long drvr_flag = 0; 5150 uint32_t word0, ldata; 5151 void __iomem *to_slim; 5152 int processing_queue = 0; 5153 5154 spin_lock_irqsave(&phba->hbalock, drvr_flag); 5155 if (!pmbox) { 5156 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5157 /* processing mbox queue from intr_handler */ 5158 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5159 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5160 return MBX_SUCCESS; 5161 } 5162 processing_queue = 1; 5163 pmbox = lpfc_mbox_get(phba); 5164 if (!pmbox) { 5165 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5166 return MBX_SUCCESS; 5167 } 5168 } 5169 5170 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 5171 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 5172 if(!pmbox->vport) { 5173 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5174 lpfc_printf_log(phba, KERN_ERR, 5175 LOG_MBOX | LOG_VPORT, 5176 "1806 Mbox x%x failed. No vport\n", 5177 pmbox->u.mb.mbxCommand); 5178 dump_stack(); 5179 goto out_not_finished; 5180 } 5181 } 5182 5183 /* If the PCI channel is in offline state, do not post mbox. 
*/ 5184 if (unlikely(pci_channel_offline(phba->pcidev))) { 5185 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5186 goto out_not_finished; 5187 } 5188 5189 /* If HBA has a deferred error attention, fail the iocb. */ 5190 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 5191 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5192 goto out_not_finished; 5193 } 5194 5195 psli = &phba->sli; 5196 5197 mb = &pmbox->u.mb; 5198 status = MBX_SUCCESS; 5199 5200 if (phba->link_state == LPFC_HBA_ERROR) { 5201 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5202 5203 /* Mbox command <mbxCommand> cannot issue */ 5204 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5205 "(%d):0311 Mailbox command x%x cannot " 5206 "issue Data: x%x x%x\n", 5207 pmbox->vport ? pmbox->vport->vpi : 0, 5208 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 5209 goto out_not_finished; 5210 } 5211 5212 if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && 5213 !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { 5214 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5215 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5216 "(%d):2528 Mailbox command x%x cannot " 5217 "issue Data: x%x x%x\n", 5218 pmbox->vport ? pmbox->vport->vpi : 0, 5219 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 5220 goto out_not_finished; 5221 } 5222 5223 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5224 /* Polling for a mbox command when another one is already active 5225 * is not allowed in SLI. Also, the driver must have established 5226 * SLI2 mode to queue and process multiple mbox commands. 5227 */ 5228 5229 if (flag & MBX_POLL) { 5230 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5231 5232 /* Mbox command <mbxCommand> cannot issue */ 5233 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5234 "(%d):2529 Mailbox command x%x " 5235 "cannot issue Data: x%x x%x\n", 5236 pmbox->vport ? pmbox->vport->vpi : 0, 5237 pmbox->u.mb.mbxCommand, 5238 psli->sli_flag, flag); 5239 goto out_not_finished; 5240 } 5241 5242 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 5243 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5244 /* Mbox command <mbxCommand> cannot issue */ 5245 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5246 "(%d):2530 Mailbox command x%x " 5247 "cannot issue Data: x%x x%x\n", 5248 pmbox->vport ? pmbox->vport->vpi : 0, 5249 pmbox->u.mb.mbxCommand, 5250 psli->sli_flag, flag); 5251 goto out_not_finished; 5252 } 5253 5254 /* Another mailbox command is still being processed, queue this 5255 * command to be processed later. 5256 */ 5257 lpfc_mbox_put(phba, pmbox); 5258 5259 /* Mbox cmd issue - BUSY */ 5260 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5261 "(%d):0308 Mbox cmd issue - BUSY Data: " 5262 "x%x x%x x%x x%x\n", 5263 pmbox->vport ? 
pmbox->vport->vpi : 0xffffff, 5264 mb->mbxCommand, phba->pport->port_state, 5265 psli->sli_flag, flag); 5266 5267 psli->slistat.mbox_busy++; 5268 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5269 5270 if (pmbox->vport) { 5271 lpfc_debugfs_disc_trc(pmbox->vport, 5272 LPFC_DISC_TRC_MBOX_VPORT, 5273 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 5274 (uint32_t)mb->mbxCommand, 5275 mb->un.varWords[0], mb->un.varWords[1]); 5276 } 5277 else { 5278 lpfc_debugfs_disc_trc(phba->pport, 5279 LPFC_DISC_TRC_MBOX, 5280 "MBOX Bsy: cmd:x%x mb:x%x x%x", 5281 (uint32_t)mb->mbxCommand, 5282 mb->un.varWords[0], mb->un.varWords[1]); 5283 } 5284 5285 return MBX_BUSY; 5286 } 5287 5288 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5289 5290 /* If we are not polling, we MUST be in SLI2 mode */ 5291 if (flag != MBX_POLL) { 5292 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 5293 (mb->mbxCommand != MBX_KILL_BOARD)) { 5294 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5295 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5296 /* Mbox command <mbxCommand> cannot issue */ 5297 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5298 "(%d):2531 Mailbox command x%x " 5299 "cannot issue Data: x%x x%x\n", 5300 pmbox->vport ? pmbox->vport->vpi : 0, 5301 pmbox->u.mb.mbxCommand, 5302 psli->sli_flag, flag); 5303 goto out_not_finished; 5304 } 5305 /* timeout active mbox command */ 5306 mod_timer(&psli->mbox_tmo, (jiffies + 5307 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 5308 } 5309 5310 /* Mailbox cmd <cmd> issue */ 5311 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5312 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 5313 "x%x\n", 5314 pmbox->vport ? pmbox->vport->vpi : 0, 5315 mb->mbxCommand, phba->pport->port_state, 5316 psli->sli_flag, flag); 5317 5318 if (mb->mbxCommand != MBX_HEARTBEAT) { 5319 if (pmbox->vport) { 5320 lpfc_debugfs_disc_trc(pmbox->vport, 5321 LPFC_DISC_TRC_MBOX_VPORT, 5322 "MBOX Send vport: cmd:x%x mb:x%x x%x", 5323 (uint32_t)mb->mbxCommand, 5324 mb->un.varWords[0], mb->un.varWords[1]); 5325 } 5326 else { 5327 lpfc_debugfs_disc_trc(phba->pport, 5328 LPFC_DISC_TRC_MBOX, 5329 "MBOX Send: cmd:x%x mb:x%x x%x", 5330 (uint32_t)mb->mbxCommand, 5331 mb->un.varWords[0], mb->un.varWords[1]); 5332 } 5333 } 5334 5335 psli->slistat.mbox_cmd++; 5336 evtctr = psli->slistat.mbox_event; 5337 5338 /* next set own bit for the adapter and copy over command word */ 5339 mb->mbxOwner = OWN_CHIP; 5340 5341 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5342 /* Populate mbox extension offset word. */ 5343 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 5344 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 5345 = (uint8_t *)phba->mbox_ext 5346 - (uint8_t *)phba->mbox; 5347 } 5348 5349 /* Copy the mailbox extension data */ 5350 if (pmbox->in_ext_byte_len && pmbox->context2) { 5351 lpfc_sli_pcimem_bcopy(pmbox->context2, 5352 (uint8_t *)phba->mbox_ext, 5353 pmbox->in_ext_byte_len); 5354 } 5355 /* Copy command data to host SLIM area */ 5356 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 5357 } else { 5358 /* Populate mbox extension offset word. 
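 * In this non-SLI2-active path the extension data lives in adapter SLIM,
 * so the offset written is the fixed MAILBOX_HBA_EXT_OFFSET rather than
 * the host-memory offset computed in the branch above.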
*/ 5359 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 5360 *(((uint32_t *)mb) + pmbox->mbox_offset_word) 5361 = MAILBOX_HBA_EXT_OFFSET; 5362 5363 /* Copy the mailbox extension data */ 5364 if (pmbox->in_ext_byte_len && pmbox->context2) { 5365 lpfc_memcpy_to_slim(phba->MBslimaddr + 5366 MAILBOX_HBA_EXT_OFFSET, 5367 pmbox->context2, pmbox->in_ext_byte_len); 5368 5369 } 5370 if (mb->mbxCommand == MBX_CONFIG_PORT) { 5371 /* copy command data into host mbox for cmpl */ 5372 lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); 5373 } 5374 5375 /* First copy mbox command data to HBA SLIM, skip past first 5376 word */ 5377 to_slim = phba->MBslimaddr + sizeof (uint32_t); 5378 lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0], 5379 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 5380 5381 /* Next copy over first word, with mbxOwner set */ 5382 ldata = *((uint32_t *)mb); 5383 to_slim = phba->MBslimaddr; 5384 writel(ldata, to_slim); 5385 readl(to_slim); /* flush */ 5386 5387 if (mb->mbxCommand == MBX_CONFIG_PORT) { 5388 /* switch over to host mailbox */ 5389 psli->sli_flag |= LPFC_SLI_ACTIVE; 5390 } 5391 } 5392 5393 wmb(); 5394 5395 switch (flag) { 5396 case MBX_NOWAIT: 5397 /* Set up reference to mailbox command */ 5398 psli->mbox_active = pmbox; 5399 /* Interrupt board to do it */ 5400 writel(CA_MBATT, phba->CAregaddr); 5401 readl(phba->CAregaddr); /* flush */ 5402 /* Don't wait for it to finish, just return */ 5403 break; 5404 5405 case MBX_POLL: 5406 /* Set up null reference to mailbox command */ 5407 psli->mbox_active = NULL; 5408 /* Interrupt board to do it */ 5409 writel(CA_MBATT, phba->CAregaddr); 5410 readl(phba->CAregaddr); /* flush */ 5411 5412 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5413 /* First read mbox status word */ 5414 word0 = *((uint32_t *)phba->mbox); 5415 word0 = le32_to_cpu(word0); 5416 } else { 5417 /* First read mbox status word */ 5418 word0 = readl(phba->MBslimaddr); 5419 } 5420 5421 /* Read the HBA Host Attention Register */ 5422 ha_copy = readl(phba->HAregaddr); 5423 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 5424 mb->mbxCommand) * 5425 1000) + jiffies; 5426 i = 0; 5427 /* Wait for command to complete */ 5428 while (((word0 & OWN_CHIP) == OWN_CHIP) || 5429 (!(ha_copy & HA_MBATT) && 5430 (phba->link_state > LPFC_WARM_START))) { 5431 if (time_after(jiffies, timeout)) { 5432 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5433 spin_unlock_irqrestore(&phba->hbalock, 5434 drvr_flag); 5435 goto out_not_finished; 5436 } 5437 5438 /* Check if we took a mbox interrupt while we were 5439 polling */ 5440 if (((word0 & OWN_CHIP) != OWN_CHIP) 5441 && (evtctr != psli->slistat.mbox_event)) 5442 break; 5443 5444 if (i++ > 10) { 5445 spin_unlock_irqrestore(&phba->hbalock, 5446 drvr_flag); 5447 msleep(1); 5448 spin_lock_irqsave(&phba->hbalock, drvr_flag); 5449 } 5450 5451 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 5452 /* First copy command data */ 5453 word0 = *((uint32_t *)phba->mbox); 5454 word0 = le32_to_cpu(word0); 5455 if (mb->mbxCommand == MBX_CONFIG_PORT) { 5456 MAILBOX_t *slimmb; 5457 uint32_t slimword0; 5458 /* Check real SLIM for any errors */ 5459 slimword0 = readl(phba->MBslimaddr); 5460 slimmb = (MAILBOX_t *) & slimword0; 5461 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 5462 && slimmb->mbxStatus) { 5463 psli->sli_flag &= 5464 ~LPFC_SLI_ACTIVE; 5465 word0 = slimword0; 5466 } 5467 } 5468 } else { 5469 /* First copy command data */ 5470 word0 = readl(phba->MBslimaddr); 5471 } 5472 /* Read the HBA Host Attention Register */ 5473 ha_copy = readl(phba->HAregaddr); 5474 } 5475 5476 if 
(psli->sli_flag & LPFC_SLI_ACTIVE) { 5477 /* copy results back to user */ 5478 lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); 5479 /* Copy the mailbox extension data */ 5480 if (pmbox->out_ext_byte_len && pmbox->context2) { 5481 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 5482 pmbox->context2, 5483 pmbox->out_ext_byte_len); 5484 } 5485 } else { 5486 /* First copy command data */ 5487 lpfc_memcpy_from_slim(mb, phba->MBslimaddr, 5488 MAILBOX_CMD_SIZE); 5489 /* Copy the mailbox extension data */ 5490 if (pmbox->out_ext_byte_len && pmbox->context2) { 5491 lpfc_memcpy_from_slim(pmbox->context2, 5492 phba->MBslimaddr + 5493 MAILBOX_HBA_EXT_OFFSET, 5494 pmbox->out_ext_byte_len); 5495 } 5496 } 5497 5498 writel(HA_MBATT, phba->HAregaddr); 5499 readl(phba->HAregaddr); /* flush */ 5500 5501 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5502 status = mb->mbxStatus; 5503 } 5504 5505 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 5506 return status; 5507 5508 out_not_finished: 5509 if (processing_queue) { 5510 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 5511 lpfc_mbox_cmpl_put(phba, pmbox); 5512 } 5513 return MBX_NOT_FINISHED; 5514 } 5515 5516 /** 5517 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 5518 * @phba: Pointer to HBA context object. 5519 * 5520 * The function blocks the posting of SLI4 asynchronous mailbox commands from 5521 * the driver internal pending mailbox queue. It will then try to wait out the 5522 * possible outstanding mailbox command before returning. 5523 * 5524 * Returns: 5525 * 0 - the outstanding mailbox command completed; otherwise, the wait for 5526 * the outstanding mailbox command timed out. 5527 **/ 5528 static int 5529 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 5530 { 5531 struct lpfc_sli *psli = &phba->sli; 5532 uint8_t actcmd = MBX_HEARTBEAT; 5533 int rc = 0; 5534 unsigned long timeout; 5535 5536 /* Mark the asynchronous mailbox command posting as blocked */ 5537 spin_lock_irq(&phba->hbalock); 5538 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 5539 if (phba->sli.mbox_active) 5540 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 5541 spin_unlock_irq(&phba->hbalock); 5542 /* Determine how long we might wait for the active mailbox 5543 * command to be gracefully completed by firmware. 5544 */ 5545 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 5546 jiffies; 5547 /* Wait for the outstanding mailbox command to complete */ 5548 while (phba->sli.mbox_active) { 5549 /* Check active mailbox complete status every 2ms */ 5550 msleep(2); 5551 if (time_after(jiffies, timeout)) { 5552 /* Timeout, mark the outstanding cmd as not complete */ 5553 rc = 1; 5554 break; 5555 } 5556 } 5557 5558 /* Cannot cleanly block async mailbox commands, fail the request */ 5559 if (rc) { 5560 spin_lock_irq(&phba->hbalock); 5561 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5562 spin_unlock_irq(&phba->hbalock); 5563 } 5564 return rc; 5565 } 5566 5567 /** 5568 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 5569 * @phba: Pointer to HBA context object. 5570 * 5571 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 5572 * commands from the driver internal pending mailbox queue. It makes sure 5573 * that there is no outstanding mailbox command before resuming posting 5574 * asynchronous mailbox commands. If, for any reason, there is an outstanding 5575 * mailbox command, it will try to wait it out before resuming asynchronous 5576 * mailbox command posting.
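 *
 * As a rough usage sketch (assuming a prepared @phba and a fully built
 * mailbox @mboxq, as in the MBX_POLL path of lpfc_sli_issue_mbox_s4()
 * below), the block/unblock pair brackets a synchronous post:
 *
 *	if (lpfc_sli4_async_mbox_block(phba) == 0) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}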
5577 **/ 5578 static void 5579 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 5580 { 5581 struct lpfc_sli *psli = &phba->sli; 5582 5583 spin_lock_irq(&phba->hbalock); 5584 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5585 /* Asynchronous mailbox posting is not blocked, do nothing */ 5586 spin_unlock_irq(&phba->hbalock); 5587 return; 5588 } 5589 5590 /* Outstanding synchronous mailbox command is guaranteed to be done, 5591 * successful or timeout, after timing-out the outstanding mailbox 5592 * command shall always be removed, so just unblock posting async 5593 * mailbox command and resume 5594 */ 5595 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5596 spin_unlock_irq(&phba->hbalock); 5597 5598 /* wake up worker thread to post asynchronlous mailbox command */ 5599 lpfc_worker_wake_up(phba); 5600 } 5601 5602 /** 5603 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 5604 * @phba: Pointer to HBA context object. 5605 * @mboxq: Pointer to mailbox object. 5606 * 5607 * The function posts a mailbox to the port. The mailbox is expected 5608 * to be comletely filled in and ready for the port to operate on it. 5609 * This routine executes a synchronous completion operation on the 5610 * mailbox by polling for its completion. 5611 * 5612 * The caller must not be holding any locks when calling this routine. 5613 * 5614 * Returns: 5615 * MBX_SUCCESS - mailbox posted successfully 5616 * Any of the MBX error values. 5617 **/ 5618 static int 5619 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 5620 { 5621 int rc = MBX_SUCCESS; 5622 unsigned long iflag; 5623 uint32_t db_ready; 5624 uint32_t mcqe_status; 5625 uint32_t mbx_cmnd; 5626 unsigned long timeout; 5627 struct lpfc_sli *psli = &phba->sli; 5628 struct lpfc_mqe *mb = &mboxq->u.mqe; 5629 struct lpfc_bmbx_create *mbox_rgn; 5630 struct dma_address *dma_address; 5631 struct lpfc_register bmbx_reg; 5632 5633 /* 5634 * Only one mailbox can be active to the bootstrap mailbox region 5635 * at a time and there is no queueing provided. 5636 */ 5637 spin_lock_irqsave(&phba->hbalock, iflag); 5638 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5639 spin_unlock_irqrestore(&phba->hbalock, iflag); 5640 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5641 "(%d):2532 Mailbox command x%x (x%x) " 5642 "cannot issue Data: x%x x%x\n", 5643 mboxq->vport ? mboxq->vport->vpi : 0, 5644 mboxq->u.mb.mbxCommand, 5645 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5646 psli->sli_flag, MBX_POLL); 5647 return MBXERR_ERROR; 5648 } 5649 /* The server grabs the token and owns it until release */ 5650 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5651 phba->sli.mbox_active = mboxq; 5652 spin_unlock_irqrestore(&phba->hbalock, iflag); 5653 5654 /* 5655 * Initialize the bootstrap memory region to avoid stale data areas 5656 * in the mailbox post. Then copy the caller's mailbox contents to 5657 * the bmbx mailbox region. 5658 */ 5659 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 5660 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 5661 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 5662 sizeof(struct lpfc_mqe)); 5663 5664 /* Post the high mailbox dma address to the port and wait for ready. 
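 * The address is handed over in two halves: the high word is written
 * first and the lpfc_bmbx_rdy bit is polled (in 2 ms steps) until the
 * port reports ready or the mailbox timeout expires, then the low word
 * is written and the same poll is repeated.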
*/ 5665 dma_address = &phba->sli4_hba.bmbx.dma_address; 5666 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 5667 5668 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 5669 * 1000) + jiffies; 5670 do { 5671 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 5672 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 5673 if (!db_ready) 5674 msleep(2); 5675 5676 if (time_after(jiffies, timeout)) { 5677 rc = MBXERR_ERROR; 5678 goto exit; 5679 } 5680 } while (!db_ready); 5681 5682 /* Post the low mailbox dma address to the port. */ 5683 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 5684 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 5685 * 1000) + jiffies; 5686 do { 5687 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 5688 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 5689 if (!db_ready) 5690 msleep(2); 5691 5692 if (time_after(jiffies, timeout)) { 5693 rc = MBXERR_ERROR; 5694 goto exit; 5695 } 5696 } while (!db_ready); 5697 5698 /* 5699 * Read the CQ to ensure the mailbox has completed. 5700 * If so, update the mailbox status so that the upper layers 5701 * can complete the request normally. 5702 */ 5703 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 5704 sizeof(struct lpfc_mqe)); 5705 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 5706 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 5707 sizeof(struct lpfc_mcqe)); 5708 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 5709 5710 /* Prefix the mailbox status with range x4000 to note SLI4 status. */ 5711 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 5712 bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); 5713 rc = MBXERR_ERROR; 5714 } else 5715 lpfc_sli4_swap_str(phba, mboxq); 5716 5717 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5718 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 5719 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 5720 " x%x x%x CQ: x%x x%x x%x x%x\n", 5721 mboxq->vport ? mboxq->vport->vpi : 0, 5722 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), 5723 bf_get(lpfc_mqe_status, mb), 5724 mb->un.mb_words[0], mb->un.mb_words[1], 5725 mb->un.mb_words[2], mb->un.mb_words[3], 5726 mb->un.mb_words[4], mb->un.mb_words[5], 5727 mb->un.mb_words[6], mb->un.mb_words[7], 5728 mb->un.mb_words[8], mb->un.mb_words[9], 5729 mb->un.mb_words[10], mb->un.mb_words[11], 5730 mb->un.mb_words[12], mboxq->mcqe.word0, 5731 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5732 mboxq->mcqe.trailer); 5733 exit: 5734 /* We are holding the token, no needed for lock when release */ 5735 spin_lock_irqsave(&phba->hbalock, iflag); 5736 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5737 phba->sli.mbox_active = NULL; 5738 spin_unlock_irqrestore(&phba->hbalock, iflag); 5739 return rc; 5740 } 5741 5742 /** 5743 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 5744 * @phba: Pointer to HBA context object. 5745 * @pmbox: Pointer to mailbox object. 5746 * @flag: Flag indicating how the mailbox need to be processed. 5747 * 5748 * This function is called by discovery code and HBA management code to submit 5749 * a mailbox command to firmware with SLI-4 interface spec. 5750 * 5751 * Return codes the caller owns the mailbox command after the return of the 5752 * function. 
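 *
 * Three paths are implemented below: with interrupts disabled only
 * MBX_POLL is honoured and the command is posted synchronously through
 * the bootstrap mailbox; with interrupts enabled an MBX_POLL request
 * first blocks asynchronous posting via lpfc_sli4_async_mbox_block()
 * and then posts synchronously; an MBX_NOWAIT request is queued on the
 * driver internal mailbox FIFO, the worker thread is woken to post it,
 * and MBX_BUSY is returned to the caller.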
5753 **/ 5754 static int 5755 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5756 uint32_t flag) 5757 { 5758 struct lpfc_sli *psli = &phba->sli; 5759 unsigned long iflags; 5760 int rc; 5761 5762 rc = lpfc_mbox_dev_check(phba); 5763 if (unlikely(rc)) { 5764 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5765 "(%d):2544 Mailbox command x%x (x%x) " 5766 "cannot issue Data: x%x x%x\n", 5767 mboxq->vport ? mboxq->vport->vpi : 0, 5768 mboxq->u.mb.mbxCommand, 5769 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5770 psli->sli_flag, flag); 5771 goto out_not_finished; 5772 } 5773 5774 /* Detect polling mode and jump to a handler */ 5775 if (!phba->sli4_hba.intr_enable) { 5776 if (flag == MBX_POLL) 5777 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 5778 else 5779 rc = -EIO; 5780 if (rc != MBX_SUCCESS) 5781 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5782 "(%d):2541 Mailbox command x%x " 5783 "(x%x) cannot issue Data: x%x x%x\n", 5784 mboxq->vport ? mboxq->vport->vpi : 0, 5785 mboxq->u.mb.mbxCommand, 5786 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5787 psli->sli_flag, flag); 5788 return rc; 5789 } else if (flag == MBX_POLL) { 5790 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 5791 "(%d):2542 Try to issue mailbox command " 5792 "x%x (x%x) synchronously ahead of async" 5793 "mailbox command queue: x%x x%x\n", 5794 mboxq->vport ? mboxq->vport->vpi : 0, 5795 mboxq->u.mb.mbxCommand, 5796 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5797 psli->sli_flag, flag); 5798 /* Try to block the asynchronous mailbox posting */ 5799 rc = lpfc_sli4_async_mbox_block(phba); 5800 if (!rc) { 5801 /* Successfully blocked, now issue sync mbox cmd */ 5802 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 5803 if (rc != MBX_SUCCESS) 5804 lpfc_printf_log(phba, KERN_ERR, 5805 LOG_MBOX | LOG_SLI, 5806 "(%d):2597 Mailbox command " 5807 "x%x (x%x) cannot issue " 5808 "Data: x%x x%x\n", 5809 mboxq->vport ? 5810 mboxq->vport->vpi : 0, 5811 mboxq->u.mb.mbxCommand, 5812 lpfc_sli4_mbox_opcode_get(phba, 5813 mboxq), 5814 psli->sli_flag, flag); 5815 /* Unblock the async mailbox posting afterward */ 5816 lpfc_sli4_async_mbox_unblock(phba); 5817 } 5818 return rc; 5819 } 5820 5821 /* Now, interrupt mode asynchrous mailbox command */ 5822 rc = lpfc_mbox_cmd_check(phba, mboxq); 5823 if (rc) { 5824 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5825 "(%d):2543 Mailbox command x%x (x%x) " 5826 "cannot issue Data: x%x x%x\n", 5827 mboxq->vport ? mboxq->vport->vpi : 0, 5828 mboxq->u.mb.mbxCommand, 5829 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5830 psli->sli_flag, flag); 5831 goto out_not_finished; 5832 } 5833 5834 /* Put the mailbox command to the driver internal FIFO */ 5835 psli->slistat.mbox_busy++; 5836 spin_lock_irqsave(&phba->hbalock, iflags); 5837 lpfc_mbox_put(phba, mboxq); 5838 spin_unlock_irqrestore(&phba->hbalock, iflags); 5839 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5840 "(%d):0354 Mbox cmd issue - Enqueue Data: " 5841 "x%x (x%x) x%x x%x x%x\n", 5842 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 5843 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5844 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5845 phba->pport->port_state, 5846 psli->sli_flag, MBX_NOWAIT); 5847 /* Wake up worker thread to transport mailbox command from head */ 5848 lpfc_worker_wake_up(phba); 5849 5850 return MBX_BUSY; 5851 5852 out_not_finished: 5853 return MBX_NOT_FINISHED; 5854 } 5855 5856 /** 5857 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 5858 * @phba: Pointer to HBA context object. 
5859 * 5860 * This function is called by worker thread to send a mailbox command to 5861 * SLI4 HBA firmware. 5862 * 5863 **/ 5864 int 5865 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 5866 { 5867 struct lpfc_sli *psli = &phba->sli; 5868 LPFC_MBOXQ_t *mboxq; 5869 int rc = MBX_SUCCESS; 5870 unsigned long iflags; 5871 struct lpfc_mqe *mqe; 5872 uint32_t mbx_cmnd; 5873 5874 /* Check interrupt mode before post async mailbox command */ 5875 if (unlikely(!phba->sli4_hba.intr_enable)) 5876 return MBX_NOT_FINISHED; 5877 5878 /* Check for mailbox command service token */ 5879 spin_lock_irqsave(&phba->hbalock, iflags); 5880 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 5881 spin_unlock_irqrestore(&phba->hbalock, iflags); 5882 return MBX_NOT_FINISHED; 5883 } 5884 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 5885 spin_unlock_irqrestore(&phba->hbalock, iflags); 5886 return MBX_NOT_FINISHED; 5887 } 5888 if (unlikely(phba->sli.mbox_active)) { 5889 spin_unlock_irqrestore(&phba->hbalock, iflags); 5890 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5891 "0384 There is pending active mailbox cmd\n"); 5892 return MBX_NOT_FINISHED; 5893 } 5894 /* Take the mailbox command service token */ 5895 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 5896 5897 /* Get the next mailbox command from head of queue */ 5898 mboxq = lpfc_mbox_get(phba); 5899 5900 /* If no more mailbox command waiting for post, we're done */ 5901 if (!mboxq) { 5902 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5903 spin_unlock_irqrestore(&phba->hbalock, iflags); 5904 return MBX_SUCCESS; 5905 } 5906 phba->sli.mbox_active = mboxq; 5907 spin_unlock_irqrestore(&phba->hbalock, iflags); 5908 5909 /* Check device readiness for posting mailbox command */ 5910 rc = lpfc_mbox_dev_check(phba); 5911 if (unlikely(rc)) 5912 /* Driver clean routine will clean up pending mailbox */ 5913 goto out_not_finished; 5914 5915 /* Prepare the mbox command to be posted */ 5916 mqe = &mboxq->u.mqe; 5917 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 5918 5919 /* Start timer for the mbox_tmo and log some mailbox post messages */ 5920 mod_timer(&psli->mbox_tmo, (jiffies + 5921 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); 5922 5923 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5924 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " 5925 "x%x x%x\n", 5926 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 5927 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5928 phba->pport->port_state, psli->sli_flag); 5929 5930 if (mbx_cmnd != MBX_HEARTBEAT) { 5931 if (mboxq->vport) { 5932 lpfc_debugfs_disc_trc(mboxq->vport, 5933 LPFC_DISC_TRC_MBOX_VPORT, 5934 "MBOX Send vport: cmd:x%x mb:x%x x%x", 5935 mbx_cmnd, mqe->un.mb_words[0], 5936 mqe->un.mb_words[1]); 5937 } else { 5938 lpfc_debugfs_disc_trc(phba->pport, 5939 LPFC_DISC_TRC_MBOX, 5940 "MBOX Send: cmd:x%x mb:x%x x%x", 5941 mbx_cmnd, mqe->un.mb_words[0], 5942 mqe->un.mb_words[1]); 5943 } 5944 } 5945 psli->slistat.mbox_cmd++; 5946 5947 /* Post the mailbox command to the port */ 5948 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 5949 if (rc != MBX_SUCCESS) { 5950 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 5951 "(%d):2533 Mailbox command x%x (x%x) " 5952 "cannot issue Data: x%x x%x\n", 5953 mboxq->vport ? 
mboxq->vport->vpi : 0, 5954 mboxq->u.mb.mbxCommand, 5955 lpfc_sli4_mbox_opcode_get(phba, mboxq), 5956 psli->sli_flag, MBX_NOWAIT); 5957 goto out_not_finished; 5958 } 5959 5960 return rc; 5961 5962 out_not_finished: 5963 spin_lock_irqsave(&phba->hbalock, iflags); 5964 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 5965 __lpfc_mbox_cmpl_put(phba, mboxq); 5966 /* Release the token */ 5967 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 5968 phba->sli.mbox_active = NULL; 5969 spin_unlock_irqrestore(&phba->hbalock, iflags); 5970 5971 return MBX_NOT_FINISHED; 5972 } 5973 5974 /** 5975 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 5976 * @phba: Pointer to HBA context object. 5977 * @pmbox: Pointer to mailbox object. 5978 * @flag: Flag indicating how the mailbox need to be processed. 5979 * 5980 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 5981 * the API jump table function pointer from the lpfc_hba struct. 5982 * 5983 * Return codes the caller owns the mailbox command after the return of the 5984 * function. 5985 **/ 5986 int 5987 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 5988 { 5989 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 5990 } 5991 5992 /** 5993 * lpfc_mbox_api_table_setup - Set up mbox api fucntion jump table 5994 * @phba: The hba struct for which this call is being executed. 5995 * @dev_grp: The HBA PCI-Device group number. 5996 * 5997 * This routine sets up the mbox interface API function jump table in @phba 5998 * struct. 5999 * Returns: 0 - success, -ENODEV - failure. 6000 **/ 6001 int 6002 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6003 { 6004 6005 switch (dev_grp) { 6006 case LPFC_PCI_DEV_LP: 6007 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 6008 phba->lpfc_sli_handle_slow_ring_event = 6009 lpfc_sli_handle_slow_ring_event_s3; 6010 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 6011 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 6012 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 6013 break; 6014 case LPFC_PCI_DEV_OC: 6015 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 6016 phba->lpfc_sli_handle_slow_ring_event = 6017 lpfc_sli_handle_slow_ring_event_s4; 6018 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 6019 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 6020 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 6021 break; 6022 default: 6023 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6024 "1420 Invalid HBA PCI-device group: 0x%x\n", 6025 dev_grp); 6026 return -ENODEV; 6027 break; 6028 } 6029 return 0; 6030 } 6031 6032 /** 6033 * __lpfc_sli_ringtx_put - Add an iocb to the txq 6034 * @phba: Pointer to HBA context object. 6035 * @pring: Pointer to driver SLI ring object. 6036 * @piocb: Pointer to address of newly added command iocb. 6037 * 6038 * This function is called with hbalock held to add a command 6039 * iocb to the txq when SLI layer cannot submit the command iocb 6040 * to the ring. 6041 **/ 6042 void 6043 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6044 struct lpfc_iocbq *piocb) 6045 { 6046 /* Insert the caller's iocb in the txq tail for later processing. */ 6047 list_add_tail(&piocb->list, &pring->txq); 6048 pring->txq_cnt++; 6049 } 6050 6051 /** 6052 * lpfc_sli_next_iocb - Get the next iocb in the txq 6053 * @phba: Pointer to HBA context object. 6054 * @pring: Pointer to driver SLI ring object. 6055 * @piocb: Pointer to address of newly added command iocb. 
6056 * 6057 * This function is called with hbalock held before a new 6058 * iocb is submitted to the firmware. This function checks 6059 * txq to flush the iocbs in txq to Firmware before 6060 * submitting new iocbs to the Firmware. 6061 * If there are iocbs in the txq which need to be submitted 6062 * to firmware, lpfc_sli_next_iocb returns the first element 6063 * of the txq after dequeuing it from txq. 6064 * If there is no iocb in the txq then the function will return 6065 * *piocb and *piocb is set to NULL. Caller needs to check 6066 * *piocb to find if there are more commands in the txq. 6067 **/ 6068 static struct lpfc_iocbq * 6069 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 6070 struct lpfc_iocbq **piocb) 6071 { 6072 struct lpfc_iocbq * nextiocb; 6073 6074 nextiocb = lpfc_sli_ringtx_get(phba, pring); 6075 if (!nextiocb) { 6076 nextiocb = *piocb; 6077 *piocb = NULL; 6078 } 6079 6080 return nextiocb; 6081 } 6082 6083 /** 6084 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 6085 * @phba: Pointer to HBA context object. 6086 * @ring_number: SLI ring number to issue iocb on. 6087 * @piocb: Pointer to command iocb. 6088 * @flag: Flag indicating if this command can be put into txq. 6089 * 6090 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 6091 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 6092 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 6093 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 6094 * this function allows only iocbs for posting buffers. This function finds 6095 * next available slot in the command ring and posts the command to the 6096 * available slot and writes the port attention register to request HBA start 6097 * processing new iocb. If there is no slot available in the ring and 6098 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 6099 * the function returns IOCB_BUSY. 6100 * 6101 * This function is called with hbalock held. The function will return success 6102 * after it successfully submit the iocb to firmware or after adding to the 6103 * txq. 6104 **/ 6105 static int 6106 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 6107 struct lpfc_iocbq *piocb, uint32_t flag) 6108 { 6109 struct lpfc_iocbq *nextiocb; 6110 IOCB_t *iocb; 6111 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6112 6113 if (piocb->iocb_cmpl && (!piocb->vport) && 6114 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 6115 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 6116 lpfc_printf_log(phba, KERN_ERR, 6117 LOG_SLI | LOG_VPORT, 6118 "1807 IOCB x%x failed. No vport\n", 6119 piocb->iocb.ulpCommand); 6120 dump_stack(); 6121 return IOCB_ERROR; 6122 } 6123 6124 6125 /* If the PCI channel is in offline state, do not post iocbs. */ 6126 if (unlikely(pci_channel_offline(phba->pcidev))) 6127 return IOCB_ERROR; 6128 6129 /* If HBA has a deferred error attention, fail the iocb. */ 6130 if (unlikely(phba->hba_flag & DEFER_ERATT)) 6131 return IOCB_ERROR; 6132 6133 /* 6134 * We should never get an IOCB if we are in a < LINK_DOWN state 6135 */ 6136 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 6137 return IOCB_ERROR; 6138 6139 /* 6140 * Check to see if we are blocking IOCB processing because of a 6141 * outstanding event. 
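 * (LPFC_STOP_IOCB_EVENT is set on the ring while such an event is
 * pending; the iocb is then delayed: it is queued to the txq unless the
 * caller passed SLI_IOCB_RET_IOCB, in which case IOCB_BUSY is returned.)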
6142 */ 6143 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 6144 goto iocb_busy; 6145 6146 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 6147 /* 6148 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 6149 * can be issued if the link is not up. 6150 */ 6151 switch (piocb->iocb.ulpCommand) { 6152 case CMD_GEN_REQUEST64_CR: 6153 case CMD_GEN_REQUEST64_CX: 6154 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 6155 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 6156 FC_RCTL_DD_UNSOL_CMD) || 6157 (piocb->iocb.un.genreq64.w5.hcsw.Type != 6158 MENLO_TRANSPORT_TYPE)) 6159 6160 goto iocb_busy; 6161 break; 6162 case CMD_QUE_RING_BUF_CN: 6163 case CMD_QUE_RING_BUF64_CN: 6164 /* 6165 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 6166 * completion, iocb_cmpl MUST be 0. 6167 */ 6168 if (piocb->iocb_cmpl) 6169 piocb->iocb_cmpl = NULL; 6170 /*FALLTHROUGH*/ 6171 case CMD_CREATE_XRI_CR: 6172 case CMD_CLOSE_XRI_CN: 6173 case CMD_CLOSE_XRI_CX: 6174 break; 6175 default: 6176 goto iocb_busy; 6177 } 6178 6179 /* 6180 * For FCP commands, we must be in a state where we can process link 6181 * attention events. 6182 */ 6183 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 6184 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 6185 goto iocb_busy; 6186 } 6187 6188 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 6189 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 6190 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 6191 6192 if (iocb) 6193 lpfc_sli_update_ring(phba, pring); 6194 else 6195 lpfc_sli_update_full_ring(phba, pring); 6196 6197 if (!piocb) 6198 return IOCB_SUCCESS; 6199 6200 goto out_busy; 6201 6202 iocb_busy: 6203 pring->stats.iocb_cmd_delay++; 6204 6205 out_busy: 6206 6207 if (!(flag & SLI_IOCB_RET_IOCB)) { 6208 __lpfc_sli_ringtx_put(phba, pring, piocb); 6209 return IOCB_SUCCESS; 6210 } 6211 6212 return IOCB_BUSY; 6213 } 6214 6215 /** 6216 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 6217 * @phba: Pointer to HBA context object. 6218 * @piocb: Pointer to command iocb. 6219 * @sglq: Pointer to the scatter gather queue object. 6220 * 6221 * This routine converts the bpl or bde that is in the IOCB 6222 * to a sgl list for the sli4 hardware. The physical address 6223 * of the bpl/bde is converted back to a virtual address. 6224 * If the IOCB contains a BPL then the list of BDE's is 6225 * converted to sli4_sge's. If the IOCB contains a single 6226 * BDE then it is converted to a single sli_sge. 6227 * The IOCB is still in cpu endianess so the contents of 6228 * the bpl can be used without byte swapping. 6229 * 6230 * Returns valid XRI = Success, NO_XRI = Failure. 6231 **/ 6232 static uint16_t 6233 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 6234 struct lpfc_sglq *sglq) 6235 { 6236 uint16_t xritag = NO_XRI; 6237 struct ulp_bde64 *bpl = NULL; 6238 struct ulp_bde64 bde; 6239 struct sli4_sge *sgl = NULL; 6240 IOCB_t *icmd; 6241 int numBdes = 0; 6242 int i = 0; 6243 uint32_t offset = 0; /* accumulated offset in the sg request list */ 6244 int inbound = 0; /* number of sg reply entries inbound from firmware */ 6245 6246 if (!piocbq || !sglq) 6247 return xritag; 6248 6249 sgl = (struct sli4_sge *)sglq->sgl; 6250 icmd = &piocbq->iocb; 6251 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 6252 numBdes = icmd->un.genreq64.bdl.bdeSize / 6253 sizeof(struct ulp_bde64); 6254 /* The addrHigh and addrLow fields within the IOCB 6255 * have not been byteswapped yet so there is no 6256 * need to swap them back. 
6257 */ 6258 bpl = (struct ulp_bde64 *) 6259 ((struct lpfc_dmabuf *)piocbq->context3)->virt; 6260 6261 if (!bpl) 6262 return xritag; 6263 6264 for (i = 0; i < numBdes; i++) { 6265 /* Should already be byte swapped. */ 6266 sgl->addr_hi = bpl->addrHigh; 6267 sgl->addr_lo = bpl->addrLow; 6268 6269 if ((i+1) == numBdes) 6270 bf_set(lpfc_sli4_sge_last, sgl, 1); 6271 else 6272 bf_set(lpfc_sli4_sge_last, sgl, 0); 6273 sgl->word2 = cpu_to_le32(sgl->word2); 6274 /* swap the size field back to the cpu so we 6275 * can assign it to the sgl. 6276 */ 6277 bde.tus.w = le32_to_cpu(bpl->tus.w); 6278 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 6279 /* The offsets in the sgl need to be accumulated 6280 * separately for the request and reply lists. 6281 * The request is always first, the reply follows. 6282 */ 6283 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 6284 /* add up the reply sg entries */ 6285 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 6286 inbound++; 6287 /* first inbound? reset the offset */ 6288 if (inbound == 1) 6289 offset = 0; 6290 bf_set(lpfc_sli4_sge_offset, sgl, offset); 6291 offset += bde.tus.f.bdeSize; 6292 } 6293 bpl++; 6294 sgl++; 6295 } 6296 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 6297 /* The addrHigh and addrLow fields of the BDE have not 6298 * been byteswapped yet so they need to be swapped 6299 * before putting them in the sgl. 6300 */ 6301 sgl->addr_hi = 6302 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 6303 sgl->addr_lo = 6304 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 6305 bf_set(lpfc_sli4_sge_last, sgl, 1); 6306 sgl->word2 = cpu_to_le32(sgl->word2); 6307 sgl->sge_len = 6308 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 6309 } 6310 return sglq->sli4_xritag; 6311 } 6312 6313 /** 6314 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 6315 * @phba: Pointer to HBA context object. 6316 * 6317 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 6318 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 6319 * held. 6320 * 6321 * Return: index into SLI4 fast-path FCP queue index. 6322 **/ 6323 static uint32_t 6324 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 6325 { 6326 ++phba->fcp_qidx; 6327 if (phba->fcp_qidx >= phba->cfg_fcp_wq_count) 6328 phba->fcp_qidx = 0; 6329 6330 return phba->fcp_qidx; 6331 } 6332 6333 /** 6334 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 6335 * @phba: Pointer to HBA context object. 6336 * @piocb: Pointer to command iocb. 6337 * @wqe: Pointer to the work queue entry. 6338 * 6339 * This routine converts the iocb command to its Work Queue Entry 6340 * equivalent. The wqe pointer should not have any fields set when 6341 * this routine is called because it will memcpy over them. 6342 * This routine does not set the CQ_ID or the WQEC bits in the 6343 * wqe. 6344 * 6345 * Returns: 0 = Success, IOCB_ERROR = Failure. 
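 *
 * The command type (FCP, FIP ELS, non-FIP ELS or OTHER) is derived from
 * the iocb flags, FIP support and the ulpCommand being converted;
 * ulpCommand values with no WQE equivalent are rejected with IOCB_ERROR.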
6346 **/ 6347 static int 6348 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 6349 union lpfc_wqe *wqe) 6350 { 6351 uint32_t xmit_len = 0, total_len = 0; 6352 uint8_t ct = 0; 6353 uint32_t fip; 6354 uint32_t abort_tag; 6355 uint8_t command_type = ELS_COMMAND_NON_FIP; 6356 uint8_t cmnd; 6357 uint16_t xritag; 6358 uint16_t abrt_iotag; 6359 struct lpfc_iocbq *abrtiocbq; 6360 struct ulp_bde64 *bpl = NULL; 6361 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 6362 int numBdes, i; 6363 struct ulp_bde64 bde; 6364 6365 fip = phba->hba_flag & HBA_FIP_SUPPORT; 6366 /* The fcp commands will set command type */ 6367 if (iocbq->iocb_flag & LPFC_IO_FCP) 6368 command_type = FCP_COMMAND; 6369 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 6370 command_type = ELS_COMMAND_FIP; 6371 else 6372 command_type = ELS_COMMAND_NON_FIP; 6373 6374 /* Some of the fields are in the right position already */ 6375 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 6376 abort_tag = (uint32_t) iocbq->iotag; 6377 xritag = iocbq->sli4_xritag; 6378 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 6379 /* words0-2 bpl convert bde */ 6380 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 6381 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 6382 sizeof(struct ulp_bde64); 6383 bpl = (struct ulp_bde64 *) 6384 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 6385 if (!bpl) 6386 return IOCB_ERROR; 6387 6388 /* Should already be byte swapped. */ 6389 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 6390 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 6391 /* swap the size field back to the cpu so we 6392 * can assign it to the sgl. 6393 */ 6394 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 6395 xmit_len = wqe->generic.bde.tus.f.bdeSize; 6396 total_len = 0; 6397 for (i = 0; i < numBdes; i++) { 6398 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 6399 total_len += bde.tus.f.bdeSize; 6400 } 6401 } else 6402 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 6403 6404 iocbq->iocb.ulpIoTag = iocbq->iotag; 6405 cmnd = iocbq->iocb.ulpCommand; 6406 6407 switch (iocbq->iocb.ulpCommand) { 6408 case CMD_ELS_REQUEST64_CR: 6409 if (!iocbq->iocb.ulpLe) { 6410 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6411 "2007 Only Limited Edition cmd Format" 6412 " supported 0x%x\n", 6413 iocbq->iocb.ulpCommand); 6414 return IOCB_ERROR; 6415 } 6416 wqe->els_req.payload_len = xmit_len; 6417 /* Els_reguest64 has a TMO */ 6418 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 6419 iocbq->iocb.ulpTimeout); 6420 /* Need a VF for word 4 set the vf bit*/ 6421 bf_set(els_req64_vf, &wqe->els_req, 0); 6422 /* And a VFID for word 12 */ 6423 bf_set(els_req64_vfid, &wqe->els_req, 0); 6424 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6425 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 6426 iocbq->iocb.ulpContext); 6427 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 6428 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 6429 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 6430 if (command_type == ELS_COMMAND_FIP) { 6431 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 6432 >> LPFC_FIP_ELS_ID_SHIFT); 6433 } 6434 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 6435 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 6436 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 6437 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 6438 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 6439 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 6440 break; 6441 case CMD_XMIT_SEQUENCE64_CX: 6442 bf_set(wqe_ctxt_tag, 
&wqe->xmit_sequence.wqe_com, 6443 iocbq->iocb.un.ulpWord[3]); 6444 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 6445 iocbq->iocb.ulpContext); 6446 /* The entire sequence is transmitted for this IOCB */ 6447 xmit_len = total_len; 6448 cmnd = CMD_XMIT_SEQUENCE64_CR; 6449 case CMD_XMIT_SEQUENCE64_CR: 6450 /* word3 iocb=io_tag32 wqe=reserved */ 6451 wqe->xmit_sequence.rsvd3 = 0; 6452 /* word4 relative_offset memcpy */ 6453 /* word5 r_ctl/df_ctl memcpy */ 6454 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 6455 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 6456 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 6457 LPFC_WQE_IOD_WRITE); 6458 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 6459 LPFC_WQE_LENLOC_WORD12); 6460 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 6461 wqe->xmit_sequence.xmit_len = xmit_len; 6462 command_type = OTHER_COMMAND; 6463 break; 6464 case CMD_XMIT_BCAST64_CN: 6465 /* word3 iocb=iotag32 wqe=seq_payload_len */ 6466 wqe->xmit_bcast64.seq_payload_len = xmit_len; 6467 /* word4 iocb=rsvd wqe=rsvd */ 6468 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 6469 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 6470 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 6471 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6472 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 6473 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 6474 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 6475 LPFC_WQE_LENLOC_WORD3); 6476 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 6477 break; 6478 case CMD_FCP_IWRITE64_CR: 6479 command_type = FCP_COMMAND_DATA_OUT; 6480 /* word3 iocb=iotag wqe=payload_offset_len */ 6481 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 6482 wqe->fcp_iwrite.payload_offset_len = 6483 xmit_len + sizeof(struct fcp_rsp); 6484 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 6485 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 6486 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 6487 iocbq->iocb.ulpFCP2Rcvy); 6488 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 6489 /* Always open the exchange */ 6490 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 6491 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 6492 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 6493 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 6494 LPFC_WQE_LENLOC_WORD4); 6495 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 6496 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 6497 break; 6498 case CMD_FCP_IREAD64_CR: 6499 /* word3 iocb=iotag wqe=payload_offset_len */ 6500 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 6501 wqe->fcp_iread.payload_offset_len = 6502 xmit_len + sizeof(struct fcp_rsp); 6503 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 6504 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 6505 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 6506 iocbq->iocb.ulpFCP2Rcvy); 6507 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 6508 /* Always open the exchange */ 6509 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 6510 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 6511 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 6512 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 6513 LPFC_WQE_LENLOC_WORD4); 6514 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 6515 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 6516 break; 6517 case CMD_FCP_ICMND64_CR: 6518 /* word3 iocb=IO_TAG wqe=reserved */ 6519 wqe->fcp_icmd.rsrvd3 = 0; 6520 bf_set(wqe_pu, 
&wqe->fcp_icmd.wqe_com, 0); 6521 /* Always open the exchange */ 6522 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 6523 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 6524 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 6525 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 6526 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 6527 LPFC_WQE_LENLOC_NONE); 6528 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 6529 break; 6530 case CMD_GEN_REQUEST64_CR: 6531 /* For this command calculate the xmit length of the 6532 * request bde. 6533 */ 6534 xmit_len = 0; 6535 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 6536 sizeof(struct ulp_bde64); 6537 for (i = 0; i < numBdes; i++) { 6538 if (bpl[i].tus.f.bdeFlags != BUFF_TYPE_BDE_64) 6539 break; 6540 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 6541 xmit_len += bde.tus.f.bdeSize; 6542 } 6543 /* word3 iocb=IO_TAG wqe=request_payload_len */ 6544 wqe->gen_req.request_payload_len = xmit_len; 6545 /* word4 iocb=parameter wqe=relative_offset memcpy */ 6546 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 6547 /* word6 context tag copied in memcpy */ 6548 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 6549 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 6550 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6551 "2015 Invalid CT %x command 0x%x\n", 6552 ct, iocbq->iocb.ulpCommand); 6553 return IOCB_ERROR; 6554 } 6555 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 6556 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 6557 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 6558 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 6559 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 6560 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 6561 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 6562 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 6563 command_type = OTHER_COMMAND; 6564 break; 6565 case CMD_XMIT_ELS_RSP64_CX: 6566 /* words0-2 BDE memcpy */ 6567 /* word3 iocb=iotag32 wqe=response_payload_len */ 6568 wqe->xmit_els_rsp.response_payload_len = xmit_len; 6569 /* word4 iocb=did wge=rsvd. 
*/ 6570 wqe->xmit_els_rsp.rsvd4 = 0; 6571 /* word5 iocb=rsvd wge=did */ 6572 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 6573 iocbq->iocb.un.elsreq64.remoteID); 6574 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 6575 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6576 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 6577 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 6578 iocbq->iocb.ulpContext); 6579 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 6580 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 6581 iocbq->vport->vpi + phba->vpi_base); 6582 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 6583 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 6584 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 6585 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 6586 LPFC_WQE_LENLOC_WORD3); 6587 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 6588 command_type = OTHER_COMMAND; 6589 break; 6590 case CMD_CLOSE_XRI_CN: 6591 case CMD_ABORT_XRI_CN: 6592 case CMD_ABORT_XRI_CX: 6593 /* words 0-2 memcpy should be 0 rserved */ 6594 /* port will send abts */ 6595 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 6596 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 6597 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 6598 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 6599 } else 6600 fip = 0; 6601 6602 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 6603 /* 6604 * The link is down, or the command was ELS_FIP 6605 * so the fw does not need to send abts 6606 * on the wire. 6607 */ 6608 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 6609 else 6610 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 6611 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 6612 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 6613 wqe->abort_cmd.rsrvd5 = 0; 6614 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 6615 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 6616 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 6617 /* 6618 * The abort handler will send us CMD_ABORT_XRI_CN or 6619 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 6620 */ 6621 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 6622 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 6623 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 6624 LPFC_WQE_LENLOC_NONE); 6625 cmnd = CMD_ABORT_XRI_CX; 6626 command_type = OTHER_COMMAND; 6627 xritag = 0; 6628 break; 6629 case CMD_XMIT_BLS_RSP64_CX: 6630 /* As BLS ABTS-ACC WQE is very different from other WQEs, 6631 * we re-construct this WQE here based on information in 6632 * iocbq from scratch. 6633 */ 6634 memset(wqe, 0, sizeof(union lpfc_wqe)); 6635 /* OX_ID is invariable to who sent ABTS to CT exchange */ 6636 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 6637 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_acc)); 6638 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_acc) == 6639 LPFC_ABTS_UNSOL_INT) { 6640 /* ABTS sent by initiator to CT exchange, the 6641 * RX_ID field will be filled with the newly 6642 * allocated responder XRI. 6643 */ 6644 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 6645 iocbq->sli4_xritag); 6646 } else { 6647 /* ABTS sent by responder to CT exchange, the 6648 * RX_ID field will be filled with the responder 6649 * RX_ID from ABTS. 
6650 */ 6651 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 6652 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_acc)); 6653 } 6654 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 6655 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 6656 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 6657 iocbq->iocb.ulpContext); 6658 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 6659 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 6660 LPFC_WQE_LENLOC_NONE); 6661 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 6662 command_type = OTHER_COMMAND; 6663 break; 6664 case CMD_XRI_ABORTED_CX: 6665 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 6666 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 6667 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 6668 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 6669 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 6670 default: 6671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6672 "2014 Invalid command 0x%x\n", 6673 iocbq->iocb.ulpCommand); 6674 return IOCB_ERROR; 6675 break; 6676 } 6677 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 6678 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 6679 wqe->generic.wqe_com.abort_tag = abort_tag; 6680 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 6681 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 6682 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 6683 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 6684 return 0; 6685 } 6686 6687 /** 6688 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 6689 * @phba: Pointer to HBA context object. 6690 * @ring_number: SLI ring number to issue iocb on. 6691 * @piocb: Pointer to command iocb. 6692 * @flag: Flag indicating if this command can be put into txq. 6693 * 6694 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 6695 * an iocb command to an HBA with SLI-4 interface spec. 6696 * 6697 * This function is called with hbalock held. The function will return success 6698 * after it successfully submit the iocb to firmware or after adding to the 6699 * txq. 6700 **/ 6701 static int 6702 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 6703 struct lpfc_iocbq *piocb, uint32_t flag) 6704 { 6705 struct lpfc_sglq *sglq; 6706 union lpfc_wqe wqe; 6707 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 6708 6709 if (piocb->sli4_xritag == NO_XRI) { 6710 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 6711 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 6712 sglq = NULL; 6713 else { 6714 if (pring->txq_cnt) { 6715 if (!(flag & SLI_IOCB_RET_IOCB)) { 6716 __lpfc_sli_ringtx_put(phba, 6717 pring, piocb); 6718 return IOCB_SUCCESS; 6719 } else { 6720 return IOCB_BUSY; 6721 } 6722 } else { 6723 sglq = __lpfc_sli_get_sglq(phba, piocb); 6724 if (!sglq) { 6725 if (!(flag & SLI_IOCB_RET_IOCB)) { 6726 __lpfc_sli_ringtx_put(phba, 6727 pring, 6728 piocb); 6729 return IOCB_SUCCESS; 6730 } else 6731 return IOCB_BUSY; 6732 } 6733 } 6734 } 6735 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 6736 sglq = NULL; /* These IO's already have an XRI and 6737 * a mapped sgl. 
6738 */ 6739 } else { 6740 /* This is a continuation of a command (CX), so this 6741 * sglq is on the active list 6742 */ 6743 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); 6744 if (!sglq) 6745 return IOCB_ERROR; 6746 } 6747 6748 if (sglq) { 6749 piocb->sli4_xritag = sglq->sli4_xritag; 6750 6751 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 6752 return IOCB_ERROR; 6753 } 6754 6755 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 6756 return IOCB_ERROR; 6757 6758 if ((piocb->iocb_flag & LPFC_IO_FCP) || 6759 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 6760 /* 6761 * For an FCP command IOCB, get a new WQ index to distribute the 6762 * WQE across the WQs. On the other hand, an abort IOCB 6763 * carries the same WQ index as the original command 6764 * IOCB. 6765 */ 6766 if (piocb->iocb_flag & LPFC_IO_FCP) 6767 piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba); 6768 if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], 6769 &wqe)) 6770 return IOCB_ERROR; 6771 } else { 6772 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 6773 return IOCB_ERROR; 6774 } 6775 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 6776 6777 return 0; 6778 } 6779 6780 /** 6781 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 6782 * 6783 * This routine wraps the actual lockless version for issuing an IOCB via the 6784 * function pointer from the lpfc_hba struct. 6785 * 6786 * Return codes: 6787 * IOCB_ERROR - Error 6788 * IOCB_SUCCESS - Success 6789 * IOCB_BUSY - Busy 6790 **/ 6791 int 6792 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 6793 struct lpfc_iocbq *piocb, uint32_t flag) 6794 { 6795 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 6796 } 6797 6798 /** 6799 * lpfc_sli_api_table_setup - Set up sli api function jump table 6800 * @phba: The hba struct for which this call is being executed. 6801 * @dev_grp: The HBA PCI-Device group number. 6802 * 6803 * This routine sets up the SLI interface API function jump table in @phba 6804 * struct. 6805 * Returns: 0 - success, -ENODEV - failure. 6806 **/ 6807 int 6808 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 6809 { 6810 6811 switch (dev_grp) { 6812 case LPFC_PCI_DEV_LP: 6813 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 6814 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 6815 break; 6816 case LPFC_PCI_DEV_OC: 6817 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 6818 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 6819 break; 6820 default: 6821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6822 "1419 Invalid HBA PCI-device group: 0x%x\n", 6823 dev_grp); 6824 return -ENODEV; 6825 break; 6826 } 6827 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 6828 return 0; 6829 } 6830 6831 /** 6832 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 6833 * @phba: Pointer to HBA context object. 6834 * @ring_number: SLI ring number to issue iocb on. 6835 * @piocb: Pointer to command iocb. 6836 * @flag: Flag indicating if this command can be put into txq. 6837 * 6838 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 6839 * function. This function gets the hbalock and calls 6840 * __lpfc_sli_issue_iocb function and will return the error returned 6841 * by __lpfc_sli_issue_iocb function. This wrapper is used by 6842 * functions which do not hold hbalock.
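 *
 * A minimal calling sketch (assuming @piocb has already been fully
 * initialized for the target ring): a caller that does not hold the
 * hbalock issues
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb, 0);
 *
 * whereas code paths that already hold the hbalock call
 * __lpfc_sli_issue_iocb() directly to avoid taking the lock twice.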
6843 **/ 6844 int 6845 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 6846 struct lpfc_iocbq *piocb, uint32_t flag) 6847 { 6848 unsigned long iflags; 6849 int rc; 6850 6851 spin_lock_irqsave(&phba->hbalock, iflags); 6852 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 6853 spin_unlock_irqrestore(&phba->hbalock, iflags); 6854 6855 return rc; 6856 } 6857 6858 /** 6859 * lpfc_extra_ring_setup - Extra ring setup function 6860 * @phba: Pointer to HBA context object. 6861 * 6862 * This function is called while driver attaches with the 6863 * HBA to setup the extra ring. The extra ring is used 6864 * only when driver needs to support target mode functionality 6865 * or IP over FC functionalities. 6866 * 6867 * This function is called with no lock held. 6868 **/ 6869 static int 6870 lpfc_extra_ring_setup( struct lpfc_hba *phba) 6871 { 6872 struct lpfc_sli *psli; 6873 struct lpfc_sli_ring *pring; 6874 6875 psli = &phba->sli; 6876 6877 /* Adjust cmd/rsp ring iocb entries more evenly */ 6878 6879 /* Take some away from the FCP ring */ 6880 pring = &psli->ring[psli->fcp_ring]; 6881 pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6882 pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6883 pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6884 pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6885 6886 /* and give them to the extra ring */ 6887 pring = &psli->ring[psli->extra_ring]; 6888 6889 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 6890 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 6891 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 6892 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 6893 6894 /* Setup default profile for this ring */ 6895 pring->iotag_max = 4096; 6896 pring->num_mask = 1; 6897 pring->prt[0].profile = 0; /* Mask 0 */ 6898 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 6899 pring->prt[0].type = phba->cfg_multi_ring_type; 6900 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 6901 return 0; 6902 } 6903 6904 /** 6905 * lpfc_sli_async_event_handler - ASYNC iocb handler function 6906 * @phba: Pointer to HBA context object. 6907 * @pring: Pointer to driver SLI ring object. 6908 * @iocbq: Pointer to iocb object. 6909 * 6910 * This function is called by the slow ring event handler 6911 * function when there is an ASYNC event iocb in the ring. 6912 * This function is called with no lock held. 6913 * Currently this function handles only temperature related 6914 * ASYNC events. The function decodes the temperature sensor 6915 * event message and posts events for the management applications. 
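 * ASYNC events with any other event code are logged, together with the
 * raw iocb words, and otherwise ignored.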
6916 **/ 6917 static void 6918 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 6919 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 6920 { 6921 IOCB_t *icmd; 6922 uint16_t evt_code; 6923 uint16_t temp; 6924 struct temp_event temp_event_data; 6925 struct Scsi_Host *shost; 6926 uint32_t *iocb_w; 6927 6928 icmd = &iocbq->iocb; 6929 evt_code = icmd->un.asyncstat.evt_code; 6930 temp = icmd->ulpContext; 6931 6932 if ((evt_code != ASYNC_TEMP_WARN) && 6933 (evt_code != ASYNC_TEMP_SAFE)) { 6934 iocb_w = (uint32_t *) icmd; 6935 lpfc_printf_log(phba, 6936 KERN_ERR, 6937 LOG_SLI, 6938 "0346 Ring %d handler: unexpected ASYNC_STATUS" 6939 " evt_code 0x%x\n" 6940 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 6941 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 6942 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 6943 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 6944 pring->ringno, 6945 icmd->un.asyncstat.evt_code, 6946 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 6947 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 6948 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 6949 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 6950 6951 return; 6952 } 6953 temp_event_data.data = (uint32_t)temp; 6954 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 6955 if (evt_code == ASYNC_TEMP_WARN) { 6956 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 6957 lpfc_printf_log(phba, 6958 KERN_ERR, 6959 LOG_TEMP, 6960 "0347 Adapter is very hot, please take " 6961 "corrective action. temperature : %d Celsius\n", 6962 temp); 6963 } 6964 if (evt_code == ASYNC_TEMP_SAFE) { 6965 temp_event_data.event_code = LPFC_NORMAL_TEMP; 6966 lpfc_printf_log(phba, 6967 KERN_ERR, 6968 LOG_TEMP, 6969 "0340 Adapter temperature is OK now. " 6970 "temperature : %d Celsius\n", 6971 temp); 6972 } 6973 6974 /* Send temperature change event to applications */ 6975 shost = lpfc_shost_from_vport(phba->pport); 6976 fc_host_post_vendor_event(shost, fc_get_event_number(), 6977 sizeof(temp_event_data), (char *) &temp_event_data, 6978 LPFC_NL_VENDOR_ID); 6979 6980 } 6981 6982 6983 /** 6984 * lpfc_sli_setup - SLI ring setup function 6985 * @phba: Pointer to HBA context object. 6986 * 6987 * lpfc_sli_setup sets up rings of the SLI interface with 6988 * number of iocbs per ring and iotags. This function is 6989 * called while driver attach to the HBA and before the 6990 * interrupts are enabled. So there is no need for locking. 6991 * 6992 * This function always returns 0. 6993 **/ 6994 int 6995 lpfc_sli_setup(struct lpfc_hba *phba) 6996 { 6997 int i, totiocbsize = 0; 6998 struct lpfc_sli *psli = &phba->sli; 6999 struct lpfc_sli_ring *pring; 7000 7001 psli->num_rings = MAX_CONFIGURED_RINGS; 7002 psli->sli_flag = 0; 7003 psli->fcp_ring = LPFC_FCP_RING; 7004 psli->next_ring = LPFC_FCP_NEXT_RING; 7005 psli->extra_ring = LPFC_EXTRA_RING; 7006 7007 psli->iocbq_lookup = NULL; 7008 psli->iocbq_lookup_len = 0; 7009 psli->last_iotag = 0; 7010 7011 for (i = 0; i < psli->num_rings; i++) { 7012 pring = &psli->ring[i]; 7013 switch (i) { 7014 case LPFC_FCP_RING: /* ring 0 - FCP */ 7015 /* numCiocb and numRiocb are used in config_port */ 7016 pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 7017 pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 7018 pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 7019 pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 7020 pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 7021 pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 7022 pring->sizeCiocb = (phba->sli_rev == 3) ? 
7023 SLI3_IOCB_CMD_SIZE : 7024 SLI2_IOCB_CMD_SIZE; 7025 pring->sizeRiocb = (phba->sli_rev == 3) ? 7026 SLI3_IOCB_RSP_SIZE : 7027 SLI2_IOCB_RSP_SIZE; 7028 pring->iotag_ctr = 0; 7029 pring->iotag_max = 7030 (phba->cfg_hba_queue_depth * 2); 7031 pring->fast_iotag = pring->iotag_max; 7032 pring->num_mask = 0; 7033 break; 7034 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 7035 /* numCiocb and numRiocb are used in config_port */ 7036 pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 7037 pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 7038 pring->sizeCiocb = (phba->sli_rev == 3) ? 7039 SLI3_IOCB_CMD_SIZE : 7040 SLI2_IOCB_CMD_SIZE; 7041 pring->sizeRiocb = (phba->sli_rev == 3) ? 7042 SLI3_IOCB_RSP_SIZE : 7043 SLI2_IOCB_RSP_SIZE; 7044 pring->iotag_max = phba->cfg_hba_queue_depth; 7045 pring->num_mask = 0; 7046 break; 7047 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 7048 /* numCiocb and numRiocb are used in config_port */ 7049 pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 7050 pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 7051 pring->sizeCiocb = (phba->sli_rev == 3) ? 7052 SLI3_IOCB_CMD_SIZE : 7053 SLI2_IOCB_CMD_SIZE; 7054 pring->sizeRiocb = (phba->sli_rev == 3) ? 7055 SLI3_IOCB_RSP_SIZE : 7056 SLI2_IOCB_RSP_SIZE; 7057 pring->fast_iotag = 0; 7058 pring->iotag_ctr = 0; 7059 pring->iotag_max = 4096; 7060 pring->lpfc_sli_rcv_async_status = 7061 lpfc_sli_async_event_handler; 7062 pring->num_mask = LPFC_MAX_RING_MASK; 7063 pring->prt[0].profile = 0; /* Mask 0 */ 7064 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 7065 pring->prt[0].type = FC_TYPE_ELS; 7066 pring->prt[0].lpfc_sli_rcv_unsol_event = 7067 lpfc_els_unsol_event; 7068 pring->prt[1].profile = 0; /* Mask 1 */ 7069 pring->prt[1].rctl = FC_RCTL_ELS_REP; 7070 pring->prt[1].type = FC_TYPE_ELS; 7071 pring->prt[1].lpfc_sli_rcv_unsol_event = 7072 lpfc_els_unsol_event; 7073 pring->prt[2].profile = 0; /* Mask 2 */ 7074 /* NameServer Inquiry */ 7075 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 7076 /* NameServer */ 7077 pring->prt[2].type = FC_TYPE_CT; 7078 pring->prt[2].lpfc_sli_rcv_unsol_event = 7079 lpfc_ct_unsol_event; 7080 pring->prt[3].profile = 0; /* Mask 3 */ 7081 /* NameServer response */ 7082 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 7083 /* NameServer */ 7084 pring->prt[3].type = FC_TYPE_CT; 7085 pring->prt[3].lpfc_sli_rcv_unsol_event = 7086 lpfc_ct_unsol_event; 7087 /* abort unsolicited sequence */ 7088 pring->prt[4].profile = 0; /* Mask 4 */ 7089 pring->prt[4].rctl = FC_RCTL_BA_ABTS; 7090 pring->prt[4].type = FC_TYPE_BLS; 7091 pring->prt[4].lpfc_sli_rcv_unsol_event = 7092 lpfc_sli4_ct_abort_unsol_event; 7093 break; 7094 } 7095 totiocbsize += (pring->numCiocb * pring->sizeCiocb) + 7096 (pring->numRiocb * pring->sizeRiocb); 7097 } 7098 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 7099 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 7100 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 7101 "SLI2 SLIM Data: x%x x%lx\n", 7102 phba->brd_no, totiocbsize, 7103 (unsigned long) MAX_SLIM_IOCB_SIZE); 7104 } 7105 if (phba->cfg_multi_ring_support == 2) 7106 lpfc_extra_ring_setup(phba); 7107 7108 return 0; 7109 } 7110 7111 /** 7112 * lpfc_sli_queue_setup - Queue initialization function 7113 * @phba: Pointer to HBA context object. 7114 * 7115 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 7116 * ring. This function also initializes ring indices of each ring. 7117 * This function is called during the initialization of the SLI 7118 * interface of an HBA. 7119 * This function is called with no lock held and always returns 7120 * 1. 
7121 **/ 7122 int 7123 lpfc_sli_queue_setup(struct lpfc_hba *phba) 7124 { 7125 struct lpfc_sli *psli; 7126 struct lpfc_sli_ring *pring; 7127 int i; 7128 7129 psli = &phba->sli; 7130 spin_lock_irq(&phba->hbalock); 7131 INIT_LIST_HEAD(&psli->mboxq); 7132 INIT_LIST_HEAD(&psli->mboxq_cmpl); 7133 /* Initialize list headers for txq and txcmplq as double linked lists */ 7134 for (i = 0; i < psli->num_rings; i++) { 7135 pring = &psli->ring[i]; 7136 pring->ringno = i; 7137 pring->next_cmdidx = 0; 7138 pring->local_getidx = 0; 7139 pring->cmdidx = 0; 7140 INIT_LIST_HEAD(&pring->txq); 7141 INIT_LIST_HEAD(&pring->txcmplq); 7142 INIT_LIST_HEAD(&pring->iocb_continueq); 7143 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 7144 INIT_LIST_HEAD(&pring->postbufq); 7145 } 7146 spin_unlock_irq(&phba->hbalock); 7147 return 1; 7148 } 7149 7150 /** 7151 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 7152 * @phba: Pointer to HBA context object. 7153 * 7154 * This routine flushes the mailbox command subsystem. It will unconditionally 7155 * flush all the mailbox commands in the three possible stages in the mailbox 7156 * command sub-system: pending mailbox command queue; the outstanding mailbox 7157 * command; and completed mailbox command queue. It is caller's responsibility 7158 * to make sure that the driver is in the proper state to flush the mailbox 7159 * command sub-system. Namely, the posting of mailbox commands into the 7160 * pending mailbox command queue from the various clients must be stopped; 7161 * either the HBA is in a state that it will never works on the outstanding 7162 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 7163 * mailbox command has been completed. 7164 **/ 7165 static void 7166 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 7167 { 7168 LIST_HEAD(completions); 7169 struct lpfc_sli *psli = &phba->sli; 7170 LPFC_MBOXQ_t *pmb; 7171 unsigned long iflag; 7172 7173 /* Flush all the mailbox commands in the mbox system */ 7174 spin_lock_irqsave(&phba->hbalock, iflag); 7175 /* The pending mailbox command queue */ 7176 list_splice_init(&phba->sli.mboxq, &completions); 7177 /* The outstanding active mailbox command */ 7178 if (psli->mbox_active) { 7179 list_add_tail(&psli->mbox_active->list, &completions); 7180 psli->mbox_active = NULL; 7181 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7182 } 7183 /* The completed mailbox command queue */ 7184 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 7185 spin_unlock_irqrestore(&phba->hbalock, iflag); 7186 7187 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 7188 while (!list_empty(&completions)) { 7189 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 7190 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 7191 if (pmb->mbox_cmpl) 7192 pmb->mbox_cmpl(phba, pmb); 7193 } 7194 } 7195 7196 /** 7197 * lpfc_sli_host_down - Vport cleanup function 7198 * @vport: Pointer to virtual port object. 7199 * 7200 * lpfc_sli_host_down is called to clean up the resources 7201 * associated with a vport before destroying virtual 7202 * port data structures. 7203 * This function does following operations: 7204 * - Free discovery resources associated with this virtual 7205 * port. 7206 * - Free iocbs associated with this virtual port in 7207 * the txq. 7208 * - Send abort for all iocb commands associated with this 7209 * vport in txcmplq. 7210 * 7211 * This function is called with no lock held and always returns 1. 
7212 **/ 7213 int 7214 lpfc_sli_host_down(struct lpfc_vport *vport) 7215 { 7216 LIST_HEAD(completions); 7217 struct lpfc_hba *phba = vport->phba; 7218 struct lpfc_sli *psli = &phba->sli; 7219 struct lpfc_sli_ring *pring; 7220 struct lpfc_iocbq *iocb, *next_iocb; 7221 int i; 7222 unsigned long flags = 0; 7223 uint16_t prev_pring_flag; 7224 7225 lpfc_cleanup_discovery_resources(vport); 7226 7227 spin_lock_irqsave(&phba->hbalock, flags); 7228 for (i = 0; i < psli->num_rings; i++) { 7229 pring = &psli->ring[i]; 7230 prev_pring_flag = pring->flag; 7231 /* Only slow rings */ 7232 if (pring->ringno == LPFC_ELS_RING) { 7233 pring->flag |= LPFC_DEFERRED_RING_EVENT; 7234 /* Set the lpfc data pending flag */ 7235 set_bit(LPFC_DATA_READY, &phba->data_flags); 7236 } 7237 /* 7238 * Error everything on the txq since these iocbs have not been 7239 * given to the FW yet. 7240 */ 7241 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 7242 if (iocb->vport != vport) 7243 continue; 7244 list_move_tail(&iocb->list, &completions); 7245 pring->txq_cnt--; 7246 } 7247 7248 /* Next issue ABTS for everything on the txcmplq */ 7249 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 7250 list) { 7251 if (iocb->vport != vport) 7252 continue; 7253 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 7254 } 7255 7256 pring->flag = prev_pring_flag; 7257 } 7258 7259 spin_unlock_irqrestore(&phba->hbalock, flags); 7260 7261 /* Cancel all the IOCBs from the completions list */ 7262 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7263 IOERR_SLI_DOWN); 7264 return 1; 7265 } 7266 7267 /** 7268 * lpfc_sli_hba_down - Resource cleanup function for the HBA 7269 * @phba: Pointer to HBA context object. 7270 * 7271 * This function cleans up all iocb, buffers, mailbox commands 7272 * while shutting down the HBA. This function is called with no 7273 * lock held and always returns 1. 7274 * This function does the following to cleanup driver resources: 7275 * - Free discovery resources for each virtual port 7276 * - Cleanup any pending fabric iocbs 7277 * - Iterate through the iocb txq and free each entry 7278 * in the list. 7279 * - Free up any buffer posted to the HBA 7280 * - Free mailbox commands in the mailbox queue. 7281 **/ 7282 int 7283 lpfc_sli_hba_down(struct lpfc_hba *phba) 7284 { 7285 LIST_HEAD(completions); 7286 struct lpfc_sli *psli = &phba->sli; 7287 struct lpfc_sli_ring *pring; 7288 struct lpfc_dmabuf *buf_ptr; 7289 unsigned long flags = 0; 7290 int i; 7291 7292 /* Shutdown the mailbox command sub-system */ 7293 lpfc_sli_mbox_sys_shutdown(phba); 7294 7295 lpfc_hba_down_prep(phba); 7296 7297 lpfc_fabric_abort_hba(phba); 7298 7299 spin_lock_irqsave(&phba->hbalock, flags); 7300 for (i = 0; i < psli->num_rings; i++) { 7301 pring = &psli->ring[i]; 7302 /* Only slow rings */ 7303 if (pring->ringno == LPFC_ELS_RING) { 7304 pring->flag |= LPFC_DEFERRED_RING_EVENT; 7305 /* Set the lpfc data pending flag */ 7306 set_bit(LPFC_DATA_READY, &phba->data_flags); 7307 } 7308 7309 /* 7310 * Error everything on the txq since these iocbs have not been 7311 * given to the FW yet. 
7312 */ 7313 list_splice_init(&pring->txq, &completions); 7314 pring->txq_cnt = 0; 7315 7316 } 7317 spin_unlock_irqrestore(&phba->hbalock, flags); 7318 7319 /* Cancel all the IOCBs from the completions list */ 7320 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7321 IOERR_SLI_DOWN); 7322 7323 spin_lock_irqsave(&phba->hbalock, flags); 7324 list_splice_init(&phba->elsbuf, &completions); 7325 phba->elsbuf_cnt = 0; 7326 phba->elsbuf_prev_cnt = 0; 7327 spin_unlock_irqrestore(&phba->hbalock, flags); 7328 7329 while (!list_empty(&completions)) { 7330 list_remove_head(&completions, buf_ptr, 7331 struct lpfc_dmabuf, list); 7332 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 7333 kfree(buf_ptr); 7334 } 7335 7336 /* Return any active mbox cmds */ 7337 del_timer_sync(&psli->mbox_tmo); 7338 7339 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 7340 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7341 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 7342 7343 return 1; 7344 } 7345 7346 /** 7347 * lpfc_sli_pcimem_bcopy - SLI memory copy function 7348 * @srcp: Source memory pointer. 7349 * @destp: Destination memory pointer. 7350 * @cnt: Number of words required to be copied. 7351 * 7352 * This function is used for copying data between driver memory 7353 * and the SLI memory. This function also changes the endianness 7354 * of each word if native endianness is different from SLI 7355 * endianness. This function can be called with or without 7356 * lock. 7357 **/ 7358 void 7359 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 7360 { 7361 uint32_t *src = srcp; 7362 uint32_t *dest = destp; 7363 uint32_t ldata; 7364 int i; 7365 7366 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 7367 ldata = *src; 7368 ldata = le32_to_cpu(ldata); 7369 *dest = ldata; 7370 src++; 7371 dest++; 7372 } 7373 } 7374 7375 7376 /** 7377 * lpfc_sli_bemem_bcopy - SLI memory copy function 7378 * @srcp: Source memory pointer. 7379 * @destp: Destination memory pointer. 7380 * @cnt: Number of words required to be copied. 7381 * 7382 * This function is used for copying data between a data structure 7383 * with big endian representation to local endianness. 7384 * This function can be called with or without lock. 7385 **/ 7386 void 7387 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 7388 { 7389 uint32_t *src = srcp; 7390 uint32_t *dest = destp; 7391 uint32_t ldata; 7392 int i; 7393 7394 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 7395 ldata = *src; 7396 ldata = be32_to_cpu(ldata); 7397 *dest = ldata; 7398 src++; 7399 dest++; 7400 } 7401 } 7402 7403 /** 7404 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 7405 * @phba: Pointer to HBA context object. 7406 * @pring: Pointer to driver SLI ring object. 7407 * @mp: Pointer to driver buffer object. 7408 * 7409 * This function is called with no lock held. 7410 * It always return zero after adding the buffer to the postbufq 7411 * buffer list. 7412 **/ 7413 int 7414 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7415 struct lpfc_dmabuf *mp) 7416 { 7417 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 7418 later */ 7419 spin_lock_irq(&phba->hbalock); 7420 list_add_tail(&mp->list, &pring->postbufq); 7421 pring->postbufq_cnt++; 7422 spin_unlock_irq(&phba->hbalock); 7423 return 0; 7424 } 7425 7426 /** 7427 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 7428 * @phba: Pointer to HBA context object. 
7429 * 7430 * When HBQ is enabled, buffers are searched based on tags. This function 7431 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 7432 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 7433 * does not conflict with tags of buffer posted for unsolicited events. 7434 * The function returns the allocated tag. The function is called with 7435 * no locks held. 7436 **/ 7437 uint32_t 7438 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 7439 { 7440 spin_lock_irq(&phba->hbalock); 7441 phba->buffer_tag_count++; 7442 /* 7443 * Always set the QUE_BUFTAG_BIT to distiguish between 7444 * a tag assigned by HBQ. 7445 */ 7446 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 7447 spin_unlock_irq(&phba->hbalock); 7448 return phba->buffer_tag_count; 7449 } 7450 7451 /** 7452 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 7453 * @phba: Pointer to HBA context object. 7454 * @pring: Pointer to driver SLI ring object. 7455 * @tag: Buffer tag. 7456 * 7457 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 7458 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 7459 * iocb is posted to the response ring with the tag of the buffer. 7460 * This function searches the pring->postbufq list using the tag 7461 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 7462 * iocb. If the buffer is found then lpfc_dmabuf object of the 7463 * buffer is returned to the caller else NULL is returned. 7464 * This function is called with no lock held. 7465 **/ 7466 struct lpfc_dmabuf * 7467 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7468 uint32_t tag) 7469 { 7470 struct lpfc_dmabuf *mp, *next_mp; 7471 struct list_head *slp = &pring->postbufq; 7472 7473 /* Search postbufq, from the begining, looking for a match on tag */ 7474 spin_lock_irq(&phba->hbalock); 7475 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7476 if (mp->buffer_tag == tag) { 7477 list_del_init(&mp->list); 7478 pring->postbufq_cnt--; 7479 spin_unlock_irq(&phba->hbalock); 7480 return mp; 7481 } 7482 } 7483 7484 spin_unlock_irq(&phba->hbalock); 7485 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7486 "0402 Cannot find virtual addr for buffer tag on " 7487 "ring %d Data x%lx x%p x%p x%x\n", 7488 pring->ringno, (unsigned long) tag, 7489 slp->next, slp->prev, pring->postbufq_cnt); 7490 7491 return NULL; 7492 } 7493 7494 /** 7495 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 7496 * @phba: Pointer to HBA context object. 7497 * @pring: Pointer to driver SLI ring object. 7498 * @phys: DMA address of the buffer. 7499 * 7500 * This function searches the buffer list using the dma_address 7501 * of unsolicited event to find the driver's lpfc_dmabuf object 7502 * corresponding to the dma_address. The function returns the 7503 * lpfc_dmabuf object if a buffer is found else it returns NULL. 7504 * This function is called by the ct and els unsolicited event 7505 * handlers to get the buffer associated with the unsolicited 7506 * event. 7507 * 7508 * This function is called with no lock held. 
7509 **/ 7510 struct lpfc_dmabuf * 7511 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7512 dma_addr_t phys) 7513 { 7514 struct lpfc_dmabuf *mp, *next_mp; 7515 struct list_head *slp = &pring->postbufq; 7516 7517 /* Search postbufq, from the begining, looking for a match on phys */ 7518 spin_lock_irq(&phba->hbalock); 7519 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 7520 if (mp->phys == phys) { 7521 list_del_init(&mp->list); 7522 pring->postbufq_cnt--; 7523 spin_unlock_irq(&phba->hbalock); 7524 return mp; 7525 } 7526 } 7527 7528 spin_unlock_irq(&phba->hbalock); 7529 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7530 "0410 Cannot find virtual addr for mapped buf on " 7531 "ring %d Data x%llx x%p x%p x%x\n", 7532 pring->ringno, (unsigned long long)phys, 7533 slp->next, slp->prev, pring->postbufq_cnt); 7534 return NULL; 7535 } 7536 7537 /** 7538 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 7539 * @phba: Pointer to HBA context object. 7540 * @cmdiocb: Pointer to driver command iocb object. 7541 * @rspiocb: Pointer to driver response iocb object. 7542 * 7543 * This function is the completion handler for the abort iocbs for 7544 * ELS commands. This function is called from the ELS ring event 7545 * handler with no lock held. This function frees memory resources 7546 * associated with the abort iocb. 7547 **/ 7548 static void 7549 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7550 struct lpfc_iocbq *rspiocb) 7551 { 7552 IOCB_t *irsp = &rspiocb->iocb; 7553 uint16_t abort_iotag, abort_context; 7554 struct lpfc_iocbq *abort_iocb; 7555 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 7556 7557 abort_iocb = NULL; 7558 7559 if (irsp->ulpStatus) { 7560 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 7561 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 7562 7563 spin_lock_irq(&phba->hbalock); 7564 if (phba->sli_rev < LPFC_SLI_REV4) { 7565 if (abort_iotag != 0 && 7566 abort_iotag <= phba->sli.last_iotag) 7567 abort_iocb = 7568 phba->sli.iocbq_lookup[abort_iotag]; 7569 } else 7570 /* For sli4 the abort_tag is the XRI, 7571 * so the abort routine puts the iotag of the iocb 7572 * being aborted in the context field of the abort 7573 * IOCB. 7574 */ 7575 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 7576 7577 /* 7578 * If the iocb is not found in Firmware queue the iocb 7579 * might have completed already. Do not free it again. 7580 */ 7581 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { 7582 if (irsp->un.ulpWord[4] != IOERR_NO_XRI) { 7583 spin_unlock_irq(&phba->hbalock); 7584 lpfc_sli_release_iocbq(phba, cmdiocb); 7585 return; 7586 } 7587 /* For SLI4 the ulpContext field for abort IOCB 7588 * holds the iotag of the IOCB being aborted so 7589 * the local abort_context needs to be reset to 7590 * match the aborted IOCBs ulpContext. 7591 */ 7592 if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4) 7593 abort_context = abort_iocb->iocb.ulpContext; 7594 } 7595 7596 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 7597 "0327 Cannot abort els iocb %p " 7598 "with tag %x context %x, abort status %x, " 7599 "abort code %x\n", 7600 abort_iocb, abort_iotag, abort_context, 7601 irsp->ulpStatus, irsp->un.ulpWord[4]); 7602 /* 7603 * make sure we have the right iocbq before taking it 7604 * off the txcmplq and try to call completion routine. 
7605 */ 7606 if (!abort_iocb || 7607 abort_iocb->iocb.ulpContext != abort_context || 7608 (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0) 7609 spin_unlock_irq(&phba->hbalock); 7610 else if (phba->sli_rev < LPFC_SLI_REV4) { 7611 /* 7612 * leave the SLI4 aborted command on the txcmplq 7613 * list and the command complete WCQE's XB bit 7614 * will tell whether the SGL (XRI) can be released 7615 * immediately or to the aborted SGL list for the 7616 * following abort XRI from the HBA. 7617 */ 7618 list_del_init(&abort_iocb->list); 7619 if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) { 7620 abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q; 7621 pring->txcmplq_cnt--; 7622 } 7623 7624 /* Firmware could still be in progress of DMAing 7625 * payload, so don't free data buffer till after 7626 * a hbeat. 7627 */ 7628 abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE; 7629 abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; 7630 spin_unlock_irq(&phba->hbalock); 7631 7632 abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 7633 abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED; 7634 (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb); 7635 } else 7636 spin_unlock_irq(&phba->hbalock); 7637 } 7638 7639 lpfc_sli_release_iocbq(phba, cmdiocb); 7640 return; 7641 } 7642 7643 /** 7644 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 7645 * @phba: Pointer to HBA context object. 7646 * @cmdiocb: Pointer to driver command iocb object. 7647 * @rspiocb: Pointer to driver response iocb object. 7648 * 7649 * The function is called from SLI ring event handler with no 7650 * lock held. This function is the completion handler for ELS commands 7651 * which are aborted. The function frees memory resources used for 7652 * the aborted ELS commands. 7653 **/ 7654 static void 7655 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7656 struct lpfc_iocbq *rspiocb) 7657 { 7658 IOCB_t *irsp = &rspiocb->iocb; 7659 7660 /* ELS cmd tag <ulpIoTag> completes */ 7661 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 7662 "0139 Ignoring ELS cmd tag x%x completion Data: " 7663 "x%x x%x x%x\n", 7664 irsp->ulpIoTag, irsp->ulpStatus, 7665 irsp->un.ulpWord[4], irsp->ulpTimeout); 7666 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 7667 lpfc_ct_free_iocb(phba, cmdiocb); 7668 else 7669 lpfc_els_free_iocb(phba, cmdiocb); 7670 return; 7671 } 7672 7673 /** 7674 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 7675 * @phba: Pointer to HBA context object. 7676 * @pring: Pointer to driver SLI ring object. 7677 * @cmdiocb: Pointer to driver command iocb object. 7678 * 7679 * This function issues an abort iocb for the provided command iocb down to 7680 * the port. Other than the case the outstanding command iocb is an abort 7681 * request, this function issues abort out unconditionally. This function is 7682 * called with hbalock held. The function returns 0 when it fails due to 7683 * memory allocation failure or when the command iocb is an abort request. 7684 **/ 7685 static int 7686 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7687 struct lpfc_iocbq *cmdiocb) 7688 { 7689 struct lpfc_vport *vport = cmdiocb->vport; 7690 struct lpfc_iocbq *abtsiocbp; 7691 IOCB_t *icmd = NULL; 7692 IOCB_t *iabt = NULL; 7693 int retval; 7694 7695 /* 7696 * There are certain command types we don't want to abort. And we 7697 * don't want to abort commands that are already in the process of 7698 * being aborted. 
7699 */ 7700 icmd = &cmdiocb->iocb; 7701 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 7702 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7703 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7704 return 0; 7705 7706 /* issue ABTS for this IOCB based on iotag */ 7707 abtsiocbp = __lpfc_sli_get_iocbq(phba); 7708 if (abtsiocbp == NULL) 7709 return 0; 7710 7711 /* This signals the response to set the correct status 7712 * before calling the completion handler 7713 */ 7714 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 7715 7716 iabt = &abtsiocbp->iocb; 7717 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 7718 iabt->un.acxri.abortContextTag = icmd->ulpContext; 7719 if (phba->sli_rev == LPFC_SLI_REV4) { 7720 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 7721 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 7722 } 7723 else 7724 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 7725 iabt->ulpLe = 1; 7726 iabt->ulpClass = icmd->ulpClass; 7727 7728 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 7729 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 7730 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 7731 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 7732 7733 if (phba->link_state >= LPFC_LINK_UP) 7734 iabt->ulpCommand = CMD_ABORT_XRI_CN; 7735 else 7736 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 7737 7738 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 7739 7740 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 7741 "0339 Abort xri x%x, original iotag x%x, " 7742 "abort cmd iotag x%x\n", 7743 iabt->un.acxri.abortIoTag, 7744 iabt->un.acxri.abortContextTag, 7745 abtsiocbp->iotag); 7746 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); 7747 7748 if (retval) 7749 __lpfc_sli_release_iocbq(phba, abtsiocbp); 7750 7751 /* 7752 * Caller to this routine should check for IOCB_ERROR 7753 * and handle it properly. This routine no longer removes 7754 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7755 */ 7756 return retval; 7757 } 7758 7759 /** 7760 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 7761 * @phba: Pointer to HBA context object. 7762 * @pring: Pointer to driver SLI ring object. 7763 * @cmdiocb: Pointer to driver command iocb object. 7764 * 7765 * This function issues an abort iocb for the provided command iocb. In case 7766 * of unloading, the abort iocb will not be issued to commands on the ELS 7767 * ring. Instead, the callback function shall be changed to those commands 7768 * so that nothing happens when them finishes. This function is called with 7769 * hbalock held. The function returns 0 when the command iocb is an abort 7770 * request. 7771 **/ 7772 int 7773 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7774 struct lpfc_iocbq *cmdiocb) 7775 { 7776 struct lpfc_vport *vport = cmdiocb->vport; 7777 int retval = IOCB_ERROR; 7778 IOCB_t *icmd = NULL; 7779 7780 /* 7781 * There are certain command types we don't want to abort. And we 7782 * don't want to abort commands that are already in the process of 7783 * being aborted. 7784 */ 7785 icmd = &cmdiocb->iocb; 7786 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 7787 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 7788 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 7789 return 0; 7790 7791 /* 7792 * If we're unloading, don't abort iocb on the ELS ring, but change 7793 * the callback so that nothing happens when it finishes. 
7794 */ 7795 if ((vport->load_flag & FC_UNLOADING) && 7796 (pring->ringno == LPFC_ELS_RING)) { 7797 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 7798 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 7799 else 7800 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 7801 goto abort_iotag_exit; 7802 } 7803 7804 /* Now, we try to issue the abort to the cmdiocb out */ 7805 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 7806 7807 abort_iotag_exit: 7808 /* 7809 * Caller to this routine should check for IOCB_ERROR 7810 * and handle it properly. This routine no longer removes 7811 * iocb off txcmplq and call compl in case of IOCB_ERROR. 7812 */ 7813 return retval; 7814 } 7815 7816 /** 7817 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring 7818 * @phba: Pointer to HBA context object. 7819 * @pring: Pointer to driver SLI ring object. 7820 * 7821 * This function aborts all iocbs in the given ring and frees all the iocb 7822 * objects in txq. This function issues abort iocbs unconditionally for all 7823 * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed 7824 * to complete before the return of this function. The caller is not required 7825 * to hold any locks. 7826 **/ 7827 static void 7828 lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 7829 { 7830 LIST_HEAD(completions); 7831 struct lpfc_iocbq *iocb, *next_iocb; 7832 7833 if (pring->ringno == LPFC_ELS_RING) 7834 lpfc_fabric_abort_hba(phba); 7835 7836 spin_lock_irq(&phba->hbalock); 7837 7838 /* Take off all the iocbs on txq for cancelling */ 7839 list_splice_init(&pring->txq, &completions); 7840 pring->txq_cnt = 0; 7841 7842 /* Next issue ABTS for everything on the txcmplq */ 7843 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 7844 lpfc_sli_abort_iotag_issue(phba, pring, iocb); 7845 7846 spin_unlock_irq(&phba->hbalock); 7847 7848 /* Cancel all the IOCBs from the completions list */ 7849 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 7850 IOERR_SLI_ABORTED); 7851 } 7852 7853 /** 7854 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 7855 * @phba: pointer to lpfc HBA data structure. 7856 * 7857 * This routine will abort all pending and outstanding iocbs to an HBA. 7858 **/ 7859 void 7860 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 7861 { 7862 struct lpfc_sli *psli = &phba->sli; 7863 struct lpfc_sli_ring *pring; 7864 int i; 7865 7866 for (i = 0; i < psli->num_rings; i++) { 7867 pring = &psli->ring[i]; 7868 lpfc_sli_iocb_ring_abort(phba, pring); 7869 } 7870 } 7871 7872 /** 7873 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 7874 * @iocbq: Pointer to driver iocb object. 7875 * @vport: Pointer to driver virtual port object. 7876 * @tgt_id: SCSI ID of the target. 7877 * @lun_id: LUN ID of the scsi device. 7878 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 7879 * 7880 * This function acts as an iocb filter for functions which abort or count 7881 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 7882 * 0 if the filtering criteria is met for the given iocb and will return 7883 * 1 if the filtering criteria is not met. 7884 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 7885 * given iocb is for the SCSI device specified by vport, tgt_id and 7886 * lun_id parameter. 7887 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 7888 * given iocb is for the SCSI target specified by vport and tgt_id 7889 * parameters. 
7890 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 7891 * given iocb is for the SCSI host associated with the given vport. 7892 * This function is called with no locks held. 7893 **/ 7894 static int 7895 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 7896 uint16_t tgt_id, uint64_t lun_id, 7897 lpfc_ctx_cmd ctx_cmd) 7898 { 7899 struct lpfc_scsi_buf *lpfc_cmd; 7900 int rc = 1; 7901 7902 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 7903 return rc; 7904 7905 if (iocbq->vport != vport) 7906 return rc; 7907 7908 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 7909 7910 if (lpfc_cmd->pCmd == NULL) 7911 return rc; 7912 7913 switch (ctx_cmd) { 7914 case LPFC_CTX_LUN: 7915 if ((lpfc_cmd->rdata->pnode) && 7916 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 7917 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 7918 rc = 0; 7919 break; 7920 case LPFC_CTX_TGT: 7921 if ((lpfc_cmd->rdata->pnode) && 7922 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 7923 rc = 0; 7924 break; 7925 case LPFC_CTX_HOST: 7926 rc = 0; 7927 break; 7928 default: 7929 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 7930 __func__, ctx_cmd); 7931 break; 7932 } 7933 7934 return rc; 7935 } 7936 7937 /** 7938 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 7939 * @vport: Pointer to virtual port. 7940 * @tgt_id: SCSI ID of the target. 7941 * @lun_id: LUN ID of the scsi device. 7942 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 7943 * 7944 * This function returns number of FCP commands pending for the vport. 7945 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 7946 * commands pending on the vport associated with SCSI device specified 7947 * by tgt_id and lun_id parameters. 7948 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 7949 * commands pending on the vport associated with SCSI target specified 7950 * by tgt_id parameter. 7951 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 7952 * commands pending on the vport. 7953 * This function returns the number of iocbs which satisfy the filter. 7954 * This function is called without any lock held. 7955 **/ 7956 int 7957 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 7958 lpfc_ctx_cmd ctx_cmd) 7959 { 7960 struct lpfc_hba *phba = vport->phba; 7961 struct lpfc_iocbq *iocbq; 7962 int sum, i; 7963 7964 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 7965 iocbq = phba->sli.iocbq_lookup[i]; 7966 7967 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 7968 ctx_cmd) == 0) 7969 sum++; 7970 } 7971 7972 return sum; 7973 } 7974 7975 /** 7976 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 7977 * @phba: Pointer to HBA context object 7978 * @cmdiocb: Pointer to command iocb object. 7979 * @rspiocb: Pointer to response iocb object. 7980 * 7981 * This function is called when an aborted FCP iocb completes. This 7982 * function is called by the ring event handler with no lock held. 7983 * This function frees the iocb. 7984 **/ 7985 void 7986 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 7987 struct lpfc_iocbq *rspiocb) 7988 { 7989 lpfc_sli_release_iocbq(phba, cmdiocb); 7990 return; 7991 } 7992 7993 /** 7994 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 7995 * @vport: Pointer to virtual port. 7996 * @pring: Pointer to driver SLI ring object. 7997 * @tgt_id: SCSI ID of the target. 
7998 * @lun_id: LUN ID of the scsi device. 7999 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 8000 * 8001 * This function sends an abort command for every SCSI command 8002 * associated with the given virtual port pending on the ring 8003 * filtered by lpfc_sli_validate_fcp_iocb function. 8004 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 8005 * FCP iocbs associated with lun specified by tgt_id and lun_id 8006 * parameters 8007 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 8008 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 8009 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 8010 * FCP iocbs associated with virtual port. 8011 * This function returns number of iocbs it failed to abort. 8012 * This function is called with no locks held. 8013 **/ 8014 int 8015 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 8016 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 8017 { 8018 struct lpfc_hba *phba = vport->phba; 8019 struct lpfc_iocbq *iocbq; 8020 struct lpfc_iocbq *abtsiocb; 8021 IOCB_t *cmd = NULL; 8022 int errcnt = 0, ret_val = 0; 8023 int i; 8024 8025 for (i = 1; i <= phba->sli.last_iotag; i++) { 8026 iocbq = phba->sli.iocbq_lookup[i]; 8027 8028 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 8029 abort_cmd) != 0) 8030 continue; 8031 8032 /* issue ABTS for this IOCB based on iotag */ 8033 abtsiocb = lpfc_sli_get_iocbq(phba); 8034 if (abtsiocb == NULL) { 8035 errcnt++; 8036 continue; 8037 } 8038 8039 cmd = &iocbq->iocb; 8040 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 8041 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 8042 if (phba->sli_rev == LPFC_SLI_REV4) 8043 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 8044 else 8045 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 8046 abtsiocb->iocb.ulpLe = 1; 8047 abtsiocb->iocb.ulpClass = cmd->ulpClass; 8048 abtsiocb->vport = phba->pport; 8049 8050 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 8051 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 8052 if (iocbq->iocb_flag & LPFC_IO_FCP) 8053 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 8054 8055 if (lpfc_is_link_up(phba)) 8056 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 8057 else 8058 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 8059 8060 /* Setup callback routine and issue the command. */ 8061 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 8062 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 8063 abtsiocb, 0); 8064 if (ret_val == IOCB_ERROR) { 8065 lpfc_sli_release_iocbq(phba, abtsiocb); 8066 errcnt++; 8067 continue; 8068 } 8069 } 8070 8071 return errcnt; 8072 } 8073 8074 /** 8075 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 8076 * @phba: Pointer to HBA context object. 8077 * @cmdiocbq: Pointer to command iocb. 8078 * @rspiocbq: Pointer to response iocb. 8079 * 8080 * This function is the completion handler for iocbs issued using 8081 * lpfc_sli_issue_iocb_wait function. This function is called by the 8082 * ring event handler function without any lock held. This function 8083 * can be called from both worker thread context and interrupt 8084 * context. This function also can be called from other thread which 8085 * cleans up the SLI layer objects. 
8086 * This function copy the contents of the response iocb to the 8087 * response iocb memory object provided by the caller of 8088 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 8089 * sleeps for the iocb completion. 8090 **/ 8091 static void 8092 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 8093 struct lpfc_iocbq *cmdiocbq, 8094 struct lpfc_iocbq *rspiocbq) 8095 { 8096 wait_queue_head_t *pdone_q; 8097 unsigned long iflags; 8098 struct lpfc_scsi_buf *lpfc_cmd; 8099 8100 spin_lock_irqsave(&phba->hbalock, iflags); 8101 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 8102 if (cmdiocbq->context2 && rspiocbq) 8103 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 8104 &rspiocbq->iocb, sizeof(IOCB_t)); 8105 8106 /* Set the exchange busy flag for task management commands */ 8107 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 8108 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 8109 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 8110 cur_iocbq); 8111 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 8112 } 8113 8114 pdone_q = cmdiocbq->context_un.wait_queue; 8115 if (pdone_q) 8116 wake_up(pdone_q); 8117 spin_unlock_irqrestore(&phba->hbalock, iflags); 8118 return; 8119 } 8120 8121 /** 8122 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 8123 * @phba: Pointer to HBA context object.. 8124 * @piocbq: Pointer to command iocb. 8125 * @flag: Flag to test. 8126 * 8127 * This routine grabs the hbalock and then test the iocb_flag to 8128 * see if the passed in flag is set. 8129 * Returns: 8130 * 1 if flag is set. 8131 * 0 if flag is not set. 8132 **/ 8133 static int 8134 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 8135 struct lpfc_iocbq *piocbq, uint32_t flag) 8136 { 8137 unsigned long iflags; 8138 int ret; 8139 8140 spin_lock_irqsave(&phba->hbalock, iflags); 8141 ret = piocbq->iocb_flag & flag; 8142 spin_unlock_irqrestore(&phba->hbalock, iflags); 8143 return ret; 8144 8145 } 8146 8147 /** 8148 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 8149 * @phba: Pointer to HBA context object.. 8150 * @pring: Pointer to sli ring. 8151 * @piocb: Pointer to command iocb. 8152 * @prspiocbq: Pointer to response iocb. 8153 * @timeout: Timeout in number of seconds. 8154 * 8155 * This function issues the iocb to firmware and waits for the 8156 * iocb to complete. If the iocb command is not 8157 * completed within timeout seconds, it returns IOCB_TIMEDOUT. 8158 * Caller should not free the iocb resources if this function 8159 * returns IOCB_TIMEDOUT. 8160 * The function waits for the iocb completion using an 8161 * non-interruptible wait. 8162 * This function will sleep while waiting for iocb completion. 8163 * So, this function should not be called from any context which 8164 * does not allow sleeping. Due to the same reason, this function 8165 * cannot be called with interrupt disabled. 8166 * This function assumes that the iocb completions occur while 8167 * this function sleep. So, this function cannot be called from 8168 * the thread which process iocb completion for this ring. 8169 * This function clears the iocb_flag of the iocb object before 8170 * issuing the iocb and the iocb completion handler sets this 8171 * flag and wakes this thread when the iocb completes. 8172 * The contents of the response iocb will be copied to prspiocbq 8173 * by the completion handler when the command completes. 8174 * This function returns IOCB_SUCCESS when success. 8175 * This function is called with no lock held. 
8176 **/ 8177 int 8178 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 8179 uint32_t ring_number, 8180 struct lpfc_iocbq *piocb, 8181 struct lpfc_iocbq *prspiocbq, 8182 uint32_t timeout) 8183 { 8184 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 8185 long timeleft, timeout_req = 0; 8186 int retval = IOCB_SUCCESS; 8187 uint32_t creg_val; 8188 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 8189 /* 8190 * If the caller has provided a response iocbq buffer, then context2 8191 * is NULL or its an error. 8192 */ 8193 if (prspiocbq) { 8194 if (piocb->context2) 8195 return IOCB_ERROR; 8196 piocb->context2 = prspiocbq; 8197 } 8198 8199 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 8200 piocb->context_un.wait_queue = &done_q; 8201 piocb->iocb_flag &= ~LPFC_IO_WAKE; 8202 8203 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 8204 creg_val = readl(phba->HCregaddr); 8205 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 8206 writel(creg_val, phba->HCregaddr); 8207 readl(phba->HCregaddr); /* flush */ 8208 } 8209 8210 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 8211 SLI_IOCB_RET_IOCB); 8212 if (retval == IOCB_SUCCESS) { 8213 timeout_req = timeout * HZ; 8214 timeleft = wait_event_timeout(done_q, 8215 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 8216 timeout_req); 8217 8218 if (piocb->iocb_flag & LPFC_IO_WAKE) { 8219 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8220 "0331 IOCB wake signaled\n"); 8221 } else if (timeleft == 0) { 8222 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8223 "0338 IOCB wait timeout error - no " 8224 "wake response Data x%x\n", timeout); 8225 retval = IOCB_TIMEDOUT; 8226 } else { 8227 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8228 "0330 IOCB wake NOT set, " 8229 "Data x%x x%lx\n", 8230 timeout, (timeleft / jiffies)); 8231 retval = IOCB_TIMEDOUT; 8232 } 8233 } else if (retval == IOCB_BUSY) { 8234 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8235 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 8236 phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt); 8237 return retval; 8238 } else { 8239 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8240 "0332 IOCB wait issue failed, Data x%x\n", 8241 retval); 8242 retval = IOCB_ERROR; 8243 } 8244 8245 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 8246 creg_val = readl(phba->HCregaddr); 8247 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 8248 writel(creg_val, phba->HCregaddr); 8249 readl(phba->HCregaddr); /* flush */ 8250 } 8251 8252 if (prspiocbq) 8253 piocb->context2 = NULL; 8254 8255 piocb->context_un.wait_queue = NULL; 8256 piocb->iocb_cmpl = NULL; 8257 return retval; 8258 } 8259 8260 /** 8261 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 8262 * @phba: Pointer to HBA context object. 8263 * @pmboxq: Pointer to driver mailbox object. 8264 * @timeout: Timeout in number of seconds. 8265 * 8266 * This function issues the mailbox to firmware and waits for the 8267 * mailbox command to complete. If the mailbox command is not 8268 * completed within timeout seconds, it returns MBX_TIMEOUT. 8269 * The function waits for the mailbox completion using an 8270 * interruptible wait. If the thread is woken up due to a 8271 * signal, MBX_TIMEOUT error is returned to the caller. Caller 8272 * should not free the mailbox resources, if this function returns 8273 * MBX_TIMEOUT. 8274 * This function will sleep while waiting for mailbox completion. 8275 * So, this function should not be called from any context which 8276 * does not allow sleeping. Due to the same reason, this function 8277 * cannot be called with interrupt disabled. 
8278 * This function assumes that the mailbox completion occurs while 8279 * this function sleep. So, this function cannot be called from 8280 * the worker thread which processes mailbox completion. 8281 * This function is called in the context of HBA management 8282 * applications. 8283 * This function returns MBX_SUCCESS when successful. 8284 * This function is called with no lock held. 8285 **/ 8286 int 8287 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 8288 uint32_t timeout) 8289 { 8290 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 8291 int retval; 8292 unsigned long flag; 8293 8294 /* The caller must leave context1 empty. */ 8295 if (pmboxq->context1) 8296 return MBX_NOT_FINISHED; 8297 8298 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 8299 /* setup wake call as IOCB callback */ 8300 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 8301 /* setup context field to pass wait_queue pointer to wake function */ 8302 pmboxq->context1 = &done_q; 8303 8304 /* now issue the command */ 8305 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 8306 8307 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 8308 wait_event_interruptible_timeout(done_q, 8309 pmboxq->mbox_flag & LPFC_MBX_WAKE, 8310 timeout * HZ); 8311 8312 spin_lock_irqsave(&phba->hbalock, flag); 8313 pmboxq->context1 = NULL; 8314 /* 8315 * if LPFC_MBX_WAKE flag is set the mailbox is completed 8316 * else do not free the resources. 8317 */ 8318 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 8319 retval = MBX_SUCCESS; 8320 lpfc_sli4_swap_str(phba, pmboxq); 8321 } else { 8322 retval = MBX_TIMEOUT; 8323 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 8324 } 8325 spin_unlock_irqrestore(&phba->hbalock, flag); 8326 } 8327 8328 return retval; 8329 } 8330 8331 /** 8332 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 8333 * @phba: Pointer to HBA context. 8334 * 8335 * This function is called to shutdown the driver's mailbox sub-system. 8336 * It first marks the mailbox sub-system is in a block state to prevent 8337 * the asynchronous mailbox command from issued off the pending mailbox 8338 * command queue. If the mailbox command sub-system shutdown is due to 8339 * HBA error conditions such as EEH or ERATT, this routine shall invoke 8340 * the mailbox sub-system flush routine to forcefully bring down the 8341 * mailbox sub-system. Otherwise, if it is due to normal condition (such 8342 * as with offline or HBA function reset), this routine will wait for the 8343 * outstanding mailbox command to complete before invoking the mailbox 8344 * sub-system flush routine to gracefully bring down mailbox sub-system. 8345 **/ 8346 void 8347 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 8348 { 8349 struct lpfc_sli *psli = &phba->sli; 8350 uint8_t actcmd = MBX_HEARTBEAT; 8351 unsigned long timeout; 8352 8353 spin_lock_irq(&phba->hbalock); 8354 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8355 spin_unlock_irq(&phba->hbalock); 8356 8357 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8358 spin_lock_irq(&phba->hbalock); 8359 if (phba->sli.mbox_active) 8360 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 8361 spin_unlock_irq(&phba->hbalock); 8362 /* Determine how long we might wait for the active mailbox 8363 * command to be gracefully completed by firmware. 
8364 */ 8365 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 8366 1000) + jiffies; 8367 while (phba->sli.mbox_active) { 8368 /* Check active mailbox complete status every 2ms */ 8369 msleep(2); 8370 if (time_after(jiffies, timeout)) 8371 /* Timeout, let the mailbox flush routine to 8372 * forcefully release active mailbox command 8373 */ 8374 break; 8375 } 8376 } 8377 lpfc_sli_mbox_sys_flush(phba); 8378 } 8379 8380 /** 8381 * lpfc_sli_eratt_read - read sli-3 error attention events 8382 * @phba: Pointer to HBA context. 8383 * 8384 * This function is called to read the SLI3 device error attention registers 8385 * for possible error attention events. The caller must hold the hostlock 8386 * with spin_lock_irq(). 8387 * 8388 * This fucntion returns 1 when there is Error Attention in the Host Attention 8389 * Register and returns 0 otherwise. 8390 **/ 8391 static int 8392 lpfc_sli_eratt_read(struct lpfc_hba *phba) 8393 { 8394 uint32_t ha_copy; 8395 8396 /* Read chip Host Attention (HA) register */ 8397 ha_copy = readl(phba->HAregaddr); 8398 if (ha_copy & HA_ERATT) { 8399 /* Read host status register to retrieve error event */ 8400 lpfc_sli_read_hs(phba); 8401 8402 /* Check if there is a deferred error condition is active */ 8403 if ((HS_FFER1 & phba->work_hs) && 8404 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8405 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 8406 phba->hba_flag |= DEFER_ERATT; 8407 /* Clear all interrupt enable conditions */ 8408 writel(0, phba->HCregaddr); 8409 readl(phba->HCregaddr); 8410 } 8411 8412 /* Set the driver HA work bitmap */ 8413 phba->work_ha |= HA_ERATT; 8414 /* Indicate polling handles this ERATT */ 8415 phba->hba_flag |= HBA_ERATT_HANDLED; 8416 return 1; 8417 } 8418 return 0; 8419 } 8420 8421 /** 8422 * lpfc_sli4_eratt_read - read sli-4 error attention events 8423 * @phba: Pointer to HBA context. 8424 * 8425 * This function is called to read the SLI4 device error attention registers 8426 * for possible error attention events. The caller must hold the hostlock 8427 * with spin_lock_irq(). 8428 * 8429 * This fucntion returns 1 when there is Error Attention in the Host Attention 8430 * Register and returns 0 otherwise. 8431 **/ 8432 static int 8433 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 8434 { 8435 uint32_t uerr_sta_hi, uerr_sta_lo; 8436 uint32_t if_type, portsmphr; 8437 struct lpfc_register portstat_reg; 8438 8439 /* 8440 * For now, use the SLI4 device internal unrecoverable error 8441 * registers for error attention. This can be changed later. 
8442 */ 8443 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 8444 switch (if_type) { 8445 case LPFC_SLI_INTF_IF_TYPE_0: 8446 uerr_sta_lo = readl(phba->sli4_hba.u.if_type0.UERRLOregaddr); 8447 uerr_sta_hi = readl(phba->sli4_hba.u.if_type0.UERRHIregaddr); 8448 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 8449 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 8450 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8451 "1423 HBA Unrecoverable error: " 8452 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 8453 "ue_mask_lo_reg=0x%x, " 8454 "ue_mask_hi_reg=0x%x\n", 8455 uerr_sta_lo, uerr_sta_hi, 8456 phba->sli4_hba.ue_mask_lo, 8457 phba->sli4_hba.ue_mask_hi); 8458 phba->work_status[0] = uerr_sta_lo; 8459 phba->work_status[1] = uerr_sta_hi; 8460 phba->work_ha |= HA_ERATT; 8461 phba->hba_flag |= HBA_ERATT_HANDLED; 8462 return 1; 8463 } 8464 break; 8465 case LPFC_SLI_INTF_IF_TYPE_2: 8466 portstat_reg.word0 = 8467 readl(phba->sli4_hba.u.if_type2.STATUSregaddr); 8468 portsmphr = readl(phba->sli4_hba.PSMPHRregaddr); 8469 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 8470 phba->work_status[0] = 8471 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 8472 phba->work_status[1] = 8473 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 8474 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8475 "2885 Port Error Detected: " 8476 "port status reg 0x%x, " 8477 "port smphr reg 0x%x, " 8478 "error 1=0x%x, error 2=0x%x\n", 8479 portstat_reg.word0, 8480 portsmphr, 8481 phba->work_status[0], 8482 phba->work_status[1]); 8483 phba->work_ha |= HA_ERATT; 8484 phba->hba_flag |= HBA_ERATT_HANDLED; 8485 return 1; 8486 } 8487 break; 8488 case LPFC_SLI_INTF_IF_TYPE_1: 8489 default: 8490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8491 "2886 HBA Error Attention on unsupported " 8492 "if type %d.", if_type); 8493 return 1; 8494 } 8495 8496 return 0; 8497 } 8498 8499 /** 8500 * lpfc_sli_check_eratt - check error attention events 8501 * @phba: Pointer to HBA context. 8502 * 8503 * This function is called from timer soft interrupt context to check HBA's 8504 * error attention register bit for error attention events. 8505 * 8506 * This fucntion returns 1 when there is Error Attention in the Host Attention 8507 * Register and returns 0 otherwise. 8508 **/ 8509 int 8510 lpfc_sli_check_eratt(struct lpfc_hba *phba) 8511 { 8512 uint32_t ha_copy; 8513 8514 /* If somebody is waiting to handle an eratt, don't process it 8515 * here. The brdkill function will do this. 
8516 */ 8517 if (phba->link_flag & LS_IGNORE_ERATT) 8518 return 0; 8519 8520 /* Check if interrupt handler handles this ERATT */ 8521 spin_lock_irq(&phba->hbalock); 8522 if (phba->hba_flag & HBA_ERATT_HANDLED) { 8523 /* Interrupt handler has handled ERATT */ 8524 spin_unlock_irq(&phba->hbalock); 8525 return 0; 8526 } 8527 8528 /* 8529 * If there is deferred error attention, do not check for error 8530 * attention 8531 */ 8532 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8533 spin_unlock_irq(&phba->hbalock); 8534 return 0; 8535 } 8536 8537 /* If PCI channel is offline, don't process it */ 8538 if (unlikely(pci_channel_offline(phba->pcidev))) { 8539 spin_unlock_irq(&phba->hbalock); 8540 return 0; 8541 } 8542 8543 switch (phba->sli_rev) { 8544 case LPFC_SLI_REV2: 8545 case LPFC_SLI_REV3: 8546 /* Read chip Host Attention (HA) register */ 8547 ha_copy = lpfc_sli_eratt_read(phba); 8548 break; 8549 case LPFC_SLI_REV4: 8550 /* Read device Uncoverable Error (UERR) registers */ 8551 ha_copy = lpfc_sli4_eratt_read(phba); 8552 break; 8553 default: 8554 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8555 "0299 Invalid SLI revision (%d)\n", 8556 phba->sli_rev); 8557 ha_copy = 0; 8558 break; 8559 } 8560 spin_unlock_irq(&phba->hbalock); 8561 8562 return ha_copy; 8563 } 8564 8565 /** 8566 * lpfc_intr_state_check - Check device state for interrupt handling 8567 * @phba: Pointer to HBA context. 8568 * 8569 * This inline routine checks whether a device or its PCI slot is in a state 8570 * that the interrupt should be handled. 8571 * 8572 * This function returns 0 if the device or the PCI slot is in a state that 8573 * interrupt should be handled, otherwise -EIO. 8574 */ 8575 static inline int 8576 lpfc_intr_state_check(struct lpfc_hba *phba) 8577 { 8578 /* If the pci channel is offline, ignore all the interrupts */ 8579 if (unlikely(pci_channel_offline(phba->pcidev))) 8580 return -EIO; 8581 8582 /* Update device level interrupt statistics */ 8583 phba->sli.slistat.sli_intr++; 8584 8585 /* Ignore all interrupts during initialization. */ 8586 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8587 return -EIO; 8588 8589 return 0; 8590 } 8591 8592 /** 8593 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 8594 * @irq: Interrupt number. 8595 * @dev_id: The device context pointer. 8596 * 8597 * This function is directly called from the PCI layer as an interrupt 8598 * service routine when device with SLI-3 interface spec is enabled with 8599 * MSI-X multi-message interrupt mode and there are slow-path events in 8600 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 8601 * interrupt mode, this function is called as part of the device-level 8602 * interrupt handler. When the PCI slot is in error recovery or the HBA 8603 * is undergoing initialization, the interrupt handler will not process 8604 * the interrupt. The link attention and ELS ring attention events are 8605 * handled by the worker thread. The interrupt handler signals the worker 8606 * thread and returns for these events. This function is called without 8607 * any lock held. It gets the hbalock to access and update SLI data 8608 * structures. 8609 * 8610 * This function returns IRQ_HANDLED when interrupt is handled else it 8611 * returns IRQ_NONE. 
8612 **/ 8613 irqreturn_t 8614 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 8615 { 8616 struct lpfc_hba *phba; 8617 uint32_t ha_copy, hc_copy; 8618 uint32_t work_ha_copy; 8619 unsigned long status; 8620 unsigned long iflag; 8621 uint32_t control; 8622 8623 MAILBOX_t *mbox, *pmbox; 8624 struct lpfc_vport *vport; 8625 struct lpfc_nodelist *ndlp; 8626 struct lpfc_dmabuf *mp; 8627 LPFC_MBOXQ_t *pmb; 8628 int rc; 8629 8630 /* 8631 * Get the driver's phba structure from the dev_id and 8632 * assume the HBA is not interrupting. 8633 */ 8634 phba = (struct lpfc_hba *)dev_id; 8635 8636 if (unlikely(!phba)) 8637 return IRQ_NONE; 8638 8639 /* 8640 * Stuff needs to be attented to when this function is invoked as an 8641 * individual interrupt handler in MSI-X multi-message interrupt mode 8642 */ 8643 if (phba->intr_type == MSIX) { 8644 /* Check device state for handling interrupt */ 8645 if (lpfc_intr_state_check(phba)) 8646 return IRQ_NONE; 8647 /* Need to read HA REG for slow-path events */ 8648 spin_lock_irqsave(&phba->hbalock, iflag); 8649 ha_copy = readl(phba->HAregaddr); 8650 /* If somebody is waiting to handle an eratt don't process it 8651 * here. The brdkill function will do this. 8652 */ 8653 if (phba->link_flag & LS_IGNORE_ERATT) 8654 ha_copy &= ~HA_ERATT; 8655 /* Check the need for handling ERATT in interrupt handler */ 8656 if (ha_copy & HA_ERATT) { 8657 if (phba->hba_flag & HBA_ERATT_HANDLED) 8658 /* ERATT polling has handled ERATT */ 8659 ha_copy &= ~HA_ERATT; 8660 else 8661 /* Indicate interrupt handler handles ERATT */ 8662 phba->hba_flag |= HBA_ERATT_HANDLED; 8663 } 8664 8665 /* 8666 * If there is deferred error attention, do not check for any 8667 * interrupt. 8668 */ 8669 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8670 spin_unlock_irqrestore(&phba->hbalock, iflag); 8671 return IRQ_NONE; 8672 } 8673 8674 /* Clear up only attention source related to slow-path */ 8675 hc_copy = readl(phba->HCregaddr); 8676 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 8677 HC_LAINT_ENA | HC_ERINT_ENA), 8678 phba->HCregaddr); 8679 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 8680 phba->HAregaddr); 8681 writel(hc_copy, phba->HCregaddr); 8682 readl(phba->HAregaddr); /* flush */ 8683 spin_unlock_irqrestore(&phba->hbalock, iflag); 8684 } else 8685 ha_copy = phba->ha_copy; 8686 8687 work_ha_copy = ha_copy & phba->work_ha_mask; 8688 8689 if (work_ha_copy) { 8690 if (work_ha_copy & HA_LATT) { 8691 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 8692 /* 8693 * Turn off Link Attention interrupts 8694 * until CLEAR_LA done 8695 */ 8696 spin_lock_irqsave(&phba->hbalock, iflag); 8697 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 8698 control = readl(phba->HCregaddr); 8699 control &= ~HC_LAINT_ENA; 8700 writel(control, phba->HCregaddr); 8701 readl(phba->HCregaddr); /* flush */ 8702 spin_unlock_irqrestore(&phba->hbalock, iflag); 8703 } 8704 else 8705 work_ha_copy &= ~HA_LATT; 8706 } 8707 8708 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 8709 /* 8710 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 8711 * the only slow ring. 
8712 */ 8713 status = (work_ha_copy & 8714 (HA_RXMASK << (4*LPFC_ELS_RING))); 8715 status >>= (4*LPFC_ELS_RING); 8716 if (status & HA_RXMASK) { 8717 spin_lock_irqsave(&phba->hbalock, iflag); 8718 control = readl(phba->HCregaddr); 8719 8720 lpfc_debugfs_slow_ring_trc(phba, 8721 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 8722 control, status, 8723 (uint32_t)phba->sli.slistat.sli_intr); 8724 8725 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 8726 lpfc_debugfs_slow_ring_trc(phba, 8727 "ISR Disable ring:" 8728 "pwork:x%x hawork:x%x wait:x%x", 8729 phba->work_ha, work_ha_copy, 8730 (uint32_t)((unsigned long) 8731 &phba->work_waitq)); 8732 8733 control &= 8734 ~(HC_R0INT_ENA << LPFC_ELS_RING); 8735 writel(control, phba->HCregaddr); 8736 readl(phba->HCregaddr); /* flush */ 8737 } 8738 else { 8739 lpfc_debugfs_slow_ring_trc(phba, 8740 "ISR slow ring: pwork:" 8741 "x%x hawork:x%x wait:x%x", 8742 phba->work_ha, work_ha_copy, 8743 (uint32_t)((unsigned long) 8744 &phba->work_waitq)); 8745 } 8746 spin_unlock_irqrestore(&phba->hbalock, iflag); 8747 } 8748 } 8749 spin_lock_irqsave(&phba->hbalock, iflag); 8750 if (work_ha_copy & HA_ERATT) { 8751 lpfc_sli_read_hs(phba); 8752 /* 8753 * Check if there is a deferred error condition 8754 * is active 8755 */ 8756 if ((HS_FFER1 & phba->work_hs) && 8757 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 8758 HS_FFER6 | HS_FFER7 | HS_FFER8) & 8759 phba->work_hs)) { 8760 phba->hba_flag |= DEFER_ERATT; 8761 /* Clear all interrupt enable conditions */ 8762 writel(0, phba->HCregaddr); 8763 readl(phba->HCregaddr); 8764 } 8765 } 8766 8767 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 8768 pmb = phba->sli.mbox_active; 8769 pmbox = &pmb->u.mb; 8770 mbox = phba->mbox; 8771 vport = pmb->vport; 8772 8773 /* First check out the status word */ 8774 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 8775 if (pmbox->mbxOwner != OWN_HOST) { 8776 spin_unlock_irqrestore(&phba->hbalock, iflag); 8777 /* 8778 * Stray Mailbox Interrupt, mbxCommand <cmd> 8779 * mbxStatus <status> 8780 */ 8781 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8782 LOG_SLI, 8783 "(%d):0304 Stray Mailbox " 8784 "Interrupt mbxCommand x%x " 8785 "mbxStatus x%x\n", 8786 (vport ? vport->vpi : 0), 8787 pmbox->mbxCommand, 8788 pmbox->mbxStatus); 8789 /* clear mailbox attention bit */ 8790 work_ha_copy &= ~HA_MBATT; 8791 } else { 8792 phba->sli.mbox_active = NULL; 8793 spin_unlock_irqrestore(&phba->hbalock, iflag); 8794 phba->last_completion_time = jiffies; 8795 del_timer(&phba->sli.mbox_tmo); 8796 if (pmb->mbox_cmpl) { 8797 lpfc_sli_pcimem_bcopy(mbox, pmbox, 8798 MAILBOX_CMD_SIZE); 8799 if (pmb->out_ext_byte_len && 8800 pmb->context2) 8801 lpfc_sli_pcimem_bcopy( 8802 phba->mbox_ext, 8803 pmb->context2, 8804 pmb->out_ext_byte_len); 8805 } 8806 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 8807 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 8808 8809 lpfc_debugfs_disc_trc(vport, 8810 LPFC_DISC_TRC_MBOX_VPORT, 8811 "MBOX dflt rpi: : " 8812 "status:x%x rpi:x%x", 8813 (uint32_t)pmbox->mbxStatus, 8814 pmbox->un.varWords[0], 0); 8815 8816 if (!pmbox->mbxStatus) { 8817 mp = (struct lpfc_dmabuf *) 8818 (pmb->context1); 8819 ndlp = (struct lpfc_nodelist *) 8820 pmb->context2; 8821 8822 /* Reg_LOGIN of dflt RPI was 8823 * successful. new lets get 8824 * rid of the RPI using the 8825 * same mbox buffer. 
8826 */ 8827 lpfc_unreg_login(phba, 8828 vport->vpi, 8829 pmbox->un.varWords[0], 8830 pmb); 8831 pmb->mbox_cmpl = 8832 lpfc_mbx_cmpl_dflt_rpi; 8833 pmb->context1 = mp; 8834 pmb->context2 = ndlp; 8835 pmb->vport = vport; 8836 rc = lpfc_sli_issue_mbox(phba, 8837 pmb, 8838 MBX_NOWAIT); 8839 if (rc != MBX_BUSY) 8840 lpfc_printf_log(phba, 8841 KERN_ERR, 8842 LOG_MBOX | LOG_SLI, 8843 "0350 rc should have" 8844 "been MBX_BUSY\n"); 8845 if (rc != MBX_NOT_FINISHED) 8846 goto send_current_mbox; 8847 } 8848 } 8849 spin_lock_irqsave( 8850 &phba->pport->work_port_lock, 8851 iflag); 8852 phba->pport->work_port_events &= 8853 ~WORKER_MBOX_TMO; 8854 spin_unlock_irqrestore( 8855 &phba->pport->work_port_lock, 8856 iflag); 8857 lpfc_mbox_cmpl_put(phba, pmb); 8858 } 8859 } else 8860 spin_unlock_irqrestore(&phba->hbalock, iflag); 8861 8862 if ((work_ha_copy & HA_MBATT) && 8863 (phba->sli.mbox_active == NULL)) { 8864 send_current_mbox: 8865 /* Process next mailbox command if there is one */ 8866 do { 8867 rc = lpfc_sli_issue_mbox(phba, NULL, 8868 MBX_NOWAIT); 8869 } while (rc == MBX_NOT_FINISHED); 8870 if (rc != MBX_SUCCESS) 8871 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 8872 LOG_SLI, "0349 rc should be " 8873 "MBX_SUCCESS\n"); 8874 } 8875 8876 spin_lock_irqsave(&phba->hbalock, iflag); 8877 phba->work_ha |= work_ha_copy; 8878 spin_unlock_irqrestore(&phba->hbalock, iflag); 8879 lpfc_worker_wake_up(phba); 8880 } 8881 return IRQ_HANDLED; 8882 8883 } /* lpfc_sli_sp_intr_handler */ 8884 8885 /** 8886 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 8887 * @irq: Interrupt number. 8888 * @dev_id: The device context pointer. 8889 * 8890 * This function is directly called from the PCI layer as an interrupt 8891 * service routine when device with SLI-3 interface spec is enabled with 8892 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 8893 * ring event in the HBA. However, when the device is enabled with either 8894 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 8895 * device-level interrupt handler. When the PCI slot is in error recovery 8896 * or the HBA is undergoing initialization, the interrupt handler will not 8897 * process the interrupt. The SCSI FCP fast-path ring event are handled in 8898 * the intrrupt context. This function is called without any lock held. 8899 * It gets the hbalock to access and update SLI data structures. 8900 * 8901 * This function returns IRQ_HANDLED when interrupt is handled else it 8902 * returns IRQ_NONE. 8903 **/ 8904 irqreturn_t 8905 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 8906 { 8907 struct lpfc_hba *phba; 8908 uint32_t ha_copy; 8909 unsigned long status; 8910 unsigned long iflag; 8911 8912 /* Get the driver's phba structure from the dev_id and 8913 * assume the HBA is not interrupting. 8914 */ 8915 phba = (struct lpfc_hba *) dev_id; 8916 8917 if (unlikely(!phba)) 8918 return IRQ_NONE; 8919 8920 /* 8921 * Stuff needs to be attented to when this function is invoked as an 8922 * individual interrupt handler in MSI-X multi-message interrupt mode 8923 */ 8924 if (phba->intr_type == MSIX) { 8925 /* Check device state for handling interrupt */ 8926 if (lpfc_intr_state_check(phba)) 8927 return IRQ_NONE; 8928 /* Need to read HA REG for FCP ring and other ring events */ 8929 ha_copy = readl(phba->HAregaddr); 8930 /* Clear up only attention source related to fast-path */ 8931 spin_lock_irqsave(&phba->hbalock, iflag); 8932 /* 8933 * If there is deferred error attention, do not check for 8934 * any interrupt. 
8935 */ 8936 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8937 spin_unlock_irqrestore(&phba->hbalock, iflag); 8938 return IRQ_NONE; 8939 } 8940 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 8941 phba->HAregaddr); 8942 readl(phba->HAregaddr); /* flush */ 8943 spin_unlock_irqrestore(&phba->hbalock, iflag); 8944 } else 8945 ha_copy = phba->ha_copy; 8946 8947 /* 8948 * Process all events on FCP ring. Take the optimized path for FCP IO. 8949 */ 8950 ha_copy &= ~(phba->work_ha_mask); 8951 8952 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 8953 status >>= (4*LPFC_FCP_RING); 8954 if (status & HA_RXMASK) 8955 lpfc_sli_handle_fast_ring_event(phba, 8956 &phba->sli.ring[LPFC_FCP_RING], 8957 status); 8958 8959 if (phba->cfg_multi_ring_support == 2) { 8960 /* 8961 * Process all events on extra ring. Take the optimized path 8962 * for extra ring IO. 8963 */ 8964 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 8965 status >>= (4*LPFC_EXTRA_RING); 8966 if (status & HA_RXMASK) { 8967 lpfc_sli_handle_fast_ring_event(phba, 8968 &phba->sli.ring[LPFC_EXTRA_RING], 8969 status); 8970 } 8971 } 8972 return IRQ_HANDLED; 8973 } /* lpfc_sli_fp_intr_handler */ 8974 8975 /** 8976 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 8977 * @irq: Interrupt number. 8978 * @dev_id: The device context pointer. 8979 * 8980 * This function is the HBA device-level interrupt handler to device with 8981 * SLI-3 interface spec, called from the PCI layer when either MSI or 8982 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 8983 * requires driver attention. This function invokes the slow-path interrupt 8984 * attention handling function and fast-path interrupt attention handling 8985 * function in turn to process the relevant HBA attention events. This 8986 * function is called without any lock held. It gets the hbalock to access 8987 * and update SLI data structures. 8988 * 8989 * This function returns IRQ_HANDLED when interrupt is handled, else it 8990 * returns IRQ_NONE. 8991 **/ 8992 irqreturn_t 8993 lpfc_sli_intr_handler(int irq, void *dev_id) 8994 { 8995 struct lpfc_hba *phba; 8996 irqreturn_t sp_irq_rc, fp_irq_rc; 8997 unsigned long status1, status2; 8998 uint32_t hc_copy; 8999 9000 /* 9001 * Get the driver's phba structure from the dev_id and 9002 * assume the HBA is not interrupting. 9003 */ 9004 phba = (struct lpfc_hba *) dev_id; 9005 9006 if (unlikely(!phba)) 9007 return IRQ_NONE; 9008 9009 /* Check device state for handling interrupt */ 9010 if (lpfc_intr_state_check(phba)) 9011 return IRQ_NONE; 9012 9013 spin_lock(&phba->hbalock); 9014 phba->ha_copy = readl(phba->HAregaddr); 9015 if (unlikely(!phba->ha_copy)) { 9016 spin_unlock(&phba->hbalock); 9017 return IRQ_NONE; 9018 } else if (phba->ha_copy & HA_ERATT) { 9019 if (phba->hba_flag & HBA_ERATT_HANDLED) 9020 /* ERATT polling has handled ERATT */ 9021 phba->ha_copy &= ~HA_ERATT; 9022 else 9023 /* Indicate interrupt handler handles ERATT */ 9024 phba->hba_flag |= HBA_ERATT_HANDLED; 9025 } 9026 9027 /* 9028 * If there is deferred error attention, do not check for any interrupt. 
9029 */ 9030 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 9031 spin_unlock(&phba->hbalock); 9032 return IRQ_NONE; 9033 } 9034 9035 /* Clear attention sources except link and error attentions */ 9036 hc_copy = readl(phba->HCregaddr); 9037 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 9038 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 9039 phba->HCregaddr); 9040 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 9041 writel(hc_copy, phba->HCregaddr); 9042 readl(phba->HAregaddr); /* flush */ 9043 spin_unlock(&phba->hbalock); 9044 9045 /* 9046 * Invokes slow-path host attention interrupt handling as appropriate. 9047 */ 9048 9049 /* status of events with mailbox and link attention */ 9050 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 9051 9052 /* status of events with ELS ring */ 9053 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 9054 status2 >>= (4*LPFC_ELS_RING); 9055 9056 if (status1 || (status2 & HA_RXMASK)) 9057 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 9058 else 9059 sp_irq_rc = IRQ_NONE; 9060 9061 /* 9062 * Invoke fast-path host attention interrupt handling as appropriate. 9063 */ 9064 9065 /* status of events with FCP ring */ 9066 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 9067 status1 >>= (4*LPFC_FCP_RING); 9068 9069 /* status of events with extra ring */ 9070 if (phba->cfg_multi_ring_support == 2) { 9071 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 9072 status2 >>= (4*LPFC_EXTRA_RING); 9073 } else 9074 status2 = 0; 9075 9076 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 9077 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 9078 else 9079 fp_irq_rc = IRQ_NONE; 9080 9081 /* Return device-level interrupt handling status */ 9082 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 9083 } /* lpfc_sli_intr_handler */ 9084 9085 /** 9086 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 9087 * @phba: pointer to lpfc hba data structure. 9088 * 9089 * This routine is invoked by the worker thread to process all the pending 9090 * SLI4 FCP abort XRI events. 9091 **/ 9092 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 9093 { 9094 struct lpfc_cq_event *cq_event; 9095 9096 /* First, declare the fcp xri abort event has been handled */ 9097 spin_lock_irq(&phba->hbalock); 9098 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 9099 spin_unlock_irq(&phba->hbalock); 9100 /* Now, handle all the fcp xri abort events */ 9101 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 9102 /* Get the first event from the head of the event queue */ 9103 spin_lock_irq(&phba->hbalock); 9104 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 9105 cq_event, struct lpfc_cq_event, list); 9106 spin_unlock_irq(&phba->hbalock); 9107 /* Notify aborted XRI for FCP work queue */ 9108 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 9109 /* Free the event processed back to the free pool */ 9110 lpfc_sli4_cq_event_release(phba, cq_event); 9111 } 9112 } 9113 9114 /** 9115 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 9116 * @phba: pointer to lpfc hba data structure. 9117 * 9118 * This routine is invoked by the worker thread to process all the pending 9119 * SLI4 els abort xri events. 
9120 **/ 9121 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 9122 { 9123 struct lpfc_cq_event *cq_event; 9124 9125 /* First, declare the els xri abort event has been handled */ 9126 spin_lock_irq(&phba->hbalock); 9127 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 9128 spin_unlock_irq(&phba->hbalock); 9129 /* Now, handle all the els xri abort events */ 9130 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 9131 /* Get the first event from the head of the event queue */ 9132 spin_lock_irq(&phba->hbalock); 9133 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 9134 cq_event, struct lpfc_cq_event, list); 9135 spin_unlock_irq(&phba->hbalock); 9136 /* Notify aborted XRI for ELS work queue */ 9137 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 9138 /* Free the event processed back to the free pool */ 9139 lpfc_sli4_cq_event_release(phba, cq_event); 9140 } 9141 } 9142 9143 /** 9144 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 9145 * @phba: pointer to lpfc hba data structure 9146 * @pIocbIn: pointer to the rspiocbq 9147 * @pIocbOut: pointer to the cmdiocbq 9148 * @wcqe: pointer to the complete wcqe 9149 * 9150 * This routine transfers the fields of a command iocbq to a response iocbq 9151 * by copying all the IOCB fields from command iocbq and transferring the 9152 * completion status information from the complete wcqe. 9153 **/ 9154 static void 9155 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 9156 struct lpfc_iocbq *pIocbIn, 9157 struct lpfc_iocbq *pIocbOut, 9158 struct lpfc_wcqe_complete *wcqe) 9159 { 9160 unsigned long iflags; 9161 size_t offset = offsetof(struct lpfc_iocbq, iocb); 9162 9163 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 9164 sizeof(struct lpfc_iocbq) - offset); 9165 /* Map WCQE parameters into irspiocb parameters */ 9166 pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); 9167 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 9168 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 9169 pIocbIn->iocb.un.fcpi.fcpi_parm = 9170 pIocbOut->iocb.un.fcpi.fcpi_parm - 9171 wcqe->total_data_placed; 9172 else 9173 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 9174 else { 9175 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 9176 pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; 9177 } 9178 9179 /* Pick up HBA exchange busy condition */ 9180 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 9181 spin_lock_irqsave(&phba->hbalock, iflags); 9182 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 9183 spin_unlock_irqrestore(&phba->hbalock, iflags); 9184 } 9185 } 9186 9187 /** 9188 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 9189 * @phba: Pointer to HBA context object. 9190 * @wcqe: Pointer to work-queue completion queue entry. 9191 * 9192 * This routine handles an ELS work-queue completion event and construct 9193 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 9194 * discovery engine to handle. 9195 * 9196 * Return: Pointer to the receive IOCBQ, NULL otherwise. 
9197 **/ 9198 static struct lpfc_iocbq * 9199 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 9200 struct lpfc_iocbq *irspiocbq) 9201 { 9202 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 9203 struct lpfc_iocbq *cmdiocbq; 9204 struct lpfc_wcqe_complete *wcqe; 9205 unsigned long iflags; 9206 9207 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 9208 spin_lock_irqsave(&phba->hbalock, iflags); 9209 pring->stats.iocb_event++; 9210 /* Look up the ELS command IOCB and create pseudo response IOCB */ 9211 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 9212 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9213 spin_unlock_irqrestore(&phba->hbalock, iflags); 9214 9215 if (unlikely(!cmdiocbq)) { 9216 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9217 "0386 ELS complete with no corresponding " 9218 "cmdiocb: iotag (%d)\n", 9219 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9220 lpfc_sli_release_iocbq(phba, irspiocbq); 9221 return NULL; 9222 } 9223 9224 /* Fake the irspiocbq and copy necessary response information */ 9225 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 9226 9227 return irspiocbq; 9228 } 9229 9230 /** 9231 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 9232 * @phba: Pointer to HBA context object. 9233 * @cqe: Pointer to mailbox completion queue entry. 9234 * 9235 * This routine process a mailbox completion queue entry with asynchrous 9236 * event. 9237 * 9238 * Return: true if work posted to worker thread, otherwise false. 9239 **/ 9240 static bool 9241 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 9242 { 9243 struct lpfc_cq_event *cq_event; 9244 unsigned long iflags; 9245 9246 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9247 "0392 Async Event: word0:x%x, word1:x%x, " 9248 "word2:x%x, word3:x%x\n", mcqe->word0, 9249 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 9250 9251 /* Allocate a new internal CQ_EVENT entry */ 9252 cq_event = lpfc_sli4_cq_event_alloc(phba); 9253 if (!cq_event) { 9254 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9255 "0394 Failed to allocate CQ_EVENT entry\n"); 9256 return false; 9257 } 9258 9259 /* Move the CQE into an asynchronous event entry */ 9260 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 9261 spin_lock_irqsave(&phba->hbalock, iflags); 9262 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 9263 /* Set the async event flag */ 9264 phba->hba_flag |= ASYNC_EVENT; 9265 spin_unlock_irqrestore(&phba->hbalock, iflags); 9266 9267 return true; 9268 } 9269 9270 /** 9271 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 9272 * @phba: Pointer to HBA context object. 9273 * @cqe: Pointer to mailbox completion queue entry. 9274 * 9275 * This routine process a mailbox completion queue entry with mailbox 9276 * completion event. 9277 * 9278 * Return: true if work posted to worker thread, otherwise false. 
9279 **/ 9280 static bool 9281 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 9282 { 9283 uint32_t mcqe_status; 9284 MAILBOX_t *mbox, *pmbox; 9285 struct lpfc_mqe *mqe; 9286 struct lpfc_vport *vport; 9287 struct lpfc_nodelist *ndlp; 9288 struct lpfc_dmabuf *mp; 9289 unsigned long iflags; 9290 LPFC_MBOXQ_t *pmb; 9291 bool workposted = false; 9292 int rc; 9293 9294 /* If not a mailbox complete MCQE, bail out via the mailbox-consume check */ 9295 if (!bf_get(lpfc_trailer_completed, mcqe)) 9296 goto out_no_mqe_complete; 9297 9298 /* Get the reference to the active mbox command */ 9299 spin_lock_irqsave(&phba->hbalock, iflags); 9300 pmb = phba->sli.mbox_active; 9301 if (unlikely(!pmb)) { 9302 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 9303 "1832 No pending MBOX command to handle\n"); 9304 spin_unlock_irqrestore(&phba->hbalock, iflags); 9305 goto out_no_mqe_complete; 9306 } 9307 spin_unlock_irqrestore(&phba->hbalock, iflags); 9308 mqe = &pmb->u.mqe; 9309 pmbox = (MAILBOX_t *)&pmb->u.mqe; 9310 mbox = phba->mbox; 9311 vport = pmb->vport; 9312 9313 /* Reset heartbeat timer */ 9314 phba->last_completion_time = jiffies; 9315 del_timer(&phba->sli.mbox_tmo); 9316 9317 /* Move mbox data to caller's mailbox region, do endian swapping */ 9318 if (pmb->mbox_cmpl && mbox) 9319 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 9320 /* Set the mailbox status with SLI4 range 0x4000 */ 9321 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 9322 if (mcqe_status != MB_CQE_STATUS_SUCCESS) 9323 bf_set(lpfc_mqe_status, mqe, 9324 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 9325 9326 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 9327 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 9328 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 9329 "MBOX dflt rpi: status:x%x rpi:x%x", 9330 mcqe_status, 9331 pmbox->un.varWords[0], 0); 9332 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 9333 mp = (struct lpfc_dmabuf *)(pmb->context1); 9334 ndlp = (struct lpfc_nodelist *)pmb->context2; 9335 /* Reg_LOGIN of dflt RPI was successful. Now let's get 9336 * rid of the RPI using the same mbox buffer.
9337 */ 9338 lpfc_unreg_login(phba, vport->vpi, 9339 pmbox->un.varWords[0], pmb); 9340 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 9341 pmb->context1 = mp; 9342 pmb->context2 = ndlp; 9343 pmb->vport = vport; 9344 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 9345 if (rc != MBX_BUSY) 9346 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 9347 LOG_SLI, "0385 rc should " 9348 "have been MBX_BUSY\n"); 9349 if (rc != MBX_NOT_FINISHED) 9350 goto send_current_mbox; 9351 } 9352 } 9353 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 9354 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 9355 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 9356 9357 /* There is mailbox completion work to do */ 9358 spin_lock_irqsave(&phba->hbalock, iflags); 9359 __lpfc_mbox_cmpl_put(phba, pmb); 9360 phba->work_ha |= HA_MBATT; 9361 spin_unlock_irqrestore(&phba->hbalock, iflags); 9362 workposted = true; 9363 9364 send_current_mbox: 9365 spin_lock_irqsave(&phba->hbalock, iflags); 9366 /* Release the mailbox command posting token */ 9367 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9368 /* Setting active mailbox pointer need to be in sync to flag clear */ 9369 phba->sli.mbox_active = NULL; 9370 spin_unlock_irqrestore(&phba->hbalock, iflags); 9371 /* Wake up worker thread to post the next pending mailbox command */ 9372 lpfc_worker_wake_up(phba); 9373 out_no_mqe_complete: 9374 if (bf_get(lpfc_trailer_consumed, mcqe)) 9375 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 9376 return workposted; 9377 } 9378 9379 /** 9380 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 9381 * @phba: Pointer to HBA context object. 9382 * @cqe: Pointer to mailbox completion queue entry. 9383 * 9384 * This routine process a mailbox completion queue entry, it invokes the 9385 * proper mailbox complete handling or asynchrous event handling routine 9386 * according to the MCQE's async bit. 9387 * 9388 * Return: true if work posted to worker thread, otherwise false. 9389 **/ 9390 static bool 9391 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 9392 { 9393 struct lpfc_mcqe mcqe; 9394 bool workposted; 9395 9396 /* Copy the mailbox MCQE and convert endian order as needed */ 9397 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 9398 9399 /* Invoke the proper event handling routine */ 9400 if (!bf_get(lpfc_trailer_async, &mcqe)) 9401 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 9402 else 9403 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 9404 return workposted; 9405 } 9406 9407 /** 9408 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 9409 * @phba: Pointer to HBA context object. 9410 * @wcqe: Pointer to work-queue completion queue entry. 9411 * 9412 * This routine handles an ELS work-queue completion event. 9413 * 9414 * Return: true if work posted to worker thread, otherwise false. 
9415 **/ 9416 static bool 9417 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, 9418 struct lpfc_wcqe_complete *wcqe) 9419 { 9420 struct lpfc_iocbq *irspiocbq; 9421 unsigned long iflags; 9422 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 9423 9424 /* Get an irspiocbq for later ELS response processing use */ 9425 irspiocbq = lpfc_sli_get_iocbq(phba); 9426 if (!irspiocbq) { 9427 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9428 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 9429 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 9430 pring->txq_cnt, phba->iocb_cnt, 9431 phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt, 9432 phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt); 9433 return false; 9434 } 9435 9436 /* Save off the slow-path queue event for the worker thread to process */ 9437 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 9438 spin_lock_irqsave(&phba->hbalock, iflags); 9439 list_add_tail(&irspiocbq->cq_event.list, 9440 &phba->sli4_hba.sp_queue_event); 9441 phba->hba_flag |= HBA_SP_QUEUE_EVT; 9442 spin_unlock_irqrestore(&phba->hbalock, iflags); 9443 9444 return true; 9445 } 9446 9447 /** 9448 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 9449 * @phba: Pointer to HBA context object. 9450 * @wcqe: Pointer to work-queue completion queue entry. 9451 * 9452 * This routine handles a slow-path WQ entry consumed event by invoking the 9453 * proper WQ release routine to the slow-path WQ. 9454 **/ 9455 static void 9456 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 9457 struct lpfc_wcqe_release *wcqe) 9458 { 9459 /* Check for the slow-path ELS work queue */ 9460 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 9461 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 9462 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 9463 else 9464 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9465 "2579 Slow-path wqe consume event carries " 9466 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 9467 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 9468 phba->sli4_hba.els_wq->queue_id); 9469 } 9470 9471 /** 9472 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event 9473 * @phba: Pointer to HBA context object. 9474 * @cq: Pointer to a WQ completion queue. 9475 * @wcqe: Pointer to work-queue completion queue entry. 9476 * 9477 * This routine handles an XRI abort event. 9478 * 9479 * Return: true if work posted to worker thread, otherwise false.
9480 **/ 9481 static bool 9482 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 9483 struct lpfc_queue *cq, 9484 struct sli4_wcqe_xri_aborted *wcqe) 9485 { 9486 bool workposted = false; 9487 struct lpfc_cq_event *cq_event; 9488 unsigned long iflags; 9489 9490 /* Allocate a new internal CQ_EVENT entry */ 9491 cq_event = lpfc_sli4_cq_event_alloc(phba); 9492 if (!cq_event) { 9493 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9494 "0602 Failed to allocate CQ_EVENT entry\n"); 9495 return false; 9496 } 9497 9498 /* Move the CQE into the proper xri abort event list */ 9499 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 9500 switch (cq->subtype) { 9501 case LPFC_FCP: 9502 spin_lock_irqsave(&phba->hbalock, iflags); 9503 list_add_tail(&cq_event->list, 9504 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 9505 /* Set the fcp xri abort event flag */ 9506 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 9507 spin_unlock_irqrestore(&phba->hbalock, iflags); 9508 workposted = true; 9509 break; 9510 case LPFC_ELS: 9511 spin_lock_irqsave(&phba->hbalock, iflags); 9512 list_add_tail(&cq_event->list, 9513 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 9514 /* Set the els xri abort event flag */ 9515 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 9516 spin_unlock_irqrestore(&phba->hbalock, iflags); 9517 workposted = true; 9518 break; 9519 default: 9520 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9521 "0603 Invalid work queue CQE subtype (x%x)\n", 9522 cq->subtype); 9523 workposted = false; 9524 break; 9525 } 9526 return workposted; 9527 } 9528 9529 /** 9530 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 9531 * @phba: Pointer to HBA context object. 9532 * @rcqe: Pointer to receive-queue completion queue entry. 9533 * 9534 * This routine process a receive-queue completion queue entry. 9535 * 9536 * Return: true if work posted to worker thread, otherwise false. 
9537 **/ 9538 static bool 9539 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 9540 { 9541 bool workposted = false; 9542 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 9543 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 9544 struct hbq_dmabuf *dma_buf; 9545 uint32_t status; 9546 unsigned long iflags; 9547 9548 if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id) 9549 goto out; 9550 9551 status = bf_get(lpfc_rcqe_status, rcqe); 9552 switch (status) { 9553 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 9554 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9555 "2537 Receive Frame Truncated!!\n"); /* fall through */ 9556 case FC_STATUS_RQ_SUCCESS: 9557 lpfc_sli4_rq_release(hrq, drq); 9558 spin_lock_irqsave(&phba->hbalock, iflags); 9559 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 9560 if (!dma_buf) { 9561 spin_unlock_irqrestore(&phba->hbalock, iflags); 9562 goto out; 9563 } 9564 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 9565 /* save off the frame for the worker thread to process */ 9566 list_add_tail(&dma_buf->cq_event.list, 9567 &phba->sli4_hba.sp_queue_event); 9568 /* Frame received */ 9569 phba->hba_flag |= HBA_SP_QUEUE_EVT; 9570 spin_unlock_irqrestore(&phba->hbalock, iflags); 9571 workposted = true; 9572 break; 9573 case FC_STATUS_INSUFF_BUF_NEED_BUF: 9574 case FC_STATUS_INSUFF_BUF_FRM_DISC: 9575 /* Post more buffers if possible */ 9576 spin_lock_irqsave(&phba->hbalock, iflags); 9577 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 9578 spin_unlock_irqrestore(&phba->hbalock, iflags); 9579 workposted = true; 9580 break; 9581 } 9582 out: 9583 return workposted; 9584 } 9585 9586 /** 9587 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 9588 * @phba: Pointer to HBA context object. 9589 * @cq: Pointer to the completion queue. 9590 * @cqe: Pointer to a completion queue entry. 9591 * 9592 * This routine processes a slow-path work-queue or receive-queue completion queue 9593 * entry. 9594 * 9595 * Return: true if work posted to worker thread, otherwise false.
9596 **/ 9597 static bool 9598 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9599 struct lpfc_cqe *cqe) 9600 { 9601 struct lpfc_cqe cqevt; 9602 bool workposted = false; 9603 9604 /* Copy the work queue CQE and convert endian order if needed */ 9605 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 9606 9607 /* Check and process for different type of WCQE and dispatch */ 9608 switch (bf_get(lpfc_cqe_code, &cqevt)) { 9609 case CQE_CODE_COMPL_WQE: 9610 /* Process the WQ/RQ complete event */ 9611 phba->last_completion_time = jiffies; 9612 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, 9613 (struct lpfc_wcqe_complete *)&cqevt); 9614 break; 9615 case CQE_CODE_RELEASE_WQE: 9616 /* Process the WQ release event */ 9617 lpfc_sli4_sp_handle_rel_wcqe(phba, 9618 (struct lpfc_wcqe_release *)&cqevt); 9619 break; 9620 case CQE_CODE_XRI_ABORTED: 9621 /* Process the WQ XRI abort event */ 9622 phba->last_completion_time = jiffies; 9623 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 9624 (struct sli4_wcqe_xri_aborted *)&cqevt); 9625 break; 9626 case CQE_CODE_RECEIVE: 9627 /* Process the RQ event */ 9628 phba->last_completion_time = jiffies; 9629 workposted = lpfc_sli4_sp_handle_rcqe(phba, 9630 (struct lpfc_rcqe *)&cqevt); 9631 break; 9632 default: 9633 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9634 "0388 Not a valid WCQE code: x%x\n", 9635 bf_get(lpfc_cqe_code, &cqevt)); 9636 break; 9637 } 9638 return workposted; 9639 } 9640 9641 /** 9642 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 9643 * @phba: Pointer to HBA context object. 9644 * @eqe: Pointer to slow-path event queue entry. 9645 * 9646 * This routine processes an event queue entry from the slow-path event queue. 9647 * It checks the MajorCode and MinorCode to determine whether this is a 9648 * completion event on a completion queue; if not, an error is logged 9649 * and the routine just returns. Otherwise, it finds the corresponding completion 9650 * queue, processes all the entries on that completion queue, rearms the 9651 * completion queue, and then returns.
9652 * 9653 **/ 9654 static void 9655 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 9656 { 9657 struct lpfc_queue *cq = NULL, *childq, *speq; 9658 struct lpfc_cqe *cqe; 9659 bool workposted = false; 9660 int ecount = 0; 9661 uint16_t cqid; 9662 9663 if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) { 9664 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9665 "0359 Not a valid slow-path completion " 9666 "event: majorcode=x%x, minorcode=x%x\n", 9667 bf_get_le32(lpfc_eqe_major_code, eqe), 9668 bf_get_le32(lpfc_eqe_minor_code, eqe)); 9669 return; 9670 } 9671 9672 /* Get the reference to the corresponding CQ */ 9673 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 9674 9675 /* Search for completion queue pointer matching this cqid */ 9676 speq = phba->sli4_hba.sp_eq; 9677 list_for_each_entry(childq, &speq->child_list, list) { 9678 if (childq->queue_id == cqid) { 9679 cq = childq; 9680 break; 9681 } 9682 } 9683 if (unlikely(!cq)) { 9684 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 9685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9686 "0365 Slow-path CQ identifier " 9687 "(%d) does not exist\n", cqid); 9688 return; 9689 } 9690 9691 /* Process all the entries to the CQ */ 9692 switch (cq->type) { 9693 case LPFC_MCQ: 9694 while ((cqe = lpfc_sli4_cq_get(cq))) { 9695 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 9696 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9697 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9698 } 9699 break; 9700 case LPFC_WCQ: 9701 while ((cqe = lpfc_sli4_cq_get(cq))) { 9702 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); 9703 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9704 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9705 } 9706 break; 9707 default: 9708 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9709 "0370 Invalid completion queue type (%d)\n", 9710 cq->type); 9711 return; 9712 } 9713 9714 /* Catch the no cq entry condition, log an error */ 9715 if (unlikely(ecount == 0)) 9716 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9717 "0371 No entry from the CQ: identifier " 9718 "(x%x), type (%d)\n", cq->queue_id, cq->type); 9719 9720 /* In any case, flash and re-arm the RCQ */ 9721 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 9722 9723 /* wake up worker thread if there are works to be done */ 9724 if (workposted) 9725 lpfc_worker_wake_up(phba); 9726 } 9727 9728 /** 9729 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 9730 * @eqe: Pointer to fast-path completion queue entry. 9731 * 9732 * This routine process a fast-path work queue completion entry from fast-path 9733 * event queue for FCP command response completion. 9734 **/ 9735 static void 9736 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, 9737 struct lpfc_wcqe_complete *wcqe) 9738 { 9739 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; 9740 struct lpfc_iocbq *cmdiocbq; 9741 struct lpfc_iocbq irspiocbq; 9742 unsigned long iflags; 9743 9744 spin_lock_irqsave(&phba->hbalock, iflags); 9745 pring->stats.iocb_event++; 9746 spin_unlock_irqrestore(&phba->hbalock, iflags); 9747 9748 /* Check for response status */ 9749 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 9750 /* If resource errors reported from HBA, reduce queue 9751 * depth of the SCSI device. 
9752 */ 9753 if ((bf_get(lpfc_wcqe_c_status, wcqe) == 9754 IOSTAT_LOCAL_REJECT) && 9755 (wcqe->parameter == IOERR_NO_RESOURCES)) { 9756 phba->lpfc_rampdown_queue_depth(phba); 9757 } 9758 /* Log the error status */ 9759 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9760 "0373 FCP complete error: status=x%x, " 9761 "hw_status=x%x, total_data_specified=%d, " 9762 "parameter=x%x, word3=x%x\n", 9763 bf_get(lpfc_wcqe_c_status, wcqe), 9764 bf_get(lpfc_wcqe_c_hw_status, wcqe), 9765 wcqe->total_data_placed, wcqe->parameter, 9766 wcqe->word3); 9767 } 9768 9769 /* Look up the FCP command IOCB and create pseudo response IOCB */ 9770 spin_lock_irqsave(&phba->hbalock, iflags); 9771 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 9772 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9773 spin_unlock_irqrestore(&phba->hbalock, iflags); 9774 if (unlikely(!cmdiocbq)) { 9775 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9776 "0374 FCP complete with no corresponding " 9777 "cmdiocb: iotag (%d)\n", 9778 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9779 return; 9780 } 9781 if (unlikely(!cmdiocbq->iocb_cmpl)) { 9782 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9783 "0375 FCP cmdiocb not callback function " 9784 "iotag: (%d)\n", 9785 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 9786 return; 9787 } 9788 9789 /* Fake the irspiocb and copy necessary response information */ 9790 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 9791 9792 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 9793 spin_lock_irqsave(&phba->hbalock, iflags); 9794 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 9795 spin_unlock_irqrestore(&phba->hbalock, iflags); 9796 } 9797 9798 /* Pass the cmd_iocb and the rsp state to the upper layer */ 9799 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 9800 } 9801 9802 /** 9803 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 9804 * @phba: Pointer to HBA context object. 9805 * @cq: Pointer to completion queue. 9806 * @wcqe: Pointer to work-queue completion queue entry. 9807 * 9808 * This routine handles an fast-path WQ entry comsumed event by invoking the 9809 * proper WQ release routine to the slow-path WQ. 9810 **/ 9811 static void 9812 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9813 struct lpfc_wcqe_release *wcqe) 9814 { 9815 struct lpfc_queue *childwq; 9816 bool wqid_matched = false; 9817 uint16_t fcp_wqid; 9818 9819 /* Check for fast-path FCP work queue release */ 9820 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 9821 list_for_each_entry(childwq, &cq->child_list, list) { 9822 if (childwq->queue_id == fcp_wqid) { 9823 lpfc_sli4_wq_release(childwq, 9824 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 9825 wqid_matched = true; 9826 break; 9827 } 9828 } 9829 /* Report warning log message if no match found */ 9830 if (wqid_matched != true) 9831 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9832 "2580 Fast-path wqe consume event carries " 9833 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 9834 } 9835 9836 /** 9837 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 9838 * @cq: Pointer to the completion queue. 9839 * @eqe: Pointer to fast-path completion queue entry. 9840 * 9841 * This routine process a fast-path work queue completion entry from fast-path 9842 * event queue for FCP command response completion. 
9843 **/ 9844 static int 9845 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 9846 struct lpfc_cqe *cqe) 9847 { 9848 struct lpfc_wcqe_release wcqe; 9849 bool workposted = false; 9850 9851 /* Copy the work queue CQE and convert endian order if needed */ 9852 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 9853 9854 /* Check and process for different type of WCQE and dispatch */ 9855 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 9856 case CQE_CODE_COMPL_WQE: 9857 /* Process the WQ complete event */ 9858 phba->last_completion_time = jiffies; 9859 lpfc_sli4_fp_handle_fcp_wcqe(phba, 9860 (struct lpfc_wcqe_complete *)&wcqe); 9861 break; 9862 case CQE_CODE_RELEASE_WQE: 9863 /* Process the WQ release event */ 9864 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 9865 (struct lpfc_wcqe_release *)&wcqe); 9866 break; 9867 case CQE_CODE_XRI_ABORTED: 9868 /* Process the WQ XRI abort event */ 9869 phba->last_completion_time = jiffies; 9870 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 9871 (struct sli4_wcqe_xri_aborted *)&wcqe); 9872 break; 9873 default: 9874 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9875 "0144 Not a valid WCQE code: x%x\n", 9876 bf_get(lpfc_wcqe_c_code, &wcqe)); 9877 break; 9878 } 9879 return workposted; 9880 } 9881 9882 /** 9883 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry 9884 * @phba: Pointer to HBA context object. 9885 * @eqe: Pointer to fast-path event queue entry. 9886 * 9887 * This routine processes an event queue entry from the fast-path event queue. 9888 * It checks the MajorCode and MinorCode to determine whether this is a 9889 * completion event on a completion queue; if not, an error is logged 9890 * and the routine just returns. Otherwise, it finds the corresponding completion 9891 * queue, processes all the entries on the completion queue, rearms the 9892 * completion queue, and then returns.
9893 **/ 9894 static void 9895 lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 9896 uint32_t fcp_cqidx) 9897 { 9898 struct lpfc_queue *cq; 9899 struct lpfc_cqe *cqe; 9900 bool workposted = false; 9901 uint16_t cqid; 9902 int ecount = 0; 9903 9904 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 9905 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9906 "0366 Not a valid fast-path completion " 9907 "event: majorcode=x%x, minorcode=x%x\n", 9908 bf_get_le32(lpfc_eqe_major_code, eqe), 9909 bf_get_le32(lpfc_eqe_minor_code, eqe)); 9910 return; 9911 } 9912 9913 cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; 9914 if (unlikely(!cq)) { 9915 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 9916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9917 "0367 Fast-path completion queue " 9918 "does not exist\n"); 9919 return; 9920 } 9921 9922 /* Get the reference to the corresponding CQ */ 9923 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 9924 if (unlikely(cqid != cq->queue_id)) { 9925 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9926 "0368 Miss-matched fast-path completion " 9927 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 9928 cqid, cq->queue_id); 9929 return; 9930 } 9931 9932 /* Process all the entries to the CQ */ 9933 while ((cqe = lpfc_sli4_cq_get(cq))) { 9934 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 9935 if (!(++ecount % LPFC_GET_QE_REL_INT)) 9936 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 9937 } 9938 9939 /* Catch the no cq entry condition */ 9940 if (unlikely(ecount == 0)) 9941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9942 "0369 No entry from fast-path completion " 9943 "queue fcpcqid=%d\n", cq->queue_id); 9944 9945 /* In any case, flash and re-arm the CQ */ 9946 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 9947 9948 /* wake up worker thread if there are works to be done */ 9949 if (workposted) 9950 lpfc_worker_wake_up(phba); 9951 } 9952 9953 static void 9954 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 9955 { 9956 struct lpfc_eqe *eqe; 9957 9958 /* walk all the EQ entries and drop on the floor */ 9959 while ((eqe = lpfc_sli4_eq_get(eq))) 9960 ; 9961 9962 /* Clear and re-arm the EQ */ 9963 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 9964 } 9965 9966 /** 9967 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device 9968 * @irq: Interrupt number. 9969 * @dev_id: The device context pointer. 9970 * 9971 * This function is directly called from the PCI layer as an interrupt 9972 * service routine when device with SLI-4 interface spec is enabled with 9973 * MSI-X multi-message interrupt mode and there are slow-path events in 9974 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 9975 * interrupt mode, this function is called as part of the device-level 9976 * interrupt handler. When the PCI slot is in error recovery or the HBA is 9977 * undergoing initialization, the interrupt handler will not process the 9978 * interrupt. The link attention and ELS ring attention events are handled 9979 * by the worker thread. The interrupt handler signals the worker thread 9980 * and returns for these events. This function is called without any lock 9981 * held. It gets the hbalock to access and update SLI data structures. 9982 * 9983 * This function returns IRQ_HANDLED when interrupt is handled else it 9984 * returns IRQ_NONE. 
9985 **/ 9986 irqreturn_t 9987 lpfc_sli4_sp_intr_handler(int irq, void *dev_id) 9988 { 9989 struct lpfc_hba *phba; 9990 struct lpfc_queue *speq; 9991 struct lpfc_eqe *eqe; 9992 unsigned long iflag; 9993 int ecount = 0; 9994 9995 /* 9996 * Get the driver's phba structure from the dev_id 9997 */ 9998 phba = (struct lpfc_hba *)dev_id; 9999 10000 if (unlikely(!phba)) 10001 return IRQ_NONE; 10002 10003 /* Get to the EQ struct associated with this vector */ 10004 speq = phba->sli4_hba.sp_eq; 10005 10006 /* Check device state for handling interrupt */ 10007 if (unlikely(lpfc_intr_state_check(phba))) { 10008 /* Check again for link_state with lock held */ 10009 spin_lock_irqsave(&phba->hbalock, iflag); 10010 if (phba->link_state < LPFC_LINK_DOWN) 10011 /* Flush, clear interrupt, and rearm the EQ */ 10012 lpfc_sli4_eq_flush(phba, speq); 10013 spin_unlock_irqrestore(&phba->hbalock, iflag); 10014 return IRQ_NONE; 10015 } 10016 10017 /* 10018 * Process all the event on FCP slow-path EQ 10019 */ 10020 while ((eqe = lpfc_sli4_eq_get(speq))) { 10021 lpfc_sli4_sp_handle_eqe(phba, eqe); 10022 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10023 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 10024 } 10025 10026 /* Always clear and re-arm the slow-path EQ */ 10027 lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); 10028 10029 /* Catch the no cq entry condition */ 10030 if (unlikely(ecount == 0)) { 10031 if (phba->intr_type == MSIX) 10032 /* MSI-X treated interrupt served as no EQ share INT */ 10033 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10034 "0357 MSI-X interrupt with no EQE\n"); 10035 else 10036 /* Non MSI-X treated on interrupt as EQ share INT */ 10037 return IRQ_NONE; 10038 } 10039 10040 return IRQ_HANDLED; 10041 } /* lpfc_sli4_sp_intr_handler */ 10042 10043 /** 10044 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device 10045 * @irq: Interrupt number. 10046 * @dev_id: The device context pointer. 10047 * 10048 * This function is directly called from the PCI layer as an interrupt 10049 * service routine when device with SLI-4 interface spec is enabled with 10050 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 10051 * ring event in the HBA. However, when the device is enabled with either 10052 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 10053 * device-level interrupt handler. When the PCI slot is in error recovery 10054 * or the HBA is undergoing initialization, the interrupt handler will not 10055 * process the interrupt. The SCSI FCP fast-path ring event are handled in 10056 * the intrrupt context. This function is called without any lock held. 10057 * It gets the hbalock to access and update SLI data structures. Note that, 10058 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 10059 * equal to that of FCP CQ index. 10060 * 10061 * This function returns IRQ_HANDLED when interrupt is handled else it 10062 * returns IRQ_NONE. 
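 *
 * A minimal wiring sketch (the vector variable, flags and name are
 * illustrative assumptions): unlike the slow-path handler, the dev_id
 * registered for each fast-path vector is the per-EQ handle rather than the
 * phba itself, which is how this routine recovers both the phba and the
 * EQ index:
 *
 *   rc = request_irq(vector, lpfc_sli4_fp_intr_handler, IRQF_SHARED,
 *                    "lpfc-fp", &phba->sli4_hba.fcp_eq_hdl[idx]);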
10063 **/ 10064 irqreturn_t 10065 lpfc_sli4_fp_intr_handler(int irq, void *dev_id) 10066 { 10067 struct lpfc_hba *phba; 10068 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 10069 struct lpfc_queue *fpeq; 10070 struct lpfc_eqe *eqe; 10071 unsigned long iflag; 10072 int ecount = 0; 10073 uint32_t fcp_eqidx; 10074 10075 /* Get the driver's phba structure from the dev_id */ 10076 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 10077 phba = fcp_eq_hdl->phba; 10078 fcp_eqidx = fcp_eq_hdl->idx; 10079 10080 if (unlikely(!phba)) 10081 return IRQ_NONE; 10082 10083 /* Get to the EQ struct associated with this vector */ 10084 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 10085 10086 /* Check device state for handling interrupt */ 10087 if (unlikely(lpfc_intr_state_check(phba))) { 10088 /* Check again for link_state with lock held */ 10089 spin_lock_irqsave(&phba->hbalock, iflag); 10090 if (phba->link_state < LPFC_LINK_DOWN) 10091 /* Flush, clear interrupt, and rearm the EQ */ 10092 lpfc_sli4_eq_flush(phba, fpeq); 10093 spin_unlock_irqrestore(&phba->hbalock, iflag); 10094 return IRQ_NONE; 10095 } 10096 10097 /* 10098 * Process all the event on FCP fast-path EQ 10099 */ 10100 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 10101 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 10102 if (!(++ecount % LPFC_GET_QE_REL_INT)) 10103 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 10104 } 10105 10106 /* Always clear and re-arm the fast-path EQ */ 10107 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 10108 10109 if (unlikely(ecount == 0)) { 10110 if (phba->intr_type == MSIX) 10111 /* MSI-X treated interrupt served as no EQ share INT */ 10112 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10113 "0358 MSI-X interrupt with no EQE\n"); 10114 else 10115 /* Non MSI-X treated on interrupt as EQ share INT */ 10116 return IRQ_NONE; 10117 } 10118 10119 return IRQ_HANDLED; 10120 } /* lpfc_sli4_fp_intr_handler */ 10121 10122 /** 10123 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 10124 * @irq: Interrupt number. 10125 * @dev_id: The device context pointer. 10126 * 10127 * This function is the device-level interrupt handler to device with SLI-4 10128 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 10129 * interrupt mode is enabled and there is an event in the HBA which requires 10130 * driver attention. This function invokes the slow-path interrupt attention 10131 * handling function and fast-path interrupt attention handling function in 10132 * turn to process the relevant HBA attention events. This function is called 10133 * without any lock held. It gets the hbalock to access and update SLI data 10134 * structures. 10135 * 10136 * This function returns IRQ_HANDLED when interrupt is handled, else it 10137 * returns IRQ_NONE. 10138 **/ 10139 irqreturn_t 10140 lpfc_sli4_intr_handler(int irq, void *dev_id) 10141 { 10142 struct lpfc_hba *phba; 10143 irqreturn_t sp_irq_rc, fp_irq_rc; 10144 bool fp_handled = false; 10145 uint32_t fcp_eqidx; 10146 10147 /* Get the driver's phba structure from the dev_id */ 10148 phba = (struct lpfc_hba *)dev_id; 10149 10150 if (unlikely(!phba)) 10151 return IRQ_NONE; 10152 10153 /* 10154 * Invokes slow-path host attention interrupt handling as appropriate. 10155 */ 10156 sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); 10157 10158 /* 10159 * Invoke fast-path host attention interrupt handling as appropriate. 
10160 */ 10161 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 10162 fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, 10163 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 10164 if (fp_irq_rc == IRQ_HANDLED) 10165 fp_handled |= true; 10166 } 10167 10168 return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc; 10169 } /* lpfc_sli4_intr_handler */ 10170 10171 /** 10172 * lpfc_sli4_queue_free - free a queue structure and associated memory 10173 * @queue: The queue structure to free. 10174 * 10175 * This function frees a queue structure and the DMAable memory used for 10176 * the host resident queue. This function must be called after destroying the 10177 * queue on the HBA. 10178 **/ 10179 void 10180 lpfc_sli4_queue_free(struct lpfc_queue *queue) 10181 { 10182 struct lpfc_dmabuf *dmabuf; 10183 10184 if (!queue) 10185 return; 10186 10187 while (!list_empty(&queue->page_list)) { 10188 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 10189 list); 10190 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 10191 dmabuf->virt, dmabuf->phys); 10192 kfree(dmabuf); 10193 } 10194 kfree(queue); 10195 return; 10196 } 10197 10198 /** 10199 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 10200 * @phba: The HBA that this queue is being created on. 10201 * @entry_size: The size of each queue entry for this queue. 10202 * @entry count: The number of entries that this queue will handle. 10203 * 10204 * This function allocates a queue structure and the DMAable memory used for 10205 * the host resident queue. This function must be called before creating the 10206 * queue on the HBA. 10207 **/ 10208 struct lpfc_queue * 10209 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 10210 uint32_t entry_count) 10211 { 10212 struct lpfc_queue *queue; 10213 struct lpfc_dmabuf *dmabuf; 10214 int x, total_qe_count; 10215 void *dma_pointer; 10216 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10217 10218 if (!phba->sli4_hba.pc_sli4_params.supported) 10219 hw_page_size = SLI4_PAGE_SIZE; 10220 10221 queue = kzalloc(sizeof(struct lpfc_queue) + 10222 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 10223 if (!queue) 10224 return NULL; 10225 queue->page_count = (ALIGN(entry_size * entry_count, 10226 hw_page_size))/hw_page_size; 10227 INIT_LIST_HEAD(&queue->list); 10228 INIT_LIST_HEAD(&queue->page_list); 10229 INIT_LIST_HEAD(&queue->child_list); 10230 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 10231 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 10232 if (!dmabuf) 10233 goto out_fail; 10234 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 10235 hw_page_size, &dmabuf->phys, 10236 GFP_KERNEL); 10237 if (!dmabuf->virt) { 10238 kfree(dmabuf); 10239 goto out_fail; 10240 } 10241 memset(dmabuf->virt, 0, hw_page_size); 10242 dmabuf->buffer_tag = x; 10243 list_add_tail(&dmabuf->list, &queue->page_list); 10244 /* initialize queue's entry array */ 10245 dma_pointer = dmabuf->virt; 10246 for (; total_qe_count < entry_count && 10247 dma_pointer < (hw_page_size + dmabuf->virt); 10248 total_qe_count++, dma_pointer += entry_size) { 10249 queue->qe[total_qe_count].address = dma_pointer; 10250 } 10251 } 10252 queue->entry_size = entry_size; 10253 queue->entry_count = entry_count; 10254 queue->phba = phba; 10255 10256 return queue; 10257 out_fail: 10258 lpfc_sli4_queue_free(queue); 10259 return NULL; 10260 } 10261 10262 /** 10263 * lpfc_eq_create - Create an Event Queue on the HBA 10264 * @phba: HBA structure that indicates port 
to create a queue on. 10265 * @eq: The queue structure to use to create the event queue. 10266 * @imax: The maximum interrupt per second limit. 10267 * 10268 * This function creates an event queue, as detailed in @eq, on a port, 10269 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 10270 * 10271 * The @phba struct is used to send a mailbox command to the HBA. The @eq struct 10272 * is used to get the entry count and entry size that are necessary to 10273 * determine the number of pages to allocate and use for this queue. This 10274 * function will send the EQ_CREATE mailbox command to the HBA to set up the 10275 * event queue. The mailbox command is issued in polled mode, so this function 10276 * waits for it to complete before continuing. 10277 * 10278 * On success this function will return zero. If unable to allocate enough 10279 * memory this function will return -ENOMEM. If the queue create mailbox command 10280 * fails this function will return -ENXIO. 10281 **/ 10282 uint32_t 10283 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) 10284 { 10285 struct lpfc_mbx_eq_create *eq_create; 10286 LPFC_MBOXQ_t *mbox; 10287 int rc, length, status = 0; 10288 struct lpfc_dmabuf *dmabuf; 10289 uint32_t shdr_status, shdr_add_status; 10290 union lpfc_sli4_cfg_shdr *shdr; 10291 uint16_t dmult; 10292 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10293 10294 if (!phba->sli4_hba.pc_sli4_params.supported) 10295 hw_page_size = SLI4_PAGE_SIZE; 10296 10297 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10298 if (!mbox) 10299 return -ENOMEM; 10300 length = (sizeof(struct lpfc_mbx_eq_create) - 10301 sizeof(struct lpfc_sli4_cfg_mhdr)); 10302 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10303 LPFC_MBOX_OPCODE_EQ_CREATE, 10304 length, LPFC_SLI4_MBX_EMBED); 10305 eq_create = &mbox->u.mqe.un.eq_create; 10306 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 10307 eq->page_count); 10308 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 10309 LPFC_EQE_SIZE); 10310 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 10311 /* Calculate the delay multiplier from the maximum interrupts per second */ 10312 dmult = LPFC_DMULT_CONST/imax - 1; 10313 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 10314 dmult); 10315 switch (eq->entry_count) { 10316 default: 10317 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10318 "0360 Unsupported EQ count.
(%d)\n", 10319 eq->entry_count); 10320 if (eq->entry_count < 256) 10321 return -EINVAL; 10322 /* otherwise default to smallest count (drop through) */ 10323 case 256: 10324 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 10325 LPFC_EQ_CNT_256); 10326 break; 10327 case 512: 10328 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 10329 LPFC_EQ_CNT_512); 10330 break; 10331 case 1024: 10332 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 10333 LPFC_EQ_CNT_1024); 10334 break; 10335 case 2048: 10336 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 10337 LPFC_EQ_CNT_2048); 10338 break; 10339 case 4096: 10340 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 10341 LPFC_EQ_CNT_4096); 10342 break; 10343 } 10344 list_for_each_entry(dmabuf, &eq->page_list, list) { 10345 memset(dmabuf->virt, 0, hw_page_size); 10346 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10347 putPaddrLow(dmabuf->phys); 10348 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10349 putPaddrHigh(dmabuf->phys); 10350 } 10351 mbox->vport = phba->pport; 10352 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10353 mbox->context1 = NULL; 10354 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10355 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 10356 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10357 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10358 if (shdr_status || shdr_add_status || rc) { 10359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10360 "2500 EQ_CREATE mailbox failed with " 10361 "status x%x add_status x%x, mbx status x%x\n", 10362 shdr_status, shdr_add_status, rc); 10363 status = -ENXIO; 10364 } 10365 eq->type = LPFC_EQ; 10366 eq->subtype = LPFC_NONE; 10367 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 10368 if (eq->queue_id == 0xFFFF) 10369 status = -ENXIO; 10370 eq->host_index = 0; 10371 eq->hba_index = 0; 10372 10373 mempool_free(mbox, phba->mbox_mem_pool); 10374 return status; 10375 } 10376 10377 /** 10378 * lpfc_cq_create - Create a Completion Queue on the HBA 10379 * @phba: HBA structure that indicates port to create a queue on. 10380 * @cq: The queue structure to use to create the completion queue. 10381 * @eq: The event queue to bind this completion queue to. 10382 * 10383 * This function creates a completion queue, as detailed in @wq, on a port, 10384 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 10385 * 10386 * The @phba struct is used to send mailbox command to HBA. The @cq struct 10387 * is used to get the entry count and entry size that are necessary to 10388 * determine the number of pages to allocate and use for this queue. The @eq 10389 * is used to indicate which event queue to bind this completion queue to. This 10390 * function will send the CQ_CREATE mailbox command to the HBA to setup the 10391 * completion queue. This function is asynchronous and will wait for the mailbox 10392 * command to finish before continuing. 10393 * 10394 * On success this function will return a zero. If unable to allocate enough 10395 * memory this function will return -ENOMEM. If the queue create mailbox command 10396 * fails this function will return -ENXIO. 
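 *
 * A minimal usage sketch (the entry size, entry count and error label are
 * illustrative assumptions, not taken from this file): the queue memory is
 * allocated with lpfc_sli4_queue_alloc() first and only then created on the
 * port, e.g. for an ELS work-queue completion queue bound to an existing @eq:
 *
 *   struct lpfc_queue *cq;
 *
 *   cq = lpfc_sli4_queue_alloc(phba, 16, 1024);
 *   if (!cq || lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_ELS))
 *           goto out_error;	/* hypothetical cleanup label */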
10397 **/ 10398 uint32_t 10399 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 10400 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 10401 { 10402 struct lpfc_mbx_cq_create *cq_create; 10403 struct lpfc_dmabuf *dmabuf; 10404 LPFC_MBOXQ_t *mbox; 10405 int rc, length, status = 0; 10406 uint32_t shdr_status, shdr_add_status; 10407 union lpfc_sli4_cfg_shdr *shdr; 10408 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10409 10410 if (!phba->sli4_hba.pc_sli4_params.supported) 10411 hw_page_size = SLI4_PAGE_SIZE; 10412 10413 10414 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10415 if (!mbox) 10416 return -ENOMEM; 10417 length = (sizeof(struct lpfc_mbx_cq_create) - 10418 sizeof(struct lpfc_sli4_cfg_mhdr)); 10419 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10420 LPFC_MBOX_OPCODE_CQ_CREATE, 10421 length, LPFC_SLI4_MBX_EMBED); 10422 cq_create = &mbox->u.mqe.un.cq_create; 10423 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 10424 cq->page_count); 10425 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 10426 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 10427 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); 10428 switch (cq->entry_count) { 10429 default: 10430 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10431 "0361 Unsupported CQ count. (%d)\n", 10432 cq->entry_count); 10433 if (cq->entry_count < 256) 10434 return -EINVAL; 10435 /* otherwise default to smallest count (drop through) */ 10436 case 256: 10437 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 10438 LPFC_CQ_CNT_256); 10439 break; 10440 case 512: 10441 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 10442 LPFC_CQ_CNT_512); 10443 break; 10444 case 1024: 10445 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 10446 LPFC_CQ_CNT_1024); 10447 break; 10448 } 10449 list_for_each_entry(dmabuf, &cq->page_list, list) { 10450 memset(dmabuf->virt, 0, hw_page_size); 10451 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10452 putPaddrLow(dmabuf->phys); 10453 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10454 putPaddrHigh(dmabuf->phys); 10455 } 10456 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10457 10458 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10459 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 10460 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10461 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10462 if (shdr_status || shdr_add_status || rc) { 10463 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10464 "2501 CQ_CREATE mailbox failed with " 10465 "status x%x add_status x%x, mbx status x%x\n", 10466 shdr_status, shdr_add_status, rc); 10467 status = -ENXIO; 10468 goto out; 10469 } 10470 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 10471 if (cq->queue_id == 0xFFFF) { 10472 status = -ENXIO; 10473 goto out; 10474 } 10475 /* link the cq onto the parent eq child list */ 10476 list_add_tail(&cq->list, &eq->child_list); 10477 /* Set up completion queue's type and subtype */ 10478 cq->type = type; 10479 cq->subtype = subtype; 10480 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 10481 cq->host_index = 0; 10482 cq->hba_index = 0; 10483 10484 out: 10485 mempool_free(mbox, phba->mbox_mem_pool); 10486 return status; 10487 } 10488 10489 /** 10490 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 10491 * @phba: HBA structure that indicates port to create a queue on. 10492 * @mq: The queue structure to use to create the mailbox queue. 10493 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 10494 * @cq: The completion queue to associate with this cq. 10495 * 10496 * This function provides failback (fb) functionality when the 10497 * mq_create_ext fails on older FW generations. It's purpose is identical 10498 * to mq_create_ext otherwise. 10499 * 10500 * This routine cannot fail as all attributes were previously accessed and 10501 * initialized in mq_create_ext. 10502 **/ 10503 static void 10504 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 10505 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 10506 { 10507 struct lpfc_mbx_mq_create *mq_create; 10508 struct lpfc_dmabuf *dmabuf; 10509 int length; 10510 10511 length = (sizeof(struct lpfc_mbx_mq_create) - 10512 sizeof(struct lpfc_sli4_cfg_mhdr)); 10513 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10514 LPFC_MBOX_OPCODE_MQ_CREATE, 10515 length, LPFC_SLI4_MBX_EMBED); 10516 mq_create = &mbox->u.mqe.un.mq_create; 10517 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 10518 mq->page_count); 10519 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 10520 cq->queue_id); 10521 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 10522 switch (mq->entry_count) { 10523 case 16: 10524 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10525 LPFC_MQ_CNT_16); 10526 break; 10527 case 32: 10528 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10529 LPFC_MQ_CNT_32); 10530 break; 10531 case 64: 10532 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10533 LPFC_MQ_CNT_64); 10534 break; 10535 case 128: 10536 bf_set(lpfc_mq_context_count, &mq_create->u.request.context, 10537 LPFC_MQ_CNT_128); 10538 break; 10539 } 10540 list_for_each_entry(dmabuf, &mq->page_list, list) { 10541 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10542 putPaddrLow(dmabuf->phys); 10543 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10544 putPaddrHigh(dmabuf->phys); 10545 } 10546 } 10547 10548 /** 10549 * lpfc_mq_create - Create a mailbox Queue on the HBA 10550 * @phba: HBA structure that indicates port to create a queue on. 10551 * @mq: The queue structure to use to create the mailbox queue. 
@cq: The completion queue to associate with this mailbox queue. 10553 * @subtype: The queue's subtype. 10554 * 10555 * This function creates a mailbox queue, as detailed in @mq, on a port, 10556 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 10557 * 10558 * The @phba struct is used to send mailbox command to HBA. The @mq struct 10559 * is used to get the entry count and entry size that are necessary to 10560 * determine the number of pages to allocate and use for this queue. This 10561 * function will send the MQ_CREATE mailbox command to the HBA to setup the 10562 * mailbox queue. This function is synchronous and will wait for the mailbox 10563 * command to finish before continuing. 10564 * 10565 * On success this function will return a zero. If unable to allocate enough 10566 * memory this function will return -ENOMEM. If the queue create mailbox command 10567 * fails this function will return -ENXIO. 10568 **/ 10569 int32_t 10570 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 10571 struct lpfc_queue *cq, uint32_t subtype) 10572 { 10573 struct lpfc_mbx_mq_create *mq_create; 10574 struct lpfc_mbx_mq_create_ext *mq_create_ext; 10575 struct lpfc_dmabuf *dmabuf; 10576 LPFC_MBOXQ_t *mbox; 10577 int rc, length, status = 0; 10578 uint32_t shdr_status, shdr_add_status; 10579 union lpfc_sli4_cfg_shdr *shdr; 10580 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10581 10582 if (!phba->sli4_hba.pc_sli4_params.supported) 10583 hw_page_size = SLI4_PAGE_SIZE; 10584 10585 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10586 if (!mbox) 10587 return -ENOMEM; 10588 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 10589 sizeof(struct lpfc_sli4_cfg_mhdr)); 10590 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10591 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 10592 length, LPFC_SLI4_MBX_EMBED); 10593 10594 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 10595 bf_set(lpfc_mbx_mq_create_ext_num_pages, 10596 &mq_create_ext->u.request, mq->page_count); 10597 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 10598 &mq_create_ext->u.request, 1); 10599 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 10600 &mq_create_ext->u.request, 1); 10601 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 10602 &mq_create_ext->u.request, 1); 10603 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 10604 &mq_create_ext->u.request, 1); 10605 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 10606 &mq_create_ext->u.request, 1); 10607 bf_set(lpfc_mq_context_cq_id, 10608 &mq_create_ext->u.request.context, cq->queue_id); 10609 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 10610 switch (mq->entry_count) { 10611 default: 10612 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10613 "0362 Unsupported MQ count.
(%d)\n", 10614 mq->entry_count); 10615 if (mq->entry_count < 16) 10616 return -EINVAL; 10617 /* otherwise default to smallest count (drop through) */ 10618 case 16: 10619 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10620 LPFC_MQ_CNT_16); 10621 break; 10622 case 32: 10623 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10624 LPFC_MQ_CNT_32); 10625 break; 10626 case 64: 10627 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10628 LPFC_MQ_CNT_64); 10629 break; 10630 case 128: 10631 bf_set(lpfc_mq_context_count, &mq_create_ext->u.request.context, 10632 LPFC_MQ_CNT_128); 10633 break; 10634 } 10635 list_for_each_entry(dmabuf, &mq->page_list, list) { 10636 memset(dmabuf->virt, 0, hw_page_size); 10637 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 10638 putPaddrLow(dmabuf->phys); 10639 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 10640 putPaddrHigh(dmabuf->phys); 10641 } 10642 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10643 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 10644 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10645 &mq_create_ext->u.response); 10646 if (rc != MBX_SUCCESS) { 10647 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 10648 "2795 MQ_CREATE_EXT failed with " 10649 "status x%x. Failback to MQ_CREATE.\n", 10650 rc); 10651 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 10652 mq_create = &mbox->u.mqe.un.mq_create; 10653 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10654 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 10655 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 10656 &mq_create->u.response); 10657 } 10658 10659 /* The IOCTL status is embedded in the mailbox subheader. */ 10660 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10661 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10662 if (shdr_status || shdr_add_status || rc) { 10663 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10664 "2502 MQ_CREATE mailbox failed with " 10665 "status x%x add_status x%x, mbx status x%x\n", 10666 shdr_status, shdr_add_status, rc); 10667 status = -ENXIO; 10668 goto out; 10669 } 10670 if (mq->queue_id == 0xFFFF) { 10671 status = -ENXIO; 10672 goto out; 10673 } 10674 mq->type = LPFC_MQ; 10675 mq->subtype = subtype; 10676 mq->host_index = 0; 10677 mq->hba_index = 0; 10678 10679 /* link the mq onto the parent cq child list */ 10680 list_add_tail(&mq->list, &cq->child_list); 10681 out: 10682 mempool_free(mbox, phba->mbox_mem_pool); 10683 return status; 10684 } 10685 10686 /** 10687 * lpfc_wq_create - Create a Work Queue on the HBA 10688 * @phba: HBA structure that indicates port to create a queue on. 10689 * @wq: The queue structure to use to create the work queue. 10690 * @cq: The completion queue to bind this work queue to. 10691 * @subtype: The subtype of the work queue indicating its functionality. 10692 * 10693 * This function creates a work queue, as detailed in @wq, on a port, described 10694 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 10695 * 10696 * The @phba struct is used to send mailbox command to HBA. The @wq struct 10697 * is used to get the entry count and entry size that are necessary to 10698 * determine the number of pages to allocate and use for this queue. The @cq 10699 * is used to indicate which completion queue to bind this work queue to. This 10700 * function will send the WQ_CREATE mailbox command to the HBA to setup the 10701 * work queue. 
This function is asynchronous and will wait for the mailbox 10702 * command to finish before continuing. 10703 * 10704 * On success this function will return a zero. If unable to allocate enough 10705 * memory this function will return -ENOMEM. If the queue create mailbox command 10706 * fails this function will return -ENXIO. 10707 **/ 10708 uint32_t 10709 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 10710 struct lpfc_queue *cq, uint32_t subtype) 10711 { 10712 struct lpfc_mbx_wq_create *wq_create; 10713 struct lpfc_dmabuf *dmabuf; 10714 LPFC_MBOXQ_t *mbox; 10715 int rc, length, status = 0; 10716 uint32_t shdr_status, shdr_add_status; 10717 union lpfc_sli4_cfg_shdr *shdr; 10718 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10719 10720 if (!phba->sli4_hba.pc_sli4_params.supported) 10721 hw_page_size = SLI4_PAGE_SIZE; 10722 10723 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10724 if (!mbox) 10725 return -ENOMEM; 10726 length = (sizeof(struct lpfc_mbx_wq_create) - 10727 sizeof(struct lpfc_sli4_cfg_mhdr)); 10728 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10729 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 10730 length, LPFC_SLI4_MBX_EMBED); 10731 wq_create = &mbox->u.mqe.un.wq_create; 10732 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 10733 wq->page_count); 10734 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 10735 cq->queue_id); 10736 list_for_each_entry(dmabuf, &wq->page_list, list) { 10737 memset(dmabuf->virt, 0, hw_page_size); 10738 wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10739 putPaddrLow(dmabuf->phys); 10740 wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10741 putPaddrHigh(dmabuf->phys); 10742 } 10743 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10744 /* The IOCTL status is embedded in the mailbox subheader. */ 10745 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 10746 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10747 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10748 if (shdr_status || shdr_add_status || rc) { 10749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10750 "2503 WQ_CREATE mailbox failed with " 10751 "status x%x add_status x%x, mbx status x%x\n", 10752 shdr_status, shdr_add_status, rc); 10753 status = -ENXIO; 10754 goto out; 10755 } 10756 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 10757 if (wq->queue_id == 0xFFFF) { 10758 status = -ENXIO; 10759 goto out; 10760 } 10761 wq->type = LPFC_WQ; 10762 wq->subtype = subtype; 10763 wq->host_index = 0; 10764 wq->hba_index = 0; 10765 10766 /* link the wq onto the parent cq child list */ 10767 list_add_tail(&wq->list, &cq->child_list); 10768 out: 10769 mempool_free(mbox, phba->mbox_mem_pool); 10770 return status; 10771 } 10772 10773 /** 10774 * lpfc_rq_create - Create a Receive Queue on the HBA 10775 * @phba: HBA structure that indicates port to create a queue on. 10776 * @hrq: The queue structure to use to create the header receive queue. 10777 * @drq: The queue structure to use to create the data receive queue. 10778 * @cq: The completion queue to bind this work queue to. 10779 * 10780 * This function creates a receive buffer queue pair , as detailed in @hrq and 10781 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 10782 * to the HBA. 10783 * 10784 * The @phba struct is used to send mailbox command to HBA. 
The @drq and @hrq 10785 * struct is used to get the entry count that is necessary to determine the 10786 * number of pages to use for this queue. The @cq is used to indicate which 10787 * completion queue to bind received buffers that are posted to these queues to. 10788 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 10789 * receive queue pair. This function is asynchronous and will wait for the 10790 * mailbox command to finish before continuing. 10791 * 10792 * On success this function will return a zero. If unable to allocate enough 10793 * memory this function will return -ENOMEM. If the queue create mailbox command 10794 * fails this function will return -ENXIO. 10795 **/ 10796 uint32_t 10797 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 10798 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 10799 { 10800 struct lpfc_mbx_rq_create *rq_create; 10801 struct lpfc_dmabuf *dmabuf; 10802 LPFC_MBOXQ_t *mbox; 10803 int rc, length, status = 0; 10804 uint32_t shdr_status, shdr_add_status; 10805 union lpfc_sli4_cfg_shdr *shdr; 10806 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 10807 10808 if (!phba->sli4_hba.pc_sli4_params.supported) 10809 hw_page_size = SLI4_PAGE_SIZE; 10810 10811 if (hrq->entry_count != drq->entry_count) 10812 return -EINVAL; 10813 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 10814 if (!mbox) 10815 return -ENOMEM; 10816 length = (sizeof(struct lpfc_mbx_rq_create) - 10817 sizeof(struct lpfc_sli4_cfg_mhdr)); 10818 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10819 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10820 length, LPFC_SLI4_MBX_EMBED); 10821 rq_create = &mbox->u.mqe.un.rq_create; 10822 switch (hrq->entry_count) { 10823 default: 10824 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10825 "2535 Unsupported RQ count. (%d)\n", 10826 hrq->entry_count); 10827 if (hrq->entry_count < 512) 10828 return -EINVAL; 10829 /* otherwise default to smallest count (drop through) */ 10830 case 512: 10831 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10832 LPFC_RQ_RING_SIZE_512); 10833 break; 10834 case 1024: 10835 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10836 LPFC_RQ_RING_SIZE_1024); 10837 break; 10838 case 2048: 10839 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10840 LPFC_RQ_RING_SIZE_2048); 10841 break; 10842 case 4096: 10843 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10844 LPFC_RQ_RING_SIZE_4096); 10845 break; 10846 } 10847 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10848 cq->queue_id); 10849 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10850 hrq->page_count); 10851 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10852 LPFC_HDR_BUF_SIZE); 10853 list_for_each_entry(dmabuf, &hrq->page_list, list) { 10854 memset(dmabuf->virt, 0, hw_page_size); 10855 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10856 putPaddrLow(dmabuf->phys); 10857 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10858 putPaddrHigh(dmabuf->phys); 10859 } 10860 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10861 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 10862 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10863 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10864 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10865 if (shdr_status || shdr_add_status || rc) { 10866 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10867 "2504 RQ_CREATE mailbox failed with " 10868 "status x%x add_status x%x, mbx status x%x\n", 10869 shdr_status, shdr_add_status, rc); 10870 status = -ENXIO; 10871 goto out; 10872 } 10873 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 10874 if (hrq->queue_id == 0xFFFF) { 10875 status = -ENXIO; 10876 goto out; 10877 } 10878 hrq->type = LPFC_HRQ; 10879 hrq->subtype = subtype; 10880 hrq->host_index = 0; 10881 hrq->hba_index = 0; 10882 10883 /* now create the data queue */ 10884 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 10885 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 10886 length, LPFC_SLI4_MBX_EMBED); 10887 switch (drq->entry_count) { 10888 default: 10889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10890 "2536 Unsupported RQ count. (%d)\n", 10891 drq->entry_count); 10892 if (drq->entry_count < 512) 10893 return -EINVAL; 10894 /* otherwise default to smallest count (drop through) */ 10895 case 512: 10896 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10897 LPFC_RQ_RING_SIZE_512); 10898 break; 10899 case 1024: 10900 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10901 LPFC_RQ_RING_SIZE_1024); 10902 break; 10903 case 2048: 10904 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10905 LPFC_RQ_RING_SIZE_2048); 10906 break; 10907 case 4096: 10908 bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, 10909 LPFC_RQ_RING_SIZE_4096); 10910 break; 10911 } 10912 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 10913 cq->queue_id); 10914 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 10915 drq->page_count); 10916 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 10917 LPFC_DATA_BUF_SIZE); 10918 list_for_each_entry(dmabuf, &drq->page_list, list) { 10919 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 10920 putPaddrLow(dmabuf->phys); 10921 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 10922 putPaddrHigh(dmabuf->phys); 10923 } 10924 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 10925 /* The IOCTL status is embedded in the mailbox subheader. */ 10926 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 10927 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10928 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10929 if (shdr_status || shdr_add_status || rc) { 10930 status = -ENXIO; 10931 goto out; 10932 } 10933 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 10934 if (drq->queue_id == 0xFFFF) { 10935 status = -ENXIO; 10936 goto out; 10937 } 10938 drq->type = LPFC_DRQ; 10939 drq->subtype = subtype; 10940 drq->host_index = 0; 10941 drq->hba_index = 0; 10942 10943 /* link the header and data RQs onto the parent cq child list */ 10944 list_add_tail(&hrq->list, &cq->child_list); 10945 list_add_tail(&drq->list, &cq->child_list); 10946 10947 out: 10948 mempool_free(mbox, phba->mbox_mem_pool); 10949 return status; 10950 } 10951 10952 /** 10953 * lpfc_eq_destroy - Destroy an event Queue on the HBA 10954 * @eq: The queue structure associated with the queue to destroy. 
10955 * 10956 * This function destroys a queue, as detailed in @eq by sending an mailbox 10957 * command, specific to the type of queue, to the HBA. 10958 * 10959 * The @eq struct is used to get the queue ID of the queue to destroy. 10960 * 10961 * On success this function will return a zero. If the queue destroy mailbox 10962 * command fails this function will return -ENXIO. 10963 **/ 10964 uint32_t 10965 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 10966 { 10967 LPFC_MBOXQ_t *mbox; 10968 int rc, length, status = 0; 10969 uint32_t shdr_status, shdr_add_status; 10970 union lpfc_sli4_cfg_shdr *shdr; 10971 10972 if (!eq) 10973 return -ENODEV; 10974 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 10975 if (!mbox) 10976 return -ENOMEM; 10977 length = (sizeof(struct lpfc_mbx_eq_destroy) - 10978 sizeof(struct lpfc_sli4_cfg_mhdr)); 10979 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 10980 LPFC_MBOX_OPCODE_EQ_DESTROY, 10981 length, LPFC_SLI4_MBX_EMBED); 10982 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 10983 eq->queue_id); 10984 mbox->vport = eq->phba->pport; 10985 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10986 10987 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 10988 /* The IOCTL status is embedded in the mailbox subheader. */ 10989 shdr = (union lpfc_sli4_cfg_shdr *) 10990 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 10991 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 10992 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 10993 if (shdr_status || shdr_add_status || rc) { 10994 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10995 "2505 EQ_DESTROY mailbox failed with " 10996 "status x%x add_status x%x, mbx status x%x\n", 10997 shdr_status, shdr_add_status, rc); 10998 status = -ENXIO; 10999 } 11000 11001 /* Remove eq from any list */ 11002 list_del_init(&eq->list); 11003 mempool_free(mbox, eq->phba->mbox_mem_pool); 11004 return status; 11005 } 11006 11007 /** 11008 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 11009 * @cq: The queue structure associated with the queue to destroy. 11010 * 11011 * This function destroys a queue, as detailed in @cq by sending an mailbox 11012 * command, specific to the type of queue, to the HBA. 11013 * 11014 * The @cq struct is used to get the queue ID of the queue to destroy. 11015 * 11016 * On success this function will return a zero. If the queue destroy mailbox 11017 * command fails this function will return -ENXIO. 11018 **/ 11019 uint32_t 11020 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 11021 { 11022 LPFC_MBOXQ_t *mbox; 11023 int rc, length, status = 0; 11024 uint32_t shdr_status, shdr_add_status; 11025 union lpfc_sli4_cfg_shdr *shdr; 11026 11027 if (!cq) 11028 return -ENODEV; 11029 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 11030 if (!mbox) 11031 return -ENOMEM; 11032 length = (sizeof(struct lpfc_mbx_cq_destroy) - 11033 sizeof(struct lpfc_sli4_cfg_mhdr)); 11034 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11035 LPFC_MBOX_OPCODE_CQ_DESTROY, 11036 length, LPFC_SLI4_MBX_EMBED); 11037 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 11038 cq->queue_id); 11039 mbox->vport = cq->phba->pport; 11040 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11041 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 11042 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 11043 shdr = (union lpfc_sli4_cfg_shdr *) 11044 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 11045 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11046 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11047 if (shdr_status || shdr_add_status || rc) { 11048 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11049 "2506 CQ_DESTROY mailbox failed with " 11050 "status x%x add_status x%x, mbx status x%x\n", 11051 shdr_status, shdr_add_status, rc); 11052 status = -ENXIO; 11053 } 11054 /* Remove cq from any list */ 11055 list_del_init(&cq->list); 11056 mempool_free(mbox, cq->phba->mbox_mem_pool); 11057 return status; 11058 } 11059 11060 /** 11061 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 11062 * @mq: The queue structure associated with the queue to destroy. 11063 * 11064 * This function destroys a queue, as detailed in @mq by sending a mailbox 11065 * command, specific to the type of queue, to the HBA. 11066 * 11067 * The @mq struct is used to get the queue ID of the queue to destroy. 11068 * 11069 * On success this function will return a zero. If the queue destroy mailbox 11070 * command fails this function will return -ENXIO. 11071 **/ 11072 uint32_t 11073 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 11074 { 11075 LPFC_MBOXQ_t *mbox; 11076 int rc, length, status = 0; 11077 uint32_t shdr_status, shdr_add_status; 11078 union lpfc_sli4_cfg_shdr *shdr; 11079 11080 if (!mq) 11081 return -ENODEV; 11082 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 11083 if (!mbox) 11084 return -ENOMEM; 11085 length = (sizeof(struct lpfc_mbx_mq_destroy) - 11086 sizeof(struct lpfc_sli4_cfg_mhdr)); 11087 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 11088 LPFC_MBOX_OPCODE_MQ_DESTROY, 11089 length, LPFC_SLI4_MBX_EMBED); 11090 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 11091 mq->queue_id); 11092 mbox->vport = mq->phba->pport; 11093 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11094 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 11095 /* The IOCTL status is embedded in the mailbox subheader. */ 11096 shdr = (union lpfc_sli4_cfg_shdr *) 11097 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 11098 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11099 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11100 if (shdr_status || shdr_add_status || rc) { 11101 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11102 "2507 MQ_DESTROY mailbox failed with " 11103 "status x%x add_status x%x, mbx status x%x\n", 11104 shdr_status, shdr_add_status, rc); 11105 status = -ENXIO; 11106 } 11107 /* Remove mq from any list */ 11108 list_del_init(&mq->list); 11109 mempool_free(mbox, mq->phba->mbox_mem_pool); 11110 return status; 11111 } 11112 11113 /** 11114 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 11115 * @wq: The queue structure associated with the queue to destroy. 11116 * 11117 * This function destroys a queue, as detailed in @wq by sending a mailbox 11118 * command, specific to the type of queue, to the HBA. 11119 * 11120 * The @wq struct is used to get the queue ID of the queue to destroy. 11121 * 11122 * On success this function will return a zero. If the queue destroy mailbox 11123 * command fails this function will return -ENXIO.
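 *
 * Illustrative teardown sketch (not from the original source; the ordering
 * shown is an assumption): child queues are normally destroyed before their
 * parents, e.g.
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);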
11124 **/ 11125 uint32_t 11126 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 11127 { 11128 LPFC_MBOXQ_t *mbox; 11129 int rc, length, status = 0; 11130 uint32_t shdr_status, shdr_add_status; 11131 union lpfc_sli4_cfg_shdr *shdr; 11132 11133 if (!wq) 11134 return -ENODEV; 11135 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 11136 if (!mbox) 11137 return -ENOMEM; 11138 length = (sizeof(struct lpfc_mbx_wq_destroy) - 11139 sizeof(struct lpfc_sli4_cfg_mhdr)); 11140 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11141 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 11142 length, LPFC_SLI4_MBX_EMBED); 11143 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 11144 wq->queue_id); 11145 mbox->vport = wq->phba->pport; 11146 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11147 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 11148 shdr = (union lpfc_sli4_cfg_shdr *) 11149 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 11150 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11151 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11152 if (shdr_status || shdr_add_status || rc) { 11153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11154 "2508 WQ_DESTROY mailbox failed with " 11155 "status x%x add_status x%x, mbx status x%x\n", 11156 shdr_status, shdr_add_status, rc); 11157 status = -ENXIO; 11158 } 11159 /* Remove wq from any list */ 11160 list_del_init(&wq->list); 11161 mempool_free(mbox, wq->phba->mbox_mem_pool); 11162 return status; 11163 } 11164 11165 /** 11166 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 11167 * @rq: The queue structure associated with the queue to destroy. 11168 * 11169 * This function destroys a queue, as detailed in @rq by sending an mailbox 11170 * command, specific to the type of queue, to the HBA. 11171 * 11172 * The @rq struct is used to get the queue ID of the queue to destroy. 11173 * 11174 * On success this function will return a zero. If the queue destroy mailbox 11175 * command fails this function will return -ENXIO. 11176 **/ 11177 uint32_t 11178 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 11179 struct lpfc_queue *drq) 11180 { 11181 LPFC_MBOXQ_t *mbox; 11182 int rc, length, status = 0; 11183 uint32_t shdr_status, shdr_add_status; 11184 union lpfc_sli4_cfg_shdr *shdr; 11185 11186 if (!hrq || !drq) 11187 return -ENODEV; 11188 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 11189 if (!mbox) 11190 return -ENOMEM; 11191 length = (sizeof(struct lpfc_mbx_rq_destroy) - 11192 sizeof(struct mbox_header)); 11193 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11194 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 11195 length, LPFC_SLI4_MBX_EMBED); 11196 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 11197 hrq->queue_id); 11198 mbox->vport = hrq->phba->pport; 11199 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11200 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 11201 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 11202 shdr = (union lpfc_sli4_cfg_shdr *) 11203 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 11204 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11205 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11206 if (shdr_status || shdr_add_status || rc) { 11207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11208 "2509 RQ_DESTROY mailbox failed with " 11209 "status x%x add_status x%x, mbx status x%x\n", 11210 shdr_status, shdr_add_status, rc); 11211 if (rc != MBX_TIMEOUT) 11212 mempool_free(mbox, hrq->phba->mbox_mem_pool); 11213 return -ENXIO; 11214 } 11215 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 11216 drq->queue_id); 11217 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 11218 shdr = (union lpfc_sli4_cfg_shdr *) 11219 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 11220 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11221 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11222 if (shdr_status || shdr_add_status || rc) { 11223 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11224 "2510 RQ_DESTROY mailbox failed with " 11225 "status x%x add_status x%x, mbx status x%x\n", 11226 shdr_status, shdr_add_status, rc); 11227 status = -ENXIO; 11228 } 11229 list_del_init(&hrq->list); 11230 list_del_init(&drq->list); 11231 mempool_free(mbox, hrq->phba->mbox_mem_pool); 11232 return status; 11233 } 11234 11235 /** 11236 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 11237 * @phba: The virtual port for which this call being executed. 11238 * @pdma_phys_addr0: Physical address of the 1st SGL page. 11239 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 11240 * @xritag: the xritag that ties this io to the SGL pages. 11241 * 11242 * This routine will post the sgl pages for the IO that has the xritag 11243 * that is in the iocbq structure. The xritag is assigned during iocbq 11244 * creation and persists for as long as the driver is loaded. 11245 * if the caller has fewer than 256 scatter gather segments to map then 11246 * pdma_phys_addr1 should be 0. 11247 * If the caller needs to map more than 256 scatter gather segment then 11248 * pdma_phys_addr1 should be a valid physical address. 11249 * physical address for SGLs must be 64 byte aligned. 11250 * If you are going to map 2 SGL's then the first one must have 256 entries 11251 * the second sgl can have between 1 and 256 entries. 
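 *
 * Illustrative call sketch (not from the original source; sgl_dma_addr and
 * xritag are assumed, caller-owned values): posting a single SGL page for an
 * XRI looks like
 *
 *	rc = lpfc_sli4_post_sgl(phba, sgl_dma_addr, 0, xritag);
 *	if (rc)
 *		return rc;	(rc is -EINVAL, -ENOMEM or -ENXIO)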
11252 * 11253 * Return codes: 11254 * 0 - Success 11255 * -ENXIO, -ENOMEM - Failure 11256 **/ 11257 int 11258 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 11259 dma_addr_t pdma_phys_addr0, 11260 dma_addr_t pdma_phys_addr1, 11261 uint16_t xritag) 11262 { 11263 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 11264 LPFC_MBOXQ_t *mbox; 11265 int rc; 11266 uint32_t shdr_status, shdr_add_status; 11267 union lpfc_sli4_cfg_shdr *shdr; 11268 11269 if (xritag == NO_XRI) { 11270 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11271 "0364 Invalid param:\n"); 11272 return -EINVAL; 11273 } 11274 11275 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11276 if (!mbox) 11277 return -ENOMEM; 11278 11279 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11280 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 11281 sizeof(struct lpfc_mbx_post_sgl_pages) - 11282 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); 11283 11284 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 11285 &mbox->u.mqe.un.post_sgl_pages; 11286 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 11287 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 11288 11289 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 11290 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 11291 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 11292 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 11293 11294 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 11295 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 11296 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 11297 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 11298 if (!phba->sli4_hba.intr_enable) 11299 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11300 else 11301 rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); 11302 /* The IOCTL status is embedded in the mailbox subheader. */ 11303 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 11304 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11305 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11306 if (rc != MBX_TIMEOUT) 11307 mempool_free(mbox, phba->mbox_mem_pool); 11308 if (shdr_status || shdr_add_status || rc) { 11309 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11310 "2511 POST_SGL mailbox failed with " 11311 "status x%x add_status x%x, mbx status x%x\n", 11312 shdr_status, shdr_add_status, rc); 11313 rc = -ENXIO; 11314 } 11315 return 0; 11316 } 11317 11318 /** 11319 * lpfc_sli4_next_xritag - Get an xritag for the io 11320 * @phba: Pointer to HBA context object. 11321 * 11322 * This function gets an xritag for the iocb. If there is no unused xritag 11323 * it will return 0xffff. 11324 * The function returns the allocated xritag if successful, else returns zero. 11325 * Zero is not a valid xritag. 11326 * The caller is not required to hold any lock. 
11327 **/ 11328 uint16_t 11329 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 11330 { 11331 uint16_t xritag; 11332 11333 spin_lock_irq(&phba->hbalock); 11334 xritag = phba->sli4_hba.next_xri; 11335 if ((xritag != (uint16_t) -1) && xritag < 11336 (phba->sli4_hba.max_cfg_param.max_xri 11337 + phba->sli4_hba.max_cfg_param.xri_base)) { 11338 phba->sli4_hba.next_xri++; 11339 phba->sli4_hba.max_cfg_param.xri_used++; 11340 spin_unlock_irq(&phba->hbalock); 11341 return xritag; 11342 } 11343 spin_unlock_irq(&phba->hbalock); 11344 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11345 "2004 Failed to allocate XRI.last XRITAG is %d" 11346 " Max XRI is %d, Used XRI is %d\n", 11347 phba->sli4_hba.next_xri, 11348 phba->sli4_hba.max_cfg_param.max_xri, 11349 phba->sli4_hba.max_cfg_param.xri_used); 11350 return -1; 11351 } 11352 11353 /** 11354 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware. 11355 * @phba: pointer to lpfc hba data structure. 11356 * 11357 * This routine is invoked to post a block of driver's sgl pages to the 11358 * HBA using non-embedded mailbox command. No Lock is held. This routine 11359 * is only called when the driver is loading and after all IO has been 11360 * stopped. 11361 **/ 11362 int 11363 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) 11364 { 11365 struct lpfc_sglq *sglq_entry; 11366 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 11367 struct sgl_page_pairs *sgl_pg_pairs; 11368 void *viraddr; 11369 LPFC_MBOXQ_t *mbox; 11370 uint32_t reqlen, alloclen, pg_pairs; 11371 uint32_t mbox_tmo; 11372 uint16_t xritag_start = 0; 11373 int els_xri_cnt, rc = 0; 11374 uint32_t shdr_status, shdr_add_status; 11375 union lpfc_sli4_cfg_shdr *shdr; 11376 11377 /* The number of sgls to be posted */ 11378 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); 11379 11380 reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + 11381 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 11382 if (reqlen > SLI4_PAGE_SIZE) { 11383 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11384 "2559 Block sgl registration required DMA " 11385 "size (%d) great than a page\n", reqlen); 11386 return -ENOMEM; 11387 } 11388 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11389 if (!mbox) { 11390 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11391 "2560 Failed to allocate mbox cmd memory\n"); 11392 return -ENOMEM; 11393 } 11394 11395 /* Allocate DMA memory and set up the non-embedded mailbox command */ 11396 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11397 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 11398 LPFC_SLI4_MBX_NEMBED); 11399 11400 if (alloclen < reqlen) { 11401 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11402 "0285 Allocated DMA memory size (%d) is " 11403 "less than the requested DMA memory " 11404 "size (%d)\n", alloclen, reqlen); 11405 lpfc_sli4_mbox_cmd_free(phba, mbox); 11406 return -ENOMEM; 11407 } 11408 /* Get the first SGE entry from the non-embedded DMA memory */ 11409 viraddr = mbox->sge_array->addr[0]; 11410 11411 /* Set up the SGL pages in the non-embedded DMA pages */ 11412 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 11413 sgl_pg_pairs = &sgl->sgl_pg_pairs; 11414 11415 for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { 11416 sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; 11417 /* Set up the sge entry */ 11418 sgl_pg_pairs->sgl_pg0_addr_lo = 11419 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 11420 sgl_pg_pairs->sgl_pg0_addr_hi = 11421 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 11422 sgl_pg_pairs->sgl_pg1_addr_lo = 11423 
cpu_to_le32(putPaddrLow(0)); 11424 sgl_pg_pairs->sgl_pg1_addr_hi = 11425 cpu_to_le32(putPaddrHigh(0)); 11426 /* Keep the first xritag on the list */ 11427 if (pg_pairs == 0) 11428 xritag_start = sglq_entry->sli4_xritag; 11429 sgl_pg_pairs++; 11430 } 11431 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 11432 bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt); 11433 /* Perform endian conversion if necessary */ 11434 sgl->word0 = cpu_to_le32(sgl->word0); 11435 11436 if (!phba->sli4_hba.intr_enable) 11437 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11438 else { 11439 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 11440 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 11441 } 11442 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 11443 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11444 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11445 if (rc != MBX_TIMEOUT) 11446 lpfc_sli4_mbox_cmd_free(phba, mbox); 11447 if (shdr_status || shdr_add_status || rc) { 11448 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11449 "2513 POST_SGL_BLOCK mailbox command failed " 11450 "status x%x add_status x%x mbx status x%x\n", 11451 shdr_status, shdr_add_status, rc); 11452 rc = -ENXIO; 11453 } 11454 return rc; 11455 } 11456 11457 /** 11458 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 11459 * @phba: pointer to lpfc hba data structure. 11460 * @sblist: pointer to scsi buffer list. 11461 * @count: number of scsi buffers on the list. 11462 * 11463 * This routine is invoked to post a block of @count scsi sgl pages from a 11464 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 11465 * No Lock is held. 11466 * 11467 **/ 11468 int 11469 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, 11470 int cnt) 11471 { 11472 struct lpfc_scsi_buf *psb; 11473 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 11474 struct sgl_page_pairs *sgl_pg_pairs; 11475 void *viraddr; 11476 LPFC_MBOXQ_t *mbox; 11477 uint32_t reqlen, alloclen, pg_pairs; 11478 uint32_t mbox_tmo; 11479 uint16_t xritag_start = 0; 11480 int rc = 0; 11481 uint32_t shdr_status, shdr_add_status; 11482 dma_addr_t pdma_phys_bpl1; 11483 union lpfc_sli4_cfg_shdr *shdr; 11484 11485 /* Calculate the requested length of the dma memory */ 11486 reqlen = cnt * sizeof(struct sgl_page_pairs) + 11487 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 11488 if (reqlen > SLI4_PAGE_SIZE) { 11489 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 11490 "0217 Block sgl registration required DMA " 11491 "size (%d) great than a page\n", reqlen); 11492 return -ENOMEM; 11493 } 11494 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 11495 if (!mbox) { 11496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11497 "0283 Failed to allocate mbox cmd memory\n"); 11498 return -ENOMEM; 11499 } 11500 11501 /* Allocate DMA memory and set up the non-embedded mailbox command */ 11502 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 11503 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 11504 LPFC_SLI4_MBX_NEMBED); 11505 11506 if (alloclen < reqlen) { 11507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11508 "2561 Allocated DMA memory size (%d) is " 11509 "less than the requested DMA memory " 11510 "size (%d)\n", alloclen, reqlen); 11511 lpfc_sli4_mbox_cmd_free(phba, mbox); 11512 return -ENOMEM; 11513 } 11514 /* Get the first SGE entry from the non-embedded DMA memory */ 11515 viraddr = mbox->sge_array->addr[0]; 11516 11517 /* Set up the SGL pages in the non-embedded DMA 
pages */ 11518 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 11519 sgl_pg_pairs = &sgl->sgl_pg_pairs; 11520 11521 pg_pairs = 0; 11522 list_for_each_entry(psb, sblist, list) { 11523 /* Set up the sge entry */ 11524 sgl_pg_pairs->sgl_pg0_addr_lo = 11525 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 11526 sgl_pg_pairs->sgl_pg0_addr_hi = 11527 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 11528 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 11529 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 11530 else 11531 pdma_phys_bpl1 = 0; 11532 sgl_pg_pairs->sgl_pg1_addr_lo = 11533 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 11534 sgl_pg_pairs->sgl_pg1_addr_hi = 11535 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 11536 /* Keep the first xritag on the list */ 11537 if (pg_pairs == 0) 11538 xritag_start = psb->cur_iocbq.sli4_xritag; 11539 sgl_pg_pairs++; 11540 pg_pairs++; 11541 } 11542 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 11543 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 11544 /* Perform endian conversion if necessary */ 11545 sgl->word0 = cpu_to_le32(sgl->word0); 11546 11547 if (!phba->sli4_hba.intr_enable) 11548 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 11549 else { 11550 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 11551 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 11552 } 11553 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 11554 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 11555 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 11556 if (rc != MBX_TIMEOUT) 11557 lpfc_sli4_mbox_cmd_free(phba, mbox); 11558 if (shdr_status || shdr_add_status || rc) { 11559 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11560 "2564 POST_SGL_BLOCK mailbox command failed " 11561 "status x%x add_status x%x mbx status x%x\n", 11562 shdr_status, shdr_add_status, rc); 11563 rc = -ENXIO; 11564 } 11565 return rc; 11566 } 11567 11568 /** 11569 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 11570 * @phba: pointer to lpfc_hba struct that the frame was received on 11571 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11572 * 11573 * This function checks the fields in the @fc_hdr to see if the FC frame is a 11574 * valid type of frame that the LPFC driver will handle. This function will 11575 * return a zero if the frame is a valid frame or a non zero value when the 11576 * frame does not pass the check. 
11577 **/ 11578 static int 11579 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 11580 { 11581 /* make rctl_names static to save stack space */ 11582 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 11583 char *type_names[] = FC_TYPE_NAMES_INIT; 11584 struct fc_vft_header *fc_vft_hdr; 11585 11586 switch (fc_hdr->fh_r_ctl) { 11587 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 11588 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 11589 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 11590 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 11591 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 11592 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 11593 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 11594 case FC_RCTL_DD_CMD_STATUS: /* command status */ 11595 case FC_RCTL_ELS_REQ: /* extended link services request */ 11596 case FC_RCTL_ELS_REP: /* extended link services reply */ 11597 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 11598 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 11599 case FC_RCTL_BA_NOP: /* basic link service NOP */ 11600 case FC_RCTL_BA_ABTS: /* basic link service abort */ 11601 case FC_RCTL_BA_RMC: /* remove connection */ 11602 case FC_RCTL_BA_ACC: /* basic accept */ 11603 case FC_RCTL_BA_RJT: /* basic reject */ 11604 case FC_RCTL_BA_PRMT: 11605 case FC_RCTL_ACK_1: /* acknowledge_1 */ 11606 case FC_RCTL_ACK_0: /* acknowledge_0 */ 11607 case FC_RCTL_P_RJT: /* port reject */ 11608 case FC_RCTL_F_RJT: /* fabric reject */ 11609 case FC_RCTL_P_BSY: /* port busy */ 11610 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 11611 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 11612 case FC_RCTL_LCR: /* link credit reset */ 11613 case FC_RCTL_END: /* end */ 11614 break; 11615 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 11616 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 11617 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 11618 return lpfc_fc_frame_check(phba, fc_hdr); 11619 default: 11620 goto drop; 11621 } 11622 switch (fc_hdr->fh_type) { 11623 case FC_TYPE_BLS: 11624 case FC_TYPE_ELS: 11625 case FC_TYPE_FCP: 11626 case FC_TYPE_CT: 11627 break; 11628 case FC_TYPE_IP: 11629 case FC_TYPE_ILS: 11630 default: 11631 goto drop; 11632 } 11633 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11634 "2538 Received frame rctl:%s type:%s\n", 11635 rctl_names[fc_hdr->fh_r_ctl], 11636 type_names[fc_hdr->fh_type]); 11637 return 0; 11638 drop: 11639 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11640 "2539 Dropped frame rctl:%s type:%s\n", 11641 rctl_names[fc_hdr->fh_r_ctl], 11642 type_names[fc_hdr->fh_type]); 11643 return 1; 11644 } 11645 11646 /** 11647 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 11648 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11649 * 11650 * This function processes the FC header to retrieve the VFI from the VF 11651 * header, if one exists. This function will return the VFI if one exists 11652 * or 0 if no VSAN Header exists. 
11653 **/ 11654 static uint32_t 11655 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 11656 { 11657 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 11658 11659 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 11660 return 0; 11661 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 11662 } 11663 11664 /** 11665 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 11666 * @phba: Pointer to the HBA structure to search for the vport on 11667 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 11668 * @fcfi: The FC Fabric ID that the frame came from 11669 * 11670 * This function searches the @phba for a vport that matches the content of the 11671 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 11672 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 11673 * returns the matching vport pointer or NULL if unable to match frame to a 11674 * vport. 11675 **/ 11676 static struct lpfc_vport * 11677 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 11678 uint16_t fcfi) 11679 { 11680 struct lpfc_vport **vports; 11681 struct lpfc_vport *vport = NULL; 11682 int i; 11683 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 11684 fc_hdr->fh_d_id[1] << 8 | 11685 fc_hdr->fh_d_id[2]); 11686 11687 vports = lpfc_create_vport_work_array(phba); 11688 if (vports != NULL) 11689 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 11690 if (phba->fcf.fcfi == fcfi && 11691 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 11692 vports[i]->fc_myDID == did) { 11693 vport = vports[i]; 11694 break; 11695 } 11696 } 11697 lpfc_destroy_vport_work_array(phba, vports); 11698 return vport; 11699 } 11700 11701 /** 11702 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 11703 * @vport: The vport to work on. 11704 * 11705 * This function updates the receive sequence time stamp for this vport. The 11706 * receive sequence time stamp indicates the time that the last frame of the 11707 * the sequence that has been idle for the longest amount of time was received. 11708 * the driver uses this time stamp to indicate if any received sequences have 11709 * timed out. 11710 **/ 11711 void 11712 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 11713 { 11714 struct lpfc_dmabuf *h_buf; 11715 struct hbq_dmabuf *dmabuf = NULL; 11716 11717 /* get the oldest sequence on the rcv list */ 11718 h_buf = list_get_first(&vport->rcv_buffer_list, 11719 struct lpfc_dmabuf, list); 11720 if (!h_buf) 11721 return; 11722 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11723 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 11724 } 11725 11726 /** 11727 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 11728 * @vport: The vport that the received sequences were sent to. 11729 * 11730 * This function cleans up all outstanding received sequences. This is called 11731 * by the driver when a link event or user action invalidates all the received 11732 * sequences. 
11733 **/ 11734 void 11735 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 11736 { 11737 struct lpfc_dmabuf *h_buf, *hnext; 11738 struct lpfc_dmabuf *d_buf, *dnext; 11739 struct hbq_dmabuf *dmabuf = NULL; 11740 11741 /* start with the oldest sequence on the rcv list */ 11742 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 11743 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11744 list_del_init(&dmabuf->hbuf.list); 11745 list_for_each_entry_safe(d_buf, dnext, 11746 &dmabuf->dbuf.list, list) { 11747 list_del_init(&d_buf->list); 11748 lpfc_in_buf_free(vport->phba, d_buf); 11749 } 11750 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 11751 } 11752 } 11753 11754 /** 11755 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 11756 * @vport: The vport that the received sequences were sent to. 11757 * 11758 * This function determines whether any received sequences have timed out by 11759 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 11760 * indicates that there is at least one timed out sequence this routine will 11761 * go through the received sequences one at a time from most inactive to most 11762 * active to determine which ones need to be cleaned up. Once it has determined 11763 * that a sequence needs to be cleaned up it will simply free up the resources 11764 * without sending an abort. 11765 **/ 11766 void 11767 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 11768 { 11769 struct lpfc_dmabuf *h_buf, *hnext; 11770 struct lpfc_dmabuf *d_buf, *dnext; 11771 struct hbq_dmabuf *dmabuf = NULL; 11772 unsigned long timeout; 11773 int abort_count = 0; 11774 11775 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 11776 vport->rcv_buffer_time_stamp); 11777 if (list_empty(&vport->rcv_buffer_list) || 11778 time_before(jiffies, timeout)) 11779 return; 11780 /* start with the oldest sequence on the rcv list */ 11781 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 11782 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11783 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 11784 dmabuf->time_stamp); 11785 if (time_before(jiffies, timeout)) 11786 break; 11787 abort_count++; 11788 list_del_init(&dmabuf->hbuf.list); 11789 list_for_each_entry_safe(d_buf, dnext, 11790 &dmabuf->dbuf.list, list) { 11791 list_del_init(&d_buf->list); 11792 lpfc_in_buf_free(vport->phba, d_buf); 11793 } 11794 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 11795 } 11796 if (abort_count) 11797 lpfc_update_rcv_time_stamp(vport); 11798 } 11799 11800 /** 11801 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 11802 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 11803 * 11804 * This function searches through the existing incomplete sequences that have 11805 * been sent to this @vport. If the frame matches one of the incomplete 11806 * sequences then the dbuf in the @dmabuf is added to the list of frames that 11807 * make up that sequence. If no sequence is found that matches this frame then 11808 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 11809 * This function returns a pointer to the first dmabuf in the sequence list that 11810 * the frame was linked to. 
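 *
 * For illustration (mirroring the checks in the function body below, with a
 * and b standing for two hypothetical struct fc_frame_header pointers), two
 * frames belong to the same sequence when
 *
 *	(a->fh_seq_id == b->fh_seq_id) &&
 *	(a->fh_ox_id == b->fh_ox_id) &&
 *	!memcmp(&a->fh_s_id, &b->fh_s_id, 3)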
11811 **/ 11812 static struct hbq_dmabuf * 11813 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 11814 { 11815 struct fc_frame_header *new_hdr; 11816 struct fc_frame_header *temp_hdr; 11817 struct lpfc_dmabuf *d_buf; 11818 struct lpfc_dmabuf *h_buf; 11819 struct hbq_dmabuf *seq_dmabuf = NULL; 11820 struct hbq_dmabuf *temp_dmabuf = NULL; 11821 11822 INIT_LIST_HEAD(&dmabuf->dbuf.list); 11823 dmabuf->time_stamp = jiffies; 11824 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11825 /* Use the hdr_buf to find the sequence that this frame belongs to */ 11826 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11827 temp_hdr = (struct fc_frame_header *)h_buf->virt; 11828 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 11829 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 11830 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 11831 continue; 11832 /* found a pending sequence that matches this frame */ 11833 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11834 break; 11835 } 11836 if (!seq_dmabuf) { 11837 /* 11838 * This indicates first frame received for this sequence. 11839 * Queue the buffer on the vport's rcv_buffer_list. 11840 */ 11841 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11842 lpfc_update_rcv_time_stamp(vport); 11843 return dmabuf; 11844 } 11845 temp_hdr = seq_dmabuf->hbuf.virt; 11846 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 11847 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 11848 list_del_init(&seq_dmabuf->hbuf.list); 11849 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 11850 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 11851 lpfc_update_rcv_time_stamp(vport); 11852 return dmabuf; 11853 } 11854 /* move this sequence to the tail to indicate a young sequence */ 11855 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 11856 seq_dmabuf->time_stamp = jiffies; 11857 lpfc_update_rcv_time_stamp(vport); 11858 if (list_empty(&seq_dmabuf->dbuf.list)) { 11859 temp_hdr = dmabuf->hbuf.virt; 11860 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 11861 return seq_dmabuf; 11862 } 11863 /* find the correct place in the sequence to insert this frame */ 11864 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 11865 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 11866 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 11867 /* 11868 * If the frame's sequence count is greater than the frame on 11869 * the list then insert the frame right after this frame 11870 */ 11871 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 11872 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 11873 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 11874 return seq_dmabuf; 11875 } 11876 } 11877 return NULL; 11878 } 11879 11880 /** 11881 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 11882 * @vport: pointer to a virtual port 11883 * @dmabuf: pointer to a dmabuf that describes the FC sequence 11884 * 11885 * This function tries to abort the partially assembled sequence described 11886 * by the information from the basic abort @dmabuf. It checks to see whether 11887 * such a partially assembled sequence is held by the driver. If so, it shall free up all 11888 * the frames from the partially assembled sequence.
11889 * 11890 * Return 11891 * true -- if there is matching partially assembled sequence present and all 11892 * the frames freed with the sequence; 11893 * false -- if there is no matching partially assembled sequence present so 11894 * nothing got aborted in the lower layer driver 11895 **/ 11896 static bool 11897 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 11898 struct hbq_dmabuf *dmabuf) 11899 { 11900 struct fc_frame_header *new_hdr; 11901 struct fc_frame_header *temp_hdr; 11902 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 11903 struct hbq_dmabuf *seq_dmabuf = NULL; 11904 11905 /* Use the hdr_buf to find the sequence that matches this frame */ 11906 INIT_LIST_HEAD(&dmabuf->dbuf.list); 11907 INIT_LIST_HEAD(&dmabuf->hbuf.list); 11908 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 11909 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 11910 temp_hdr = (struct fc_frame_header *)h_buf->virt; 11911 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 11912 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 11913 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 11914 continue; 11915 /* found a pending sequence that matches this frame */ 11916 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 11917 break; 11918 } 11919 11920 /* Free up all the frames from the partially assembled sequence */ 11921 if (seq_dmabuf) { 11922 list_for_each_entry_safe(d_buf, n_buf, 11923 &seq_dmabuf->dbuf.list, list) { 11924 list_del_init(&d_buf->list); 11925 lpfc_in_buf_free(vport->phba, d_buf); 11926 } 11927 return true; 11928 } 11929 return false; 11930 } 11931 11932 /** 11933 * lpfc_sli4_seq_abort_acc_cmpl - Accept seq abort iocb complete handler 11934 * @phba: Pointer to HBA context object. 11935 * @cmd_iocbq: pointer to the command iocbq structure. 11936 * @rsp_iocbq: pointer to the response iocbq structure. 11937 * 11938 * This function handles the sequence abort accept iocb command complete 11939 * event. It properly releases the memory allocated to the sequence abort 11940 * accept iocb. 11941 **/ 11942 static void 11943 lpfc_sli4_seq_abort_acc_cmpl(struct lpfc_hba *phba, 11944 struct lpfc_iocbq *cmd_iocbq, 11945 struct lpfc_iocbq *rsp_iocbq) 11946 { 11947 if (cmd_iocbq) 11948 lpfc_sli_release_iocbq(phba, cmd_iocbq); 11949 } 11950 11951 /** 11952 * lpfc_sli4_seq_abort_acc - Accept sequence abort 11953 * @phba: Pointer to HBA context object. 11954 * @fc_hdr: pointer to a FC frame header. 11955 * 11956 * This function sends a basic accept to a previous unsol sequence abort 11957 * event after aborting the sequence handling. 
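 *
 * A minimal usage sketch (mirroring the unsolicited abort handling later
 * in this file): the caller copies the FC header out of the receive
 * buffer before that buffer can be freed, then sends the accept:
 *
 *   memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
 *   ...
 *   lpfc_sli4_seq_abort_acc(phba, &fc_hdr);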
11958 **/ 11959 static void 11960 lpfc_sli4_seq_abort_acc(struct lpfc_hba *phba, 11961 struct fc_frame_header *fc_hdr) 11962 { 11963 struct lpfc_iocbq *ctiocb = NULL; 11964 struct lpfc_nodelist *ndlp; 11965 uint16_t oxid, rxid; 11966 uint32_t sid, fctl; 11967 IOCB_t *icmd; 11968 11969 if (!lpfc_is_link_up(phba)) 11970 return; 11971 11972 sid = sli4_sid_from_fc_hdr(fc_hdr); 11973 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 11974 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 11975 11976 ndlp = lpfc_findnode_did(phba->pport, sid); 11977 if (!ndlp) { 11978 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 11979 "1268 Find ndlp returned NULL for oxid:x%x " 11980 "SID:x%x\n", oxid, sid); 11981 return; 11982 } 11983 if (rxid >= phba->sli4_hba.max_cfg_param.xri_base 11984 && rxid <= (phba->sli4_hba.max_cfg_param.max_xri 11985 + phba->sli4_hba.max_cfg_param.xri_base)) 11986 lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0); 11987 11988 /* Allocate buffer for acc iocb */ 11989 ctiocb = lpfc_sli_get_iocbq(phba); 11990 if (!ctiocb) 11991 return; 11992 11993 /* Extract the F_CTL field from FC_HDR */ 11994 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 11995 11996 icmd = &ctiocb->iocb; 11997 icmd->un.xseq64.bdl.bdeSize = 0; 11998 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 11999 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 12000 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 12001 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 12002 12003 /* Fill in the rest of iocb fields */ 12004 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 12005 icmd->ulpBdeCount = 0; 12006 icmd->ulpLe = 1; 12007 icmd->ulpClass = CLASS3; 12008 icmd->ulpContext = ndlp->nlp_rpi; 12009 ctiocb->context1 = ndlp; 12010 12011 ctiocb->iocb_cmpl = NULL; 12012 ctiocb->vport = phba->pport; 12013 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_acc_cmpl; 12014 12015 if (fctl & FC_FC_EX_CTX) { 12016 /* ABTS sent by responder to CT exchange, construction 12017 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 12018 * field and RX_ID from ABTS for RX_ID field. 12019 */ 12020 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_RSP); 12021 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, rxid); 12022 ctiocb->sli4_xritag = oxid; 12023 } else { 12024 /* ABTS sent by initiator to CT exchange, construction 12025 * of BA_ACC will need to allocate a new XRI as for the 12026 * XRI_TAG and RX_ID fields. 12027 */ 12028 bf_set(lpfc_abts_orig, &icmd->un.bls_acc, LPFC_ABTS_UNSOL_INT); 12029 bf_set(lpfc_abts_rxid, &icmd->un.bls_acc, NO_XRI); 12030 ctiocb->sli4_xritag = NO_XRI; 12031 } 12032 bf_set(lpfc_abts_oxid, &icmd->un.bls_acc, oxid); 12033 12034 /* Xmit CT abts accept on exchange <xid> */ 12035 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 12036 "1200 Xmit CT ABTS ACC on exchange x%x Data: x%x\n", 12037 CMD_XMIT_BLS_RSP64_CX, phba->link_state); 12038 lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 12039 } 12040 12041 /** 12042 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 12043 * @vport: Pointer to the vport on which this sequence was received 12044 * @dmabuf: pointer to a dmabuf that describes the FC sequence 12045 * 12046 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 12047 * receive sequence is only partially assembed by the driver, it shall abort 12048 * the partially assembled frames for the sequence. Otherwise, if the 12049 * unsolicited receive sequence has been completely assembled and passed to 12050 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 12051 * unsolicited sequence has been aborted. 
After that, it will issue a basic 12052 * accept to accept the abort. 12053 **/ 12054 void 12055 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 12056 struct hbq_dmabuf *dmabuf) 12057 { 12058 struct lpfc_hba *phba = vport->phba; 12059 struct fc_frame_header fc_hdr; 12060 uint32_t fctl; 12061 bool abts_par; 12062 12063 /* Make a copy of fc_hdr before the dmabuf being released */ 12064 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 12065 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 12066 12067 if (fctl & FC_FC_EX_CTX) { 12068 /* 12069 * ABTS sent by responder to exchange, just free the buffer 12070 */ 12071 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12072 } else { 12073 /* 12074 * ABTS sent by initiator to exchange, need to do cleanup 12075 */ 12076 /* Try to abort partially assembled seq */ 12077 abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf); 12078 12079 /* Send abort to ULP if partially seq abort failed */ 12080 if (abts_par == false) 12081 lpfc_sli4_send_seq_to_ulp(vport, dmabuf); 12082 else 12083 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12084 } 12085 /* Send basic accept (BA_ACC) to the abort requester */ 12086 lpfc_sli4_seq_abort_acc(phba, &fc_hdr); 12087 } 12088 12089 /** 12090 * lpfc_seq_complete - Indicates if a sequence is complete 12091 * @dmabuf: pointer to a dmabuf that describes the FC sequence 12092 * 12093 * This function checks the sequence, starting with the frame described by 12094 * @dmabuf, to see if all the frames associated with this sequence are present. 12095 * the frames associated with this sequence are linked to the @dmabuf using the 12096 * dbuf list. This function looks for two major things. 1) That the first frame 12097 * has a sequence count of zero. 2) There is a frame with last frame of sequence 12098 * set. 3) That there are no holes in the sequence count. The function will 12099 * return 1 when the sequence is complete, otherwise it will return 0. 12100 **/ 12101 static int 12102 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 12103 { 12104 struct fc_frame_header *hdr; 12105 struct lpfc_dmabuf *d_buf; 12106 struct hbq_dmabuf *seq_dmabuf; 12107 uint32_t fctl; 12108 int seq_count = 0; 12109 12110 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 12111 /* make sure first fame of sequence has a sequence count of zero */ 12112 if (hdr->fh_seq_cnt != seq_count) 12113 return 0; 12114 fctl = (hdr->fh_f_ctl[0] << 16 | 12115 hdr->fh_f_ctl[1] << 8 | 12116 hdr->fh_f_ctl[2]); 12117 /* If last frame of sequence we can return success. */ 12118 if (fctl & FC_FC_END_SEQ) 12119 return 1; 12120 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 12121 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 12122 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 12123 /* If there is a hole in the sequence count then fail. */ 12124 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 12125 return 0; 12126 fctl = (hdr->fh_f_ctl[0] << 16 | 12127 hdr->fh_f_ctl[1] << 8 | 12128 hdr->fh_f_ctl[2]); 12129 /* If last frame of sequence we can return success. */ 12130 if (fctl & FC_FC_END_SEQ) 12131 return 1; 12132 } 12133 return 0; 12134 } 12135 12136 /** 12137 * lpfc_prep_seq - Prep sequence for ULP processing 12138 * @vport: Pointer to the vport on which this sequence was received 12139 * @dmabuf: pointer to a dmabuf that describes the FC sequence 12140 * 12141 * This function takes a sequence, described by a list of frames, and creates 12142 * a list of iocbq structures to describe the sequence. 
This iocbq list will be 12143 * used to issue to the generic unsolicited sequence handler. This routine 12144 * returns a pointer to the first iocbq in the list. If the function is unable 12145 * to allocate an iocbq then it throw out the received frames that were not 12146 * able to be described and return a pointer to the first iocbq. If unable to 12147 * allocate any iocbqs (including the first) this function will return NULL. 12148 **/ 12149 static struct lpfc_iocbq * 12150 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 12151 { 12152 struct lpfc_dmabuf *d_buf, *n_buf; 12153 struct lpfc_iocbq *first_iocbq, *iocbq; 12154 struct fc_frame_header *fc_hdr; 12155 uint32_t sid; 12156 struct ulp_bde64 *pbde; 12157 12158 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 12159 /* remove from receive buffer list */ 12160 list_del_init(&seq_dmabuf->hbuf.list); 12161 lpfc_update_rcv_time_stamp(vport); 12162 /* get the Remote Port's SID */ 12163 sid = sli4_sid_from_fc_hdr(fc_hdr); 12164 /* Get an iocbq struct to fill in. */ 12165 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 12166 if (first_iocbq) { 12167 /* Initialize the first IOCB. */ 12168 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 12169 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 12170 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 12171 first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); 12172 first_iocbq->iocb.unsli3.rcvsli3.vpi = 12173 vport->vpi + vport->phba->vpi_base; 12174 /* put the first buffer into the first IOCBq */ 12175 first_iocbq->context2 = &seq_dmabuf->dbuf; 12176 first_iocbq->context3 = NULL; 12177 first_iocbq->iocb.ulpBdeCount = 1; 12178 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 12179 LPFC_DATA_BUF_SIZE; 12180 first_iocbq->iocb.un.rcvels.remoteID = sid; 12181 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 12182 bf_get(lpfc_rcqe_length, 12183 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 12184 } 12185 iocbq = first_iocbq; 12186 /* 12187 * Each IOCBq can have two Buffers assigned, so go through the list 12188 * of buffers for this sequence and save two buffers in each IOCBq 12189 */ 12190 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 12191 if (!iocbq) { 12192 lpfc_in_buf_free(vport->phba, d_buf); 12193 continue; 12194 } 12195 if (!iocbq->context3) { 12196 iocbq->context3 = d_buf; 12197 iocbq->iocb.ulpBdeCount++; 12198 pbde = (struct ulp_bde64 *) 12199 &iocbq->iocb.unsli3.sli3Words[4]; 12200 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 12201 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 12202 bf_get(lpfc_rcqe_length, 12203 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 12204 } else { 12205 iocbq = lpfc_sli_get_iocbq(vport->phba); 12206 if (!iocbq) { 12207 if (first_iocbq) { 12208 first_iocbq->iocb.ulpStatus = 12209 IOSTAT_FCP_RSP_ERROR; 12210 first_iocbq->iocb.un.ulpWord[4] = 12211 IOERR_NO_RESOURCES; 12212 } 12213 lpfc_in_buf_free(vport->phba, d_buf); 12214 continue; 12215 } 12216 iocbq->context2 = d_buf; 12217 iocbq->context3 = NULL; 12218 iocbq->iocb.ulpBdeCount = 1; 12219 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 12220 LPFC_DATA_BUF_SIZE; 12221 first_iocbq->iocb.unsli3.rcvsli3.acc_len += 12222 bf_get(lpfc_rcqe_length, 12223 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 12224 iocbq->iocb.un.rcvels.remoteID = sid; 12225 list_add_tail(&iocbq->list, &first_iocbq->list); 12226 } 12227 } 12228 return first_iocbq; 12229 } 12230 12231 static void 12232 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 12233 struct hbq_dmabuf *seq_dmabuf) 12234 { 12235 struct fc_frame_header 
*fc_hdr; 12236 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 12237 struct lpfc_hba *phba = vport->phba; 12238 12239 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 12240 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 12241 if (!iocbq) { 12242 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12243 "2707 Ring %d handler: Failed to allocate " 12244 "iocb Rctl x%x Type x%x received\n", 12245 LPFC_ELS_RING, 12246 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 12247 return; 12248 } 12249 if (!lpfc_complete_unsol_iocb(phba, 12250 &phba->sli.ring[LPFC_ELS_RING], 12251 iocbq, fc_hdr->fh_r_ctl, 12252 fc_hdr->fh_type)) 12253 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12254 "2540 Ring %d handler: unexpected Rctl " 12255 "x%x Type x%x received\n", 12256 LPFC_ELS_RING, 12257 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 12258 12259 /* Free iocb created in lpfc_prep_seq */ 12260 list_for_each_entry_safe(curr_iocb, next_iocb, 12261 &iocbq->list, list) { 12262 list_del_init(&curr_iocb->list); 12263 lpfc_sli_release_iocbq(phba, curr_iocb); 12264 } 12265 lpfc_sli_release_iocbq(phba, iocbq); 12266 } 12267 12268 /** 12269 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 12270 * @phba: Pointer to HBA context object. 12271 * 12272 * This function is called with no lock held. This function processes all 12273 * the received buffers and gives it to upper layers when a received buffer 12274 * indicates that it is the final frame in the sequence. The interrupt 12275 * service routine processes received buffers at interrupt contexts and adds 12276 * received dma buffers to the rb_pend_list queue and signals the worker thread. 12277 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 12278 * appropriate receive function when the final frame in a sequence is received. 12279 **/ 12280 void 12281 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 12282 struct hbq_dmabuf *dmabuf) 12283 { 12284 struct hbq_dmabuf *seq_dmabuf; 12285 struct fc_frame_header *fc_hdr; 12286 struct lpfc_vport *vport; 12287 uint32_t fcfi; 12288 12289 /* Process each received buffer */ 12290 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 12291 /* check to see if this a valid type of frame */ 12292 if (lpfc_fc_frame_check(phba, fc_hdr)) { 12293 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12294 return; 12295 } 12296 fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); 12297 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 12298 if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) { 12299 /* throw out the frame */ 12300 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12301 return; 12302 } 12303 /* Handle the basic abort sequence (BA_ABTS) event */ 12304 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 12305 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 12306 return; 12307 } 12308 12309 /* Link this frame */ 12310 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 12311 if (!seq_dmabuf) { 12312 /* unable to add frame to vport - throw it out */ 12313 lpfc_in_buf_free(phba, &dmabuf->dbuf); 12314 return; 12315 } 12316 /* If not last frame in sequence continue processing frames. */ 12317 if (!lpfc_seq_complete(seq_dmabuf)) 12318 return; 12319 12320 /* Send the complete sequence to the upper layer protocol */ 12321 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 12322 } 12323 12324 /** 12325 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 12326 * @phba: pointer to lpfc hba data structure. 
12327 * 12328 * This routine is invoked to post rpi header templates to the 12329 * HBA consistent with the SLI-4 interface spec. This routine 12330 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 12331 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 12332 * 12333 * This routine does not require any locks. It's usage is expected 12334 * to be driver load or reset recovery when the driver is 12335 * sequential. 12336 * 12337 * Return codes 12338 * 0 - successful 12339 * -EIO - The mailbox failed to complete successfully. 12340 * When this error occurs, the driver is not guaranteed 12341 * to have any rpi regions posted to the device and 12342 * must either attempt to repost the regions or take a 12343 * fatal error. 12344 **/ 12345 int 12346 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 12347 { 12348 struct lpfc_rpi_hdr *rpi_page; 12349 uint32_t rc = 0; 12350 12351 /* Post all rpi memory regions to the port. */ 12352 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 12353 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 12354 if (rc != MBX_SUCCESS) { 12355 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12356 "2008 Error %d posting all rpi " 12357 "headers\n", rc); 12358 rc = -EIO; 12359 break; 12360 } 12361 } 12362 12363 return rc; 12364 } 12365 12366 /** 12367 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 12368 * @phba: pointer to lpfc hba data structure. 12369 * @rpi_page: pointer to the rpi memory region. 12370 * 12371 * This routine is invoked to post a single rpi header to the 12372 * HBA consistent with the SLI-4 interface spec. This memory region 12373 * maps up to 64 rpi context regions. 12374 * 12375 * Return codes 12376 * 0 - successful 12377 * -ENOMEM - No available memory 12378 * -EIO - The mailbox failed to complete successfully. 12379 **/ 12380 int 12381 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 12382 { 12383 LPFC_MBOXQ_t *mboxq; 12384 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 12385 uint32_t rc = 0; 12386 uint32_t mbox_tmo; 12387 uint32_t shdr_status, shdr_add_status; 12388 union lpfc_sli4_cfg_shdr *shdr; 12389 12390 /* The port is notified of the header region via a mailbox command. */ 12391 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12392 if (!mboxq) { 12393 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12394 "2001 Unable to allocate memory for issuing " 12395 "SLI_CONFIG_SPECIAL mailbox command\n"); 12396 return -ENOMEM; 12397 } 12398 12399 /* Post all rpi memory regions to the port. 
*/ 12400 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 12401 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12402 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12403 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 12404 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 12405 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); 12406 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 12407 hdr_tmpl, rpi_page->page_count); 12408 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 12409 rpi_page->start_rpi); 12410 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 12411 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 12412 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 12413 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 12414 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12415 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12416 if (rc != MBX_TIMEOUT) 12417 mempool_free(mboxq, phba->mbox_mem_pool); 12418 if (shdr_status || shdr_add_status || rc) { 12419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12420 "2514 POST_RPI_HDR mailbox failed with " 12421 "status x%x add_status x%x, mbx status x%x\n", 12422 shdr_status, shdr_add_status, rc); 12423 rc = -ENXIO; 12424 } 12425 return rc; 12426 } 12427 12428 /** 12429 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 12430 * @phba: pointer to lpfc hba data structure. 12431 * 12432 * This routine is invoked to post rpi header templates to the 12433 * HBA consistent with the SLI-4 interface spec. This routine 12434 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 12435 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 12436 * 12437 * Returns 12438 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 12439 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 12440 **/ 12441 int 12442 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 12443 { 12444 int rpi; 12445 uint16_t max_rpi, rpi_base, rpi_limit; 12446 uint16_t rpi_remaining; 12447 struct lpfc_rpi_hdr *rpi_hdr; 12448 12449 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 12450 rpi_base = phba->sli4_hba.max_cfg_param.rpi_base; 12451 rpi_limit = phba->sli4_hba.next_rpi; 12452 12453 /* 12454 * The valid rpi range is not guaranteed to be zero-based. Start 12455 * the search at the rpi_base as reported by the port. 12456 */ 12457 spin_lock_irq(&phba->hbalock); 12458 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base); 12459 if (rpi >= rpi_limit || rpi < rpi_base) 12460 rpi = LPFC_RPI_ALLOC_ERROR; 12461 else { 12462 set_bit(rpi, phba->sli4_hba.rpi_bmask); 12463 phba->sli4_hba.max_cfg_param.rpi_used++; 12464 phba->sli4_hba.rpi_count++; 12465 } 12466 12467 /* 12468 * Don't try to allocate more rpi header regions if the device limit 12469 * on available rpis max has been exhausted. 12470 */ 12471 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 12472 (phba->sli4_hba.rpi_count >= max_rpi)) { 12473 spin_unlock_irq(&phba->hbalock); 12474 return rpi; 12475 } 12476 12477 /* 12478 * If the driver is running low on rpi resources, allocate another 12479 * page now. Note that the next_rpi value is used because 12480 * it represents how many are actually in use whereas max_rpi notes 12481 * how many are supported max by the device. 
12482 */ 12483 rpi_remaining = phba->sli4_hba.next_rpi - rpi_base - 12484 phba->sli4_hba.rpi_count; 12485 spin_unlock_irq(&phba->hbalock); 12486 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 12487 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 12488 if (!rpi_hdr) { 12489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12490 "2002 Error Could not grow rpi " 12491 "count\n"); 12492 } else { 12493 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 12494 } 12495 } 12496 12497 return rpi; 12498 } 12499 12500 /** 12501 * __lpfc_sli4_free_rpi - Release an rpi for reuse. 12502 * @phba: pointer to lpfc hba data structure. 12503 * 12504 * This routine is invoked to release an rpi to the pool of 12505 * available rpis maintained by the driver. 12506 **/ 12507 void 12508 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 12509 { 12510 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 12511 phba->sli4_hba.rpi_count--; 12512 phba->sli4_hba.max_cfg_param.rpi_used--; 12513 } 12514 } 12515 12516 /** 12517 * lpfc_sli4_free_rpi - Release an rpi for reuse. 12518 * @phba: pointer to lpfc hba data structure. 12519 * 12520 * This routine is invoked to release an rpi to the pool of 12521 * available rpis maintained by the driver. 12522 **/ 12523 void 12524 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 12525 { 12526 spin_lock_irq(&phba->hbalock); 12527 __lpfc_sli4_free_rpi(phba, rpi); 12528 spin_unlock_irq(&phba->hbalock); 12529 } 12530 12531 /** 12532 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 12533 * @phba: pointer to lpfc hba data structure. 12534 * 12535 * This routine is invoked to remove the memory region that 12536 * provided rpi via a bitmask. 12537 **/ 12538 void 12539 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 12540 { 12541 kfree(phba->sli4_hba.rpi_bmask); 12542 } 12543 12544 /** 12545 * lpfc_sli4_resume_rpi - Resume an rpi with the port 12546 * @ndlp: pointer to the lpfc nodelist whose rpi is to be resumed. 12547 * 12548 * This routine is invoked to issue a RESUME_RPI mailbox command to the 12549 * port for the rpi associated with @ndlp. 12550 **/ 12551 int 12552 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp) 12553 { 12554 LPFC_MBOXQ_t *mboxq; 12555 struct lpfc_hba *phba = ndlp->phba; 12556 int rc; 12557 12558 /* Allocate a mailbox for issuing the RESUME_RPI command. */ 12559 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12560 if (!mboxq) 12561 return -ENOMEM; 12562 12563 /* Prepare and issue the RESUME_RPI mailbox command. */ 12564 lpfc_resume_rpi(mboxq, ndlp); 12565 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12566 if (rc == MBX_NOT_FINISHED) { 12567 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12568 "2010 Resume RPI Mailbox failed " 12569 "status %d, mbxStatus x%x\n", rc, 12570 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 12571 mempool_free(mboxq, phba->mbox_mem_pool); 12572 return -EIO; 12573 } 12574 return 0; 12575 } 12576 12577 /** 12578 * lpfc_sli4_init_vpi - Initialize a vpi with the port 12579 * @vport: Pointer to the vport for which the vpi is being initialized 12580 * 12581 * This routine is invoked to activate a vpi with the port.
12582 * 12583 * Returns: 12584 * 0 success 12585 * -Evalue otherwise 12586 **/ 12587 int 12588 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 12589 { 12590 LPFC_MBOXQ_t *mboxq; 12591 int rc = 0; 12592 int retval = MBX_SUCCESS; 12593 uint32_t mbox_tmo; 12594 struct lpfc_hba *phba = vport->phba; 12595 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12596 if (!mboxq) 12597 return -ENOMEM; 12598 lpfc_init_vpi(phba, mboxq, vport->vpi); 12599 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 12600 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 12601 if (rc != MBX_SUCCESS) { 12602 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 12603 "2022 INIT VPI Mailbox failed " 12604 "status %d, mbxStatus x%x\n", rc, 12605 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 12606 retval = -EIO; 12607 } 12608 if (rc != MBX_TIMEOUT) 12609 mempool_free(mboxq, vport->phba->mbox_mem_pool); 12610 12611 return retval; 12612 } 12613 12614 /** 12615 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 12616 * @phba: pointer to lpfc hba data structure. 12617 * @mboxq: Pointer to mailbox object. 12618 * 12619 * This routine is invoked to manually add a single FCF record. The caller 12620 * must pass a completely initialized FCF_Record. This routine takes 12621 * care of the nonembedded mailbox operations. 12622 **/ 12623 static void 12624 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 12625 { 12626 void *virt_addr; 12627 union lpfc_sli4_cfg_shdr *shdr; 12628 uint32_t shdr_status, shdr_add_status; 12629 12630 virt_addr = mboxq->sge_array->addr[0]; 12631 /* The IOCTL status is embedded in the mailbox subheader. */ 12632 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 12633 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12634 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12635 12636 if ((shdr_status || shdr_add_status) && 12637 (shdr_status != STATUS_FCF_IN_USE)) 12638 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12639 "2558 ADD_FCF_RECORD mailbox failed with " 12640 "status x%x add_status x%x\n", 12641 shdr_status, shdr_add_status); 12642 12643 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12644 } 12645 12646 /** 12647 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 12648 * @phba: pointer to lpfc hba data structure. 12649 * @fcf_record: pointer to the initialized fcf record to add. 12650 * 12651 * This routine is invoked to manually add a single FCF record. The caller 12652 * must pass a completely initialized FCF_Record. This routine takes 12653 * care of the nonembedded mailbox operations. 
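 *
 * A hedged usage sketch (the functions and the FCFI 0 default are from
 * this file; the error handling shown is illustrative only):
 *
 *   struct fcf_record fcf_record;
 *
 *   lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *   if (lpfc_sli4_add_fcf_record(phba, &fcf_record))
 *           ; /* mailbox could not be issued; handle the failure */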
12654 **/ 12655 int 12656 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 12657 { 12658 int rc = 0; 12659 LPFC_MBOXQ_t *mboxq; 12660 uint8_t *bytep; 12661 void *virt_addr; 12662 dma_addr_t phys_addr; 12663 struct lpfc_mbx_sge sge; 12664 uint32_t alloc_len, req_len; 12665 uint32_t fcfindex; 12666 12667 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12668 if (!mboxq) { 12669 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12670 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 12671 return -ENOMEM; 12672 } 12673 12674 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 12675 sizeof(uint32_t); 12676 12677 /* Allocate DMA memory and set up the non-embedded mailbox command */ 12678 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 12679 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 12680 req_len, LPFC_SLI4_MBX_NEMBED); 12681 if (alloc_len < req_len) { 12682 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12683 "2523 Allocated DMA memory size (x%x) is " 12684 "less than the requested DMA memory " 12685 "size (x%x)\n", alloc_len, req_len); 12686 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12687 return -ENOMEM; 12688 } 12689 12690 /* 12691 * Get the first SGE entry from the non-embedded DMA memory. This 12692 * routine only uses a single SGE. 12693 */ 12694 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 12695 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 12696 virt_addr = mboxq->sge_array->addr[0]; 12697 /* 12698 * Configure the FCF record for FCFI 0. This is the driver's 12699 * hardcoded default and gets used in nonFIP mode. 12700 */ 12701 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 12702 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 12703 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 12704 12705 /* 12706 * Copy the fcf_index and the FCF Record Data. The data starts after 12707 * the FCoE header plus word10. The data copy needs to be endian 12708 * correct. 12709 */ 12710 bytep += sizeof(uint32_t); 12711 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 12712 mboxq->vport = phba->pport; 12713 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 12714 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12715 if (rc == MBX_NOT_FINISHED) { 12716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12717 "2515 ADD_FCF_RECORD mailbox failed with " 12718 "status 0x%x\n", rc); 12719 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12720 rc = -EIO; 12721 } else 12722 rc = 0; 12723 12724 return rc; 12725 } 12726 12727 /** 12728 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 12729 * @phba: pointer to lpfc hba data structure. 12730 * @fcf_record: pointer to the fcf record to write the default data. 12731 * @fcf_index: FCF table entry index. 12732 * 12733 * This routine is invoked to build the driver's default FCF record. The 12734 * values used are hardcoded. This routine handles memory initialization. 
12735 * 12736 **/ 12737 void 12738 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 12739 struct fcf_record *fcf_record, 12740 uint16_t fcf_index) 12741 { 12742 memset(fcf_record, 0, sizeof(struct fcf_record)); 12743 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 12744 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 12745 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 12746 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 12747 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 12748 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 12749 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 12750 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 12751 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 12752 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 12753 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 12754 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 12755 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 12756 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 12757 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 12758 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 12759 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 12760 /* Set the VLAN bit map */ 12761 if (phba->valid_vlan) { 12762 fcf_record->vlan_bitmap[phba->vlan_id / 8] 12763 = 1 << (phba->vlan_id % 8); 12764 } 12765 } 12766 12767 /** 12768 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 12769 * @phba: pointer to lpfc hba data structure. 12770 * @fcf_index: FCF table entry offset. 12771 * 12772 * This routine is invoked to scan the entire FCF table by reading FCF 12773 * records and processing them one at a time starting from the @fcf_index 12774 * for initial FCF discovery or fast FCF failover rediscovery. 12775 * 12776 * Return 0 if the mailbox command is submitted successfully, non-zero 12777 * otherwise.
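 *
 * Illustrative only (a sketch using a constant defined by this driver):
 * a full table scan is normally started from the first entry, e.g.:
 *
 *   rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *   if (rc)
 *           ; /* scan could not be started; handle the error */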
12778 **/ 12779 int 12780 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12781 { 12782 int rc = 0, error; 12783 LPFC_MBOXQ_t *mboxq; 12784 12785 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 12786 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12787 if (!mboxq) { 12788 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12789 "2000 Failed to allocate mbox for " 12790 "READ_FCF cmd\n"); 12791 error = -ENOMEM; 12792 goto fail_fcf_scan; 12793 } 12794 /* Construct the read FCF record mailbox command */ 12795 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12796 if (rc) { 12797 error = -EINVAL; 12798 goto fail_fcf_scan; 12799 } 12800 /* Issue the mailbox command asynchronously */ 12801 mboxq->vport = phba->pport; 12802 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 12803 12804 spin_lock_irq(&phba->hbalock); 12805 phba->hba_flag |= FCF_TS_INPROG; 12806 spin_unlock_irq(&phba->hbalock); 12807 12808 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12809 if (rc == MBX_NOT_FINISHED) 12810 error = -EIO; 12811 else { 12812 /* Reset eligible FCF count for new scan */ 12813 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 12814 phba->fcf.eligible_fcf_cnt = 0; 12815 error = 0; 12816 } 12817 fail_fcf_scan: 12818 if (error) { 12819 if (mboxq) 12820 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12821 /* FCF scan failed, clear FCF_TS_INPROG flag */ 12822 spin_lock_irq(&phba->hbalock); 12823 phba->hba_flag &= ~FCF_TS_INPROG; 12824 spin_unlock_irq(&phba->hbalock); 12825 } 12826 return error; 12827 } 12828 12829 /** 12830 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 12831 * @phba: pointer to lpfc hba data structure. 12832 * @fcf_index: FCF table entry offset. 12833 * 12834 * This routine is invoked to read an FCF record indicated by @fcf_index 12835 * and to use it for FLOGI roundrobin FCF failover. 12836 * 12837 * Return 0 if the mailbox command is submitted sucessfully, none 0 12838 * otherwise. 12839 **/ 12840 int 12841 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12842 { 12843 int rc = 0, error; 12844 LPFC_MBOXQ_t *mboxq; 12845 12846 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12847 if (!mboxq) { 12848 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12849 "2763 Failed to allocate mbox for " 12850 "READ_FCF cmd\n"); 12851 error = -ENOMEM; 12852 goto fail_fcf_read; 12853 } 12854 /* Construct the read FCF record mailbox command */ 12855 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12856 if (rc) { 12857 error = -EINVAL; 12858 goto fail_fcf_read; 12859 } 12860 /* Issue the mailbox command asynchronously */ 12861 mboxq->vport = phba->pport; 12862 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 12863 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12864 if (rc == MBX_NOT_FINISHED) 12865 error = -EIO; 12866 else 12867 error = 0; 12868 12869 fail_fcf_read: 12870 if (error && mboxq) 12871 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12872 return error; 12873 } 12874 12875 /** 12876 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 12877 * @phba: pointer to lpfc hba data structure. 12878 * @fcf_index: FCF table entry offset. 12879 * 12880 * This routine is invoked to read an FCF record indicated by @fcf_index to 12881 * determine whether it's eligible for FLOGI roundrobin failover list. 12882 * 12883 * Return 0 if the mailbox command is submitted sucessfully, none 0 12884 * otherwise. 
12885 **/ 12886 int 12887 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 12888 { 12889 int rc = 0, error; 12890 LPFC_MBOXQ_t *mboxq; 12891 12892 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12893 if (!mboxq) { 12894 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 12895 "2758 Failed to allocate mbox for " 12896 "READ_FCF cmd\n"); 12897 error = -ENOMEM; 12898 goto fail_fcf_read; 12899 } 12900 /* Construct the read FCF record mailbox command */ 12901 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 12902 if (rc) { 12903 error = -EINVAL; 12904 goto fail_fcf_read; 12905 } 12906 /* Issue the mailbox command asynchronously */ 12907 mboxq->vport = phba->pport; 12908 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 12909 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 12910 if (rc == MBX_NOT_FINISHED) 12911 error = -EIO; 12912 else 12913 error = 0; 12914 12915 fail_fcf_read: 12916 if (error && mboxq) 12917 lpfc_sli4_mbox_cmd_free(phba, mboxq); 12918 return error; 12919 } 12920 12921 /** 12922 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 12923 * @phba: pointer to lpfc hba data structure. 12924 * 12925 * This routine is to get the next eligible FCF record index in a round 12926 * robin fashion. If the next eligible FCF record index equals to the 12927 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 12928 * shall be returned, otherwise, the next eligible FCF record's index 12929 * shall be returned. 12930 **/ 12931 uint16_t 12932 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 12933 { 12934 uint16_t next_fcf_index; 12935 12936 /* Search start from next bit of currently registered FCF index */ 12937 next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) % 12938 LPFC_SLI4_FCF_TBL_INDX_MAX; 12939 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12940 LPFC_SLI4_FCF_TBL_INDX_MAX, 12941 next_fcf_index); 12942 12943 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 12944 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 12945 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 12946 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 12947 12948 /* Check roundrobin failover list empty condition */ 12949 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12950 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 12951 "2844 No roundrobin failover FCF available\n"); 12952 return LPFC_FCOE_FCF_NEXT_NONE; 12953 } 12954 12955 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12956 "2845 Get next roundrobin failover FCF (x%x)\n", 12957 next_fcf_index); 12958 12959 return next_fcf_index; 12960 } 12961 12962 /** 12963 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 12964 * @phba: pointer to lpfc hba data structure. 12965 * 12966 * This routine sets the FCF record index in to the eligible bmask for 12967 * roundrobin failover search. It checks to make sure that the index 12968 * does not go beyond the range of the driver allocated bmask dimension 12969 * before setting the bit. 12970 * 12971 * Returns 0 if the index bit successfully set, otherwise, it returns 12972 * -EINVAL. 
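 *
 * A hedged sketch of the roundrobin failover flow (all names are from
 * this file; the ordering shown is illustrative, not mandated):
 *
 *   lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
 *   ...
 *   next_fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *   if (next_fcf_index != LPFC_FCOE_FCF_NEXT_NONE)
 *           lpfc_sli4_fcf_rr_read_fcf_rec(phba, next_fcf_index);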
12973 **/ 12974 int 12975 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 12976 { 12977 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 12978 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 12979 "2610 FCF (x%x) reached driver's book " 12980 "keeping dimension:x%x\n", 12981 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 12982 return -EINVAL; 12983 } 12984 /* Set the eligible FCF record index bmask */ 12985 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 12986 12987 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 12988 "2790 Set FCF (x%x) to roundrobin FCF failover " 12989 "bmask\n", fcf_index); 12990 12991 return 0; 12992 } 12993 12994 /** 12995 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 12996 * @phba: pointer to lpfc hba data structure. 12997 * 12998 * This routine clears the FCF record index from the eligible bmask for 12999 * roundrobin failover search. It checks to make sure that the index 13000 * does not go beyond the range of the driver allocated bmask dimension 13001 * before clearing the bit. 13002 **/ 13003 void 13004 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 13005 { 13006 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 13007 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 13008 "2762 FCF (x%x) reached driver's book " 13009 "keeping dimension:x%x\n", 13010 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 13011 return; 13012 } 13013 /* Clear the eligible FCF record index bmask */ 13014 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 13015 13016 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 13017 "2791 Clear FCF (x%x) from roundrobin failover " 13018 "bmask\n", fcf_index); 13019 } 13020 13021 /** 13022 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 13023 * @phba: pointer to lpfc hba data structure. 13024 * 13025 * This routine is the completion routine for the rediscover FCF table mailbox 13026 * command. If the mailbox command returned failure, it will try to stop the 13027 * FCF rediscover wait timer. 13028 **/ 13029 void 13030 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 13031 { 13032 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 13033 uint32_t shdr_status, shdr_add_status; 13034 13035 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 13036 13037 shdr_status = bf_get(lpfc_mbox_hdr_status, 13038 &redisc_fcf->header.cfg_shdr.response); 13039 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 13040 &redisc_fcf->header.cfg_shdr.response); 13041 if (shdr_status || shdr_add_status) { 13042 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 13043 "2746 Requesting for FCF rediscovery failed " 13044 "status x%x add_status x%x\n", 13045 shdr_status, shdr_add_status); 13046 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 13047 spin_lock_irq(&phba->hbalock); 13048 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 13049 spin_unlock_irq(&phba->hbalock); 13050 /* 13051 * CVL event triggered FCF rediscover request failed, 13052 * last resort to re-try current registered FCF entry. 13053 */ 13054 lpfc_retry_pport_discovery(phba); 13055 } else { 13056 spin_lock_irq(&phba->hbalock); 13057 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 13058 spin_unlock_irq(&phba->hbalock); 13059 /* 13060 * DEAD FCF event triggered FCF rediscover request 13061 * failed, last resort to fail over as a link down 13062 * to FCF registration. 
13063 */ 13064 lpfc_sli4_fcf_dead_failthrough(phba); 13065 } 13066 } else { 13067 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 13068 "2775 Start FCF rediscover quiescent timer\n"); 13069 /* 13070 * Start FCF rediscovery wait timer for pending FCF 13071 * before rescanning the FCF record table. 13072 */ 13073 lpfc_fcf_redisc_wait_start_timer(phba); 13074 } 13075 13076 mempool_free(mbox, phba->mbox_mem_pool); 13077 } 13078 13079 /** 13080 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 13081 * @phba: pointer to lpfc hba data structure. 13082 * 13083 * This routine is invoked to request rediscovery of the entire FCF table 13084 * by the port. 13085 **/ 13086 int 13087 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 13088 { 13089 LPFC_MBOXQ_t *mbox; 13090 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 13091 int rc, length; 13092 13093 /* Cancel retry delay timers to all vports before FCF rediscover */ 13094 lpfc_cancel_all_vport_retry_delay_timer(phba); 13095 13096 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13097 if (!mbox) { 13098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13099 "2745 Failed to allocate mbox for " 13100 "requesting FCF rediscover.\n"); 13101 return -ENOMEM; 13102 } 13103 13104 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 13105 sizeof(struct lpfc_sli4_cfg_mhdr)); 13106 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13107 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 13108 length, LPFC_SLI4_MBX_EMBED); 13109 13110 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 13111 /* Set count to 0 for invalidating the entire FCF database */ 13112 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 13113 13114 /* Issue the mailbox command asynchronously */ 13115 mbox->vport = phba->pport; 13116 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 13117 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 13118 13119 if (rc == MBX_NOT_FINISHED) { 13120 mempool_free(mbox, phba->mbox_mem_pool); 13121 return -EIO; 13122 } 13123 return 0; 13124 } 13125 13126 /** 13127 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 13128 * @phba: pointer to lpfc hba data structure. 13129 * 13130 * This function is the failover routine as a last resort to the FCF DEAD 13131 * event when the driver fails to perform fast FCF failover. 13132 **/ 13133 void 13134 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 13135 { 13136 uint32_t link_state; 13137 13138 /* 13139 * Last resort as FCF DEAD event failover will treat this as 13140 * a link down, but save the link state because we don't want 13141 * it to be changed to Link Down unless it is already down. 13142 */ 13143 link_state = phba->link_state; 13144 lpfc_linkdown(phba); 13145 phba->link_state = link_state; 13146 13147 /* Unregister FCF if no devices connected to it */ 13148 lpfc_unregister_unused_fcf(phba); 13149 } 13150 13151 /** 13152 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 13153 * @phba: pointer to lpfc hba data structure. 13154 * 13155 * This function reads region 23 and parses the TLV for port status to 13156 * decide if the user disabled the port. If the TLV indicates the 13157 * port is disabled, the hba_flag is set accordingly.
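 *
 * Illustrative only (a sketch, not a required call site): initialization
 * code that runs this routine can then key off the flag it sets, e.g.:
 *
 *   lpfc_sli_read_link_ste(phba);
 *   if (phba->hba_flag & LINK_DISABLED)
 *           ; /* leave the link down until the user enables it */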
13158 **/ 13159 void 13160 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 13161 { 13162 LPFC_MBOXQ_t *pmb = NULL; 13163 MAILBOX_t *mb; 13164 uint8_t *rgn23_data = NULL; 13165 uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset; 13166 int rc; 13167 13168 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13169 if (!pmb) { 13170 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13171 "2600 lpfc_sli_read_serdes_param failed to" 13172 " allocate mailbox memory\n"); 13173 goto out; 13174 } 13175 mb = &pmb->u.mb; 13176 13177 /* Get adapter Region 23 data */ 13178 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 13179 if (!rgn23_data) 13180 goto out; 13181 13182 do { 13183 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 13184 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 13185 13186 if (rc != MBX_SUCCESS) { 13187 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13188 "2601 lpfc_sli_read_link_ste failed to" 13189 " read config region 23 rc 0x%x Status 0x%x\n", 13190 rc, mb->mbxStatus); 13191 mb->un.varDmp.word_cnt = 0; 13192 } 13193 /* 13194 * dump mem may return a zero when finished or we got a 13195 * mailbox error, either way we are done. 13196 */ 13197 if (mb->un.varDmp.word_cnt == 0) 13198 break; 13199 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 13200 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 13201 13202 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 13203 rgn23_data + offset, 13204 mb->un.varDmp.word_cnt); 13205 offset += mb->un.varDmp.word_cnt; 13206 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 13207 13208 data_size = offset; 13209 offset = 0; 13210 13211 if (!data_size) 13212 goto out; 13213 13214 /* Check the region signature first */ 13215 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 13216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13217 "2619 Config region 23 has bad signature\n"); 13218 goto out; 13219 } 13220 offset += 4; 13221 13222 /* Check the data structure version */ 13223 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 13224 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13225 "2620 Config region 23 has bad version\n"); 13226 goto out; 13227 } 13228 offset += 4; 13229 13230 /* Parse TLV entries in the region */ 13231 while (offset < data_size) { 13232 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 13233 break; 13234 /* 13235 * If the TLV is not driver specific TLV or driver id is 13236 * not linux driver id, skip the record. 13237 */ 13238 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 13239 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 13240 (rgn23_data[offset + 3] != 0)) { 13241 offset += rgn23_data[offset + 1] * 4 + 4; 13242 continue; 13243 } 13244 13245 /* Driver found a driver specific TLV in the config region */ 13246 sub_tlv_len = rgn23_data[offset + 1] * 4; 13247 offset += 4; 13248 tlv_offset = 0; 13249 13250 /* 13251 * Search for configured port state sub-TLV. 
13252 */ 13253 while ((offset < data_size) && 13254 (tlv_offset < sub_tlv_len)) { 13255 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 13256 offset += 4; 13257 tlv_offset += 4; 13258 break; 13259 } 13260 if (rgn23_data[offset] != PORT_STE_TYPE) { 13261 offset += rgn23_data[offset + 1] * 4 + 4; 13262 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 13263 continue; 13264 } 13265 13266 /* This HBA contains PORT_STE configured */ 13267 if (!rgn23_data[offset + 2]) 13268 phba->hba_flag |= LINK_DISABLED; 13269 13270 goto out; 13271 } 13272 } 13273 out: 13274 if (pmb) 13275 mempool_free(pmb, phba->mbox_mem_pool); 13276 kfree(rgn23_data); 13277 return; 13278 } 13279 13280 /** 13281 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 13282 * @vport: pointer to vport data structure. 13283 * 13284 * This function iterate through the mailboxq and clean up all REG_LOGIN 13285 * and REG_VPI mailbox commands associated with the vport. This function 13286 * is called when driver want to restart discovery of the vport due to 13287 * a Clear Virtual Link event. 13288 **/ 13289 void 13290 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 13291 { 13292 struct lpfc_hba *phba = vport->phba; 13293 LPFC_MBOXQ_t *mb, *nextmb; 13294 struct lpfc_dmabuf *mp; 13295 struct lpfc_nodelist *ndlp; 13296 struct lpfc_nodelist *act_mbx_ndlp = NULL; 13297 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 13298 LIST_HEAD(mbox_cmd_list); 13299 uint8_t restart_loop; 13300 13301 /* Clean up internally queued mailbox commands with the vport */ 13302 spin_lock_irq(&phba->hbalock); 13303 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 13304 if (mb->vport != vport) 13305 continue; 13306 13307 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 13308 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 13309 continue; 13310 13311 list_del(&mb->list); 13312 list_add_tail(&mb->list, &mbox_cmd_list); 13313 } 13314 /* Clean up active mailbox command with the vport */ 13315 mb = phba->sli.mbox_active; 13316 if (mb && (mb->vport == vport)) { 13317 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 13318 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 13319 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13320 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 13321 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 13322 /* Put reference count for delayed processing */ 13323 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 13324 /* Unregister the RPI when mailbox complete */ 13325 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 13326 } 13327 } 13328 /* Cleanup any mailbox completions which are not yet processed */ 13329 do { 13330 restart_loop = 0; 13331 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 13332 /* 13333 * If this mailox is already processed or it is 13334 * for another vport ignore it. 
13335 */ 13336 if ((mb->vport != vport) || 13337 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 13338 continue; 13339 13340 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 13341 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 13342 continue; 13343 13344 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13345 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 13346 ndlp = (struct lpfc_nodelist *)mb->context2; 13347 /* Unregister the RPI when mailbox complete */ 13348 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 13349 restart_loop = 1; 13350 spin_unlock_irq(&phba->hbalock); 13351 spin_lock(shost->host_lock); 13352 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 13353 spin_unlock(shost->host_lock); 13354 spin_lock_irq(&phba->hbalock); 13355 break; 13356 } 13357 } 13358 } while (restart_loop); 13359 13360 spin_unlock_irq(&phba->hbalock); 13361 13362 /* Release the cleaned-up mailbox commands */ 13363 while (!list_empty(&mbox_cmd_list)) { 13364 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 13365 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 13366 mp = (struct lpfc_dmabuf *) (mb->context1); 13367 if (mp) { 13368 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 13369 kfree(mp); 13370 } 13371 ndlp = (struct lpfc_nodelist *) mb->context2; 13372 mb->context2 = NULL; 13373 if (ndlp) { 13374 spin_lock(shost->host_lock); 13375 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 13376 spin_unlock(shost->host_lock); 13377 lpfc_nlp_put(ndlp); 13378 } 13379 } 13380 mempool_free(mb, phba->mbox_mem_pool); 13381 } 13382 13383 /* Release the ndlp with the cleaned-up active mailbox command */ 13384 if (act_mbx_ndlp) { 13385 spin_lock(shost->host_lock); 13386 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 13387 spin_unlock(shost->host_lock); 13388 lpfc_nlp_put(act_mbx_ndlp); 13389 } 13390 } 13391 13392 /** 13393 * lpfc_drain_txq - Drain the txq 13394 * @phba: Pointer to HBA context object. 13395 * 13396 * This function attempts to submit IOCBs on the txq 13397 * to the adapter. For SLI4 adapters, the txq contains 13398 * ELS IOCBs that have been deferred because there 13399 * are no SGLs. This congestion can occur with large 13400 * vport counts during node discovery. 13401 **/ 13402 13403 uint32_t 13404 lpfc_drain_txq(struct lpfc_hba *phba) 13405 { 13406 LIST_HEAD(completions); 13407 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 13408 struct lpfc_iocbq *piocbq = NULL; 13409 unsigned long iflags = 0; 13410 char *fail_msg = NULL; 13411 struct lpfc_sglq *sglq; 13412 union lpfc_wqe wqe; 13413 13414 spin_lock_irqsave(&phba->hbalock, iflags); 13415 if (pring->txq_cnt > pring->txq_max) 13416 pring->txq_max = pring->txq_cnt; 13417 13418 spin_unlock_irqrestore(&phba->hbalock, iflags); 13419 13420 while (pring->txq_cnt) { 13421 spin_lock_irqsave(&phba->hbalock, iflags); 13422 13423 piocbq = lpfc_sli_ringtx_get(phba, pring); 13424 sglq = __lpfc_sli_get_sglq(phba, piocbq); 13425 if (!sglq) { 13426 __lpfc_sli_ringtx_put(phba, pring, piocbq); 13427 spin_unlock_irqrestore(&phba->hbalock, iflags); 13428 break; 13429 } else { 13430 if (!piocbq) { 13431 /* The txq_cnt is out of sync.
This should 13432 * never happen 13433 */ 13434 sglq = __lpfc_clear_active_sglq(phba, 13435 sglq->sli4_xritag); 13436 spin_unlock_irqrestore(&phba->hbalock, iflags); 13437 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13438 "2823 txq empty and txq_cnt is %d\n ", 13439 pring->txq_cnt); 13440 break; 13441 } 13442 } 13443 13444 /* The xri and iocb resources secured, 13445 * attempt to issue request 13446 */ 13447 piocbq->sli4_xritag = sglq->sli4_xritag; 13448 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 13449 fail_msg = "to convert bpl to sgl"; 13450 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 13451 fail_msg = "to convert iocb to wqe"; 13452 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 13453 fail_msg = " - Wq is full"; 13454 else 13455 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 13456 13457 if (fail_msg) { 13458 /* Failed means we can't issue and need to cancel */ 13459 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13460 "2822 IOCB failed %s iotag 0x%x " 13461 "xri 0x%x\n", 13462 fail_msg, 13463 piocbq->iotag, piocbq->sli4_xritag); 13464 list_add_tail(&piocbq->list, &completions); 13465 } 13466 spin_unlock_irqrestore(&phba->hbalock, iflags); 13467 } 13468 13469 /* Cancel all the IOCBs that cannot be issued */ 13470 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 13471 IOERR_SLI_ABORTED); 13472 13473 return pring->txq_cnt; 13474 } 13475