1 2 /******************************************************************* 3 * This file is part of the Emulex Linux Device Driver for * 4 * Fibre Channel Host Bus Adapters. * 5 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * 6 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 7 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 8 * EMULEX and SLI are trademarks of Emulex. * 9 * www.broadcom.com * 10 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 11 * * 12 * This program is free software; you can redistribute it and/or * 13 * modify it under the terms of version 2 of the GNU General * 14 * Public License as published by the Free Software Foundation. * 15 * This program is distributed in the hope that it will be useful. * 16 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 17 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 18 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 19 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 20 * TO BE LEGALLY INVALID. See the GNU General Public License for * 21 * more details, a copy of which can be found in the file COPYING * 22 * included with this package. * 23 *******************************************************************/ 24 25 #include <linux/blkdev.h> 26 #include <linux/pci.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 #include <linux/slab.h> 30 #include <linux/lockdep.h> 31 32 #include <scsi/scsi.h> 33 #include <scsi/scsi_cmnd.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_host.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 #include <linux/aer.h> 39 40 #include <linux/nvme-fc-driver.h> 41 42 #include "lpfc_hw4.h" 43 #include "lpfc_hw.h" 44 #include "lpfc_sli.h" 45 #include "lpfc_sli4.h" 46 #include "lpfc_nl.h" 47 #include "lpfc_disc.h" 48 #include "lpfc.h" 49 #include "lpfc_scsi.h" 50 #include "lpfc_nvme.h" 51 #include "lpfc_nvmet.h" 52 #include "lpfc_crtn.h" 53 #include "lpfc_logmsg.h" 54 #include "lpfc_compat.h" 55 #include "lpfc_debugfs.h" 56 #include "lpfc_vport.h" 57 #include "lpfc_version.h" 58 59 /* There are only four IOCB completion types. */ 60 typedef enum _lpfc_iocb_type { 61 LPFC_UNKNOWN_IOCB, 62 LPFC_UNSOL_IOCB, 63 LPFC_SOL_IOCB, 64 LPFC_ABORT_IOCB 65 } lpfc_iocb_type; 66 67 68 /* Provide function prototypes local to this module. 
*/ 69 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 70 uint32_t); 71 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 72 uint8_t *, uint32_t *); 73 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 74 struct lpfc_iocbq *); 75 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 76 struct hbq_dmabuf *); 77 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 78 struct hbq_dmabuf *dmabuf); 79 static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *, 80 struct lpfc_cqe *); 81 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *, 82 int); 83 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, 84 uint32_t); 85 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); 86 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); 87 static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, 88 struct lpfc_sli_ring *pring, 89 struct lpfc_iocbq *cmdiocb); 90 91 static IOCB_t * 92 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 93 { 94 return &iocbq->iocb; 95 } 96 97 /** 98 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 99 * @q: The Work Queue to operate on. 100 * @wqe: The work Queue Entry to put on the Work queue. 101 * 102 * This routine will copy the contents of @wqe to the next available entry on 103 * the @q. This function will then ring the Work Queue Doorbell to signal the 104 * HBA to start processing the Work Queue Entry. This function returns 0 if 105 * successful. If no entries are available on @q then this function will return 106 * -ENOMEM. 107 * The caller is expected to hold the hbalock when calling this routine. 108 **/ 109 static uint32_t 110 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) 111 { 112 union lpfc_wqe *temp_wqe; 113 struct lpfc_register doorbell; 114 uint32_t host_index; 115 uint32_t idx; 116 117 /* sanity check on queue memory */ 118 if (unlikely(!q)) 119 return -ENOMEM; 120 temp_wqe = q->qe[q->host_index].wqe; 121 122 /* If the host has not yet processed the next entry then we are done */ 123 idx = ((q->host_index + 1) % q->entry_count); 124 if (idx == q->hba_index) { 125 q->WQ_overflow++; 126 return -ENOMEM; 127 } 128 q->WQ_posted++; 129 /* set consumption flag every once in a while */ 130 if (!((q->host_index + 1) % q->entry_repost)) 131 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 132 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 133 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 134 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 135 /* ensure WQE bcopy flushed before doorbell write */ 136 wmb(); 137 138 /* Update the host index before invoking device */ 139 host_index = q->host_index; 140 141 q->host_index = idx; 142 143 /* Ring Doorbell */ 144 doorbell.word0 = 0; 145 if (q->db_format == LPFC_DB_LIST_FORMAT) { 146 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 147 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); 148 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 149 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 150 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 151 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 152 } else { 153 return -EINVAL; 154 } 155 writel(doorbell.word0, q->db_regaddr); 156 157 return 0; 158 } 159 160 /** 161 * lpfc_sli4_wq_release - Updates internal hba index for WQ 162 * @q: The Work Queue to operate on. 163 * @index: The index to advance the hba index to. 
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry, the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry, the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE.
If no valid EQEs are in 259 * the Queue (no more work to do), or the Queue is full of EQEs that have been 260 * processed, but not popped back to the HBA then this routine will return NULL. 261 **/ 262 static struct lpfc_eqe * 263 lpfc_sli4_eq_get(struct lpfc_queue *q) 264 { 265 struct lpfc_eqe *eqe; 266 uint32_t idx; 267 268 /* sanity check on queue memory */ 269 if (unlikely(!q)) 270 return NULL; 271 eqe = q->qe[q->hba_index].eqe; 272 273 /* If the next EQE is not valid then we are done */ 274 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 275 return NULL; 276 /* If the host has not yet processed the next entry then we are done */ 277 idx = ((q->hba_index + 1) % q->entry_count); 278 if (idx == q->host_index) 279 return NULL; 280 281 q->hba_index = idx; 282 283 /* 284 * insert barrier for instruction interlock : data from the hardware 285 * must have the valid bit checked before it can be copied and acted 286 * upon. Speculative instructions were allowing a bcopy at the start 287 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately 288 * after our return, to copy data before the valid bit check above 289 * was done. As such, some of the copied data was stale. The barrier 290 * ensures the check is before any data is copied. 291 */ 292 mb(); 293 return eqe; 294 } 295 296 /** 297 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ 298 * @q: The Event Queue to disable interrupts 299 * 300 **/ 301 static inline void 302 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) 303 { 304 struct lpfc_register doorbell; 305 306 doorbell.word0 = 0; 307 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 308 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 309 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 310 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 311 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 312 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 313 } 314 315 /** 316 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ 317 * @q: The Event Queue that the host has completed processing for. 318 * @arm: Indicates whether the host wants to arms this CQ. 319 * 320 * This routine will mark all Event Queue Entries on @q, from the last 321 * known completed entry to the last entry that was processed, as completed 322 * by clearing the valid bit for each completion queue entry. Then it will 323 * notify the HBA, by ringing the doorbell, that the EQEs have been processed. 324 * The internal host index in the @q will be updated by this routine to indicate 325 * that the host has finished processing the entries. The @arm parameter 326 * indicates that the queue should be rearmed when ringing the doorbell. 327 * 328 * This function will return the number of EQEs that were popped. 
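 *
 * Illustrative caller pattern (a sketch, not a verbatim excerpt from the
 * driver): an EQ interrupt handler typically drains the queue with
 * lpfc_sli4_eq_get(), hands each entry to lpfc_sli4_hba_handle_eqe(), and
 * then releases the processed entries in one call with the queue re-armed:
 *
 *   while ((eqe = lpfc_sli4_eq_get(eq)))
 *           lpfc_sli4_hba_handle_eqe(phba, eqe, qidx);
 *   lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);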
329 **/ 330 uint32_t 331 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 332 { 333 uint32_t released = 0; 334 struct lpfc_eqe *temp_eqe; 335 struct lpfc_register doorbell; 336 337 /* sanity check on queue memory */ 338 if (unlikely(!q)) 339 return 0; 340 341 /* while there are valid entries */ 342 while (q->hba_index != q->host_index) { 343 temp_eqe = q->qe[q->host_index].eqe; 344 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 345 released++; 346 q->host_index = ((q->host_index + 1) % q->entry_count); 347 } 348 if (unlikely(released == 0 && !arm)) 349 return 0; 350 351 /* ring doorbell for number popped */ 352 doorbell.word0 = 0; 353 if (arm) { 354 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 355 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 356 } 357 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 358 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 359 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 360 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 361 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 362 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 363 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 364 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 365 readl(q->phba->sli4_hba.EQCQDBregaddr); 366 return released; 367 } 368 369 /** 370 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 371 * @q: The Completion Queue to get the first valid CQE from 372 * 373 * This routine will get the first valid Completion Queue Entry from @q, update 374 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 375 * the Queue (no more work to do), or the Queue is full of CQEs that have been 376 * processed, but not popped back to the HBA then this routine will return NULL. 377 **/ 378 static struct lpfc_cqe * 379 lpfc_sli4_cq_get(struct lpfc_queue *q) 380 { 381 struct lpfc_cqe *cqe; 382 uint32_t idx; 383 384 /* sanity check on queue memory */ 385 if (unlikely(!q)) 386 return NULL; 387 388 /* If the next CQE is not valid then we are done */ 389 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 390 return NULL; 391 /* If the host has not yet processed the next entry then we are done */ 392 idx = ((q->hba_index + 1) % q->entry_count); 393 if (idx == q->host_index) 394 return NULL; 395 396 cqe = q->qe[q->hba_index].cqe; 397 q->hba_index = idx; 398 399 /* 400 * insert barrier for instruction interlock : data from the hardware 401 * must have the valid bit checked before it can be copied and acted 402 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative 403 * instructions allowing action on content before valid bit checked, 404 * add barrier here as well. May not be needed as "content" is a 405 * single 32-bit entity here (vs multi word structure for cq's). 406 */ 407 mb(); 408 return cqe; 409 } 410 411 /** 412 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 413 * @q: The Completion Queue that the host has completed processing for. 414 * @arm: Indicates whether the host wants to arms this CQ. 415 * 416 * This routine will mark all Completion queue entries on @q, from the last 417 * known completed entry to the last entry that was processed, as completed 418 * by clearing the valid bit for each completion queue entry. Then it will 419 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 420 * The internal host index in the @q will be updated by this routine to indicate 421 * that the host has finished processing the entries. 
The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the Header Receive queue.
 * @drqe: The data Receive Queue Entry to put on the Data Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on the queues then this function
 * will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
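 *
 * Illustrative usage (a sketch modeled on lpfc_sli_hbq_to_firmware_s4()
 * later in this file): the caller fills a paired header/data RQE with the
 * DMA addresses of the receive buffer and posts both in one call:
 *
 *   hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
 *   hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
 *   drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
 *   drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
 *   rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);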
470 **/ 471 int 472 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 473 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 474 { 475 struct lpfc_rqe *temp_hrqe; 476 struct lpfc_rqe *temp_drqe; 477 struct lpfc_register doorbell; 478 int put_index; 479 480 /* sanity check on queue memory */ 481 if (unlikely(!hq) || unlikely(!dq)) 482 return -ENOMEM; 483 put_index = hq->host_index; 484 temp_hrqe = hq->qe[put_index].rqe; 485 temp_drqe = dq->qe[dq->host_index].rqe; 486 487 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 488 return -EINVAL; 489 if (put_index != dq->host_index) 490 return -EINVAL; 491 /* If the host has not yet processed the next entry then we are done */ 492 if (((put_index + 1) % hq->entry_count) == hq->hba_index) 493 return -EBUSY; 494 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 495 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 496 497 /* Update the host index to point to the next slot */ 498 hq->host_index = ((put_index + 1) % hq->entry_count); 499 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 500 hq->RQ_buf_posted++; 501 502 /* Ring The Header Receive Queue Doorbell */ 503 if (!(hq->host_index % hq->entry_repost)) { 504 doorbell.word0 = 0; 505 if (hq->db_format == LPFC_DB_RING_FORMAT) { 506 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 507 hq->entry_repost); 508 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 509 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 510 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 511 hq->entry_repost); 512 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 513 hq->host_index); 514 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 515 } else { 516 return -EINVAL; 517 } 518 writel(doorbell.word0, hq->db_regaddr); 519 } 520 return put_index; 521 } 522 523 /** 524 * lpfc_sli4_rq_release - Updates internal hba index for RQ 525 * @q: The Header Receive Queue to operate on. 526 * 527 * This routine will update the HBA index of a queue to reflect consumption of 528 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 529 * consumed an entry the host calls this function to update the queue's 530 * internal pointers. This routine returns the number of entries that were 531 * consumed by the HBA. 532 **/ 533 static uint32_t 534 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 535 { 536 /* sanity check on queue memory */ 537 if (unlikely(!hq) || unlikely(!dq)) 538 return 0; 539 540 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 541 return 0; 542 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 543 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 544 return 1; 545 } 546 547 /** 548 * lpfc_cmd_iocb - Get next command iocb entry in the ring 549 * @phba: Pointer to HBA context object. 550 * @pring: Pointer to driver SLI ring object. 551 * 552 * This function returns pointer to next command iocb entry 553 * in the command ring. The caller must hold hbalock to prevent 554 * other threads consume the next command iocb. 555 * SLI-2/SLI-3 provide different sized iocbs. 556 **/ 557 static inline IOCB_t * 558 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 559 { 560 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 561 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 562 } 563 564 /** 565 * lpfc_resp_iocb - Get next response iocb entry in the ring 566 * @phba: Pointer to HBA context object. 567 * @pring: Pointer to driver SLI ring object. 
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap);
	 * we should use the ndlp from the findnode if it is
	 * available.
669 */ 670 if ((!ndlp) && rrq->ndlp) 671 ndlp = rrq->ndlp; 672 673 if (!ndlp) 674 goto out; 675 676 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 677 rrq->send_rrq = 0; 678 rrq->xritag = 0; 679 rrq->rrq_stop_time = 0; 680 } 681 out: 682 mempool_free(rrq, phba->rrq_pool); 683 } 684 685 /** 686 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 687 * @phba: Pointer to HBA context object. 688 * 689 * This function is called with hbalock held. This function 690 * Checks if stop_time (ratov from setting rrq active) has 691 * been reached, if it has and the send_rrq flag is set then 692 * it will call lpfc_send_rrq. If the send_rrq flag is not set 693 * then it will just call the routine to clear the rrq and 694 * free the rrq resource. 695 * The timer is set to the next rrq that is going to expire before 696 * leaving the routine. 697 * 698 **/ 699 void 700 lpfc_handle_rrq_active(struct lpfc_hba *phba) 701 { 702 struct lpfc_node_rrq *rrq; 703 struct lpfc_node_rrq *nextrrq; 704 unsigned long next_time; 705 unsigned long iflags; 706 LIST_HEAD(send_rrq); 707 708 spin_lock_irqsave(&phba->hbalock, iflags); 709 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 710 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 711 list_for_each_entry_safe(rrq, nextrrq, 712 &phba->active_rrq_list, list) { 713 if (time_after(jiffies, rrq->rrq_stop_time)) 714 list_move(&rrq->list, &send_rrq); 715 else if (time_before(rrq->rrq_stop_time, next_time)) 716 next_time = rrq->rrq_stop_time; 717 } 718 spin_unlock_irqrestore(&phba->hbalock, iflags); 719 if ((!list_empty(&phba->active_rrq_list)) && 720 (!(phba->pport->load_flag & FC_UNLOADING))) 721 mod_timer(&phba->rrq_tmr, next_time); 722 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 723 list_del(&rrq->list); 724 if (!rrq->send_rrq) 725 /* this call will free the rrq */ 726 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 727 else if (lpfc_send_rrq(phba, rrq)) { 728 /* if we send the rrq then the completion handler 729 * will clear the bit in the xribitmap. 730 */ 731 lpfc_clr_rrq_active(phba, rrq->xritag, 732 rrq); 733 } 734 } 735 } 736 737 /** 738 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 739 * @vport: Pointer to vport context object. 740 * @xri: The xri used in the exchange. 741 * @did: The targets DID for this exchange. 742 * 743 * returns NULL = rrq not found in the phba->active_rrq_list. 744 * rrq = rrq for this xri and target. 745 **/ 746 struct lpfc_node_rrq * 747 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 748 { 749 struct lpfc_hba *phba = vport->phba; 750 struct lpfc_node_rrq *rrq; 751 struct lpfc_node_rrq *nextrrq; 752 unsigned long iflags; 753 754 if (phba->sli_rev != LPFC_SLI_REV4) 755 return NULL; 756 spin_lock_irqsave(&phba->hbalock, iflags); 757 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 758 if (rrq->vport == vport && rrq->xritag == xri && 759 rrq->nlp_DID == did){ 760 list_del(&rrq->list); 761 spin_unlock_irqrestore(&phba->hbalock, iflags); 762 return rrq; 763 } 764 } 765 spin_unlock_irqrestore(&phba->hbalock, iflags); 766 return NULL; 767 } 768 769 /** 770 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 771 * @vport: Pointer to vport context object. 772 * @ndlp: Pointer to the lpfc_node_list structure. 773 * If ndlp is NULL Remove all active RRQs for this vport from the 774 * phba->active_rrq_list and clear the rrq. 775 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function returns
 *   0 = rrq not active for this xri
 *   1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send the rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns 0 if the rrq was activated for this xri,
 * or < 0 if there was no memory or the ndlp was invalid.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
867 */ 868 if (NLP_CHK_FREE_REQ(ndlp)) 869 goto out; 870 871 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 872 goto out; 873 874 if (!ndlp->active_rrqs_xri_bitmap) 875 goto out; 876 877 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 878 goto out; 879 880 spin_unlock_irqrestore(&phba->hbalock, iflags); 881 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 882 if (!rrq) { 883 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 884 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 885 " DID:0x%x Send:%d\n", 886 xritag, rxid, ndlp->nlp_DID, send_rrq); 887 return -EINVAL; 888 } 889 if (phba->cfg_enable_rrq == 1) 890 rrq->send_rrq = send_rrq; 891 else 892 rrq->send_rrq = 0; 893 rrq->xritag = xritag; 894 rrq->rrq_stop_time = jiffies + 895 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 896 rrq->ndlp = ndlp; 897 rrq->nlp_DID = ndlp->nlp_DID; 898 rrq->vport = ndlp->vport; 899 rrq->rxid = rxid; 900 spin_lock_irqsave(&phba->hbalock, iflags); 901 empty = list_empty(&phba->active_rrq_list); 902 list_add_tail(&rrq->list, &phba->active_rrq_list); 903 phba->hba_flag |= HBA_RRQ_ACTIVE; 904 if (empty) 905 lpfc_worker_wake_up(phba); 906 spin_unlock_irqrestore(&phba->hbalock, iflags); 907 return 0; 908 out: 909 spin_unlock_irqrestore(&phba->hbalock, iflags); 910 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 911 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 912 " DID:0x%x Send:%d\n", 913 xritag, rxid, ndlp->nlp_DID, send_rrq); 914 return -EINVAL; 915 } 916 917 /** 918 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool 919 * @phba: Pointer to HBA context object. 920 * @piocb: Pointer to the iocbq. 921 * 922 * This function is called with the ring lock held. This function 923 * gets a new driver sglq object from the sglq list. If the 924 * list is not empty then it is successful, it returns pointer to the newly 925 * allocated sglq object else it returns NULL. 926 **/ 927 static struct lpfc_sglq * 928 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 929 { 930 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; 931 struct lpfc_sglq *sglq = NULL; 932 struct lpfc_sglq *start_sglq = NULL; 933 struct lpfc_scsi_buf *lpfc_cmd; 934 struct lpfc_nodelist *ndlp; 935 int found = 0; 936 937 lockdep_assert_held(&phba->hbalock); 938 939 if (piocbq->iocb_flag & LPFC_IO_FCP) { 940 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 941 ndlp = lpfc_cmd->rdata->pnode; 942 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 943 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { 944 ndlp = piocbq->context_un.ndlp; 945 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { 946 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) 947 ndlp = NULL; 948 else 949 ndlp = piocbq->context_un.ndlp; 950 } else { 951 ndlp = piocbq->context1; 952 } 953 954 spin_lock(&phba->sli4_hba.sgl_list_lock); 955 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); 956 start_sglq = sglq; 957 while (!found) { 958 if (!sglq) 959 break; 960 if (ndlp && ndlp->active_rrqs_xri_bitmap && 961 test_bit(sglq->sli4_lxritag, 962 ndlp->active_rrqs_xri_bitmap)) { 963 /* This xri has an rrq outstanding for this DID. 964 * put it back in the list and get another xri. 
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an sglq object from the sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty it returns a pointer to the newly allocated sglq object, else it
 * returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release a driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
1051 **/ 1052 static void 1053 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1054 { 1055 struct lpfc_sglq *sglq; 1056 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1057 unsigned long iflag = 0; 1058 struct lpfc_sli_ring *pring; 1059 1060 lockdep_assert_held(&phba->hbalock); 1061 1062 if (iocbq->sli4_xritag == NO_XRI) 1063 sglq = NULL; 1064 else 1065 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1066 1067 1068 if (sglq) { 1069 if (iocbq->iocb_flag & LPFC_IO_NVMET) { 1070 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1071 iflag); 1072 sglq->state = SGL_FREED; 1073 sglq->ndlp = NULL; 1074 list_add_tail(&sglq->list, 1075 &phba->sli4_hba.lpfc_nvmet_sgl_list); 1076 spin_unlock_irqrestore( 1077 &phba->sli4_hba.sgl_list_lock, iflag); 1078 goto out; 1079 } 1080 1081 pring = phba->sli4_hba.els_wq->pring; 1082 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1083 (sglq->state != SGL_XRI_ABORTED)) { 1084 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1085 iflag); 1086 list_add(&sglq->list, 1087 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1088 spin_unlock_irqrestore( 1089 &phba->sli4_hba.sgl_list_lock, iflag); 1090 } else { 1091 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1092 iflag); 1093 sglq->state = SGL_FREED; 1094 sglq->ndlp = NULL; 1095 list_add_tail(&sglq->list, 1096 &phba->sli4_hba.lpfc_els_sgl_list); 1097 spin_unlock_irqrestore( 1098 &phba->sli4_hba.sgl_list_lock, iflag); 1099 1100 /* Check if TXQ queue needs to be serviced */ 1101 if (!list_empty(&pring->txq)) 1102 lpfc_worker_wake_up(phba); 1103 } 1104 } 1105 1106 out: 1107 /* 1108 * Clean all volatile data fields, preserve iotag and node struct. 1109 */ 1110 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1111 iocbq->sli4_lxritag = NO_XRI; 1112 iocbq->sli4_xritag = NO_XRI; 1113 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | 1114 LPFC_IO_NVME_LS); 1115 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1116 } 1117 1118 1119 /** 1120 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1121 * @phba: Pointer to HBA context object. 1122 * @iocbq: Pointer to driver iocb object. 1123 * 1124 * This function is called with hbalock held to release driver 1125 * iocb object to the iocb pool. The iotag in the iocb object 1126 * does not change for each use of the iocb object. This function 1127 * clears all other fields of the iocb object when it is freed. 1128 **/ 1129 static void 1130 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1131 { 1132 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1133 1134 lockdep_assert_held(&phba->hbalock); 1135 1136 /* 1137 * Clean all volatile data fields, preserve iotag and node struct. 1138 */ 1139 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1140 iocbq->sli4_xritag = NO_XRI; 1141 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1142 } 1143 1144 /** 1145 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1146 * @phba: Pointer to HBA context object. 1147 * @iocbq: Pointer to driver iocb object. 1148 * 1149 * This function is called with hbalock held to release driver 1150 * iocb object to the iocb pool. The iotag in the iocb object 1151 * does not change for each use of the iocb object. This function 1152 * clears all other fields of the iocb object when it is freed. 
1153 **/ 1154 static void 1155 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1156 { 1157 lockdep_assert_held(&phba->hbalock); 1158 1159 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1160 phba->iocb_cnt--; 1161 } 1162 1163 /** 1164 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1165 * @phba: Pointer to HBA context object. 1166 * @iocbq: Pointer to driver iocb object. 1167 * 1168 * This function is called with no lock held to release the iocb to 1169 * iocb pool. 1170 **/ 1171 void 1172 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1173 { 1174 unsigned long iflags; 1175 1176 /* 1177 * Clean all volatile data fields, preserve iotag and node struct. 1178 */ 1179 spin_lock_irqsave(&phba->hbalock, iflags); 1180 __lpfc_sli_release_iocbq(phba, iocbq); 1181 spin_unlock_irqrestore(&phba->hbalock, iflags); 1182 } 1183 1184 /** 1185 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1186 * @phba: Pointer to HBA context object. 1187 * @iocblist: List of IOCBs. 1188 * @ulpstatus: ULP status in IOCB command field. 1189 * @ulpWord4: ULP word-4 in IOCB command field. 1190 * 1191 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1192 * on the list by invoking the complete callback function associated with the 1193 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1194 * fields. 1195 **/ 1196 void 1197 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1198 uint32_t ulpstatus, uint32_t ulpWord4) 1199 { 1200 struct lpfc_iocbq *piocb; 1201 1202 while (!list_empty(iocblist)) { 1203 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1204 if (!piocb->iocb_cmpl) 1205 lpfc_sli_release_iocbq(phba, piocb); 1206 else { 1207 piocb->iocb.ulpStatus = ulpstatus; 1208 piocb->iocb.un.ulpWord[4] = ulpWord4; 1209 (piocb->iocb_cmpl) (phba, piocb, piocb); 1210 } 1211 } 1212 return; 1213 } 1214 1215 /** 1216 * lpfc_sli_iocb_cmd_type - Get the iocb type 1217 * @iocb_cmnd: iocb command code. 1218 * 1219 * This function is called by ring event handler function to get the iocb type. 1220 * This function translates the iocb command to an iocb command type used to 1221 * decide the final disposition of each completed IOCB. 1222 * The function returns 1223 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1224 * LPFC_SOL_IOCB if it is a solicited iocb completion 1225 * LPFC_ABORT_IOCB if it is an abort iocb 1226 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1227 * 1228 * The caller is not required to hold any lock. 
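 *
 * For example (per the switch statement below), CMD_ELS_REQUEST64_CR maps to
 * LPFC_SOL_IOCB, CMD_ABORT_XRI_CN maps to LPFC_ABORT_IOCB, and
 * CMD_RCV_ELS_REQ64_CX maps to LPFC_UNSOL_IOCB.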
1229 **/ 1230 static lpfc_iocb_type 1231 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1232 { 1233 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1234 1235 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1236 return 0; 1237 1238 switch (iocb_cmnd) { 1239 case CMD_XMIT_SEQUENCE_CR: 1240 case CMD_XMIT_SEQUENCE_CX: 1241 case CMD_XMIT_BCAST_CN: 1242 case CMD_XMIT_BCAST_CX: 1243 case CMD_ELS_REQUEST_CR: 1244 case CMD_ELS_REQUEST_CX: 1245 case CMD_CREATE_XRI_CR: 1246 case CMD_CREATE_XRI_CX: 1247 case CMD_GET_RPI_CN: 1248 case CMD_XMIT_ELS_RSP_CX: 1249 case CMD_GET_RPI_CR: 1250 case CMD_FCP_IWRITE_CR: 1251 case CMD_FCP_IWRITE_CX: 1252 case CMD_FCP_IREAD_CR: 1253 case CMD_FCP_IREAD_CX: 1254 case CMD_FCP_ICMND_CR: 1255 case CMD_FCP_ICMND_CX: 1256 case CMD_FCP_TSEND_CX: 1257 case CMD_FCP_TRSP_CX: 1258 case CMD_FCP_TRECEIVE_CX: 1259 case CMD_FCP_AUTO_TRSP_CX: 1260 case CMD_ADAPTER_MSG: 1261 case CMD_ADAPTER_DUMP: 1262 case CMD_XMIT_SEQUENCE64_CR: 1263 case CMD_XMIT_SEQUENCE64_CX: 1264 case CMD_XMIT_BCAST64_CN: 1265 case CMD_XMIT_BCAST64_CX: 1266 case CMD_ELS_REQUEST64_CR: 1267 case CMD_ELS_REQUEST64_CX: 1268 case CMD_FCP_IWRITE64_CR: 1269 case CMD_FCP_IWRITE64_CX: 1270 case CMD_FCP_IREAD64_CR: 1271 case CMD_FCP_IREAD64_CX: 1272 case CMD_FCP_ICMND64_CR: 1273 case CMD_FCP_ICMND64_CX: 1274 case CMD_FCP_TSEND64_CX: 1275 case CMD_FCP_TRSP64_CX: 1276 case CMD_FCP_TRECEIVE64_CX: 1277 case CMD_GEN_REQUEST64_CR: 1278 case CMD_GEN_REQUEST64_CX: 1279 case CMD_XMIT_ELS_RSP64_CX: 1280 case DSSCMD_IWRITE64_CR: 1281 case DSSCMD_IWRITE64_CX: 1282 case DSSCMD_IREAD64_CR: 1283 case DSSCMD_IREAD64_CX: 1284 type = LPFC_SOL_IOCB; 1285 break; 1286 case CMD_ABORT_XRI_CN: 1287 case CMD_ABORT_XRI_CX: 1288 case CMD_CLOSE_XRI_CN: 1289 case CMD_CLOSE_XRI_CX: 1290 case CMD_XRI_ABORTED_CX: 1291 case CMD_ABORT_MXRI64_CN: 1292 case CMD_XMIT_BLS_RSP64_CX: 1293 type = LPFC_ABORT_IOCB; 1294 break; 1295 case CMD_RCV_SEQUENCE_CX: 1296 case CMD_RCV_ELS_REQ_CX: 1297 case CMD_RCV_SEQUENCE64_CX: 1298 case CMD_RCV_ELS_REQ64_CX: 1299 case CMD_ASYNC_STATUS: 1300 case CMD_IOCB_RCV_SEQ64_CX: 1301 case CMD_IOCB_RCV_ELS64_CX: 1302 case CMD_IOCB_RCV_CONT64_CX: 1303 case CMD_IOCB_RET_XRI64_CX: 1304 type = LPFC_UNSOL_IOCB; 1305 break; 1306 case CMD_IOCB_XMIT_MSEQ64_CR: 1307 case CMD_IOCB_XMIT_MSEQ64_CX: 1308 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1309 case CMD_IOCB_RCV_ELS_LIST64_CX: 1310 case CMD_IOCB_CLOSE_EXTENDED_CN: 1311 case CMD_IOCB_ABORT_EXTENDED_CN: 1312 case CMD_IOCB_RET_HBQE64_CN: 1313 case CMD_IOCB_FCP_IBIDIR64_CR: 1314 case CMD_IOCB_FCP_IBIDIR64_CX: 1315 case CMD_IOCB_FCP_ITASKMGT64_CX: 1316 case CMD_IOCB_LOGENTRY_CN: 1317 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1318 printk("%s - Unhandled SLI-3 Command x%x\n", 1319 __func__, iocb_cmnd); 1320 type = LPFC_UNKNOWN_IOCB; 1321 break; 1322 default: 1323 type = LPFC_UNKNOWN_IOCB; 1324 break; 1325 } 1326 1327 return type; 1328 } 1329 1330 /** 1331 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1332 * @phba: Pointer to HBA context object. 1333 * 1334 * This function is called from SLI initialization code 1335 * to configure every ring of the HBA's SLI interface. The 1336 * caller is not required to hold any lock. This function issues 1337 * a config_ring mailbox command for each ring. 1338 * This function returns zero if successful else returns a negative 1339 * error code. 
1340 **/ 1341 static int 1342 lpfc_sli_ring_map(struct lpfc_hba *phba) 1343 { 1344 struct lpfc_sli *psli = &phba->sli; 1345 LPFC_MBOXQ_t *pmb; 1346 MAILBOX_t *pmbox; 1347 int i, rc, ret = 0; 1348 1349 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1350 if (!pmb) 1351 return -ENOMEM; 1352 pmbox = &pmb->u.mb; 1353 phba->link_state = LPFC_INIT_MBX_CMDS; 1354 for (i = 0; i < psli->num_rings; i++) { 1355 lpfc_config_ring(phba, i, pmb); 1356 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1357 if (rc != MBX_SUCCESS) { 1358 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1359 "0446 Adapter failed to init (%d), " 1360 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1361 "ring %d\n", 1362 rc, pmbox->mbxCommand, 1363 pmbox->mbxStatus, i); 1364 phba->link_state = LPFC_HBA_ERROR; 1365 ret = -ENXIO; 1366 break; 1367 } 1368 } 1369 mempool_free(pmb, phba->mbox_mem_pool); 1370 return ret; 1371 } 1372 1373 /** 1374 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1375 * @phba: Pointer to HBA context object. 1376 * @pring: Pointer to driver SLI ring object. 1377 * @piocb: Pointer to the driver iocb object. 1378 * 1379 * This function is called with hbalock held. The function adds the 1380 * new iocb to txcmplq of the given ring. This function always returns 1381 * 0. If this function is called for ELS ring, this function checks if 1382 * there is a vport associated with the ELS command. This function also 1383 * starts els_tmofunc timer if this is an ELS command. 1384 **/ 1385 static int 1386 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1387 struct lpfc_iocbq *piocb) 1388 { 1389 lockdep_assert_held(&phba->hbalock); 1390 1391 BUG_ON(!piocb); 1392 1393 list_add_tail(&piocb->list, &pring->txcmplq); 1394 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1395 1396 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1397 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1398 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1399 BUG_ON(!piocb->vport); 1400 if (!(piocb->vport->load_flag & FC_UNLOADING)) 1401 mod_timer(&piocb->vport->els_tmofunc, 1402 jiffies + 1403 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1404 } 1405 1406 return 0; 1407 } 1408 1409 /** 1410 * lpfc_sli_ringtx_get - Get first element of the txq 1411 * @phba: Pointer to HBA context object. 1412 * @pring: Pointer to driver SLI ring object. 1413 * 1414 * This function is called with hbalock held to get next 1415 * iocb in txq of the given ring. If there is any iocb in 1416 * the txq, the function returns first iocb in the list after 1417 * removing the iocb from the list, else it returns NULL. 1418 **/ 1419 struct lpfc_iocbq * 1420 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1421 { 1422 struct lpfc_iocbq *cmd_iocb; 1423 1424 lockdep_assert_held(&phba->hbalock); 1425 1426 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1427 return cmd_iocb; 1428 } 1429 1430 /** 1431 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1432 * @phba: Pointer to HBA context object. 1433 * @pring: Pointer to driver SLI ring object. 1434 * 1435 * This function is called with hbalock held and the caller must post the 1436 * iocb without releasing the lock. If the caller releases the lock, 1437 * iocb slot returned by the function is not guaranteed to be available. 1438 * The function returns pointer to the next available iocb slot if there 1439 * is available slot in the ring, else it returns NULL. 
1440 * If the get index of the ring is ahead of the put index, the function 1441 * will post an error attention event to the worker thread to take the 1442 * HBA to offline state. 1443 **/ 1444 static IOCB_t * 1445 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1446 { 1447 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1448 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1449 1450 lockdep_assert_held(&phba->hbalock); 1451 1452 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1453 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1454 pring->sli.sli3.next_cmdidx = 0; 1455 1456 if (unlikely(pring->sli.sli3.local_getidx == 1457 pring->sli.sli3.next_cmdidx)) { 1458 1459 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1460 1461 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1462 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1463 "0315 Ring %d issue: portCmdGet %d " 1464 "is bigger than cmd ring %d\n", 1465 pring->ringno, 1466 pring->sli.sli3.local_getidx, 1467 max_cmd_idx); 1468 1469 phba->link_state = LPFC_HBA_ERROR; 1470 /* 1471 * All error attention handlers are posted to 1472 * worker thread 1473 */ 1474 phba->work_ha |= HA_ERATT; 1475 phba->work_hs = HS_FFER3; 1476 1477 lpfc_worker_wake_up(phba); 1478 1479 return NULL; 1480 } 1481 1482 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1483 return NULL; 1484 } 1485 1486 return lpfc_cmd_iocb(phba, pring); 1487 } 1488 1489 /** 1490 * lpfc_sli_next_iotag - Get an iotag for the iocb 1491 * @phba: Pointer to HBA context object. 1492 * @iocbq: Pointer to driver iocb object. 1493 * 1494 * This function gets an iotag for the iocb. If there is no unused iotag and 1495 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1496 * array and assigns a new iotag. 1497 * The function returns the allocated iotag if successful, else returns zero. 1498 * Zero is not a valid iotag. 1499 * The caller is not required to hold any lock. 
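 *
 * Illustrative note (not part of the original comment): while the next iotag
 * still fits within iocbq_lookup_len, the routine effectively does
 *
 *   iotag = psli->last_iotag + 1;
 *   psli->last_iotag = iotag;
 *   psli->iocbq_lookup[iotag] = iocbq;
 *   iocbq->iotag = iotag;
 *
 * and only reallocates the lookup table, growing it by
 * LPFC_IOCBQ_LOOKUP_INCREMENT entries up to a limit of 0xffff, when that
 * range is exhausted.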
1500 **/ 1501 uint16_t 1502 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1503 { 1504 struct lpfc_iocbq **new_arr; 1505 struct lpfc_iocbq **old_arr; 1506 size_t new_len; 1507 struct lpfc_sli *psli = &phba->sli; 1508 uint16_t iotag; 1509 1510 spin_lock_irq(&phba->hbalock); 1511 iotag = psli->last_iotag; 1512 if(++iotag < psli->iocbq_lookup_len) { 1513 psli->last_iotag = iotag; 1514 psli->iocbq_lookup[iotag] = iocbq; 1515 spin_unlock_irq(&phba->hbalock); 1516 iocbq->iotag = iotag; 1517 return iotag; 1518 } else if (psli->iocbq_lookup_len < (0xffff 1519 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1520 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1521 spin_unlock_irq(&phba->hbalock); 1522 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1523 GFP_KERNEL); 1524 if (new_arr) { 1525 spin_lock_irq(&phba->hbalock); 1526 old_arr = psli->iocbq_lookup; 1527 if (new_len <= psli->iocbq_lookup_len) { 1528 /* highly unprobable case */ 1529 kfree(new_arr); 1530 iotag = psli->last_iotag; 1531 if(++iotag < psli->iocbq_lookup_len) { 1532 psli->last_iotag = iotag; 1533 psli->iocbq_lookup[iotag] = iocbq; 1534 spin_unlock_irq(&phba->hbalock); 1535 iocbq->iotag = iotag; 1536 return iotag; 1537 } 1538 spin_unlock_irq(&phba->hbalock); 1539 return 0; 1540 } 1541 if (psli->iocbq_lookup) 1542 memcpy(new_arr, old_arr, 1543 ((psli->last_iotag + 1) * 1544 sizeof (struct lpfc_iocbq *))); 1545 psli->iocbq_lookup = new_arr; 1546 psli->iocbq_lookup_len = new_len; 1547 psli->last_iotag = iotag; 1548 psli->iocbq_lookup[iotag] = iocbq; 1549 spin_unlock_irq(&phba->hbalock); 1550 iocbq->iotag = iotag; 1551 kfree(old_arr); 1552 return iotag; 1553 } 1554 } else 1555 spin_unlock_irq(&phba->hbalock); 1556 1557 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1558 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1559 psli->last_iotag); 1560 1561 return 0; 1562 } 1563 1564 /** 1565 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1566 * @phba: Pointer to HBA context object. 1567 * @pring: Pointer to driver SLI ring object. 1568 * @iocb: Pointer to iocb slot in the ring. 1569 * @nextiocb: Pointer to driver iocb object which need to be 1570 * posted to firmware. 1571 * 1572 * This function is called with hbalock held to post a new iocb to 1573 * the firmware. This function copies the new iocb to ring iocb slot and 1574 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1575 * a completion call back for this iocb else the function will free the 1576 * iocb object. 1577 **/ 1578 static void 1579 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1580 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1581 { 1582 lockdep_assert_held(&phba->hbalock); 1583 /* 1584 * Set up an iotag 1585 */ 1586 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1587 1588 1589 if (pring->ringno == LPFC_ELS_RING) { 1590 lpfc_debugfs_slow_ring_trc(phba, 1591 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1592 *(((uint32_t *) &nextiocb->iocb) + 4), 1593 *(((uint32_t *) &nextiocb->iocb) + 6), 1594 *(((uint32_t *) &nextiocb->iocb) + 7)); 1595 } 1596 1597 /* 1598 * Issue iocb command to adapter 1599 */ 1600 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1601 wmb(); 1602 pring->stats.iocb_cmd++; 1603 1604 /* 1605 * If there is no completion routine to call, we can release the 1606 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1607 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1608 */ 1609 if (nextiocb->iocb_cmpl) 1610 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1611 else 1612 __lpfc_sli_release_iocbq(phba, nextiocb); 1613 1614 /* 1615 * Let the HBA know what IOCB slot will be the next one the 1616 * driver will put a command into. 1617 */ 1618 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1619 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1620 } 1621 1622 /** 1623 * lpfc_sli_update_full_ring - Update the chip attention register 1624 * @phba: Pointer to HBA context object. 1625 * @pring: Pointer to driver SLI ring object. 1626 * 1627 * The caller is not required to hold any lock for calling this function. 1628 * This function updates the chip attention bits for the ring to inform firmware 1629 * that there are pending work to be done for this ring and requests an 1630 * interrupt when there is space available in the ring. This function is 1631 * called when the driver is unable to post more iocbs to the ring due 1632 * to unavailability of space in the ring. 1633 **/ 1634 static void 1635 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1636 { 1637 int ringno = pring->ringno; 1638 1639 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1640 1641 wmb(); 1642 1643 /* 1644 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1645 * The HBA will tell us when an IOCB entry is available. 1646 */ 1647 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1648 readl(phba->CAregaddr); /* flush */ 1649 1650 pring->stats.iocb_cmd_full++; 1651 } 1652 1653 /** 1654 * lpfc_sli_update_ring - Update chip attention register 1655 * @phba: Pointer to HBA context object. 1656 * @pring: Pointer to driver SLI ring object. 1657 * 1658 * This function updates the chip attention register bit for the 1659 * given ring to inform HBA that there is more work to be done 1660 * in this ring. The caller is not required to hold any lock. 1661 **/ 1662 static void 1663 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1664 { 1665 int ringno = pring->ringno; 1666 1667 /* 1668 * Tell the HBA that there is work to do in this ring. 1669 */ 1670 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1671 wmb(); 1672 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1673 readl(phba->CAregaddr); /* flush */ 1674 } 1675 } 1676 1677 /** 1678 * lpfc_sli_resume_iocb - Process iocbs in the txq 1679 * @phba: Pointer to HBA context object. 1680 * @pring: Pointer to driver SLI ring object. 1681 * 1682 * This function is called with hbalock held to post pending iocbs 1683 * in the txq to the firmware. This function is called when driver 1684 * detects space available in the ring. 1685 **/ 1686 static void 1687 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1688 { 1689 IOCB_t *iocb; 1690 struct lpfc_iocbq *nextiocb; 1691 1692 lockdep_assert_held(&phba->hbalock); 1693 1694 /* 1695 * Check to see if: 1696 * (a) there is anything on the txq to send 1697 * (b) link is up 1698 * (c) link attention events can be processed (fcp ring only) 1699 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1700 */ 1701 1702 if (lpfc_is_link_up(phba) && 1703 (!list_empty(&pring->txq)) && 1704 (pring->ringno != LPFC_FCP_RING || 1705 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1706 1707 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1708 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1709 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1710 1711 if (iocb) 1712 lpfc_sli_update_ring(phba, pring); 1713 else 1714 lpfc_sli_update_full_ring(phba, pring); 1715 } 1716 1717 return; 1718 } 1719 1720 /** 1721 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1722 * @phba: Pointer to HBA context object. 1723 * @hbqno: HBQ number. 1724 * 1725 * This function is called with hbalock held to get the next 1726 * available slot for the given HBQ. If there is free slot 1727 * available for the HBQ it will return pointer to the next available 1728 * HBQ entry else it will return NULL. 1729 **/ 1730 static struct lpfc_hbq_entry * 1731 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1732 { 1733 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1734 1735 lockdep_assert_held(&phba->hbalock); 1736 1737 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1738 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1739 hbqp->next_hbqPutIdx = 0; 1740 1741 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1742 uint32_t raw_index = phba->hbq_get[hbqno]; 1743 uint32_t getidx = le32_to_cpu(raw_index); 1744 1745 hbqp->local_hbqGetIdx = getidx; 1746 1747 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1748 lpfc_printf_log(phba, KERN_ERR, 1749 LOG_SLI | LOG_VPORT, 1750 "1802 HBQ %d: local_hbqGetIdx " 1751 "%u is > than hbqp->entry_count %u\n", 1752 hbqno, hbqp->local_hbqGetIdx, 1753 hbqp->entry_count); 1754 1755 phba->link_state = LPFC_HBA_ERROR; 1756 return NULL; 1757 } 1758 1759 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1760 return NULL; 1761 } 1762 1763 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1764 hbqp->hbqPutIdx; 1765 } 1766 1767 /** 1768 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1769 * @phba: Pointer to HBA context object. 1770 * 1771 * This function is called with no lock held to free all the 1772 * hbq buffers while uninitializing the SLI interface. It also 1773 * frees the HBQ buffers returned by the firmware but not yet 1774 * processed by the upper layers. 1775 **/ 1776 void 1777 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1778 { 1779 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1780 struct hbq_dmabuf *hbq_buf; 1781 unsigned long flags; 1782 int i, hbq_count; 1783 1784 hbq_count = lpfc_sli_hbq_count(); 1785 /* Return all memory used by all HBQs */ 1786 spin_lock_irqsave(&phba->hbalock, flags); 1787 for (i = 0; i < hbq_count; ++i) { 1788 list_for_each_entry_safe(dmabuf, next_dmabuf, 1789 &phba->hbqs[i].hbq_buffer_list, list) { 1790 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1791 list_del(&hbq_buf->dbuf.list); 1792 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1793 } 1794 phba->hbqs[i].buffer_count = 0; 1795 } 1796 1797 /* Mark the HBQs not in use */ 1798 phba->hbq_in_use = 0; 1799 spin_unlock_irqrestore(&phba->hbalock, flags); 1800 } 1801 1802 /** 1803 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1804 * @phba: Pointer to HBA context object. 1805 * @hbqno: HBQ number. 1806 * @hbq_buf: Pointer to HBQ buffer. 1807 * 1808 * This function is called with the hbalock held to post a 1809 * hbq buffer to the firmware. If the function finds an empty 1810 * slot in the HBQ, it will post the buffer. 
 * The function returns zero if the buffer was posted to the firmware
 * successfully, otherwise it returns an error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function returns zero if it
 * successfully posts the buffer, otherwise it returns an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic.
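 * (For orientation: entry_count bounds how many receive buffers this HBQ can
 * hold, while init_count and add_count are the batch sizes used by
 * lpfc_sli_hbqbuf_init_hbqs() and lpfc_sli_hbqbuf_add_hbqs() below.)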
*/ 1902 static struct lpfc_hbq_init lpfc_els_hbq = { 1903 .rn = 1, 1904 .entry_count = 256, 1905 .mask_count = 0, 1906 .profile = 0, 1907 .ring_mask = (1 << LPFC_ELS_RING), 1908 .buffer_count = 0, 1909 .init_count = 40, 1910 .add_count = 40, 1911 }; 1912 1913 /* Array of HBQs */ 1914 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1915 &lpfc_els_hbq, 1916 }; 1917 1918 /** 1919 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1920 * @phba: Pointer to HBA context object. 1921 * @hbqno: HBQ number. 1922 * @count: Number of HBQ buffers to be posted. 1923 * 1924 * This function is called with no lock held to post more hbq buffers to the 1925 * given HBQ. The function returns the number of HBQ buffers successfully 1926 * posted. 1927 **/ 1928 static int 1929 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1930 { 1931 uint32_t i, posted = 0; 1932 unsigned long flags; 1933 struct hbq_dmabuf *hbq_buffer; 1934 LIST_HEAD(hbq_buf_list); 1935 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1936 return 0; 1937 1938 if ((phba->hbqs[hbqno].buffer_count + count) > 1939 lpfc_hbq_defs[hbqno]->entry_count) 1940 count = lpfc_hbq_defs[hbqno]->entry_count - 1941 phba->hbqs[hbqno].buffer_count; 1942 if (!count) 1943 return 0; 1944 /* Allocate HBQ entries */ 1945 for (i = 0; i < count; i++) { 1946 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1947 if (!hbq_buffer) 1948 break; 1949 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1950 } 1951 /* Check whether HBQ is still in use */ 1952 spin_lock_irqsave(&phba->hbalock, flags); 1953 if (!phba->hbq_in_use) 1954 goto err; 1955 while (!list_empty(&hbq_buf_list)) { 1956 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1957 dbuf.list); 1958 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1959 (hbqno << 16)); 1960 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1961 phba->hbqs[hbqno].buffer_count++; 1962 posted++; 1963 } else 1964 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1965 } 1966 spin_unlock_irqrestore(&phba->hbalock, flags); 1967 return posted; 1968 err: 1969 spin_unlock_irqrestore(&phba->hbalock, flags); 1970 while (!list_empty(&hbq_buf_list)) { 1971 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1972 dbuf.list); 1973 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1974 } 1975 return 0; 1976 } 1977 1978 /** 1979 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1980 * @phba: Pointer to HBA context object. 1981 * @qno: HBQ number. 1982 * 1983 * This function posts more buffers to the HBQ. This function 1984 * is called with no lock held. The function returns the number of HBQ entries 1985 * successfully allocated. 1986 **/ 1987 int 1988 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1989 { 1990 if (phba->sli_rev == LPFC_SLI_REV4) 1991 return 0; 1992 else 1993 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1994 lpfc_hbq_defs[qno]->add_count); 1995 } 1996 1997 /** 1998 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1999 * @phba: Pointer to HBA context object. 2000 * @qno: HBQ queue number. 2001 * 2002 * This function is called from SLI initialization code path with 2003 * no lock held to post initial HBQ buffers to firmware. The 2004 * function returns the number of HBQ entries successfully allocated. 
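 *
 * A typical call, sketched for illustration only (the single HBQ defined in
 * this file is the ELS/CT one, index LPFC_ELS_HBQ):
 *
 *	posted = lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
 *	if (!posted)
 *		... treat as a setup failure in the caller ...
 *
 * How the caller reacts to a zero return is up to the init path and is not
 * prescribed here.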
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the driver hbq buffer list.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the driver header receive queue object.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
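 *
 * Illustrative use (an assumed caller pattern, not a quote of a specific
 * call site): once the upper layer is finished with a received hbq_dmabuf
 * it is handed back, under the hbalock, so the firmware slot can be reused:
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_free_hbq(phba, hbq_entry);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *
 * (hbq_entry and flags are placeholder names for the caller's variables.)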
2102 **/ 2103 void 2104 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2105 { 2106 uint32_t hbqno; 2107 2108 if (hbq_buffer) { 2109 hbqno = hbq_buffer->tag >> 16; 2110 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2111 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2112 } 2113 } 2114 2115 /** 2116 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2117 * @mbxCommand: mailbox command code. 2118 * 2119 * This function is called by the mailbox event handler function to verify 2120 * that the completed mailbox command is a legitimate mailbox command. If the 2121 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2122 * and the mailbox event handler will take the HBA offline. 2123 **/ 2124 static int 2125 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2126 { 2127 uint8_t ret; 2128 2129 switch (mbxCommand) { 2130 case MBX_LOAD_SM: 2131 case MBX_READ_NV: 2132 case MBX_WRITE_NV: 2133 case MBX_WRITE_VPARMS: 2134 case MBX_RUN_BIU_DIAG: 2135 case MBX_INIT_LINK: 2136 case MBX_DOWN_LINK: 2137 case MBX_CONFIG_LINK: 2138 case MBX_CONFIG_RING: 2139 case MBX_RESET_RING: 2140 case MBX_READ_CONFIG: 2141 case MBX_READ_RCONFIG: 2142 case MBX_READ_SPARM: 2143 case MBX_READ_STATUS: 2144 case MBX_READ_RPI: 2145 case MBX_READ_XRI: 2146 case MBX_READ_REV: 2147 case MBX_READ_LNK_STAT: 2148 case MBX_REG_LOGIN: 2149 case MBX_UNREG_LOGIN: 2150 case MBX_CLEAR_LA: 2151 case MBX_DUMP_MEMORY: 2152 case MBX_DUMP_CONTEXT: 2153 case MBX_RUN_DIAGS: 2154 case MBX_RESTART: 2155 case MBX_UPDATE_CFG: 2156 case MBX_DOWN_LOAD: 2157 case MBX_DEL_LD_ENTRY: 2158 case MBX_RUN_PROGRAM: 2159 case MBX_SET_MASK: 2160 case MBX_SET_VARIABLE: 2161 case MBX_UNREG_D_ID: 2162 case MBX_KILL_BOARD: 2163 case MBX_CONFIG_FARP: 2164 case MBX_BEACON: 2165 case MBX_LOAD_AREA: 2166 case MBX_RUN_BIU_DIAG64: 2167 case MBX_CONFIG_PORT: 2168 case MBX_READ_SPARM64: 2169 case MBX_READ_RPI64: 2170 case MBX_REG_LOGIN64: 2171 case MBX_READ_TOPOLOGY: 2172 case MBX_WRITE_WWN: 2173 case MBX_SET_DEBUG: 2174 case MBX_LOAD_EXP_ROM: 2175 case MBX_ASYNCEVT_ENABLE: 2176 case MBX_REG_VPI: 2177 case MBX_UNREG_VPI: 2178 case MBX_HEARTBEAT: 2179 case MBX_PORT_CAPABILITIES: 2180 case MBX_PORT_IOV_CONTROL: 2181 case MBX_SLI4_CONFIG: 2182 case MBX_SLI4_REQ_FTRS: 2183 case MBX_REG_FCFI: 2184 case MBX_UNREG_FCFI: 2185 case MBX_REG_VFI: 2186 case MBX_UNREG_VFI: 2187 case MBX_INIT_VPI: 2188 case MBX_INIT_VFI: 2189 case MBX_RESUME_RPI: 2190 case MBX_READ_EVENT_LOG_STATUS: 2191 case MBX_READ_EVENT_LOG: 2192 case MBX_SECURITY_MGMT: 2193 case MBX_AUTH_PORT: 2194 case MBX_ACCESS_VDATA: 2195 ret = mbxCommand; 2196 break; 2197 default: 2198 ret = MBX_SHUTDOWN; 2199 break; 2200 } 2201 return ret; 2202 } 2203 2204 /** 2205 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2206 * @phba: Pointer to HBA context object. 2207 * @pmboxq: Pointer to mailbox command. 2208 * 2209 * This is completion handler function for mailbox commands issued from 2210 * lpfc_sli_issue_mbox_wait function. This function is called by the 2211 * mailbox event handler function with no lock held. This function 2212 * will wake up thread waiting on the wait queue pointed by context1 2213 * of the mailbox. 2214 **/ 2215 void 2216 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2217 { 2218 wait_queue_head_t *pdone_q; 2219 unsigned long drvr_flag; 2220 2221 /* 2222 * If pdone_q is empty, the driver thread gave up waiting and 2223 * continued running. 
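	 *
	 * Handshake sketch, assuming the waiter is lpfc_sli_issue_mbox_wait()
	 * (the function this handler is documented to serve):
	 *
	 *	waiter:	pmboxq->context1 = &done_q;
	 *		issue the mailbox, then sleep until
	 *		(pmboxq->mbox_flag & LPFC_MBX_WAKE) becomes true
	 *	here:	set LPFC_MBX_WAKE, then wake_up_interruptible(pdone_q)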
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}


/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to reclaim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts that
 * reference back.
2318 * 2319 **/ 2320 void 2321 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2322 { 2323 struct lpfc_vport *vport = pmb->vport; 2324 struct lpfc_nodelist *ndlp; 2325 2326 ndlp = pmb->context1; 2327 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2328 if (phba->sli_rev == LPFC_SLI_REV4 && 2329 (bf_get(lpfc_sli_intf_if_type, 2330 &phba->sli4_hba.sli_intf) == 2331 LPFC_SLI_INTF_IF_TYPE_2)) { 2332 if (ndlp) { 2333 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 2334 "0010 UNREG_LOGIN vpi:%x " 2335 "rpi:%x DID:%x map:%x %p\n", 2336 vport->vpi, ndlp->nlp_rpi, 2337 ndlp->nlp_DID, 2338 ndlp->nlp_usg_map, ndlp); 2339 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2340 lpfc_nlp_put(ndlp); 2341 } 2342 } 2343 } 2344 2345 mempool_free(pmb, phba->mbox_mem_pool); 2346 } 2347 2348 /** 2349 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2350 * @phba: Pointer to HBA context object. 2351 * 2352 * This function is called with no lock held. This function processes all 2353 * the completed mailbox commands and gives it to upper layers. The interrupt 2354 * service routine processes mailbox completion interrupt and adds completed 2355 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2356 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2357 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2358 * function returns the mailbox commands to the upper layer by calling the 2359 * completion handler function of each mailbox. 2360 **/ 2361 int 2362 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2363 { 2364 MAILBOX_t *pmbox; 2365 LPFC_MBOXQ_t *pmb; 2366 int rc; 2367 LIST_HEAD(cmplq); 2368 2369 phba->sli.slistat.mbox_event++; 2370 2371 /* Get all completed mailboxe buffers into the cmplq */ 2372 spin_lock_irq(&phba->hbalock); 2373 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2374 spin_unlock_irq(&phba->hbalock); 2375 2376 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2377 do { 2378 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2379 if (pmb == NULL) 2380 break; 2381 2382 pmbox = &pmb->u.mb; 2383 2384 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2385 if (pmb->vport) { 2386 lpfc_debugfs_disc_trc(pmb->vport, 2387 LPFC_DISC_TRC_MBOX_VPORT, 2388 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2389 (uint32_t)pmbox->mbxCommand, 2390 pmbox->un.varWords[0], 2391 pmbox->un.varWords[1]); 2392 } 2393 else { 2394 lpfc_debugfs_disc_trc(phba->pport, 2395 LPFC_DISC_TRC_MBOX, 2396 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2397 (uint32_t)pmbox->mbxCommand, 2398 pmbox->un.varWords[0], 2399 pmbox->un.varWords[1]); 2400 } 2401 } 2402 2403 /* 2404 * It is a fatal error if unknown mbox command completion. 2405 */ 2406 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2407 MBX_SHUTDOWN) { 2408 /* Unknown mailbox command compl */ 2409 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2410 "(%d):0323 Unknown Mailbox command " 2411 "x%x (x%x/x%x) Cmpl\n", 2412 pmb->vport ? 
pmb->vport->vpi : 0, 2413 pmbox->mbxCommand, 2414 lpfc_sli_config_mbox_subsys_get(phba, 2415 pmb), 2416 lpfc_sli_config_mbox_opcode_get(phba, 2417 pmb)); 2418 phba->link_state = LPFC_HBA_ERROR; 2419 phba->work_hs = HS_FFER3; 2420 lpfc_handle_eratt(phba); 2421 continue; 2422 } 2423 2424 if (pmbox->mbxStatus) { 2425 phba->sli.slistat.mbox_stat_err++; 2426 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2427 /* Mbox cmd cmpl error - RETRYing */ 2428 lpfc_printf_log(phba, KERN_INFO, 2429 LOG_MBOX | LOG_SLI, 2430 "(%d):0305 Mbox cmd cmpl " 2431 "error - RETRYing Data: x%x " 2432 "(x%x/x%x) x%x x%x x%x\n", 2433 pmb->vport ? pmb->vport->vpi : 0, 2434 pmbox->mbxCommand, 2435 lpfc_sli_config_mbox_subsys_get(phba, 2436 pmb), 2437 lpfc_sli_config_mbox_opcode_get(phba, 2438 pmb), 2439 pmbox->mbxStatus, 2440 pmbox->un.varWords[0], 2441 pmb->vport->port_state); 2442 pmbox->mbxStatus = 0; 2443 pmbox->mbxOwner = OWN_HOST; 2444 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2445 if (rc != MBX_NOT_FINISHED) 2446 continue; 2447 } 2448 } 2449 2450 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2451 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2452 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2453 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2454 "x%x x%x x%x\n", 2455 pmb->vport ? pmb->vport->vpi : 0, 2456 pmbox->mbxCommand, 2457 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2458 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2459 pmb->mbox_cmpl, 2460 *((uint32_t *) pmbox), 2461 pmbox->un.varWords[0], 2462 pmbox->un.varWords[1], 2463 pmbox->un.varWords[2], 2464 pmbox->un.varWords[3], 2465 pmbox->un.varWords[4], 2466 pmbox->un.varWords[5], 2467 pmbox->un.varWords[6], 2468 pmbox->un.varWords[7], 2469 pmbox->un.varWords[8], 2470 pmbox->un.varWords[9], 2471 pmbox->un.varWords[10]); 2472 2473 if (pmb->mbox_cmpl) 2474 pmb->mbox_cmpl(phba,pmb); 2475 } while (1); 2476 return 0; 2477 } 2478 2479 /** 2480 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2481 * @phba: Pointer to HBA context object. 2482 * @pring: Pointer to driver SLI ring object. 2483 * @tag: buffer tag. 2484 * 2485 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2486 * is set in the tag the buffer is posted for a particular exchange, 2487 * the function will return the buffer without replacing the buffer. 2488 * If the buffer is for unsolicited ELS or CT traffic, this function 2489 * returns the buffer and also posts another buffer to the firmware. 2490 **/ 2491 static struct lpfc_dmabuf * 2492 lpfc_sli_get_buff(struct lpfc_hba *phba, 2493 struct lpfc_sli_ring *pring, 2494 uint32_t tag) 2495 { 2496 struct hbq_dmabuf *hbq_entry; 2497 2498 if (tag & QUE_BUFTAG_BIT) 2499 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2500 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2501 if (!hbq_entry) 2502 return NULL; 2503 return &hbq_entry->dbuf; 2504 } 2505 2506 /** 2507 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2508 * @phba: Pointer to HBA context object. 2509 * @pring: Pointer to driver SLI ring object. 2510 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2511 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2512 * @fch_type: the type for the first frame of the sequence. 2513 * 2514 * This function is called with no lock held. This function uses the r_ctl and 2515 * type of the received sequence to find the correct callback function to call 2516 * to process the sequence. 
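 *
 * For illustration: the match is made against the pring->prt[] table that
 * ring setup fills in, e.g. an entry shaped like
 *
 *	prt[i].rctl = FC_RCTL_ELS_REQ;
 *	prt[i].type = FC_TYPE_ELS;
 *	prt[i].lpfc_sli_rcv_unsol_event = <registered ELS handler>;
 *
 * dispatches unsolicited ELS sequences. Which handlers are registered is
 * decided at ring setup time, not here.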
2517 **/ 2518 static int 2519 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2520 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2521 uint32_t fch_type) 2522 { 2523 int i; 2524 2525 switch (fch_type) { 2526 case FC_TYPE_NVME: 2527 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2528 return 1; 2529 default: 2530 break; 2531 } 2532 2533 /* unSolicited Responses */ 2534 if (pring->prt[0].profile) { 2535 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2536 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2537 saveq); 2538 return 1; 2539 } 2540 /* We must search, based on rctl / type 2541 for the right routine */ 2542 for (i = 0; i < pring->num_mask; i++) { 2543 if ((pring->prt[i].rctl == fch_r_ctl) && 2544 (pring->prt[i].type == fch_type)) { 2545 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2546 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2547 (phba, pring, saveq); 2548 return 1; 2549 } 2550 } 2551 return 0; 2552 } 2553 2554 /** 2555 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2556 * @phba: Pointer to HBA context object. 2557 * @pring: Pointer to driver SLI ring object. 2558 * @saveq: Pointer to the unsolicited iocb. 2559 * 2560 * This function is called with no lock held by the ring event handler 2561 * when there is an unsolicited iocb posted to the response ring by the 2562 * firmware. This function gets the buffer associated with the iocbs 2563 * and calls the event handler for the ring. This function handles both 2564 * qring buffers and hbq buffers. 2565 * When the function returns 1 the caller can free the iocb object otherwise 2566 * upper layer functions will free the iocb objects. 2567 **/ 2568 static int 2569 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2570 struct lpfc_iocbq *saveq) 2571 { 2572 IOCB_t * irsp; 2573 WORD5 * w5p; 2574 uint32_t Rctl, Type; 2575 struct lpfc_iocbq *iocbq; 2576 struct lpfc_dmabuf *dmzbuf; 2577 2578 irsp = &(saveq->iocb); 2579 2580 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2581 if (pring->lpfc_sli_rcv_async_status) 2582 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2583 else 2584 lpfc_printf_log(phba, 2585 KERN_WARNING, 2586 LOG_SLI, 2587 "0316 Ring %d handler: unexpected " 2588 "ASYNC_STATUS iocb received evt_code " 2589 "0x%x\n", 2590 pring->ringno, 2591 irsp->un.asyncstat.evt_code); 2592 return 1; 2593 } 2594 2595 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2596 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2597 if (irsp->ulpBdeCount > 0) { 2598 dmzbuf = lpfc_sli_get_buff(phba, pring, 2599 irsp->un.ulpWord[3]); 2600 lpfc_in_buf_free(phba, dmzbuf); 2601 } 2602 2603 if (irsp->ulpBdeCount > 1) { 2604 dmzbuf = lpfc_sli_get_buff(phba, pring, 2605 irsp->unsli3.sli3Words[3]); 2606 lpfc_in_buf_free(phba, dmzbuf); 2607 } 2608 2609 if (irsp->ulpBdeCount > 2) { 2610 dmzbuf = lpfc_sli_get_buff(phba, pring, 2611 irsp->unsli3.sli3Words[7]); 2612 lpfc_in_buf_free(phba, dmzbuf); 2613 } 2614 2615 return 1; 2616 } 2617 2618 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2619 if (irsp->ulpBdeCount != 0) { 2620 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2621 irsp->un.ulpWord[3]); 2622 if (!saveq->context2) 2623 lpfc_printf_log(phba, 2624 KERN_ERR, 2625 LOG_SLI, 2626 "0341 Ring %d Cannot find buffer for " 2627 "an unsolicited iocb. 
tag 0x%x\n", 2628 pring->ringno, 2629 irsp->un.ulpWord[3]); 2630 } 2631 if (irsp->ulpBdeCount == 2) { 2632 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2633 irsp->unsli3.sli3Words[7]); 2634 if (!saveq->context3) 2635 lpfc_printf_log(phba, 2636 KERN_ERR, 2637 LOG_SLI, 2638 "0342 Ring %d Cannot find buffer for an" 2639 " unsolicited iocb. tag 0x%x\n", 2640 pring->ringno, 2641 irsp->unsli3.sli3Words[7]); 2642 } 2643 list_for_each_entry(iocbq, &saveq->list, list) { 2644 irsp = &(iocbq->iocb); 2645 if (irsp->ulpBdeCount != 0) { 2646 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2647 irsp->un.ulpWord[3]); 2648 if (!iocbq->context2) 2649 lpfc_printf_log(phba, 2650 KERN_ERR, 2651 LOG_SLI, 2652 "0343 Ring %d Cannot find " 2653 "buffer for an unsolicited iocb" 2654 ". tag 0x%x\n", pring->ringno, 2655 irsp->un.ulpWord[3]); 2656 } 2657 if (irsp->ulpBdeCount == 2) { 2658 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2659 irsp->unsli3.sli3Words[7]); 2660 if (!iocbq->context3) 2661 lpfc_printf_log(phba, 2662 KERN_ERR, 2663 LOG_SLI, 2664 "0344 Ring %d Cannot find " 2665 "buffer for an unsolicited " 2666 "iocb. tag 0x%x\n", 2667 pring->ringno, 2668 irsp->unsli3.sli3Words[7]); 2669 } 2670 } 2671 } 2672 if (irsp->ulpBdeCount != 0 && 2673 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2674 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2675 int found = 0; 2676 2677 /* search continue save q for same XRI */ 2678 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2679 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2680 saveq->iocb.unsli3.rcvsli3.ox_id) { 2681 list_add_tail(&saveq->list, &iocbq->list); 2682 found = 1; 2683 break; 2684 } 2685 } 2686 if (!found) 2687 list_add_tail(&saveq->clist, 2688 &pring->iocb_continue_saveq); 2689 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2690 list_del_init(&iocbq->clist); 2691 saveq = iocbq; 2692 irsp = &(saveq->iocb); 2693 } else 2694 return 0; 2695 } 2696 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2697 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2698 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2699 Rctl = FC_RCTL_ELS_REQ; 2700 Type = FC_TYPE_ELS; 2701 } else { 2702 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2703 Rctl = w5p->hcsw.Rctl; 2704 Type = w5p->hcsw.Type; 2705 2706 /* Firmware Workaround */ 2707 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2708 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2709 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2710 Rctl = FC_RCTL_ELS_REQ; 2711 Type = FC_TYPE_ELS; 2712 w5p->hcsw.Rctl = Rctl; 2713 w5p->hcsw.Type = Type; 2714 } 2715 } 2716 2717 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2718 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2719 "0313 Ring %d handler: unexpected Rctl x%x " 2720 "Type x%x received\n", 2721 pring->ringno, Rctl, Type); 2722 2723 return 1; 2724 } 2725 2726 /** 2727 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2728 * @phba: Pointer to HBA context object. 2729 * @pring: Pointer to driver SLI ring object. 2730 * @prspiocb: Pointer to response iocb object. 2731 * 2732 * This function looks up the iocb_lookup table to get the command iocb 2733 * corresponding to the given response iocb using the iotag of the 2734 * response iocb. This function is called with the hbalock held. 2735 * This function returns the command iocb object if it finds the command 2736 * iocb else returns NULL. 
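 *
 * In other words (a reading aid, not additional behaviour): the iotag the
 * port echoes back in the response was assigned earlier by
 * lpfc_sli_next_iotag(), so recovering the command is a plain array index:
 *
 *	iotag    = prspiocb->iocb.ulpIoTag;
 *	cmd_iocb = phba->sli.iocbq_lookup[iotag];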
2737 **/ 2738 static struct lpfc_iocbq * 2739 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2740 struct lpfc_sli_ring *pring, 2741 struct lpfc_iocbq *prspiocb) 2742 { 2743 struct lpfc_iocbq *cmd_iocb = NULL; 2744 uint16_t iotag; 2745 lockdep_assert_held(&phba->hbalock); 2746 2747 iotag = prspiocb->iocb.ulpIoTag; 2748 2749 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2750 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2751 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2752 /* remove from txcmpl queue list */ 2753 list_del_init(&cmd_iocb->list); 2754 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2755 return cmd_iocb; 2756 } 2757 } 2758 2759 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2760 "0317 iotag x%x is out of " 2761 "range: max iotag x%x wd0 x%x\n", 2762 iotag, phba->sli.last_iotag, 2763 *(((uint32_t *) &prspiocb->iocb) + 7)); 2764 return NULL; 2765 } 2766 2767 /** 2768 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2769 * @phba: Pointer to HBA context object. 2770 * @pring: Pointer to driver SLI ring object. 2771 * @iotag: IOCB tag. 2772 * 2773 * This function looks up the iocb_lookup table to get the command iocb 2774 * corresponding to the given iotag. This function is called with the 2775 * hbalock held. 2776 * This function returns the command iocb object if it finds the command 2777 * iocb else returns NULL. 2778 **/ 2779 static struct lpfc_iocbq * 2780 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2781 struct lpfc_sli_ring *pring, uint16_t iotag) 2782 { 2783 struct lpfc_iocbq *cmd_iocb = NULL; 2784 2785 lockdep_assert_held(&phba->hbalock); 2786 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2787 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2788 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2789 /* remove from txcmpl queue list */ 2790 list_del_init(&cmd_iocb->list); 2791 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2792 return cmd_iocb; 2793 } 2794 } 2795 2796 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2797 "0372 iotag x%x lookup error: max iotag (x%x) " 2798 "iocb_flag x%x\n", 2799 iotag, phba->sli.last_iotag, 2800 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 2801 return NULL; 2802 } 2803 2804 /** 2805 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2806 * @phba: Pointer to HBA context object. 2807 * @pring: Pointer to driver SLI ring object. 2808 * @saveq: Pointer to the response iocb to be processed. 2809 * 2810 * This function is called by the ring event handler for non-fcp 2811 * rings when there is a new response iocb in the response ring. 2812 * The caller is not required to hold any locks. This function 2813 * gets the command iocb associated with the response iocb and 2814 * calls the completion handler for the command iocb. If there 2815 * is no completion handler, the function will free the resources 2816 * associated with command iocb. If the response iocb is for 2817 * an already aborted command iocb, the status of the completion 2818 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2819 * This function always returns 1. 
2820 **/ 2821 static int 2822 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2823 struct lpfc_iocbq *saveq) 2824 { 2825 struct lpfc_iocbq *cmdiocbp; 2826 int rc = 1; 2827 unsigned long iflag; 2828 2829 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2830 spin_lock_irqsave(&phba->hbalock, iflag); 2831 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2832 spin_unlock_irqrestore(&phba->hbalock, iflag); 2833 2834 if (cmdiocbp) { 2835 if (cmdiocbp->iocb_cmpl) { 2836 /* 2837 * If an ELS command failed send an event to mgmt 2838 * application. 2839 */ 2840 if (saveq->iocb.ulpStatus && 2841 (pring->ringno == LPFC_ELS_RING) && 2842 (cmdiocbp->iocb.ulpCommand == 2843 CMD_ELS_REQUEST64_CR)) 2844 lpfc_send_els_failure_event(phba, 2845 cmdiocbp, saveq); 2846 2847 /* 2848 * Post all ELS completions to the worker thread. 2849 * All other are passed to the completion callback. 2850 */ 2851 if (pring->ringno == LPFC_ELS_RING) { 2852 if ((phba->sli_rev < LPFC_SLI_REV4) && 2853 (cmdiocbp->iocb_flag & 2854 LPFC_DRIVER_ABORTED)) { 2855 spin_lock_irqsave(&phba->hbalock, 2856 iflag); 2857 cmdiocbp->iocb_flag &= 2858 ~LPFC_DRIVER_ABORTED; 2859 spin_unlock_irqrestore(&phba->hbalock, 2860 iflag); 2861 saveq->iocb.ulpStatus = 2862 IOSTAT_LOCAL_REJECT; 2863 saveq->iocb.un.ulpWord[4] = 2864 IOERR_SLI_ABORTED; 2865 2866 /* Firmware could still be in progress 2867 * of DMAing payload, so don't free data 2868 * buffer till after a hbeat. 2869 */ 2870 spin_lock_irqsave(&phba->hbalock, 2871 iflag); 2872 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2873 spin_unlock_irqrestore(&phba->hbalock, 2874 iflag); 2875 } 2876 if (phba->sli_rev == LPFC_SLI_REV4) { 2877 if (saveq->iocb_flag & 2878 LPFC_EXCHANGE_BUSY) { 2879 /* Set cmdiocb flag for the 2880 * exchange busy so sgl (xri) 2881 * will not be released until 2882 * the abort xri is received 2883 * from hba. 2884 */ 2885 spin_lock_irqsave( 2886 &phba->hbalock, iflag); 2887 cmdiocbp->iocb_flag |= 2888 LPFC_EXCHANGE_BUSY; 2889 spin_unlock_irqrestore( 2890 &phba->hbalock, iflag); 2891 } 2892 if (cmdiocbp->iocb_flag & 2893 LPFC_DRIVER_ABORTED) { 2894 /* 2895 * Clear LPFC_DRIVER_ABORTED 2896 * bit in case it was driver 2897 * initiated abort. 2898 */ 2899 spin_lock_irqsave( 2900 &phba->hbalock, iflag); 2901 cmdiocbp->iocb_flag &= 2902 ~LPFC_DRIVER_ABORTED; 2903 spin_unlock_irqrestore( 2904 &phba->hbalock, iflag); 2905 cmdiocbp->iocb.ulpStatus = 2906 IOSTAT_LOCAL_REJECT; 2907 cmdiocbp->iocb.un.ulpWord[4] = 2908 IOERR_ABORT_REQUESTED; 2909 /* 2910 * For SLI4, irsiocb contains 2911 * NO_XRI in sli_xritag, it 2912 * shall not affect releasing 2913 * sgl (xri) process. 2914 */ 2915 saveq->iocb.ulpStatus = 2916 IOSTAT_LOCAL_REJECT; 2917 saveq->iocb.un.ulpWord[4] = 2918 IOERR_SLI_ABORTED; 2919 spin_lock_irqsave( 2920 &phba->hbalock, iflag); 2921 saveq->iocb_flag |= 2922 LPFC_DELAY_MEM_FREE; 2923 spin_unlock_irqrestore( 2924 &phba->hbalock, iflag); 2925 } 2926 } 2927 } 2928 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2929 } else 2930 lpfc_sli_release_iocbq(phba, cmdiocbp); 2931 } else { 2932 /* 2933 * Unknown initiating command based on the response iotag. 2934 * This could be the case on the ELS ring because of 2935 * lpfc_els_abort(). 
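		 * (lpfc_els_abort() completes and frees the affected ELS
		 * iocbs itself, so a late response for one of those
		 * exchanges no longer has a matching txcmplq entry; that is
		 * why the ELS ring is excluded from the warning below.)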
2936 */ 2937 if (pring->ringno != LPFC_ELS_RING) { 2938 /* 2939 * Ring <ringno> handler: unexpected completion IoTag 2940 * <IoTag> 2941 */ 2942 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2943 "0322 Ring %d handler: " 2944 "unexpected completion IoTag x%x " 2945 "Data: x%x x%x x%x x%x\n", 2946 pring->ringno, 2947 saveq->iocb.ulpIoTag, 2948 saveq->iocb.ulpStatus, 2949 saveq->iocb.un.ulpWord[4], 2950 saveq->iocb.ulpCommand, 2951 saveq->iocb.ulpContext); 2952 } 2953 } 2954 2955 return rc; 2956 } 2957 2958 /** 2959 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2960 * @phba: Pointer to HBA context object. 2961 * @pring: Pointer to driver SLI ring object. 2962 * 2963 * This function is called from the iocb ring event handlers when 2964 * put pointer is ahead of the get pointer for a ring. This function signal 2965 * an error attention condition to the worker thread and the worker 2966 * thread will transition the HBA to offline state. 2967 **/ 2968 static void 2969 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2970 { 2971 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2972 /* 2973 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2974 * rsp ring <portRspMax> 2975 */ 2976 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2977 "0312 Ring %d handler: portRspPut %d " 2978 "is bigger than rsp ring %d\n", 2979 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2980 pring->sli.sli3.numRiocb); 2981 2982 phba->link_state = LPFC_HBA_ERROR; 2983 2984 /* 2985 * All error attention handlers are posted to 2986 * worker thread 2987 */ 2988 phba->work_ha |= HA_ERATT; 2989 phba->work_hs = HS_FFER3; 2990 2991 lpfc_worker_wake_up(phba); 2992 2993 return; 2994 } 2995 2996 /** 2997 * lpfc_poll_eratt - Error attention polling timer timeout handler 2998 * @ptr: Pointer to address of HBA context object. 2999 * 3000 * This function is invoked by the Error Attention polling timer when the 3001 * timer times out. It will check the SLI Error Attention register for 3002 * possible attention events. If so, it will post an Error Attention event 3003 * and wake up worker thread to process it. Otherwise, it will set up the 3004 * Error Attention polling timer for the next poll. 3005 **/ 3006 void lpfc_poll_eratt(unsigned long ptr) 3007 { 3008 struct lpfc_hba *phba; 3009 uint32_t eratt = 0; 3010 uint64_t sli_intr, cnt; 3011 3012 phba = (struct lpfc_hba *)ptr; 3013 3014 /* Here we will also keep track of interrupts per sec of the hba */ 3015 sli_intr = phba->sli.slistat.sli_intr; 3016 3017 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3018 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3019 sli_intr); 3020 else 3021 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3022 3023 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3024 do_div(cnt, phba->eratt_poll_interval); 3025 phba->sli.slistat.sli_ips = cnt; 3026 3027 phba->sli.slistat.sli_prev_intr = sli_intr; 3028 3029 /* Check chip HA register for error event */ 3030 eratt = lpfc_sli_check_eratt(phba); 3031 3032 if (eratt) 3033 /* Tell the worker thread there is work to do */ 3034 lpfc_worker_wake_up(phba); 3035 else 3036 /* Restart the timer for next eratt poll */ 3037 mod_timer(&phba->eratt_poll, 3038 jiffies + 3039 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3040 return; 3041 } 3042 3043 3044 /** 3045 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3046 * @phba: Pointer to HBA context object. 3047 * @pring: Pointer to driver SLI ring object. 
3048 * @mask: Host attention register mask for this ring. 3049 * 3050 * This function is called from the interrupt context when there is a ring 3051 * event for the fcp ring. The caller does not hold any lock. 3052 * The function processes each response iocb in the response ring until it 3053 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3054 * LE bit set. The function will call the completion handler of the command iocb 3055 * if the response iocb indicates a completion for a command iocb or it is 3056 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3057 * function if this is an unsolicited iocb. 3058 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3059 * to check it explicitly. 3060 */ 3061 int 3062 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3063 struct lpfc_sli_ring *pring, uint32_t mask) 3064 { 3065 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3066 IOCB_t *irsp = NULL; 3067 IOCB_t *entry = NULL; 3068 struct lpfc_iocbq *cmdiocbq = NULL; 3069 struct lpfc_iocbq rspiocbq; 3070 uint32_t status; 3071 uint32_t portRspPut, portRspMax; 3072 int rc = 1; 3073 lpfc_iocb_type type; 3074 unsigned long iflag; 3075 uint32_t rsp_cmpl = 0; 3076 3077 spin_lock_irqsave(&phba->hbalock, iflag); 3078 pring->stats.iocb_event++; 3079 3080 /* 3081 * The next available response entry should never exceed the maximum 3082 * entries. If it does, treat it as an adapter hardware error. 3083 */ 3084 portRspMax = pring->sli.sli3.numRiocb; 3085 portRspPut = le32_to_cpu(pgp->rspPutInx); 3086 if (unlikely(portRspPut >= portRspMax)) { 3087 lpfc_sli_rsp_pointers_error(phba, pring); 3088 spin_unlock_irqrestore(&phba->hbalock, iflag); 3089 return 1; 3090 } 3091 if (phba->fcp_ring_in_use) { 3092 spin_unlock_irqrestore(&phba->hbalock, iflag); 3093 return 1; 3094 } else 3095 phba->fcp_ring_in_use = 1; 3096 3097 rmb(); 3098 while (pring->sli.sli3.rspidx != portRspPut) { 3099 /* 3100 * Fetch an entry off the ring and copy it into a local data 3101 * structure. The copy involves a byte-swap since the 3102 * network byte order and pci byte orders are different. 3103 */ 3104 entry = lpfc_resp_iocb(phba, pring); 3105 phba->last_completion_time = jiffies; 3106 3107 if (++pring->sli.sli3.rspidx >= portRspMax) 3108 pring->sli.sli3.rspidx = 0; 3109 3110 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3111 (uint32_t *) &rspiocbq.iocb, 3112 phba->iocb_rsp_size); 3113 INIT_LIST_HEAD(&(rspiocbq.list)); 3114 irsp = &rspiocbq.iocb; 3115 3116 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3117 pring->stats.iocb_rsp++; 3118 rsp_cmpl++; 3119 3120 if (unlikely(irsp->ulpStatus)) { 3121 /* 3122 * If resource errors reported from HBA, reduce 3123 * queuedepths of the SCSI device. 
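			 *
			 * Concretely, the trigger tested below is
			 *
			 *	ulpStatus == IOSTAT_LOCAL_REJECT &&
			 *	(ulpWord[4] & IOERR_PARAM_MASK) ==
			 *					IOERR_NO_RESOURCES
			 *
			 * in which case lpfc_rampdown_queue_depth() is called
			 * with the hbalock dropped.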
3124 */ 3125 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3126 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3127 IOERR_NO_RESOURCES)) { 3128 spin_unlock_irqrestore(&phba->hbalock, iflag); 3129 phba->lpfc_rampdown_queue_depth(phba); 3130 spin_lock_irqsave(&phba->hbalock, iflag); 3131 } 3132 3133 /* Rsp ring <ringno> error: IOCB */ 3134 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3135 "0336 Rsp Ring %d error: IOCB Data: " 3136 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3137 pring->ringno, 3138 irsp->un.ulpWord[0], 3139 irsp->un.ulpWord[1], 3140 irsp->un.ulpWord[2], 3141 irsp->un.ulpWord[3], 3142 irsp->un.ulpWord[4], 3143 irsp->un.ulpWord[5], 3144 *(uint32_t *)&irsp->un1, 3145 *((uint32_t *)&irsp->un1 + 1)); 3146 } 3147 3148 switch (type) { 3149 case LPFC_ABORT_IOCB: 3150 case LPFC_SOL_IOCB: 3151 /* 3152 * Idle exchange closed via ABTS from port. No iocb 3153 * resources need to be recovered. 3154 */ 3155 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3156 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3157 "0333 IOCB cmd 0x%x" 3158 " processed. Skipping" 3159 " completion\n", 3160 irsp->ulpCommand); 3161 break; 3162 } 3163 3164 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3165 &rspiocbq); 3166 if (unlikely(!cmdiocbq)) 3167 break; 3168 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3169 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3170 if (cmdiocbq->iocb_cmpl) { 3171 spin_unlock_irqrestore(&phba->hbalock, iflag); 3172 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3173 &rspiocbq); 3174 spin_lock_irqsave(&phba->hbalock, iflag); 3175 } 3176 break; 3177 case LPFC_UNSOL_IOCB: 3178 spin_unlock_irqrestore(&phba->hbalock, iflag); 3179 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3180 spin_lock_irqsave(&phba->hbalock, iflag); 3181 break; 3182 default: 3183 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3184 char adaptermsg[LPFC_MAX_ADPTMSG]; 3185 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3186 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3187 MAX_MSG_DATA); 3188 dev_warn(&((phba->pcidev)->dev), 3189 "lpfc%d: %s\n", 3190 phba->brd_no, adaptermsg); 3191 } else { 3192 /* Unknown IOCB command */ 3193 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3194 "0334 Unknown IOCB command " 3195 "Data: x%x, x%x x%x x%x x%x\n", 3196 type, irsp->ulpCommand, 3197 irsp->ulpStatus, 3198 irsp->ulpIoTag, 3199 irsp->ulpContext); 3200 } 3201 break; 3202 } 3203 3204 /* 3205 * The response IOCB has been processed. Update the ring 3206 * pointer in SLIM. If the port response put pointer has not 3207 * been updated, sync the pgp->rspPutInx and fetch the new port 3208 * response put pointer. 
3209 */ 3210 writel(pring->sli.sli3.rspidx, 3211 &phba->host_gp[pring->ringno].rspGetInx); 3212 3213 if (pring->sli.sli3.rspidx == portRspPut) 3214 portRspPut = le32_to_cpu(pgp->rspPutInx); 3215 } 3216 3217 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3218 pring->stats.iocb_rsp_full++; 3219 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3220 writel(status, phba->CAregaddr); 3221 readl(phba->CAregaddr); 3222 } 3223 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3224 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3225 pring->stats.iocb_cmd_empty++; 3226 3227 /* Force update of the local copy of cmdGetInx */ 3228 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3229 lpfc_sli_resume_iocb(phba, pring); 3230 3231 if ((pring->lpfc_sli_cmd_available)) 3232 (pring->lpfc_sli_cmd_available) (phba, pring); 3233 3234 } 3235 3236 phba->fcp_ring_in_use = 0; 3237 spin_unlock_irqrestore(&phba->hbalock, iflag); 3238 return rc; 3239 } 3240 3241 /** 3242 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3243 * @phba: Pointer to HBA context object. 3244 * @pring: Pointer to driver SLI ring object. 3245 * @rspiocbp: Pointer to driver response IOCB object. 3246 * 3247 * This function is called from the worker thread when there is a slow-path 3248 * response IOCB to process. This function chains all the response iocbs until 3249 * seeing the iocb with the LE bit set. The function will call 3250 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3251 * completion of a command iocb. The function will call the 3252 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3253 * The function frees the resources or calls the completion handler if this 3254 * iocb is an abort completion. The function returns NULL when the response 3255 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3256 * this function shall chain the iocb on to the iocb_continueq and return the 3257 * response iocb passed in. 3258 **/ 3259 static struct lpfc_iocbq * 3260 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3261 struct lpfc_iocbq *rspiocbp) 3262 { 3263 struct lpfc_iocbq *saveq; 3264 struct lpfc_iocbq *cmdiocbp; 3265 struct lpfc_iocbq *next_iocb; 3266 IOCB_t *irsp = NULL; 3267 uint32_t free_saveq; 3268 uint8_t iocb_cmd_type; 3269 lpfc_iocb_type type; 3270 unsigned long iflag; 3271 int rc; 3272 3273 spin_lock_irqsave(&phba->hbalock, iflag); 3274 /* First add the response iocb to the countinueq list */ 3275 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3276 pring->iocb_continueq_cnt++; 3277 3278 /* Now, determine whether the list is completed for processing */ 3279 irsp = &rspiocbp->iocb; 3280 if (irsp->ulpLe) { 3281 /* 3282 * By default, the driver expects to free all resources 3283 * associated with this iocb completion. 3284 */ 3285 free_saveq = 1; 3286 saveq = list_get_first(&pring->iocb_continueq, 3287 struct lpfc_iocbq, list); 3288 irsp = &(saveq->iocb); 3289 list_del_init(&pring->iocb_continueq); 3290 pring->iocb_continueq_cnt = 0; 3291 3292 pring->stats.iocb_rsp++; 3293 3294 /* 3295 * If resource errors reported from HBA, reduce 3296 * queuedepths of the SCSI device. 
3297 */ 3298 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3299 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3300 IOERR_NO_RESOURCES)) { 3301 spin_unlock_irqrestore(&phba->hbalock, iflag); 3302 phba->lpfc_rampdown_queue_depth(phba); 3303 spin_lock_irqsave(&phba->hbalock, iflag); 3304 } 3305 3306 if (irsp->ulpStatus) { 3307 /* Rsp ring <ringno> error: IOCB */ 3308 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3309 "0328 Rsp Ring %d error: " 3310 "IOCB Data: " 3311 "x%x x%x x%x x%x " 3312 "x%x x%x x%x x%x " 3313 "x%x x%x x%x x%x " 3314 "x%x x%x x%x x%x\n", 3315 pring->ringno, 3316 irsp->un.ulpWord[0], 3317 irsp->un.ulpWord[1], 3318 irsp->un.ulpWord[2], 3319 irsp->un.ulpWord[3], 3320 irsp->un.ulpWord[4], 3321 irsp->un.ulpWord[5], 3322 *(((uint32_t *) irsp) + 6), 3323 *(((uint32_t *) irsp) + 7), 3324 *(((uint32_t *) irsp) + 8), 3325 *(((uint32_t *) irsp) + 9), 3326 *(((uint32_t *) irsp) + 10), 3327 *(((uint32_t *) irsp) + 11), 3328 *(((uint32_t *) irsp) + 12), 3329 *(((uint32_t *) irsp) + 13), 3330 *(((uint32_t *) irsp) + 14), 3331 *(((uint32_t *) irsp) + 15)); 3332 } 3333 3334 /* 3335 * Fetch the IOCB command type and call the correct completion 3336 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3337 * get freed back to the lpfc_iocb_list by the discovery 3338 * kernel thread. 3339 */ 3340 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3341 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3342 switch (type) { 3343 case LPFC_SOL_IOCB: 3344 spin_unlock_irqrestore(&phba->hbalock, iflag); 3345 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3346 spin_lock_irqsave(&phba->hbalock, iflag); 3347 break; 3348 3349 case LPFC_UNSOL_IOCB: 3350 spin_unlock_irqrestore(&phba->hbalock, iflag); 3351 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3352 spin_lock_irqsave(&phba->hbalock, iflag); 3353 if (!rc) 3354 free_saveq = 0; 3355 break; 3356 3357 case LPFC_ABORT_IOCB: 3358 cmdiocbp = NULL; 3359 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3360 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3361 saveq); 3362 if (cmdiocbp) { 3363 /* Call the specified completion routine */ 3364 if (cmdiocbp->iocb_cmpl) { 3365 spin_unlock_irqrestore(&phba->hbalock, 3366 iflag); 3367 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3368 saveq); 3369 spin_lock_irqsave(&phba->hbalock, 3370 iflag); 3371 } else 3372 __lpfc_sli_release_iocbq(phba, 3373 cmdiocbp); 3374 } 3375 break; 3376 3377 case LPFC_UNKNOWN_IOCB: 3378 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3379 char adaptermsg[LPFC_MAX_ADPTMSG]; 3380 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3381 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3382 MAX_MSG_DATA); 3383 dev_warn(&((phba->pcidev)->dev), 3384 "lpfc%d: %s\n", 3385 phba->brd_no, adaptermsg); 3386 } else { 3387 /* Unknown IOCB command */ 3388 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3389 "0335 Unknown IOCB " 3390 "command Data: x%x " 3391 "x%x x%x x%x\n", 3392 irsp->ulpCommand, 3393 irsp->ulpStatus, 3394 irsp->ulpIoTag, 3395 irsp->ulpContext); 3396 } 3397 break; 3398 } 3399 3400 if (free_saveq) { 3401 list_for_each_entry_safe(rspiocbp, next_iocb, 3402 &saveq->list, list) { 3403 list_del_init(&rspiocbp->list); 3404 __lpfc_sli_release_iocbq(phba, rspiocbp); 3405 } 3406 __lpfc_sli_release_iocbq(phba, saveq); 3407 } 3408 rspiocbp = NULL; 3409 } 3410 spin_unlock_irqrestore(&phba->hbalock, iflag); 3411 return rspiocbp; 3412 } 3413 3414 /** 3415 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3416 * @phba: Pointer to HBA context object. 3417 * @pring: Pointer to driver SLI ring object. 
3418 * @mask: Host attention register mask for this ring. 3419 * 3420 * This routine wraps the actual slow_ring event process routine from the 3421 * API jump table function pointer from the lpfc_hba struct. 3422 **/ 3423 void 3424 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3425 struct lpfc_sli_ring *pring, uint32_t mask) 3426 { 3427 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3428 } 3429 3430 /** 3431 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3432 * @phba: Pointer to HBA context object. 3433 * @pring: Pointer to driver SLI ring object. 3434 * @mask: Host attention register mask for this ring. 3435 * 3436 * This function is called from the worker thread when there is a ring event 3437 * for non-fcp rings. The caller does not hold any lock. The function will 3438 * remove each response iocb in the response ring and calls the handle 3439 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3440 **/ 3441 static void 3442 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3443 struct lpfc_sli_ring *pring, uint32_t mask) 3444 { 3445 struct lpfc_pgp *pgp; 3446 IOCB_t *entry; 3447 IOCB_t *irsp = NULL; 3448 struct lpfc_iocbq *rspiocbp = NULL; 3449 uint32_t portRspPut, portRspMax; 3450 unsigned long iflag; 3451 uint32_t status; 3452 3453 pgp = &phba->port_gp[pring->ringno]; 3454 spin_lock_irqsave(&phba->hbalock, iflag); 3455 pring->stats.iocb_event++; 3456 3457 /* 3458 * The next available response entry should never exceed the maximum 3459 * entries. If it does, treat it as an adapter hardware error. 3460 */ 3461 portRspMax = pring->sli.sli3.numRiocb; 3462 portRspPut = le32_to_cpu(pgp->rspPutInx); 3463 if (portRspPut >= portRspMax) { 3464 /* 3465 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3466 * rsp ring <portRspMax> 3467 */ 3468 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3469 "0303 Ring %d handler: portRspPut %d " 3470 "is bigger than rsp ring %d\n", 3471 pring->ringno, portRspPut, portRspMax); 3472 3473 phba->link_state = LPFC_HBA_ERROR; 3474 spin_unlock_irqrestore(&phba->hbalock, iflag); 3475 3476 phba->work_hs = HS_FFER3; 3477 lpfc_handle_eratt(phba); 3478 3479 return; 3480 } 3481 3482 rmb(); 3483 while (pring->sli.sli3.rspidx != portRspPut) { 3484 /* 3485 * Build a completion list and call the appropriate handler. 3486 * The process is to get the next available response iocb, get 3487 * a free iocb from the list, copy the response data into the 3488 * free iocb, insert to the continuation list, and update the 3489 * next response index to slim. This process makes response 3490 * iocb's in the ring available to DMA as fast as possible but 3491 * pays a penalty for a copy operation. Since the iocb is 3492 * only 32 bytes, this penalty is considered small relative to 3493 * the PCI reads for register values and a slim write. When 3494 * the ulpLe field is set, the entire Command has been 3495 * received. 3496 */ 3497 entry = lpfc_resp_iocb(phba, pring); 3498 3499 phba->last_completion_time = jiffies; 3500 rspiocbp = __lpfc_sli_get_iocbq(phba); 3501 if (rspiocbp == NULL) { 3502 printk(KERN_ERR "%s: out of buffers! 
Failing " 3503 "completion.\n", __func__); 3504 break; 3505 } 3506 3507 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3508 phba->iocb_rsp_size); 3509 irsp = &rspiocbp->iocb; 3510 3511 if (++pring->sli.sli3.rspidx >= portRspMax) 3512 pring->sli.sli3.rspidx = 0; 3513 3514 if (pring->ringno == LPFC_ELS_RING) { 3515 lpfc_debugfs_slow_ring_trc(phba, 3516 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3517 *(((uint32_t *) irsp) + 4), 3518 *(((uint32_t *) irsp) + 6), 3519 *(((uint32_t *) irsp) + 7)); 3520 } 3521 3522 writel(pring->sli.sli3.rspidx, 3523 &phba->host_gp[pring->ringno].rspGetInx); 3524 3525 spin_unlock_irqrestore(&phba->hbalock, iflag); 3526 /* Handle the response IOCB */ 3527 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3528 spin_lock_irqsave(&phba->hbalock, iflag); 3529 3530 /* 3531 * If the port response put pointer has not been updated, sync 3532 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3533 * response put pointer. 3534 */ 3535 if (pring->sli.sli3.rspidx == portRspPut) { 3536 portRspPut = le32_to_cpu(pgp->rspPutInx); 3537 } 3538 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3539 3540 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3541 /* At least one response entry has been freed */ 3542 pring->stats.iocb_rsp_full++; 3543 /* SET RxRE_RSP in Chip Att register */ 3544 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3545 writel(status, phba->CAregaddr); 3546 readl(phba->CAregaddr); /* flush */ 3547 } 3548 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3549 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3550 pring->stats.iocb_cmd_empty++; 3551 3552 /* Force update of the local copy of cmdGetInx */ 3553 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3554 lpfc_sli_resume_iocb(phba, pring); 3555 3556 if ((pring->lpfc_sli_cmd_available)) 3557 (pring->lpfc_sli_cmd_available) (phba, pring); 3558 3559 } 3560 3561 spin_unlock_irqrestore(&phba->hbalock, iflag); 3562 return; 3563 } 3564 3565 /** 3566 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3567 * @phba: Pointer to HBA context object. 3568 * @pring: Pointer to driver SLI ring object. 3569 * @mask: Host attention register mask for this ring. 3570 * 3571 * This function is called from the worker thread when there is a pending 3572 * ELS response iocb on the driver internal slow-path response iocb worker 3573 * queue. The caller does not hold any lock. The function will remove each 3574 * response iocb from the response worker queue and calls the handle 3575 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
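 * Completion entries (CQE_CODE_COMPL_WQE) are translated to response iocbs
 * by lpfc_sli4_els_wcqe_to_rspiocbq() and handed to
 * lpfc_sli_sp_handle_rspiocb(); receive entries (CQE_CODE_RECEIVE and
 * CQE_CODE_RECEIVE_V1) are handed to lpfc_sli4_handle_received_buffer().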
3576 **/ 3577 static void 3578 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3579 struct lpfc_sli_ring *pring, uint32_t mask) 3580 { 3581 struct lpfc_iocbq *irspiocbq; 3582 struct hbq_dmabuf *dmabuf; 3583 struct lpfc_cq_event *cq_event; 3584 unsigned long iflag; 3585 3586 spin_lock_irqsave(&phba->hbalock, iflag); 3587 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3588 spin_unlock_irqrestore(&phba->hbalock, iflag); 3589 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3590 /* Get the response iocb from the head of work queue */ 3591 spin_lock_irqsave(&phba->hbalock, iflag); 3592 list_remove_head(&phba->sli4_hba.sp_queue_event, 3593 cq_event, struct lpfc_cq_event, list); 3594 spin_unlock_irqrestore(&phba->hbalock, iflag); 3595 3596 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3597 case CQE_CODE_COMPL_WQE: 3598 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3599 cq_event); 3600 /* Translate ELS WCQE to response IOCBQ */ 3601 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3602 irspiocbq); 3603 if (irspiocbq) 3604 lpfc_sli_sp_handle_rspiocb(phba, pring, 3605 irspiocbq); 3606 break; 3607 case CQE_CODE_RECEIVE: 3608 case CQE_CODE_RECEIVE_V1: 3609 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3610 cq_event); 3611 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3612 break; 3613 default: 3614 break; 3615 } 3616 } 3617 } 3618 3619 /** 3620 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3621 * @phba: Pointer to HBA context object. 3622 * @pring: Pointer to driver SLI ring object. 3623 * 3624 * This function aborts all iocbs in the given ring and frees all the iocb 3625 * objects in txq. This function issues an abort iocb for all the iocb commands 3626 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3627 * the return of this function. The caller is not required to hold any locks. 3628 **/ 3629 void 3630 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3631 { 3632 LIST_HEAD(completions); 3633 struct lpfc_iocbq *iocb, *next_iocb; 3634 3635 if (pring->ringno == LPFC_ELS_RING) { 3636 lpfc_fabric_abort_hba(phba); 3637 } 3638 3639 /* Error everything on txq and txcmplq 3640 * First do the txq. 3641 */ 3642 if (phba->sli_rev >= LPFC_SLI_REV4) { 3643 spin_lock_irq(&pring->ring_lock); 3644 list_splice_init(&pring->txq, &completions); 3645 pring->txq_cnt = 0; 3646 spin_unlock_irq(&pring->ring_lock); 3647 3648 spin_lock_irq(&phba->hbalock); 3649 /* Next issue ABTS for everything on the txcmplq */ 3650 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3651 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3652 spin_unlock_irq(&phba->hbalock); 3653 } else { 3654 spin_lock_irq(&phba->hbalock); 3655 list_splice_init(&pring->txq, &completions); 3656 pring->txq_cnt = 0; 3657 3658 /* Next issue ABTS for everything on the txcmplq */ 3659 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3660 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3661 spin_unlock_irq(&phba->hbalock); 3662 } 3663 3664 /* Cancel all the IOCBs from the completions list */ 3665 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3666 IOERR_SLI_ABORTED); 3667 } 3668 3669 /** 3670 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring 3671 * @phba: Pointer to HBA context object. 3672 * @pring: Pointer to driver SLI ring object. 3673 * 3674 * This function aborts all iocbs in the given ring and frees all the iocb 3675 * objects in txq. 
This function issues an abort iocb for all the iocb commands 3676 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3677 * the return of this function. The caller is not required to hold any locks. 3678 **/ 3679 void 3680 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3681 { 3682 LIST_HEAD(completions); 3683 struct lpfc_iocbq *iocb, *next_iocb; 3684 3685 if (pring->ringno == LPFC_ELS_RING) 3686 lpfc_fabric_abort_hba(phba); 3687 3688 spin_lock_irq(&phba->hbalock); 3689 /* Next issue ABTS for everything on the txcmplq */ 3690 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3691 lpfc_sli4_abort_nvme_io(phba, pring, iocb); 3692 spin_unlock_irq(&phba->hbalock); 3693 } 3694 3695 3696 /** 3697 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3698 * @phba: Pointer to HBA context object. 3699 * @pring: Pointer to driver SLI ring object. 3700 * 3701 * This function aborts all iocbs in FCP rings and frees all the iocb 3702 * objects in txq. This function issues an abort iocb for all the iocb commands 3703 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3704 * the return of this function. The caller is not required to hold any locks. 3705 **/ 3706 void 3707 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3708 { 3709 struct lpfc_sli *psli = &phba->sli; 3710 struct lpfc_sli_ring *pring; 3711 uint32_t i; 3712 3713 /* Look on all the FCP Rings for the iotag */ 3714 if (phba->sli_rev >= LPFC_SLI_REV4) { 3715 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3716 pring = phba->sli4_hba.fcp_wq[i]->pring; 3717 lpfc_sli_abort_iocb_ring(phba, pring); 3718 } 3719 } else { 3720 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3721 lpfc_sli_abort_iocb_ring(phba, pring); 3722 } 3723 } 3724 3725 /** 3726 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings 3727 * @phba: Pointer to HBA context object. 3728 * 3729 * This function aborts all wqes in NVME rings. This function issues an 3730 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in 3731 * the txcmplq is not guaranteed to complete before the return of this 3732 * function. The caller is not required to hold any locks. 3733 **/ 3734 void 3735 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) 3736 { 3737 struct lpfc_sli_ring *pring; 3738 uint32_t i; 3739 3740 if (phba->sli_rev < LPFC_SLI_REV4) 3741 return; 3742 3743 /* Abort all IO on each NVME ring. */ 3744 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 3745 pring = phba->sli4_hba.nvme_wq[i]->pring; 3746 lpfc_sli_abort_wqe_ring(phba, pring); 3747 } 3748 } 3749 3750 3751 /** 3752 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3753 * @phba: Pointer to HBA context object. 3754 * 3755 * This function flushes all iocbs in the fcp ring and frees all the iocb 3756 * objects in txq and txcmplq. This function will not issue abort iocbs 3757 * for all the iocb commands in txcmplq, they will just be returned with 3758 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3759 * slot has been permanently disabled. 
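 * Under the relevant lock (the ring_lock for SLI4 rings, the hbalock for
 * the SLI3 FCP ring) the txq and txcmplq lists are spliced onto local lists
 * and every iocb on them is completed by lpfc_sli_cancel_iocbs() with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN.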
3760 **/ 3761 void 3762 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3763 { 3764 LIST_HEAD(txq); 3765 LIST_HEAD(txcmplq); 3766 struct lpfc_sli *psli = &phba->sli; 3767 struct lpfc_sli_ring *pring; 3768 uint32_t i; 3769 3770 spin_lock_irq(&phba->hbalock); 3771 /* Indicate the I/O queues are flushed */ 3772 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3773 spin_unlock_irq(&phba->hbalock); 3774 3775 /* Look on all the FCP Rings for the iotag */ 3776 if (phba->sli_rev >= LPFC_SLI_REV4) { 3777 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3778 pring = phba->sli4_hba.fcp_wq[i]->pring; 3779 3780 spin_lock_irq(&pring->ring_lock); 3781 /* Retrieve everything on txq */ 3782 list_splice_init(&pring->txq, &txq); 3783 /* Retrieve everything on the txcmplq */ 3784 list_splice_init(&pring->txcmplq, &txcmplq); 3785 pring->txq_cnt = 0; 3786 pring->txcmplq_cnt = 0; 3787 spin_unlock_irq(&pring->ring_lock); 3788 3789 /* Flush the txq */ 3790 lpfc_sli_cancel_iocbs(phba, &txq, 3791 IOSTAT_LOCAL_REJECT, 3792 IOERR_SLI_DOWN); 3793 /* Flush the txcmpq */ 3794 lpfc_sli_cancel_iocbs(phba, &txcmplq, 3795 IOSTAT_LOCAL_REJECT, 3796 IOERR_SLI_DOWN); 3797 } 3798 } else { 3799 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3800 3801 spin_lock_irq(&phba->hbalock); 3802 /* Retrieve everything on txq */ 3803 list_splice_init(&pring->txq, &txq); 3804 /* Retrieve everything on the txcmplq */ 3805 list_splice_init(&pring->txcmplq, &txcmplq); 3806 pring->txq_cnt = 0; 3807 pring->txcmplq_cnt = 0; 3808 spin_unlock_irq(&phba->hbalock); 3809 3810 /* Flush the txq */ 3811 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3812 IOERR_SLI_DOWN); 3813 /* Flush the txcmpq */ 3814 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3815 IOERR_SLI_DOWN); 3816 } 3817 } 3818 3819 /** 3820 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 3821 * @phba: Pointer to HBA context object. 3822 * 3823 * This function flushes all wqes in the nvme rings and frees all resources 3824 * in the txcmplq. This function does not issue abort wqes for the IO 3825 * commands in txcmplq, they will just be returned with 3826 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3827 * slot has been permanently disabled. 3828 **/ 3829 void 3830 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) 3831 { 3832 LIST_HEAD(txcmplq); 3833 struct lpfc_sli_ring *pring; 3834 uint32_t i; 3835 3836 if (phba->sli_rev < LPFC_SLI_REV4) 3837 return; 3838 3839 /* Hint to other driver operations that a flush is in progress. */ 3840 spin_lock_irq(&phba->hbalock); 3841 phba->hba_flag |= HBA_NVME_IOQ_FLUSH; 3842 spin_unlock_irq(&phba->hbalock); 3843 3844 /* Cycle through all NVME rings and complete each IO with 3845 * a local driver reason code. This is a flush so no 3846 * abort exchange to FW. 3847 */ 3848 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 3849 pring = phba->sli4_hba.nvme_wq[i]->pring; 3850 3851 /* Retrieve everything on the txcmplq */ 3852 spin_lock_irq(&pring->ring_lock); 3853 list_splice_init(&pring->txcmplq, &txcmplq); 3854 pring->txcmplq_cnt = 0; 3855 spin_unlock_irq(&pring->ring_lock); 3856 3857 /* Flush the txcmpq &&&PAE */ 3858 lpfc_sli_cancel_iocbs(phba, &txcmplq, 3859 IOSTAT_LOCAL_REJECT, 3860 IOERR_SLI_DOWN); 3861 } 3862 } 3863 3864 /** 3865 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3866 * @phba: Pointer to HBA context object. 3867 * @mask: Bit mask to be checked. 
3868 *
3869 * This function reads the host status register and compares
3870 * it with the provided bit mask to check if the HBA completed
3871 * the restart. This function will wait in a loop for the
3872 * HBA to complete restart. If the HBA does not restart within
3873 * 15 iterations, the function will reset the HBA again. The
3874 * function returns 1 when the HBA fails to restart; otherwise it
3875 * returns zero.
3876 **/
3877 static int
3878 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3879 {
3880 uint32_t status;
3881 int i = 0;
3882 int retval = 0;
3883 
3884 /* Read the HBA Host Status Register */
3885 if (lpfc_readl(phba->HSregaddr, &status))
3886 return 1;
3887 
3888 /*
3889 * Check status register every 100ms for 5 retries, then every
3890 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3891 * every 2.5 sec for 4.
3892 * Break out of the loop if errors occurred during init.
3893 */
3894 while (((status & mask) != mask) &&
3895 !(status & HS_FFERM) &&
3896 i++ < 20) {
3897 
3898 if (i <= 5)
3899 msleep(10);
3900 else if (i <= 10)
3901 msleep(500);
3902 else
3903 msleep(2500);
3904 
3905 if (i == 15) {
3906 /* Do post */
3907 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3908 lpfc_sli_brdrestart(phba);
3909 }
3910 /* Read the HBA Host Status Register */
3911 if (lpfc_readl(phba->HSregaddr, &status)) {
3912 retval = 1;
3913 break;
3914 }
3915 }
3916 
3917 /* Check to see if any errors occurred during init */
3918 if ((status & HS_FFERM) || (i >= 20)) {
3919 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3920 "2751 Adapter failed to restart, "
3921 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3922 status,
3923 readl(phba->MBslimaddr + 0xa8),
3924 readl(phba->MBslimaddr + 0xac));
3925 phba->link_state = LPFC_HBA_ERROR;
3926 retval = 1;
3927 }
3928 
3929 return retval;
3930 }
3931 
3932 /**
3933 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3934 * @phba: Pointer to HBA context object.
3935 * @mask: Bit mask to be checked.
3936 *
3937 * This function checks the host status register to determine if the HBA
3938 * is ready. This function will wait in a loop for the HBA to become ready.
3939 * If the HBA is not ready, the function will reset the HBA PCI
3940 * function again. The function returns 1 when the HBA fails to become
3941 * ready; otherwise it returns zero.
3942 **/
3943 static int
3944 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3945 {
3946 uint32_t status;
3947 int retval = 0;
3948 
3949 /* Read the HBA Host Status Register */
3950 status = lpfc_sli4_post_status_check(phba);
3951 
3952 if (status) {
3953 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3954 lpfc_sli_brdrestart(phba);
3955 status = lpfc_sli4_post_status_check(phba);
3956 }
3957 
3958 /* Check to see if any errors occurred during init */
3959 if (status) {
3960 phba->link_state = LPFC_HBA_ERROR;
3961 retval = 1;
3962 } else
3963 phba->sli4_hba.intr_enable = 0;
3964 
3965 return retval;
3966 }
3967 
3968 /**
3969 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
3970 * @phba: Pointer to HBA context object.
3971 * @mask: Bit mask to be checked.
3972 *
3973 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
3974 * from the API jump table function pointer in the lpfc_hba struct.
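 * Returns 0 when the HBA reports ready and 1 when it fails to become ready.
 * A hedged usage sketch (illustrative caller only, not code taken from this
 * file):
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		phba->link_state = LPFC_HBA_ERROR;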
3975 **/ 3976 int 3977 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3978 { 3979 return phba->lpfc_sli_brdready(phba, mask); 3980 } 3981 3982 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3983 3984 /** 3985 * lpfc_reset_barrier - Make HBA ready for HBA reset 3986 * @phba: Pointer to HBA context object. 3987 * 3988 * This function is called before resetting an HBA. This function is called 3989 * with hbalock held and requests HBA to quiesce DMAs before a reset. 3990 **/ 3991 void lpfc_reset_barrier(struct lpfc_hba *phba) 3992 { 3993 uint32_t __iomem *resp_buf; 3994 uint32_t __iomem *mbox_buf; 3995 volatile uint32_t mbox; 3996 uint32_t hc_copy, ha_copy, resp_data; 3997 int i; 3998 uint8_t hdrtype; 3999 4000 lockdep_assert_held(&phba->hbalock); 4001 4002 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4003 if (hdrtype != 0x80 || 4004 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4005 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4006 return; 4007 4008 /* 4009 * Tell the other part of the chip to suspend temporarily all 4010 * its DMA activity. 4011 */ 4012 resp_buf = phba->MBslimaddr; 4013 4014 /* Disable the error attention */ 4015 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4016 return; 4017 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4018 readl(phba->HCregaddr); /* flush */ 4019 phba->link_flag |= LS_IGNORE_ERATT; 4020 4021 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4022 return; 4023 if (ha_copy & HA_ERATT) { 4024 /* Clear Chip error bit */ 4025 writel(HA_ERATT, phba->HAregaddr); 4026 phba->pport->stopped = 1; 4027 } 4028 4029 mbox = 0; 4030 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4031 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4032 4033 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4034 mbox_buf = phba->MBslimaddr; 4035 writel(mbox, mbox_buf); 4036 4037 for (i = 0; i < 50; i++) { 4038 if (lpfc_readl((resp_buf + 1), &resp_data)) 4039 return; 4040 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4041 mdelay(1); 4042 else 4043 break; 4044 } 4045 resp_data = 0; 4046 if (lpfc_readl((resp_buf + 1), &resp_data)) 4047 return; 4048 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4049 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4050 phba->pport->stopped) 4051 goto restore_hc; 4052 else 4053 goto clear_errat; 4054 } 4055 4056 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4057 resp_data = 0; 4058 for (i = 0; i < 500; i++) { 4059 if (lpfc_readl(resp_buf, &resp_data)) 4060 return; 4061 if (resp_data != mbox) 4062 mdelay(1); 4063 else 4064 break; 4065 } 4066 4067 clear_errat: 4068 4069 while (++i < 500) { 4070 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4071 return; 4072 if (!(ha_copy & HA_ERATT)) 4073 mdelay(1); 4074 else 4075 break; 4076 } 4077 4078 if (readl(phba->HAregaddr) & HA_ERATT) { 4079 writel(HA_ERATT, phba->HAregaddr); 4080 phba->pport->stopped = 1; 4081 } 4082 4083 restore_hc: 4084 phba->link_flag &= ~LS_IGNORE_ERATT; 4085 writel(hc_copy, phba->HCregaddr); 4086 readl(phba->HCregaddr); /* flush */ 4087 } 4088 4089 /** 4090 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4091 * @phba: Pointer to HBA context object. 4092 * 4093 * This function issues a kill_board mailbox command and waits for 4094 * the error attention interrupt. This function is called for stopping 4095 * the firmware processing. The caller is not required to hold any 4096 * locks. This function calls lpfc_hba_down_post function to free 4097 * any pending commands after the kill. The function will return 1 when it 4098 * fails to kill the board else will return 0. 
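 * Internally the routine disables the error attention interrupt, issues
 * MBX_KILL_BOARD with MBX_NOWAIT, and then polls the host attention
 * register every 100ms for up to 3 seconds waiting for the ERATT bit
 * before treating the board state as undefined.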
4099 **/ 4100 int 4101 lpfc_sli_brdkill(struct lpfc_hba *phba) 4102 { 4103 struct lpfc_sli *psli; 4104 LPFC_MBOXQ_t *pmb; 4105 uint32_t status; 4106 uint32_t ha_copy; 4107 int retval; 4108 int i = 0; 4109 4110 psli = &phba->sli; 4111 4112 /* Kill HBA */ 4113 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4114 "0329 Kill HBA Data: x%x x%x\n", 4115 phba->pport->port_state, psli->sli_flag); 4116 4117 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4118 if (!pmb) 4119 return 1; 4120 4121 /* Disable the error attention */ 4122 spin_lock_irq(&phba->hbalock); 4123 if (lpfc_readl(phba->HCregaddr, &status)) { 4124 spin_unlock_irq(&phba->hbalock); 4125 mempool_free(pmb, phba->mbox_mem_pool); 4126 return 1; 4127 } 4128 status &= ~HC_ERINT_ENA; 4129 writel(status, phba->HCregaddr); 4130 readl(phba->HCregaddr); /* flush */ 4131 phba->link_flag |= LS_IGNORE_ERATT; 4132 spin_unlock_irq(&phba->hbalock); 4133 4134 lpfc_kill_board(phba, pmb); 4135 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4136 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4137 4138 if (retval != MBX_SUCCESS) { 4139 if (retval != MBX_BUSY) 4140 mempool_free(pmb, phba->mbox_mem_pool); 4141 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4142 "2752 KILL_BOARD command failed retval %d\n", 4143 retval); 4144 spin_lock_irq(&phba->hbalock); 4145 phba->link_flag &= ~LS_IGNORE_ERATT; 4146 spin_unlock_irq(&phba->hbalock); 4147 return 1; 4148 } 4149 4150 spin_lock_irq(&phba->hbalock); 4151 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4152 spin_unlock_irq(&phba->hbalock); 4153 4154 mempool_free(pmb, phba->mbox_mem_pool); 4155 4156 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4157 * attention every 100ms for 3 seconds. If we don't get ERATT after 4158 * 3 seconds we still set HBA_ERROR state because the status of the 4159 * board is now undefined. 4160 */ 4161 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4162 return 1; 4163 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4164 mdelay(100); 4165 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4166 return 1; 4167 } 4168 4169 del_timer_sync(&psli->mbox_tmo); 4170 if (ha_copy & HA_ERATT) { 4171 writel(HA_ERATT, phba->HAregaddr); 4172 phba->pport->stopped = 1; 4173 } 4174 spin_lock_irq(&phba->hbalock); 4175 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4176 psli->mbox_active = NULL; 4177 phba->link_flag &= ~LS_IGNORE_ERATT; 4178 spin_unlock_irq(&phba->hbalock); 4179 4180 lpfc_hba_down_post(phba); 4181 phba->link_state = LPFC_HBA_ERROR; 4182 4183 return ha_copy & HA_ERATT ? 0 : 1; 4184 } 4185 4186 /** 4187 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4188 * @phba: Pointer to HBA context object. 4189 * 4190 * This function resets the HBA by writing HC_INITFF to the control 4191 * register. After the HBA resets, this function resets all the iocb ring 4192 * indices. This function disables PCI layer parity checking during 4193 * the reset. 4194 * This function returns 0 always. 4195 * The caller is not required to hold any locks. 4196 **/ 4197 int 4198 lpfc_sli_brdreset(struct lpfc_hba *phba) 4199 { 4200 struct lpfc_sli *psli; 4201 struct lpfc_sli_ring *pring; 4202 uint16_t cfg_value; 4203 int i; 4204 4205 psli = &phba->sli; 4206 4207 /* Reset HBA */ 4208 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4209 "0325 Reset HBA Data: x%x x%x\n", 4210 (phba->pport) ? 
phba->pport->port_state : 0, 4211 psli->sli_flag); 4212 4213 /* perform board reset */ 4214 phba->fc_eventTag = 0; 4215 phba->link_events = 0; 4216 if (phba->pport) { 4217 phba->pport->fc_myDID = 0; 4218 phba->pport->fc_prevDID = 0; 4219 } 4220 4221 /* Turn off parity checking and serr during the physical reset */ 4222 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4223 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4224 (cfg_value & 4225 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4226 4227 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4228 4229 /* Now toggle INITFF bit in the Host Control Register */ 4230 writel(HC_INITFF, phba->HCregaddr); 4231 mdelay(1); 4232 readl(phba->HCregaddr); /* flush */ 4233 writel(0, phba->HCregaddr); 4234 readl(phba->HCregaddr); /* flush */ 4235 4236 /* Restore PCI cmd register */ 4237 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4238 4239 /* Initialize relevant SLI info */ 4240 for (i = 0; i < psli->num_rings; i++) { 4241 pring = &psli->sli3_ring[i]; 4242 pring->flag = 0; 4243 pring->sli.sli3.rspidx = 0; 4244 pring->sli.sli3.next_cmdidx = 0; 4245 pring->sli.sli3.local_getidx = 0; 4246 pring->sli.sli3.cmdidx = 0; 4247 pring->missbufcnt = 0; 4248 } 4249 4250 phba->link_state = LPFC_WARM_START; 4251 return 0; 4252 } 4253 4254 /** 4255 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4256 * @phba: Pointer to HBA context object. 4257 * 4258 * This function resets a SLI4 HBA. This function disables PCI layer parity 4259 * checking during resets the device. The caller is not required to hold 4260 * any locks. 4261 * 4262 * This function returns 0 always. 4263 **/ 4264 int 4265 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4266 { 4267 struct lpfc_sli *psli = &phba->sli; 4268 uint16_t cfg_value; 4269 int rc = 0; 4270 4271 /* Reset HBA */ 4272 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4273 "0295 Reset HBA Data: x%x x%x x%x\n", 4274 phba->pport->port_state, psli->sli_flag, 4275 phba->hba_flag); 4276 4277 /* perform board reset */ 4278 phba->fc_eventTag = 0; 4279 phba->link_events = 0; 4280 phba->pport->fc_myDID = 0; 4281 phba->pport->fc_prevDID = 0; 4282 4283 spin_lock_irq(&phba->hbalock); 4284 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4285 phba->fcf.fcf_flag = 0; 4286 spin_unlock_irq(&phba->hbalock); 4287 4288 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4289 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4290 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4291 return rc; 4292 } 4293 4294 /* Now physically reset the device */ 4295 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4296 "0389 Performing PCI function reset!\n"); 4297 4298 /* Turn off parity checking and serr during the physical reset */ 4299 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4300 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4301 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4302 4303 /* Perform FCoE PCI function reset before freeing queue memory */ 4304 rc = lpfc_pci_function_reset(phba); 4305 lpfc_sli4_queue_destroy(phba); 4306 4307 /* Restore PCI cmd register */ 4308 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4309 4310 return rc; 4311 } 4312 4313 /** 4314 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4315 * @phba: Pointer to HBA context object. 4316 * 4317 * This function is called in the SLI initialization code path to 4318 * restart the HBA. The caller is not required to hold any lock. 4319 * This function writes MBX_RESTART mailbox command to the SLIM and 4320 * resets the HBA. 
At the end of the function, it calls lpfc_hba_down_post 4321 * function to free any pending commands. The function enables 4322 * POST only during the first initialization. The function returns zero. 4323 * The function does not guarantee completion of MBX_RESTART mailbox 4324 * command before the return of this function. 4325 **/ 4326 static int 4327 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4328 { 4329 MAILBOX_t *mb; 4330 struct lpfc_sli *psli; 4331 volatile uint32_t word0; 4332 void __iomem *to_slim; 4333 uint32_t hba_aer_enabled; 4334 4335 spin_lock_irq(&phba->hbalock); 4336 4337 /* Take PCIe device Advanced Error Reporting (AER) state */ 4338 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4339 4340 psli = &phba->sli; 4341 4342 /* Restart HBA */ 4343 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4344 "0337 Restart HBA Data: x%x x%x\n", 4345 (phba->pport) ? phba->pport->port_state : 0, 4346 psli->sli_flag); 4347 4348 word0 = 0; 4349 mb = (MAILBOX_t *) &word0; 4350 mb->mbxCommand = MBX_RESTART; 4351 mb->mbxHc = 1; 4352 4353 lpfc_reset_barrier(phba); 4354 4355 to_slim = phba->MBslimaddr; 4356 writel(*(uint32_t *) mb, to_slim); 4357 readl(to_slim); /* flush */ 4358 4359 /* Only skip post after fc_ffinit is completed */ 4360 if (phba->pport && phba->pport->port_state) 4361 word0 = 1; /* This is really setting up word1 */ 4362 else 4363 word0 = 0; /* This is really setting up word1 */ 4364 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4365 writel(*(uint32_t *) mb, to_slim); 4366 readl(to_slim); /* flush */ 4367 4368 lpfc_sli_brdreset(phba); 4369 if (phba->pport) 4370 phba->pport->stopped = 0; 4371 phba->link_state = LPFC_INIT_START; 4372 phba->hba_flag = 0; 4373 spin_unlock_irq(&phba->hbalock); 4374 4375 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4376 psli->stats_start = get_seconds(); 4377 4378 /* Give the INITFF and Post time to settle. */ 4379 mdelay(100); 4380 4381 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4382 if (hba_aer_enabled) 4383 pci_disable_pcie_error_reporting(phba->pcidev); 4384 4385 lpfc_hba_down_post(phba); 4386 4387 return 0; 4388 } 4389 4390 /** 4391 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4392 * @phba: Pointer to HBA context object. 4393 * 4394 * This function is called in the SLI initialization code path to restart 4395 * a SLI4 HBA. The caller is not required to hold any lock. 4396 * At the end of the function, it calls lpfc_hba_down_post function to 4397 * free any pending commands. 
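 * The return value is the status of lpfc_sli4_brdreset(): zero on success,
 * or the nonzero status of the PCI function reset when that fails.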
4398 **/
4399 static int
4400 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4401 {
4402 struct lpfc_sli *psli = &phba->sli;
4403 uint32_t hba_aer_enabled;
4404 int rc;
4405 
4406 /* Restart HBA */
4407 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4408 "0296 Restart HBA Data: x%x x%x\n",
4409 phba->pport->port_state, psli->sli_flag);
4410 
4411 /* Take PCIe device Advanced Error Reporting (AER) state */
4412 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4413 
4414 rc = lpfc_sli4_brdreset(phba);
4415 
4416 spin_lock_irq(&phba->hbalock);
4417 phba->pport->stopped = 0;
4418 phba->link_state = LPFC_INIT_START;
4419 phba->hba_flag = 0;
4420 spin_unlock_irq(&phba->hbalock);
4421 
4422 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4423 psli->stats_start = get_seconds();
4424 
4425 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4426 if (hba_aer_enabled)
4427 pci_disable_pcie_error_reporting(phba->pcidev);
4428 
4429 lpfc_hba_down_post(phba);
4430 
4431 return rc;
4432 }
4433 
4434 /**
4435 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4436 * @phba: Pointer to HBA context object.
4437 *
4438 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4439 * API jump table function pointer in the lpfc_hba struct.
4440 **/
4441 int
4442 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4443 {
4444 return phba->lpfc_sli_brdrestart(phba);
4445 }
4446 
4447 /**
4448 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
4449 * @phba: Pointer to HBA context object.
4450 *
4451 * This function is called after an HBA restart to wait for successful
4452 * restart of the HBA. Successful restart of the HBA is indicated by
4453 * the HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after
4454 * 150 iterations, the function will restart the HBA once more. The function
4455 * returns zero if the HBA successfully restarted, else a negative error code.
4456 **/
4457 int
4458 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4459 {
4460 uint32_t status, i = 0;
4461 
4462 /* Read the HBA Host Status Register */
4463 if (lpfc_readl(phba->HSregaddr, &status))
4464 return -EIO;
4465 
4466 /* Check status register to see what current state is */
4467 i = 0;
4468 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4469 
4470 /* Check every 10ms for 10 retries, then every 100ms for 90
4471 * retries, then every 1 sec for 50 retries, for a total of
4472 * ~60 seconds, before resetting the board again and checking
4473 * every 1 sec for 50 more retries. The up-to-60-second wait
4474 * before the board is ready is required for the Falcon FIPS
4475 * zeroization to complete; any board reset in between would
4476 * restart zeroization and further delay board readiness.
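 * In total the loop allows up to 200 iterations before reporting a
 * timeout, and the single additional board restart is performed at
 * iteration 150.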
4477 */ 4478 if (i++ >= 200) { 4479 /* Adapter failed to init, timeout, status reg 4480 <status> */ 4481 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4482 "0436 Adapter failed to init, " 4483 "timeout, status reg x%x, " 4484 "FW Data: A8 x%x AC x%x\n", status, 4485 readl(phba->MBslimaddr + 0xa8), 4486 readl(phba->MBslimaddr + 0xac)); 4487 phba->link_state = LPFC_HBA_ERROR; 4488 return -ETIMEDOUT; 4489 } 4490 4491 /* Check to see if any errors occurred during init */ 4492 if (status & HS_FFERM) { 4493 /* ERROR: During chipset initialization */ 4494 /* Adapter failed to init, chipset, status reg 4495 <status> */ 4496 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4497 "0437 Adapter failed to init, " 4498 "chipset, status reg x%x, " 4499 "FW Data: A8 x%x AC x%x\n", status, 4500 readl(phba->MBslimaddr + 0xa8), 4501 readl(phba->MBslimaddr + 0xac)); 4502 phba->link_state = LPFC_HBA_ERROR; 4503 return -EIO; 4504 } 4505 4506 if (i <= 10) 4507 msleep(10); 4508 else if (i <= 100) 4509 msleep(100); 4510 else 4511 msleep(1000); 4512 4513 if (i == 150) { 4514 /* Do post */ 4515 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4516 lpfc_sli_brdrestart(phba); 4517 } 4518 /* Read the HBA Host Status Register */ 4519 if (lpfc_readl(phba->HSregaddr, &status)) 4520 return -EIO; 4521 } 4522 4523 /* Check to see if any errors occurred during init */ 4524 if (status & HS_FFERM) { 4525 /* ERROR: During chipset initialization */ 4526 /* Adapter failed to init, chipset, status reg <status> */ 4527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4528 "0438 Adapter failed to init, chipset, " 4529 "status reg x%x, " 4530 "FW Data: A8 x%x AC x%x\n", status, 4531 readl(phba->MBslimaddr + 0xa8), 4532 readl(phba->MBslimaddr + 0xac)); 4533 phba->link_state = LPFC_HBA_ERROR; 4534 return -EIO; 4535 } 4536 4537 /* Clear all interrupt enable conditions */ 4538 writel(0, phba->HCregaddr); 4539 readl(phba->HCregaddr); /* flush */ 4540 4541 /* setup host attn register */ 4542 writel(0xffffffff, phba->HAregaddr); 4543 readl(phba->HAregaddr); /* flush */ 4544 return 0; 4545 } 4546 4547 /** 4548 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4549 * 4550 * This function calculates and returns the number of HBQs required to be 4551 * configured. 4552 **/ 4553 int 4554 lpfc_sli_hbq_count(void) 4555 { 4556 return ARRAY_SIZE(lpfc_hbq_defs); 4557 } 4558 4559 /** 4560 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4561 * 4562 * This function adds the number of hbq entries in every HBQ to get 4563 * the total number of hbq entries required for the HBA and returns 4564 * the total count. 4565 **/ 4566 static int 4567 lpfc_sli_hbq_entry_count(void) 4568 { 4569 int hbq_count = lpfc_sli_hbq_count(); 4570 int count = 0; 4571 int i; 4572 4573 for (i = 0; i < hbq_count; ++i) 4574 count += lpfc_hbq_defs[i]->entry_count; 4575 return count; 4576 } 4577 4578 /** 4579 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4580 * 4581 * This function calculates amount of memory required for all hbq entries 4582 * to be configured and returns the total memory required. 4583 **/ 4584 int 4585 lpfc_sli_hbq_size(void) 4586 { 4587 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4588 } 4589 4590 /** 4591 * lpfc_sli_hbq_setup - configure and initialize HBQs 4592 * @phba: Pointer to HBA context object. 4593 * 4594 * This function is called during the SLI initialization to configure 4595 * all the HBQs and post buffers to the HBQ. The caller is not 4596 * required to hold any locks. 
This function will return zero if successful 4597 * else it will return negative error code. 4598 **/ 4599 static int 4600 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4601 { 4602 int hbq_count = lpfc_sli_hbq_count(); 4603 LPFC_MBOXQ_t *pmb; 4604 MAILBOX_t *pmbox; 4605 uint32_t hbqno; 4606 uint32_t hbq_entry_index; 4607 4608 /* Get a Mailbox buffer to setup mailbox 4609 * commands for HBA initialization 4610 */ 4611 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4612 4613 if (!pmb) 4614 return -ENOMEM; 4615 4616 pmbox = &pmb->u.mb; 4617 4618 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4619 phba->link_state = LPFC_INIT_MBX_CMDS; 4620 phba->hbq_in_use = 1; 4621 4622 hbq_entry_index = 0; 4623 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4624 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4625 phba->hbqs[hbqno].hbqPutIdx = 0; 4626 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4627 phba->hbqs[hbqno].entry_count = 4628 lpfc_hbq_defs[hbqno]->entry_count; 4629 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4630 hbq_entry_index, pmb); 4631 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4632 4633 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4634 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4635 mbxStatus <status>, ring <num> */ 4636 4637 lpfc_printf_log(phba, KERN_ERR, 4638 LOG_SLI | LOG_VPORT, 4639 "1805 Adapter failed to init. " 4640 "Data: x%x x%x x%x\n", 4641 pmbox->mbxCommand, 4642 pmbox->mbxStatus, hbqno); 4643 4644 phba->link_state = LPFC_HBA_ERROR; 4645 mempool_free(pmb, phba->mbox_mem_pool); 4646 return -ENXIO; 4647 } 4648 } 4649 phba->hbq_count = hbq_count; 4650 4651 mempool_free(pmb, phba->mbox_mem_pool); 4652 4653 /* Initially populate or replenish the HBQs */ 4654 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4655 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4656 return 0; 4657 } 4658 4659 /** 4660 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4661 * @phba: Pointer to HBA context object. 4662 * 4663 * This function is called during the SLI initialization to configure 4664 * all the HBQs and post buffers to the HBQ. The caller is not 4665 * required to hold any locks. This function will return zero if successful 4666 * else it will return negative error code. 4667 **/ 4668 static int 4669 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4670 { 4671 phba->hbq_in_use = 1; 4672 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4673 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4674 phba->hbq_count = 1; 4675 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4676 /* Initially populate or replenish the HBQs */ 4677 return 0; 4678 } 4679 4680 /** 4681 * lpfc_sli_config_port - Issue config port mailbox command 4682 * @phba: Pointer to HBA context object. 4683 * @sli_mode: sli mode - 2/3 4684 * 4685 * This function is called by the sli initialization code path 4686 * to issue config_port mailbox command. This function restarts the 4687 * HBA firmware and issues a config_port mailbox command to configure 4688 * the SLI interface in the sli mode specified by sli_mode 4689 * variable. The caller is not required to hold any locks. 4690 * The function returns 0 if successful, else returns negative error 4691 * code. 
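 * On success the routine also records which optional SLI-3 features the
 * port granted (NPIV, HBQs, CRP, BlockGuard, DSS) in phba->sli3_options.
 * A hedged usage sketch (illustrative only; lpfc_sli_hba_setup() below is
 * the real caller):
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		goto lpfc_sli_hba_setup_error;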
4692 **/ 4693 int 4694 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4695 { 4696 LPFC_MBOXQ_t *pmb; 4697 uint32_t resetcount = 0, rc = 0, done = 0; 4698 4699 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4700 if (!pmb) { 4701 phba->link_state = LPFC_HBA_ERROR; 4702 return -ENOMEM; 4703 } 4704 4705 phba->sli_rev = sli_mode; 4706 while (resetcount < 2 && !done) { 4707 spin_lock_irq(&phba->hbalock); 4708 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4709 spin_unlock_irq(&phba->hbalock); 4710 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4711 lpfc_sli_brdrestart(phba); 4712 rc = lpfc_sli_chipset_init(phba); 4713 if (rc) 4714 break; 4715 4716 spin_lock_irq(&phba->hbalock); 4717 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4718 spin_unlock_irq(&phba->hbalock); 4719 resetcount++; 4720 4721 /* Call pre CONFIG_PORT mailbox command initialization. A 4722 * value of 0 means the call was successful. Any other 4723 * nonzero value is a failure, but if ERESTART is returned, 4724 * the driver may reset the HBA and try again. 4725 */ 4726 rc = lpfc_config_port_prep(phba); 4727 if (rc == -ERESTART) { 4728 phba->link_state = LPFC_LINK_UNKNOWN; 4729 continue; 4730 } else if (rc) 4731 break; 4732 4733 phba->link_state = LPFC_INIT_MBX_CMDS; 4734 lpfc_config_port(phba, pmb); 4735 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4736 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4737 LPFC_SLI3_HBQ_ENABLED | 4738 LPFC_SLI3_CRP_ENABLED | 4739 LPFC_SLI3_BG_ENABLED | 4740 LPFC_SLI3_DSS_ENABLED); 4741 if (rc != MBX_SUCCESS) { 4742 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4743 "0442 Adapter failed to init, mbxCmd x%x " 4744 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4745 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4746 spin_lock_irq(&phba->hbalock); 4747 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4748 spin_unlock_irq(&phba->hbalock); 4749 rc = -ENXIO; 4750 } else { 4751 /* Allow asynchronous mailbox command to go through */ 4752 spin_lock_irq(&phba->hbalock); 4753 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4754 spin_unlock_irq(&phba->hbalock); 4755 done = 1; 4756 4757 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4758 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4759 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4760 "3110 Port did not grant ASABT\n"); 4761 } 4762 } 4763 if (!done) { 4764 rc = -EINVAL; 4765 goto do_prep_failed; 4766 } 4767 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4768 if (!pmb->u.mb.un.varCfgPort.cMA) { 4769 rc = -ENXIO; 4770 goto do_prep_failed; 4771 } 4772 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4773 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4774 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4775 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4776 phba->max_vpi : phba->max_vports; 4777 4778 } else 4779 phba->max_vpi = 0; 4780 phba->fips_level = 0; 4781 phba->fips_spec_rev = 0; 4782 if (pmb->u.mb.un.varCfgPort.gdss) { 4783 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4784 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4785 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4786 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4787 "2850 Security Crypto Active. 
FIPS x%d " 4788 "(Spec Rev: x%d)", 4789 phba->fips_level, phba->fips_spec_rev); 4790 } 4791 if (pmb->u.mb.un.varCfgPort.sec_err) { 4792 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4793 "2856 Config Port Security Crypto " 4794 "Error: x%x ", 4795 pmb->u.mb.un.varCfgPort.sec_err); 4796 } 4797 if (pmb->u.mb.un.varCfgPort.gerbm) 4798 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4799 if (pmb->u.mb.un.varCfgPort.gcrp) 4800 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4801 4802 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4803 phba->port_gp = phba->mbox->us.s3_pgp.port; 4804 4805 if (phba->cfg_enable_bg) { 4806 if (pmb->u.mb.un.varCfgPort.gbg) 4807 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4808 else 4809 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4810 "0443 Adapter did not grant " 4811 "BlockGuard\n"); 4812 } 4813 } else { 4814 phba->hbq_get = NULL; 4815 phba->port_gp = phba->mbox->us.s2.port; 4816 phba->max_vpi = 0; 4817 } 4818 do_prep_failed: 4819 mempool_free(pmb, phba->mbox_mem_pool); 4820 return rc; 4821 } 4822 4823 4824 /** 4825 * lpfc_sli_hba_setup - SLI initialization function 4826 * @phba: Pointer to HBA context object. 4827 * 4828 * This function is the main SLI initialization function. This function 4829 * is called by the HBA initialization code, HBA reset code and HBA 4830 * error attention handler code. Caller is not required to hold any 4831 * locks. This function issues config_port mailbox command to configure 4832 * the SLI, setup iocb rings and HBQ rings. In the end the function 4833 * calls the config_port_post function to issue init_link mailbox 4834 * command and to start the discovery. The function will return zero 4835 * if successful, else it will return negative error code. 4836 **/ 4837 int 4838 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4839 { 4840 uint32_t rc; 4841 int mode = 3, i; 4842 int longs; 4843 4844 switch (phba->cfg_sli_mode) { 4845 case 2: 4846 if (phba->cfg_enable_npiv) { 4847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4848 "1824 NPIV enabled: Override sli_mode " 4849 "parameter (%d) to auto (0).\n", 4850 phba->cfg_sli_mode); 4851 break; 4852 } 4853 mode = 2; 4854 break; 4855 case 0: 4856 case 3: 4857 break; 4858 default: 4859 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4860 "1819 Unrecognized sli_mode parameter: %d.\n", 4861 phba->cfg_sli_mode); 4862 4863 break; 4864 } 4865 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 4866 4867 rc = lpfc_sli_config_port(phba, mode); 4868 4869 if (rc && phba->cfg_sli_mode == 3) 4870 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4871 "1820 Unable to select SLI-3. 
" 4872 "Not supported by adapter.\n"); 4873 if (rc && mode != 2) 4874 rc = lpfc_sli_config_port(phba, 2); 4875 else if (rc && mode == 2) 4876 rc = lpfc_sli_config_port(phba, 3); 4877 if (rc) 4878 goto lpfc_sli_hba_setup_error; 4879 4880 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4881 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4882 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4883 if (!rc) { 4884 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4885 "2709 This device supports " 4886 "Advanced Error Reporting (AER)\n"); 4887 spin_lock_irq(&phba->hbalock); 4888 phba->hba_flag |= HBA_AER_ENABLED; 4889 spin_unlock_irq(&phba->hbalock); 4890 } else { 4891 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4892 "2708 This device does not support " 4893 "Advanced Error Reporting (AER): %d\n", 4894 rc); 4895 phba->cfg_aer_support = 0; 4896 } 4897 } 4898 4899 if (phba->sli_rev == 3) { 4900 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4901 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4902 } else { 4903 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4904 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4905 phba->sli3_options = 0; 4906 } 4907 4908 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4909 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4910 phba->sli_rev, phba->max_vpi); 4911 rc = lpfc_sli_ring_map(phba); 4912 4913 if (rc) 4914 goto lpfc_sli_hba_setup_error; 4915 4916 /* Initialize VPIs. */ 4917 if (phba->sli_rev == LPFC_SLI_REV3) { 4918 /* 4919 * The VPI bitmask and physical ID array are allocated 4920 * and initialized once only - at driver load. A port 4921 * reset doesn't need to reinitialize this memory. 4922 */ 4923 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4924 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4925 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4926 GFP_KERNEL); 4927 if (!phba->vpi_bmask) { 4928 rc = -ENOMEM; 4929 goto lpfc_sli_hba_setup_error; 4930 } 4931 4932 phba->vpi_ids = kzalloc( 4933 (phba->max_vpi+1) * sizeof(uint16_t), 4934 GFP_KERNEL); 4935 if (!phba->vpi_ids) { 4936 kfree(phba->vpi_bmask); 4937 rc = -ENOMEM; 4938 goto lpfc_sli_hba_setup_error; 4939 } 4940 for (i = 0; i < phba->max_vpi; i++) 4941 phba->vpi_ids[i] = i; 4942 } 4943 } 4944 4945 /* Init HBQs */ 4946 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4947 rc = lpfc_sli_hbq_setup(phba); 4948 if (rc) 4949 goto lpfc_sli_hba_setup_error; 4950 } 4951 spin_lock_irq(&phba->hbalock); 4952 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4953 spin_unlock_irq(&phba->hbalock); 4954 4955 rc = lpfc_config_port_post(phba); 4956 if (rc) 4957 goto lpfc_sli_hba_setup_error; 4958 4959 return rc; 4960 4961 lpfc_sli_hba_setup_error: 4962 phba->link_state = LPFC_HBA_ERROR; 4963 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4964 "0445 Firmware initialization failed\n"); 4965 return rc; 4966 } 4967 4968 /** 4969 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4970 * @phba: Pointer to HBA context object. 4971 * @mboxq: mailbox pointer. 4972 * This function issue a dump mailbox command to read config region 4973 * 23 and parse the records in the region and populate driver 4974 * data structure. 
4975 **/ 4976 static int 4977 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4978 { 4979 LPFC_MBOXQ_t *mboxq; 4980 struct lpfc_dmabuf *mp; 4981 struct lpfc_mqe *mqe; 4982 uint32_t data_length; 4983 int rc; 4984 4985 /* Program the default value of vlan_id and fc_map */ 4986 phba->valid_vlan = 0; 4987 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4988 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4989 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4990 4991 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4992 if (!mboxq) 4993 return -ENOMEM; 4994 4995 mqe = &mboxq->u.mqe; 4996 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4997 rc = -ENOMEM; 4998 goto out_free_mboxq; 4999 } 5000 5001 mp = (struct lpfc_dmabuf *) mboxq->context1; 5002 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5003 5004 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5005 "(%d):2571 Mailbox cmd x%x Status x%x " 5006 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5007 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5008 "CQ: x%x x%x x%x x%x\n", 5009 mboxq->vport ? mboxq->vport->vpi : 0, 5010 bf_get(lpfc_mqe_command, mqe), 5011 bf_get(lpfc_mqe_status, mqe), 5012 mqe->un.mb_words[0], mqe->un.mb_words[1], 5013 mqe->un.mb_words[2], mqe->un.mb_words[3], 5014 mqe->un.mb_words[4], mqe->un.mb_words[5], 5015 mqe->un.mb_words[6], mqe->un.mb_words[7], 5016 mqe->un.mb_words[8], mqe->un.mb_words[9], 5017 mqe->un.mb_words[10], mqe->un.mb_words[11], 5018 mqe->un.mb_words[12], mqe->un.mb_words[13], 5019 mqe->un.mb_words[14], mqe->un.mb_words[15], 5020 mqe->un.mb_words[16], mqe->un.mb_words[50], 5021 mboxq->mcqe.word0, 5022 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5023 mboxq->mcqe.trailer); 5024 5025 if (rc) { 5026 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5027 kfree(mp); 5028 rc = -EIO; 5029 goto out_free_mboxq; 5030 } 5031 data_length = mqe->un.mb_words[5]; 5032 if (data_length > DMP_RGN23_SIZE) { 5033 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5034 kfree(mp); 5035 rc = -EIO; 5036 goto out_free_mboxq; 5037 } 5038 5039 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5040 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5041 kfree(mp); 5042 rc = 0; 5043 5044 out_free_mboxq: 5045 mempool_free(mboxq, phba->mbox_mem_pool); 5046 return rc; 5047 } 5048 5049 /** 5050 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5051 * @phba: pointer to lpfc hba data structure. 5052 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5053 * @vpd: pointer to the memory to hold resulting port vpd data. 5054 * @vpd_size: On input, the number of bytes allocated to @vpd. 5055 * On output, the number of data bytes in @vpd. 5056 * 5057 * This routine executes a READ_REV SLI4 mailbox command. In 5058 * addition, this routine gets the port vpd data. 5059 * 5060 * Return codes 5061 * 0 - successful 5062 * -ENOMEM - could not allocated memory. 5063 **/ 5064 static int 5065 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5066 uint8_t *vpd, uint32_t *vpd_size) 5067 { 5068 int rc = 0; 5069 uint32_t dma_size; 5070 struct lpfc_dmabuf *dmabuf; 5071 struct lpfc_mqe *mqe; 5072 5073 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5074 if (!dmabuf) 5075 return -ENOMEM; 5076 5077 /* 5078 * Get a DMA buffer for the vpd data resulting from the READ_REV 5079 * mailbox command. 
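 * The buffer is sized from the caller's *vpd_size and is freed again on
 * both the success and the error path before this routine returns.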
5080 */ 5081 dma_size = *vpd_size; 5082 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5083 &dmabuf->phys, GFP_KERNEL); 5084 if (!dmabuf->virt) { 5085 kfree(dmabuf); 5086 return -ENOMEM; 5087 } 5088 5089 /* 5090 * The SLI4 implementation of READ_REV conflicts at word1, 5091 * bits 31:16 and SLI4 adds vpd functionality not present 5092 * in SLI3. This code corrects the conflicts. 5093 */ 5094 lpfc_read_rev(phba, mboxq); 5095 mqe = &mboxq->u.mqe; 5096 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5097 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5098 mqe->un.read_rev.word1 &= 0x0000FFFF; 5099 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5100 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5101 5102 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5103 if (rc) { 5104 dma_free_coherent(&phba->pcidev->dev, dma_size, 5105 dmabuf->virt, dmabuf->phys); 5106 kfree(dmabuf); 5107 return -EIO; 5108 } 5109 5110 /* 5111 * The available vpd length cannot be bigger than the 5112 * DMA buffer passed to the port. Catch the less than 5113 * case and update the caller's size. 5114 */ 5115 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5116 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5117 5118 memcpy(vpd, dmabuf->virt, *vpd_size); 5119 5120 dma_free_coherent(&phba->pcidev->dev, dma_size, 5121 dmabuf->virt, dmabuf->phys); 5122 kfree(dmabuf); 5123 return 0; 5124 } 5125 5126 /** 5127 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5128 * @phba: pointer to lpfc hba data structure. 5129 * 5130 * This routine retrieves SLI4 device physical port name this PCI function 5131 * is attached to. 5132 * 5133 * Return codes 5134 * 0 - successful 5135 * otherwise - failed to retrieve physical port name 5136 **/ 5137 static int 5138 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5139 { 5140 LPFC_MBOXQ_t *mboxq; 5141 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5142 struct lpfc_controller_attribute *cntl_attr; 5143 struct lpfc_mbx_get_port_name *get_port_name; 5144 void *virtaddr = NULL; 5145 uint32_t alloclen, reqlen; 5146 uint32_t shdr_status, shdr_add_status; 5147 union lpfc_sli4_cfg_shdr *shdr; 5148 char cport_name = 0; 5149 int rc; 5150 5151 /* We assume nothing at this point */ 5152 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5153 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5154 5155 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5156 if (!mboxq) 5157 return -ENOMEM; 5158 /* obtain link type and link number via READ_CONFIG */ 5159 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5160 lpfc_sli4_read_config(phba); 5161 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5162 goto retrieve_ppname; 5163 5164 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5165 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5166 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5167 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5168 LPFC_SLI4_MBX_NEMBED); 5169 if (alloclen < reqlen) { 5170 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5171 "3084 Allocated DMA memory size (%d) is " 5172 "less than the requested DMA memory size " 5173 "(%d)\n", alloclen, reqlen); 5174 rc = -ENOMEM; 5175 goto out_free_mboxq; 5176 } 5177 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5178 virtaddr = mboxq->sge_array->addr[0]; 5179 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5180 shdr = &mbx_cntl_attr->cfg_shdr; 5181 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 5182 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5183 if (shdr_status || shdr_add_status || rc) { 5184 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5185 "3085 Mailbox x%x (x%x/x%x) failed, " 5186 "rc:x%x, status:x%x, add_status:x%x\n", 5187 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5188 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5189 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5190 rc, shdr_status, shdr_add_status); 5191 rc = -ENXIO; 5192 goto out_free_mboxq; 5193 } 5194 cntl_attr = &mbx_cntl_attr->cntl_attr; 5195 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5196 phba->sli4_hba.lnk_info.lnk_tp = 5197 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5198 phba->sli4_hba.lnk_info.lnk_no = 5199 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5200 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5201 "3086 lnk_type:%d, lnk_numb:%d\n", 5202 phba->sli4_hba.lnk_info.lnk_tp, 5203 phba->sli4_hba.lnk_info.lnk_no); 5204 5205 retrieve_ppname: 5206 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5207 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5208 sizeof(struct lpfc_mbx_get_port_name) - 5209 sizeof(struct lpfc_sli4_cfg_mhdr), 5210 LPFC_SLI4_MBX_EMBED); 5211 get_port_name = &mboxq->u.mqe.un.get_port_name; 5212 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5213 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5214 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5215 phba->sli4_hba.lnk_info.lnk_tp); 5216 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5217 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5218 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5219 if (shdr_status || shdr_add_status || rc) { 5220 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5221 "3087 Mailbox x%x (x%x/x%x) failed: " 5222 "rc:x%x, status:x%x, add_status:x%x\n", 5223 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5224 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5225 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5226 rc, shdr_status, shdr_add_status); 5227 rc = -ENXIO; 5228 goto out_free_mboxq; 5229 } 5230 switch (phba->sli4_hba.lnk_info.lnk_no) { 5231 case LPFC_LINK_NUMBER_0: 5232 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 5233 &get_port_name->u.response); 5234 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5235 break; 5236 case LPFC_LINK_NUMBER_1: 5237 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5238 &get_port_name->u.response); 5239 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5240 break; 5241 case LPFC_LINK_NUMBER_2: 5242 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5243 &get_port_name->u.response); 5244 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5245 break; 5246 case LPFC_LINK_NUMBER_3: 5247 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5248 &get_port_name->u.response); 5249 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5250 break; 5251 default: 5252 break; 5253 } 5254 5255 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5256 phba->Port[0] = cport_name; 5257 phba->Port[1] = '\0'; 5258 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5259 "3091 SLI get port name: %s\n", phba->Port); 5260 } 5261 5262 out_free_mboxq: 5263 if (rc != MBX_TIMEOUT) { 5264 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5265 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5266 else 5267 mempool_free(mboxq, phba->mbox_mem_pool); 5268 } 5269 return rc; 5270 } 5271 5272 /** 5273 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device 
completion and event queues 5274 * @phba: pointer to lpfc hba data structure. 5275 * 5276 * This routine is called to explicitly arm the SLI4 device's completion and 5277 * event queues. 5278 **/ 5279 static void 5280 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5281 { 5282 int qidx; 5283 5284 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 5285 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 5286 if (phba->sli4_hba.nvmels_cq) 5287 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq, 5288 LPFC_QUEUE_REARM); 5289 5290 if (phba->sli4_hba.fcp_cq) 5291 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 5292 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx], 5293 LPFC_QUEUE_REARM); 5294 5295 if (phba->sli4_hba.nvme_cq) 5296 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 5297 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx], 5298 LPFC_QUEUE_REARM); 5299 5300 if (phba->cfg_fof) 5301 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 5302 5303 if (phba->sli4_hba.hba_eq) 5304 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 5305 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx], 5306 LPFC_QUEUE_REARM); 5307 5308 if (phba->nvmet_support) { 5309 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5310 lpfc_sli4_cq_release( 5311 phba->sli4_hba.nvmet_cqset[qidx], 5312 LPFC_QUEUE_REARM); 5313 } 5314 } 5315 5316 if (phba->cfg_fof) 5317 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM); 5318 } 5319 5320 /** 5321 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5322 * @phba: Pointer to HBA context object. 5323 * @type: The resource extent type. 5324 * @extnt_count: buffer to hold port available extent count. 5325 * @extnt_size: buffer to hold element count per extent. 5326 * 5327 * This function calls the port and retrieves the number of available 5328 * extents and their size for a particular extent type. 5329 * 5330 * Returns: 0 if successful. Nonzero otherwise. 5331 **/ 5332 int 5333 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5334 uint16_t *extnt_count, uint16_t *extnt_size) 5335 { 5336 int rc = 0; 5337 uint32_t length; 5338 uint32_t mbox_tmo; 5339 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5340 LPFC_MBOXQ_t *mbox; 5341 5342 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5343 if (!mbox) 5344 return -ENOMEM; 5345 5346 /* Find out how many extents are available for this resource type */ 5347 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5348 sizeof(struct lpfc_sli4_cfg_mhdr)); 5349 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5350 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5351 length, LPFC_SLI4_MBX_EMBED); 5352 5353 /* Send an extents count of 0 - the GET doesn't use it.
*/ 5354 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5355 LPFC_SLI4_MBX_EMBED); 5356 if (unlikely(rc)) { 5357 rc = -EIO; 5358 goto err_exit; 5359 } 5360 5361 if (!phba->sli4_hba.intr_enable) 5362 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5363 else { 5364 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5365 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5366 } 5367 if (unlikely(rc)) { 5368 rc = -EIO; 5369 goto err_exit; 5370 } 5371 5372 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5373 if (bf_get(lpfc_mbox_hdr_status, 5374 &rsrc_info->header.cfg_shdr.response)) { 5375 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5376 "2930 Failed to get resource extents " 5377 "Status 0x%x Add'l Status 0x%x\n", 5378 bf_get(lpfc_mbox_hdr_status, 5379 &rsrc_info->header.cfg_shdr.response), 5380 bf_get(lpfc_mbox_hdr_add_status, 5381 &rsrc_info->header.cfg_shdr.response)); 5382 rc = -EIO; 5383 goto err_exit; 5384 } 5385 5386 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5387 &rsrc_info->u.rsp); 5388 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5389 &rsrc_info->u.rsp); 5390 5391 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5392 "3162 Retrieved extents type-%d from port: count:%d, " 5393 "size:%d\n", type, *extnt_count, *extnt_size); 5394 5395 err_exit: 5396 mempool_free(mbox, phba->mbox_mem_pool); 5397 return rc; 5398 } 5399 5400 /** 5401 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5402 * @phba: Pointer to HBA context object. 5403 * @type: The extent type to check. 5404 * 5405 * This function reads the current available extents from the port and checks 5406 * if the extent count or extent size has changed since the last access. 5407 * Callers use this routine post port reset to understand if there is a 5408 * extent reprovisioning requirement. 5409 * 5410 * Returns: 5411 * -Error: error indicates problem. 5412 * 1: Extent count or size has changed. 5413 * 0: No changes. 5414 **/ 5415 static int 5416 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5417 { 5418 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5419 uint16_t size_diff, rsrc_ext_size; 5420 int rc = 0; 5421 struct lpfc_rsrc_blks *rsrc_entry; 5422 struct list_head *rsrc_blk_list = NULL; 5423 5424 size_diff = 0; 5425 curr_ext_cnt = 0; 5426 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5427 &rsrc_ext_cnt, 5428 &rsrc_ext_size); 5429 if (unlikely(rc)) 5430 return -EIO; 5431 5432 switch (type) { 5433 case LPFC_RSC_TYPE_FCOE_RPI: 5434 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5435 break; 5436 case LPFC_RSC_TYPE_FCOE_VPI: 5437 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5438 break; 5439 case LPFC_RSC_TYPE_FCOE_XRI: 5440 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5441 break; 5442 case LPFC_RSC_TYPE_FCOE_VFI: 5443 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5444 break; 5445 default: 5446 break; 5447 } 5448 5449 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5450 curr_ext_cnt++; 5451 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5452 size_diff++; 5453 } 5454 5455 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5456 rc = 1; 5457 5458 return rc; 5459 } 5460 5461 /** 5462 * lpfc_sli4_cfg_post_extnts - 5463 * @phba: Pointer to HBA context object. 5464 * @extnt_cnt - number of available extents. 5465 * @type - the extent type (rpi, xri, vfi, vpi). 5466 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5467 * @mbox - pointer to the caller's allocated mailbox structure. 
5468 * 5469 * This function executes the extents allocation request. It also 5470 * takes care of the amount of memory needed to allocate or get the 5471 * allocated extents. It is the caller's responsibility to evaluate 5472 * the response. 5473 * 5474 * Returns: 5475 * -Error: Error value describes the condition found. 5476 * 0: if successful 5477 **/ 5478 static int 5479 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5480 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5481 { 5482 int rc = 0; 5483 uint32_t req_len; 5484 uint32_t emb_len; 5485 uint32_t alloc_len, mbox_tmo; 5486 5487 /* Calculate the total requested length of the dma memory */ 5488 req_len = extnt_cnt * sizeof(uint16_t); 5489 5490 /* 5491 * Calculate the size of an embedded mailbox. The uint32_t 5492 * accounts for extents-specific word. 5493 */ 5494 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5495 sizeof(uint32_t); 5496 5497 /* 5498 * Presume the allocation and response will fit into an embedded 5499 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5500 */ 5501 *emb = LPFC_SLI4_MBX_EMBED; 5502 if (req_len > emb_len) { 5503 req_len = extnt_cnt * sizeof(uint16_t) + 5504 sizeof(union lpfc_sli4_cfg_shdr) + 5505 sizeof(uint32_t); 5506 *emb = LPFC_SLI4_MBX_NEMBED; 5507 } 5508 5509 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5510 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5511 req_len, *emb); 5512 if (alloc_len < req_len) { 5513 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5514 "2982 Allocated DMA memory size (x%x) is " 5515 "less than the requested DMA memory " 5516 "size (x%x)\n", alloc_len, req_len); 5517 return -ENOMEM; 5518 } 5519 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5520 if (unlikely(rc)) 5521 return -EIO; 5522 5523 if (!phba->sli4_hba.intr_enable) 5524 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5525 else { 5526 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5527 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5528 } 5529 5530 if (unlikely(rc)) 5531 rc = -EIO; 5532 return rc; 5533 } 5534 5535 /** 5536 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5537 * @phba: Pointer to HBA context object. 5538 * @type: The resource extent type to allocate. 5539 * 5540 * This function allocates the number of elements for the specified 5541 * resource type. 
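*
* Returns: 0 if successful. Nonzero otherwise.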
5542 **/ 5543 static int 5544 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5545 { 5546 bool emb = false; 5547 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5548 uint16_t rsrc_id, rsrc_start, j, k; 5549 uint16_t *ids; 5550 int i, rc; 5551 unsigned long longs; 5552 unsigned long *bmask; 5553 struct lpfc_rsrc_blks *rsrc_blks; 5554 LPFC_MBOXQ_t *mbox; 5555 uint32_t length; 5556 struct lpfc_id_range *id_array = NULL; 5557 void *virtaddr = NULL; 5558 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5559 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5560 struct list_head *ext_blk_list; 5561 5562 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5563 &rsrc_cnt, 5564 &rsrc_size); 5565 if (unlikely(rc)) 5566 return -EIO; 5567 5568 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5569 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5570 "3009 No available Resource Extents " 5571 "for resource type 0x%x: Count: 0x%x, " 5572 "Size 0x%x\n", type, rsrc_cnt, 5573 rsrc_size); 5574 return -ENOMEM; 5575 } 5576 5577 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5578 "2903 Post resource extents type-0x%x: " 5579 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5580 5581 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5582 if (!mbox) 5583 return -ENOMEM; 5584 5585 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5586 if (unlikely(rc)) { 5587 rc = -EIO; 5588 goto err_exit; 5589 } 5590 5591 /* 5592 * Figure out where the response is located. Then get local pointers 5593 * to the response data. The port does not guarantee to respond to 5594 * all extents counts request so update the local variable with the 5595 * allocated count from the port. 5596 */ 5597 if (emb == LPFC_SLI4_MBX_EMBED) { 5598 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5599 id_array = &rsrc_ext->u.rsp.id[0]; 5600 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5601 } else { 5602 virtaddr = mbox->sge_array->addr[0]; 5603 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5604 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5605 id_array = &n_rsrc->id; 5606 } 5607 5608 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5609 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5610 5611 /* 5612 * Based on the resource size and count, correct the base and max 5613 * resource values. 5614 */ 5615 length = sizeof(struct lpfc_rsrc_blks); 5616 switch (type) { 5617 case LPFC_RSC_TYPE_FCOE_RPI: 5618 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5619 sizeof(unsigned long), 5620 GFP_KERNEL); 5621 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5622 rc = -ENOMEM; 5623 goto err_exit; 5624 } 5625 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5626 sizeof(uint16_t), 5627 GFP_KERNEL); 5628 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5629 kfree(phba->sli4_hba.rpi_bmask); 5630 rc = -ENOMEM; 5631 goto err_exit; 5632 } 5633 5634 /* 5635 * The next_rpi was initialized with the maximum available 5636 * count but the port may allocate a smaller number. Catch 5637 * that case and update the next_rpi. 5638 */ 5639 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5640 5641 /* Initialize local ptrs for common extent processing later. 
*/ 5642 bmask = phba->sli4_hba.rpi_bmask; 5643 ids = phba->sli4_hba.rpi_ids; 5644 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5645 break; 5646 case LPFC_RSC_TYPE_FCOE_VPI: 5647 phba->vpi_bmask = kzalloc(longs * 5648 sizeof(unsigned long), 5649 GFP_KERNEL); 5650 if (unlikely(!phba->vpi_bmask)) { 5651 rc = -ENOMEM; 5652 goto err_exit; 5653 } 5654 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5655 sizeof(uint16_t), 5656 GFP_KERNEL); 5657 if (unlikely(!phba->vpi_ids)) { 5658 kfree(phba->vpi_bmask); 5659 rc = -ENOMEM; 5660 goto err_exit; 5661 } 5662 5663 /* Initialize local ptrs for common extent processing later. */ 5664 bmask = phba->vpi_bmask; 5665 ids = phba->vpi_ids; 5666 ext_blk_list = &phba->lpfc_vpi_blk_list; 5667 break; 5668 case LPFC_RSC_TYPE_FCOE_XRI: 5669 phba->sli4_hba.xri_bmask = kzalloc(longs * 5670 sizeof(unsigned long), 5671 GFP_KERNEL); 5672 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5673 rc = -ENOMEM; 5674 goto err_exit; 5675 } 5676 phba->sli4_hba.max_cfg_param.xri_used = 0; 5677 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5678 sizeof(uint16_t), 5679 GFP_KERNEL); 5680 if (unlikely(!phba->sli4_hba.xri_ids)) { 5681 kfree(phba->sli4_hba.xri_bmask); 5682 rc = -ENOMEM; 5683 goto err_exit; 5684 } 5685 5686 /* Initialize local ptrs for common extent processing later. */ 5687 bmask = phba->sli4_hba.xri_bmask; 5688 ids = phba->sli4_hba.xri_ids; 5689 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5690 break; 5691 case LPFC_RSC_TYPE_FCOE_VFI: 5692 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5693 sizeof(unsigned long), 5694 GFP_KERNEL); 5695 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5696 rc = -ENOMEM; 5697 goto err_exit; 5698 } 5699 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5700 sizeof(uint16_t), 5701 GFP_KERNEL); 5702 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5703 kfree(phba->sli4_hba.vfi_bmask); 5704 rc = -ENOMEM; 5705 goto err_exit; 5706 } 5707 5708 /* Initialize local ptrs for common extent processing later. */ 5709 bmask = phba->sli4_hba.vfi_bmask; 5710 ids = phba->sli4_hba.vfi_ids; 5711 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5712 break; 5713 default: 5714 /* Unsupported Opcode. Fail call. */ 5715 id_array = NULL; 5716 bmask = NULL; 5717 ids = NULL; 5718 ext_blk_list = NULL; 5719 goto err_exit; 5720 } 5721 5722 /* 5723 * Complete initializing the extent configuration with the 5724 * allocated ids assigned to this function. The bitmask serves 5725 * as an index into the array and manages the available ids. The 5726 * array just stores the ids communicated to the port via the wqes. 5727 */ 5728 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5729 if ((i % 2) == 0) 5730 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5731 &id_array[k]); 5732 else 5733 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5734 &id_array[k]); 5735 5736 rsrc_blks = kzalloc(length, GFP_KERNEL); 5737 if (unlikely(!rsrc_blks)) { 5738 rc = -ENOMEM; 5739 kfree(bmask); 5740 kfree(ids); 5741 goto err_exit; 5742 } 5743 rsrc_blks->rsrc_start = rsrc_id; 5744 rsrc_blks->rsrc_size = rsrc_size; 5745 list_add_tail(&rsrc_blks->list, ext_blk_list); 5746 rsrc_start = rsrc_id; 5747 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 5748 phba->sli4_hba.scsi_xri_start = rsrc_start + 5749 lpfc_sli4_get_iocb_cnt(phba); 5750 phba->sli4_hba.nvme_xri_start = 5751 phba->sli4_hba.scsi_xri_start + 5752 phba->sli4_hba.scsi_xri_max; 5753 } 5754 5755 while (rsrc_id < (rsrc_start + rsrc_size)) { 5756 ids[j] = rsrc_id; 5757 rsrc_id++; 5758 j++; 5759 } 5760 /* Entire word processed. 
Get next word.*/ 5761 if ((i % 2) == 1) 5762 k++; 5763 } 5764 err_exit: 5765 lpfc_sli4_mbox_cmd_free(phba, mbox); 5766 return rc; 5767 } 5768 5769 5770 5771 /** 5772 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5773 * @phba: Pointer to HBA context object. 5774 * @type: the extent's type. 5775 * 5776 * This function deallocates all extents of a particular resource type. 5777 * SLI4 does not allow for deallocating a particular extent range. It 5778 * is the caller's responsibility to release all kernel memory resources. 5779 **/ 5780 static int 5781 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5782 { 5783 int rc; 5784 uint32_t length, mbox_tmo = 0; 5785 LPFC_MBOXQ_t *mbox; 5786 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5787 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5788 5789 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5790 if (!mbox) 5791 return -ENOMEM; 5792 5793 /* 5794 * This function sends an embedded mailbox because it only sends 5795 * the resource type. All extents of this type are released by the 5796 * port. 5797 */ 5798 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5799 sizeof(struct lpfc_sli4_cfg_mhdr)); 5800 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5801 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5802 length, LPFC_SLI4_MBX_EMBED); 5803 5804 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5805 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5806 LPFC_SLI4_MBX_EMBED); 5807 if (unlikely(rc)) { 5808 rc = -EIO; 5809 goto out_free_mbox; 5810 } 5811 if (!phba->sli4_hba.intr_enable) 5812 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5813 else { 5814 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5815 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5816 } 5817 if (unlikely(rc)) { 5818 rc = -EIO; 5819 goto out_free_mbox; 5820 } 5821 5822 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5823 if (bf_get(lpfc_mbox_hdr_status, 5824 &dealloc_rsrc->header.cfg_shdr.response)) { 5825 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5826 "2919 Failed to release resource extents " 5827 "for type %d - Status 0x%x Add'l Status 0x%x. " 5828 "Resource memory not released.\n", 5829 type, 5830 bf_get(lpfc_mbox_hdr_status, 5831 &dealloc_rsrc->header.cfg_shdr.response), 5832 bf_get(lpfc_mbox_hdr_add_status, 5833 &dealloc_rsrc->header.cfg_shdr.response)); 5834 rc = -EIO; 5835 goto out_free_mbox; 5836 } 5837 5838 /* Release kernel memory resources for the specific type.
*/ 5839 switch (type) { 5840 case LPFC_RSC_TYPE_FCOE_VPI: 5841 kfree(phba->vpi_bmask); 5842 kfree(phba->vpi_ids); 5843 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5844 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5845 &phba->lpfc_vpi_blk_list, list) { 5846 list_del_init(&rsrc_blk->list); 5847 kfree(rsrc_blk); 5848 } 5849 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5850 break; 5851 case LPFC_RSC_TYPE_FCOE_XRI: 5852 kfree(phba->sli4_hba.xri_bmask); 5853 kfree(phba->sli4_hba.xri_ids); 5854 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5855 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5856 list_del_init(&rsrc_blk->list); 5857 kfree(rsrc_blk); 5858 } 5859 break; 5860 case LPFC_RSC_TYPE_FCOE_VFI: 5861 kfree(phba->sli4_hba.vfi_bmask); 5862 kfree(phba->sli4_hba.vfi_ids); 5863 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5864 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5865 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5866 list_del_init(&rsrc_blk->list); 5867 kfree(rsrc_blk); 5868 } 5869 break; 5870 case LPFC_RSC_TYPE_FCOE_RPI: 5871 /* RPI bitmask and physical id array are cleaned up earlier. */ 5872 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5873 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5874 list_del_init(&rsrc_blk->list); 5875 kfree(rsrc_blk); 5876 } 5877 break; 5878 default: 5879 break; 5880 } 5881 5882 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5883 5884 out_free_mbox: 5885 mempool_free(mbox, phba->mbox_mem_pool); 5886 return rc; 5887 } 5888 5889 static void 5890 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 5891 uint32_t feature) 5892 { 5893 uint32_t len; 5894 5895 len = sizeof(struct lpfc_mbx_set_feature) - 5896 sizeof(struct lpfc_sli4_cfg_mhdr); 5897 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5898 LPFC_MBOX_OPCODE_SET_FEATURES, len, 5899 LPFC_SLI4_MBX_EMBED); 5900 5901 switch (feature) { 5902 case LPFC_SET_UE_RECOVERY: 5903 bf_set(lpfc_mbx_set_feature_UER, 5904 &mbox->u.mqe.un.set_feature, 1); 5905 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 5906 mbox->u.mqe.un.set_feature.param_len = 8; 5907 break; 5908 case LPFC_SET_MDS_DIAGS: 5909 bf_set(lpfc_mbx_set_feature_mds, 5910 &mbox->u.mqe.un.set_feature, 1); 5911 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 5912 &mbox->u.mqe.un.set_feature, 1); 5913 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 5914 mbox->u.mqe.un.set_feature.param_len = 8; 5915 break; 5916 } 5917 5918 return; 5919 } 5920 5921 /** 5922 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5923 * @phba: Pointer to HBA context object. 5924 * 5925 * This function allocates all SLI4 resource identifiers. 5926 **/ 5927 int 5928 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5929 { 5930 int i, rc, error = 0; 5931 uint16_t count, base; 5932 unsigned long longs; 5933 5934 if (!phba->sli4_hba.rpi_hdrs_in_use) 5935 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5936 if (phba->sli4_hba.extents_in_use) { 5937 /* 5938 * The port supports resource extents. The XRI, VPI, VFI, RPI 5939 * resource extent count must be read and allocated before 5940 * provisioning the resource id arrays. 5941 */ 5942 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5943 LPFC_IDX_RSRC_RDY) { 5944 /* 5945 * Extent-based resources are set - the driver could 5946 * be in a port reset. Figure out if any corrective 5947 * actions need to be taken. 
5948 */ 5949 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5950 LPFC_RSC_TYPE_FCOE_VFI); 5951 if (rc != 0) 5952 error++; 5953 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5954 LPFC_RSC_TYPE_FCOE_VPI); 5955 if (rc != 0) 5956 error++; 5957 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5958 LPFC_RSC_TYPE_FCOE_XRI); 5959 if (rc != 0) 5960 error++; 5961 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5962 LPFC_RSC_TYPE_FCOE_RPI); 5963 if (rc != 0) 5964 error++; 5965 5966 /* 5967 * It's possible that the number of resources 5968 * provided to this port instance changed between 5969 * resets. Detect this condition and reallocate 5970 * resources. Otherwise, there is no action. 5971 */ 5972 if (error) { 5973 lpfc_printf_log(phba, KERN_INFO, 5974 LOG_MBOX | LOG_INIT, 5975 "2931 Detected extent resource " 5976 "change. Reallocating all " 5977 "extents.\n"); 5978 rc = lpfc_sli4_dealloc_extent(phba, 5979 LPFC_RSC_TYPE_FCOE_VFI); 5980 rc = lpfc_sli4_dealloc_extent(phba, 5981 LPFC_RSC_TYPE_FCOE_VPI); 5982 rc = lpfc_sli4_dealloc_extent(phba, 5983 LPFC_RSC_TYPE_FCOE_XRI); 5984 rc = lpfc_sli4_dealloc_extent(phba, 5985 LPFC_RSC_TYPE_FCOE_RPI); 5986 } else 5987 return 0; 5988 } 5989 5990 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5991 if (unlikely(rc)) 5992 goto err_exit; 5993 5994 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5995 if (unlikely(rc)) 5996 goto err_exit; 5997 5998 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5999 if (unlikely(rc)) 6000 goto err_exit; 6001 6002 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6003 if (unlikely(rc)) 6004 goto err_exit; 6005 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6006 LPFC_IDX_RSRC_RDY); 6007 return rc; 6008 } else { 6009 /* 6010 * The port does not support resource extents. The XRI, VPI, 6011 * VFI, RPI resource ids were determined from READ_CONFIG. 6012 * Just allocate the bitmasks and provision the resource id 6013 * arrays. If a port reset is active, the resources don't 6014 * need any action - just exit. 6015 */ 6016 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6017 LPFC_IDX_RSRC_RDY) { 6018 lpfc_sli4_dealloc_resource_identifiers(phba); 6019 lpfc_sli4_remove_rpis(phba); 6020 } 6021 /* RPIs. */ 6022 count = phba->sli4_hba.max_cfg_param.max_rpi; 6023 if (count <= 0) { 6024 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6025 "3279 Invalid provisioning of " 6026 "rpi:%d\n", count); 6027 rc = -EINVAL; 6028 goto err_exit; 6029 } 6030 base = phba->sli4_hba.max_cfg_param.rpi_base; 6031 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6032 phba->sli4_hba.rpi_bmask = kzalloc(longs * 6033 sizeof(unsigned long), 6034 GFP_KERNEL); 6035 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6036 rc = -ENOMEM; 6037 goto err_exit; 6038 } 6039 phba->sli4_hba.rpi_ids = kzalloc(count * 6040 sizeof(uint16_t), 6041 GFP_KERNEL); 6042 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6043 rc = -ENOMEM; 6044 goto free_rpi_bmask; 6045 } 6046 6047 for (i = 0; i < count; i++) 6048 phba->sli4_hba.rpi_ids[i] = base + i; 6049 6050 /* VPIs. 
*/ 6051 count = phba->sli4_hba.max_cfg_param.max_vpi; 6052 if (count <= 0) { 6053 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6054 "3280 Invalid provisioning of " 6055 "vpi:%d\n", count); 6056 rc = -EINVAL; 6057 goto free_rpi_ids; 6058 } 6059 base = phba->sli4_hba.max_cfg_param.vpi_base; 6060 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6061 phba->vpi_bmask = kzalloc(longs * 6062 sizeof(unsigned long), 6063 GFP_KERNEL); 6064 if (unlikely(!phba->vpi_bmask)) { 6065 rc = -ENOMEM; 6066 goto free_rpi_ids; 6067 } 6068 phba->vpi_ids = kzalloc(count * 6069 sizeof(uint16_t), 6070 GFP_KERNEL); 6071 if (unlikely(!phba->vpi_ids)) { 6072 rc = -ENOMEM; 6073 goto free_vpi_bmask; 6074 } 6075 6076 for (i = 0; i < count; i++) 6077 phba->vpi_ids[i] = base + i; 6078 6079 /* XRIs. */ 6080 count = phba->sli4_hba.max_cfg_param.max_xri; 6081 if (count <= 0) { 6082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6083 "3281 Invalid provisioning of " 6084 "xri:%d\n", count); 6085 rc = -EINVAL; 6086 goto free_vpi_ids; 6087 } 6088 base = phba->sli4_hba.max_cfg_param.xri_base; 6089 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6090 phba->sli4_hba.xri_bmask = kzalloc(longs * 6091 sizeof(unsigned long), 6092 GFP_KERNEL); 6093 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6094 rc = -ENOMEM; 6095 goto free_vpi_ids; 6096 } 6097 phba->sli4_hba.max_cfg_param.xri_used = 0; 6098 phba->sli4_hba.xri_ids = kzalloc(count * 6099 sizeof(uint16_t), 6100 GFP_KERNEL); 6101 if (unlikely(!phba->sli4_hba.xri_ids)) { 6102 rc = -ENOMEM; 6103 goto free_xri_bmask; 6104 } 6105 6106 for (i = 0; i < count; i++) 6107 phba->sli4_hba.xri_ids[i] = base + i; 6108 6109 /* VFIs. */ 6110 count = phba->sli4_hba.max_cfg_param.max_vfi; 6111 if (count <= 0) { 6112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6113 "3282 Invalid provisioning of " 6114 "vfi:%d\n", count); 6115 rc = -EINVAL; 6116 goto free_xri_ids; 6117 } 6118 base = phba->sli4_hba.max_cfg_param.vfi_base; 6119 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6120 phba->sli4_hba.vfi_bmask = kzalloc(longs * 6121 sizeof(unsigned long), 6122 GFP_KERNEL); 6123 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6124 rc = -ENOMEM; 6125 goto free_xri_ids; 6126 } 6127 phba->sli4_hba.vfi_ids = kzalloc(count * 6128 sizeof(uint16_t), 6129 GFP_KERNEL); 6130 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6131 rc = -ENOMEM; 6132 goto free_vfi_bmask; 6133 } 6134 6135 for (i = 0; i < count; i++) 6136 phba->sli4_hba.vfi_ids[i] = base + i; 6137 6138 /* 6139 * Mark all resources ready. An HBA reset doesn't need 6140 * to reset the initialization. 6141 */ 6142 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6143 LPFC_IDX_RSRC_RDY); 6144 return 0; 6145 } 6146 6147 free_vfi_bmask: 6148 kfree(phba->sli4_hba.vfi_bmask); 6149 phba->sli4_hba.vfi_bmask = NULL; 6150 free_xri_ids: 6151 kfree(phba->sli4_hba.xri_ids); 6152 phba->sli4_hba.xri_ids = NULL; 6153 free_xri_bmask: 6154 kfree(phba->sli4_hba.xri_bmask); 6155 phba->sli4_hba.xri_bmask = NULL; 6156 free_vpi_ids: 6157 kfree(phba->vpi_ids); 6158 phba->vpi_ids = NULL; 6159 free_vpi_bmask: 6160 kfree(phba->vpi_bmask); 6161 phba->vpi_bmask = NULL; 6162 free_rpi_ids: 6163 kfree(phba->sli4_hba.rpi_ids); 6164 phba->sli4_hba.rpi_ids = NULL; 6165 free_rpi_bmask: 6166 kfree(phba->sli4_hba.rpi_bmask); 6167 phba->sli4_hba.rpi_bmask = NULL; 6168 err_exit: 6169 return rc; 6170 } 6171 6172 /** 6173 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6174 * @phba: Pointer to HBA context object. 
6175 * 6176 * This function deallocates all previously allocated SLI4 resource 6177 * identifiers (rpi, vpi, xri, vfi) for this port. 6178 **/ 6179 int 6180 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 6181 { 6182 if (phba->sli4_hba.extents_in_use) { 6183 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6184 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6185 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6186 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6187 } else { 6188 kfree(phba->vpi_bmask); 6189 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6190 kfree(phba->vpi_ids); 6191 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6192 kfree(phba->sli4_hba.xri_bmask); 6193 kfree(phba->sli4_hba.xri_ids); 6194 kfree(phba->sli4_hba.vfi_bmask); 6195 kfree(phba->sli4_hba.vfi_ids); 6196 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6197 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6198 } 6199 6200 return 0; 6201 } 6202 6203 /** 6204 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 6205 * @phba: Pointer to HBA context object. 6206 * @type: The resource extent type. 6207 * @extnt_cnt: buffer to hold port extent count response. 6208 * @extnt_size: buffer to hold port extent size response. 6209 * 6210 * This function calls the port to read the host allocated extents 6211 * for a particular type. 6212 **/ 6213 int 6214 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 6215 uint16_t *extnt_cnt, uint16_t *extnt_size) 6216 { 6217 bool emb; 6218 int rc = 0; 6219 uint16_t curr_blks = 0; 6220 uint32_t req_len, emb_len; 6221 uint32_t alloc_len, mbox_tmo; 6222 struct list_head *blk_list_head; 6223 struct lpfc_rsrc_blks *rsrc_blk; 6224 LPFC_MBOXQ_t *mbox; 6225 void *virtaddr = NULL; 6226 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6227 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6228 union lpfc_sli4_cfg_shdr *shdr; 6229 6230 switch (type) { 6231 case LPFC_RSC_TYPE_FCOE_VPI: 6232 blk_list_head = &phba->lpfc_vpi_blk_list; 6233 break; 6234 case LPFC_RSC_TYPE_FCOE_XRI: 6235 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 6236 break; 6237 case LPFC_RSC_TYPE_FCOE_VFI: 6238 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 6239 break; 6240 case LPFC_RSC_TYPE_FCOE_RPI: 6241 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 6242 break; 6243 default: 6244 return -EIO; 6245 } 6246 6247 /* Count the number of extents currently allocated for this type. */ 6248 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6249 if (curr_blks == 0) { 6250 /* 6251 * The GET_ALLOCATED mailbox does not return the size, 6252 * just the count. The size should be just the size 6253 * stored in the current allocated block and all sizes 6254 * for an extent type are the same so set the return 6255 * value now. 6256 */ 6257 *extnt_size = rsrc_blk->rsrc_size; 6258 } 6259 curr_blks++; 6260 } 6261 6262 /* 6263 * Calculate the size of an embedded mailbox. The uint32_t 6264 * accounts for extents-specific word. 6265 */ 6266 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6267 sizeof(uint32_t); 6268 6269 /* 6270 * Presume the allocation and response will fit into an embedded 6271 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6272 */ 6273 emb = LPFC_SLI4_MBX_EMBED; 6274 req_len = emb_len; 6275 if (req_len > emb_len) { 6276 req_len = curr_blks * sizeof(uint16_t) + 6277 sizeof(union lpfc_sli4_cfg_shdr) + 6278 sizeof(uint32_t); 6279 emb = LPFC_SLI4_MBX_NEMBED; 6280 } 6281 6282 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6283 if (!mbox) 6284 return -ENOMEM; 6285 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6286 6287 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6288 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6289 req_len, emb); 6290 if (alloc_len < req_len) { 6291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6292 "2983 Allocated DMA memory size (x%x) is " 6293 "less than the requested DMA memory " 6294 "size (x%x)\n", alloc_len, req_len); 6295 rc = -ENOMEM; 6296 goto err_exit; 6297 } 6298 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6299 if (unlikely(rc)) { 6300 rc = -EIO; 6301 goto err_exit; 6302 } 6303 6304 if (!phba->sli4_hba.intr_enable) 6305 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6306 else { 6307 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6308 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6309 } 6310 6311 if (unlikely(rc)) { 6312 rc = -EIO; 6313 goto err_exit; 6314 } 6315 6316 /* 6317 * Figure out where the response is located. Then get local pointers 6318 * to the response data. The port does not guarantee to respond to 6319 * all extent count requests, so update the local variable with the 6320 * allocated count from the port. 6321 */ 6322 if (emb == LPFC_SLI4_MBX_EMBED) { 6323 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6324 shdr = &rsrc_ext->header.cfg_shdr; 6325 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6326 } else { 6327 virtaddr = mbox->sge_array->addr[0]; 6328 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6329 shdr = &n_rsrc->cfg_shdr; 6330 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6331 } 6332 6333 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6334 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6335 "2984 Failed to read allocated resources " 6336 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6337 type, 6338 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6339 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6340 rc = -EIO; 6341 goto err_exit; 6342 } 6343 err_exit: 6344 lpfc_sli4_mbox_cmd_free(phba, mbox); 6345 return rc; 6346 } 6347 6348 /** 6349 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6350 * @phba: pointer to lpfc hba data structure. 6352 * @sgl_list: linked list of sgl buffers to post 6353 * @cnt: number of linked list buffers 6354 * 6355 * This routine walks the list of buffers that have been allocated and 6356 * reposts them to the port by using SGL block post. This is needed after a 6357 * pci_function_reset/warm_start or start. It attempts to construct blocks 6358 * of buffer sgls which contain contiguous xris and uses the non-embedded 6359 * SGL block post mailbox commands to post them to the port. For a single 6360 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post 6361 * mailbox command for posting. 6362 * 6363 * Returns: the number of sgl entries actually posted if successful, negative value on failure.
6364 **/ 6365 static int 6366 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6367 struct list_head *sgl_list, int cnt) 6368 { 6369 struct lpfc_sglq *sglq_entry = NULL; 6370 struct lpfc_sglq *sglq_entry_next = NULL; 6371 struct lpfc_sglq *sglq_entry_first = NULL; 6372 int status, total_cnt; 6373 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6374 int last_xritag = NO_XRI; 6375 LIST_HEAD(prep_sgl_list); 6376 LIST_HEAD(blck_sgl_list); 6377 LIST_HEAD(allc_sgl_list); 6378 LIST_HEAD(post_sgl_list); 6379 LIST_HEAD(free_sgl_list); 6380 6381 spin_lock_irq(&phba->hbalock); 6382 spin_lock(&phba->sli4_hba.sgl_list_lock); 6383 list_splice_init(sgl_list, &allc_sgl_list); 6384 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6385 spin_unlock_irq(&phba->hbalock); 6386 6387 total_cnt = cnt; 6388 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6389 &allc_sgl_list, list) { 6390 list_del_init(&sglq_entry->list); 6391 block_cnt++; 6392 if ((last_xritag != NO_XRI) && 6393 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6394 /* a hole in xri block, form a sgl posting block */ 6395 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6396 post_cnt = block_cnt - 1; 6397 /* prepare list for next posting block */ 6398 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6399 block_cnt = 1; 6400 } else { 6401 /* prepare list for next posting block */ 6402 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6403 /* enough sgls for non-embed sgl mbox command */ 6404 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6405 list_splice_init(&prep_sgl_list, 6406 &blck_sgl_list); 6407 post_cnt = block_cnt; 6408 block_cnt = 0; 6409 } 6410 } 6411 num_posted++; 6412 6413 /* keep track of last sgl's xritag */ 6414 last_xritag = sglq_entry->sli4_xritag; 6415 6416 /* end of repost sgl list condition for buffers */ 6417 if (num_posted == total_cnt) { 6418 if (post_cnt == 0) { 6419 list_splice_init(&prep_sgl_list, 6420 &blck_sgl_list); 6421 post_cnt = block_cnt; 6422 } else if (block_cnt == 1) { 6423 status = lpfc_sli4_post_sgl(phba, 6424 sglq_entry->phys, 0, 6425 sglq_entry->sli4_xritag); 6426 if (!status) { 6427 /* successful, put sgl to posted list */ 6428 list_add_tail(&sglq_entry->list, 6429 &post_sgl_list); 6430 } else { 6431 /* Failure, put sgl to free list */ 6432 lpfc_printf_log(phba, KERN_WARNING, 6433 LOG_SLI, 6434 "3159 Failed to post " 6435 "sgl, xritag:x%x\n", 6436 sglq_entry->sli4_xritag); 6437 list_add_tail(&sglq_entry->list, 6438 &free_sgl_list); 6439 total_cnt--; 6440 } 6441 } 6442 } 6443 6444 /* continue until a nembed page worth of sgls */ 6445 if (post_cnt == 0) 6446 continue; 6447 6448 /* post the buffer list sgls as a block */ 6449 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6450 post_cnt); 6451 6452 if (!status) { 6453 /* success, put sgl list to posted sgl list */ 6454 list_splice_init(&blck_sgl_list, &post_sgl_list); 6455 } else { 6456 /* Failure, put sgl list to free sgl list */ 6457 sglq_entry_first = list_first_entry(&blck_sgl_list, 6458 struct lpfc_sglq, 6459 list); 6460 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6461 "3160 Failed to post sgl-list, " 6462 "xritag:x%x-x%x\n", 6463 sglq_entry_first->sli4_xritag, 6464 (sglq_entry_first->sli4_xritag + 6465 post_cnt - 1)); 6466 list_splice_init(&blck_sgl_list, &free_sgl_list); 6467 total_cnt -= post_cnt; 6468 } 6469 6470 /* don't reset xirtag due to hole in xri block */ 6471 if (block_cnt == 0) 6472 last_xritag = NO_XRI; 6473 6474 /* reset sgl post count for next round of posting */ 6475 post_cnt = 0; 6476 } 6477 6478 /* free the sgls failed to post */ 
6479 lpfc_free_sgl_list(phba, &free_sgl_list); 6480 6481 /* push sgls posted to the available list */ 6482 if (!list_empty(&post_sgl_list)) { 6483 spin_lock_irq(&phba->hbalock); 6484 spin_lock(&phba->sli4_hba.sgl_list_lock); 6485 list_splice_init(&post_sgl_list, sgl_list); 6486 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6487 spin_unlock_irq(&phba->hbalock); 6488 } else { 6489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6490 "3161 Failure to post sgl to port.\n"); 6491 return -EIO; 6492 } 6493 6494 /* return the number of XRIs actually posted */ 6495 return total_cnt; 6496 } 6497 6498 void 6499 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 6500 { 6501 uint32_t len; 6502 6503 len = sizeof(struct lpfc_mbx_set_host_data) - 6504 sizeof(struct lpfc_sli4_cfg_mhdr); 6505 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6506 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 6507 LPFC_SLI4_MBX_EMBED); 6508 6509 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 6510 mbox->u.mqe.un.set_host_data.param_len = 6511 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 6512 snprintf(mbox->u.mqe.un.set_host_data.data, 6513 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 6514 "Linux %s v"LPFC_DRIVER_VERSION, 6515 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 6516 } 6517 6518 int 6519 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 6520 struct lpfc_queue *drq, int count, int idx) 6521 { 6522 int rc, i; 6523 struct lpfc_rqe hrqe; 6524 struct lpfc_rqe drqe; 6525 struct lpfc_rqb *rqbp; 6526 struct rqb_dmabuf *rqb_buffer; 6527 LIST_HEAD(rqb_buf_list); 6528 6529 rqbp = hrq->rqbp; 6530 for (i = 0; i < count; i++) { 6531 /* IF RQ is already full, don't bother */ 6532 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 6533 break; 6534 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 6535 if (!rqb_buffer) 6536 break; 6537 rqb_buffer->hrq = hrq; 6538 rqb_buffer->drq = drq; 6539 rqb_buffer->idx = idx; 6540 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 6541 } 6542 while (!list_empty(&rqb_buf_list)) { 6543 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 6544 hbuf.list); 6545 6546 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 6547 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 6548 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 6549 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 6550 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 6551 if (rc < 0) { 6552 rqbp->rqb_free_buffer(phba, rqb_buffer); 6553 } else { 6554 list_add_tail(&rqb_buffer->hbuf.list, 6555 &rqbp->rqb_buffer_list); 6556 rqbp->buffer_count++; 6557 } 6558 } 6559 return 1; 6560 } 6561 6562 /** 6563 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 6564 * @phba: Pointer to HBA context object. 6565 * 6566 * This function is the main SLI4 device initialization PCI function. This 6567 * function is called by the HBA initialization code, HBA reset code and 6568 * HBA error attention handler code. Caller is not required to hold any 6569 * locks. 
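*
* Returns: 0 if successful, nonzero value otherwise.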
6570 **/ 6571 int 6572 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6573 { 6574 int rc, i, cnt; 6575 LPFC_MBOXQ_t *mboxq; 6576 struct lpfc_mqe *mqe; 6577 uint8_t *vpd; 6578 uint32_t vpd_size; 6579 uint32_t ftr_rsp = 0; 6580 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 6581 struct lpfc_vport *vport = phba->pport; 6582 struct lpfc_dmabuf *mp; 6583 struct lpfc_rqb *rqbp; 6584 6585 /* Perform a PCI function reset to start from clean */ 6586 rc = lpfc_pci_function_reset(phba); 6587 if (unlikely(rc)) 6588 return -ENODEV; 6589 6590 /* Check the HBA Host Status Register for readyness */ 6591 rc = lpfc_sli4_post_status_check(phba); 6592 if (unlikely(rc)) 6593 return -ENODEV; 6594 else { 6595 spin_lock_irq(&phba->hbalock); 6596 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 6597 spin_unlock_irq(&phba->hbalock); 6598 } 6599 6600 /* 6601 * Allocate a single mailbox container for initializing the 6602 * port. 6603 */ 6604 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6605 if (!mboxq) 6606 return -ENOMEM; 6607 6608 /* Issue READ_REV to collect vpd and FW information. */ 6609 vpd_size = SLI4_PAGE_SIZE; 6610 vpd = kzalloc(vpd_size, GFP_KERNEL); 6611 if (!vpd) { 6612 rc = -ENOMEM; 6613 goto out_free_mbox; 6614 } 6615 6616 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 6617 if (unlikely(rc)) { 6618 kfree(vpd); 6619 goto out_free_mbox; 6620 } 6621 6622 mqe = &mboxq->u.mqe; 6623 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6624 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 6625 phba->hba_flag |= HBA_FCOE_MODE; 6626 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 6627 } else { 6628 phba->hba_flag &= ~HBA_FCOE_MODE; 6629 } 6630 6631 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6632 LPFC_DCBX_CEE_MODE) 6633 phba->hba_flag |= HBA_FIP_SUPPORT; 6634 else 6635 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6636 6637 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 6638 6639 if (phba->sli_rev != LPFC_SLI_REV4) { 6640 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6641 "0376 READ_REV Error. SLI Level %d " 6642 "FCoE enabled %d\n", 6643 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 6644 rc = -EIO; 6645 kfree(vpd); 6646 goto out_free_mbox; 6647 } 6648 6649 /* 6650 * Continue initialization with default values even if driver failed 6651 * to read FCoE param config regions, only read parameters if the 6652 * board is FCoE 6653 */ 6654 if (phba->hba_flag & HBA_FCOE_MODE && 6655 lpfc_sli4_read_fcoe_params(phba)) 6656 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 6657 "2570 Failed to read FCoE parameters\n"); 6658 6659 /* 6660 * Retrieve sli4 device physical port name, failure of doing it 6661 * is considered as non-fatal. 6662 */ 6663 rc = lpfc_sli4_retrieve_pport_name(phba); 6664 if (!rc) 6665 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6666 "3080 Successful retrieving SLI4 device " 6667 "physical port name: %s.\n", phba->Port); 6668 6669 /* 6670 * Evaluate the read rev and vpd data. Populate the driver 6671 * state with the results. If this routine fails, the failure 6672 * is not fatal as the driver will use generic values. 6673 */ 6674 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6675 if (unlikely(!rc)) { 6676 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6677 "0377 Error %d parsing vpd. 
" 6678 "Using defaults.\n", rc); 6679 rc = 0; 6680 } 6681 kfree(vpd); 6682 6683 /* Save information as VPD data */ 6684 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6685 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6686 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6687 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6688 &mqe->un.read_rev); 6689 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6690 &mqe->un.read_rev); 6691 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6692 &mqe->un.read_rev); 6693 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6694 &mqe->un.read_rev); 6695 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6696 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6697 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6698 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6699 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6700 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6701 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6702 "(%d):0380 READ_REV Status x%x " 6703 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6704 mboxq->vport ? mboxq->vport->vpi : 0, 6705 bf_get(lpfc_mqe_status, mqe), 6706 phba->vpd.rev.opFwName, 6707 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6708 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6709 6710 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 6711 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 6712 if (phba->pport->cfg_lun_queue_depth > rc) { 6713 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6714 "3362 LUN queue depth changed from %d to %d\n", 6715 phba->pport->cfg_lun_queue_depth, rc); 6716 phba->pport->cfg_lun_queue_depth = rc; 6717 } 6718 6719 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6720 LPFC_SLI_INTF_IF_TYPE_0) { 6721 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 6722 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6723 if (rc == MBX_SUCCESS) { 6724 phba->hba_flag |= HBA_RECOVERABLE_UE; 6725 /* Set 1Sec interval to detect UE */ 6726 phba->eratt_poll_interval = 1; 6727 phba->sli4_hba.ue_to_sr = bf_get( 6728 lpfc_mbx_set_feature_UESR, 6729 &mboxq->u.mqe.un.set_feature); 6730 phba->sli4_hba.ue_to_rp = bf_get( 6731 lpfc_mbx_set_feature_UERP, 6732 &mboxq->u.mqe.un.set_feature); 6733 } 6734 } 6735 6736 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 6737 /* Enable MDS Diagnostics only if the SLI Port supports it */ 6738 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 6739 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6740 if (rc != MBX_SUCCESS) 6741 phba->mds_diags_support = 0; 6742 } 6743 6744 /* 6745 * Discover the port's supported feature set and match it against the 6746 * hosts requests. 6747 */ 6748 lpfc_request_features(phba, mboxq); 6749 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6750 if (unlikely(rc)) { 6751 rc = -EIO; 6752 goto out_free_mbox; 6753 } 6754 6755 /* 6756 * The port must support FCP initiator mode as this is the 6757 * only mode running in the host. 
6758 */ 6759 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6760 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6761 "0378 No support for fcpi mode.\n"); 6762 ftr_rsp++; 6763 } 6764 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6765 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6766 else 6767 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6768 /* 6769 * If the port cannot support the host's requested features 6770 * then turn off the global config parameters to disable the 6771 * feature in the driver. This is not a fatal error. 6772 */ 6773 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6774 if (phba->cfg_enable_bg) { 6775 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6776 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6777 else 6778 ftr_rsp++; 6779 } 6780 6781 if (phba->max_vpi && phba->cfg_enable_npiv && 6782 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6783 ftr_rsp++; 6784 6785 if (ftr_rsp) { 6786 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6787 "0379 Feature Mismatch Data: x%08x %08x " 6788 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6789 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6790 phba->cfg_enable_npiv, phba->max_vpi); 6791 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6792 phba->cfg_enable_bg = 0; 6793 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6794 phba->cfg_enable_npiv = 0; 6795 } 6796 6797 /* These SLI3 features are assumed in SLI4 */ 6798 spin_lock_irq(&phba->hbalock); 6799 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6800 spin_unlock_irq(&phba->hbalock); 6801 6802 /* 6803 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6804 * calls depends on these resources to complete port setup. 6805 */ 6806 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6807 if (rc) { 6808 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6809 "2920 Failed to alloc Resource IDs " 6810 "rc = x%x\n", rc); 6811 goto out_free_mbox; 6812 } 6813 6814 lpfc_set_host_data(phba, mboxq); 6815 6816 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6817 if (rc) { 6818 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6819 "2134 Failed to set host os driver version %x", 6820 rc); 6821 } 6822 6823 /* Read the port's service parameters. */ 6824 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6825 if (rc) { 6826 phba->link_state = LPFC_HBA_ERROR; 6827 rc = -ENOMEM; 6828 goto out_free_mbox; 6829 } 6830 6831 mboxq->vport = vport; 6832 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6833 mp = (struct lpfc_dmabuf *) mboxq->context1; 6834 if (rc == MBX_SUCCESS) { 6835 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6836 rc = 0; 6837 } 6838 6839 /* 6840 * This memory was allocated by the lpfc_read_sparam routine. Release 6841 * it to the mbuf pool. 6842 */ 6843 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6844 kfree(mp); 6845 mboxq->context1 = NULL; 6846 if (unlikely(rc)) { 6847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6848 "0382 READ_SPARAM command failed " 6849 "status %d, mbxStatus x%x\n", 6850 rc, bf_get(lpfc_mqe_status, mqe)); 6851 phba->link_state = LPFC_HBA_ERROR; 6852 rc = -EIO; 6853 goto out_free_mbox; 6854 } 6855 6856 lpfc_update_vport_wwn(vport); 6857 6858 /* Update the fc_host data structures with new wwn. 
*/ 6859 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6860 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6861 6862 /* Create all the SLI4 queues */ 6863 rc = lpfc_sli4_queue_create(phba); 6864 if (rc) { 6865 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6866 "3089 Failed to allocate queues\n"); 6867 rc = -ENODEV; 6868 goto out_free_mbox; 6869 } 6870 /* Set up all the queues to the device */ 6871 rc = lpfc_sli4_queue_setup(phba); 6872 if (unlikely(rc)) { 6873 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6874 "0381 Error %d during queue setup.\n ", rc); 6875 goto out_stop_timers; 6876 } 6877 /* Initialize the driver internal SLI layer lists. */ 6878 lpfc_sli4_setup(phba); 6879 lpfc_sli4_queue_init(phba); 6880 6881 /* update host els xri-sgl sizes and mappings */ 6882 rc = lpfc_sli4_els_sgl_update(phba); 6883 if (unlikely(rc)) { 6884 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6885 "1400 Failed to update xri-sgl size and " 6886 "mapping: %d\n", rc); 6887 goto out_destroy_queue; 6888 } 6889 6890 /* register the els sgl pool to the port */ 6891 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 6892 phba->sli4_hba.els_xri_cnt); 6893 if (unlikely(rc < 0)) { 6894 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6895 "0582 Error %d during els sgl post " 6896 "operation\n", rc); 6897 rc = -ENODEV; 6898 goto out_destroy_queue; 6899 } 6900 phba->sli4_hba.els_xri_cnt = rc; 6901 6902 if (phba->nvmet_support) { 6903 /* update host nvmet xri-sgl sizes and mappings */ 6904 rc = lpfc_sli4_nvmet_sgl_update(phba); 6905 if (unlikely(rc)) { 6906 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6907 "6308 Failed to update nvmet-sgl size " 6908 "and mapping: %d\n", rc); 6909 goto out_destroy_queue; 6910 } 6911 6912 /* register the nvmet sgl pool to the port */ 6913 rc = lpfc_sli4_repost_sgl_list( 6914 phba, 6915 &phba->sli4_hba.lpfc_nvmet_sgl_list, 6916 phba->sli4_hba.nvmet_xri_cnt); 6917 if (unlikely(rc < 0)) { 6918 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6919 "3117 Error %d during nvmet " 6920 "sgl post\n", rc); 6921 rc = -ENODEV; 6922 goto out_destroy_queue; 6923 } 6924 phba->sli4_hba.nvmet_xri_cnt = rc; 6925 6926 cnt = phba->cfg_iocb_cnt * 1024; 6927 /* We need 1 iocbq for every SGL, for IO processing */ 6928 cnt += phba->sli4_hba.nvmet_xri_cnt; 6929 /* Initialize and populate the iocb list per host */ 6930 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6931 "2821 initialize iocb list %d total %d\n", 6932 phba->cfg_iocb_cnt, cnt); 6933 rc = lpfc_init_iocb_list(phba, cnt); 6934 if (rc) { 6935 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6936 "1413 Failed to init iocb list.\n"); 6937 goto out_destroy_queue; 6938 } 6939 6940 lpfc_nvmet_create_targetport(phba); 6941 } else { 6942 /* update host scsi xri-sgl sizes and mappings */ 6943 rc = lpfc_sli4_scsi_sgl_update(phba); 6944 if (unlikely(rc)) { 6945 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6946 "6309 Failed to update scsi-sgl size " 6947 "and mapping: %d\n", rc); 6948 goto out_destroy_queue; 6949 } 6950 6951 /* update host nvme xri-sgl sizes and mappings */ 6952 rc = lpfc_sli4_nvme_sgl_update(phba); 6953 if (unlikely(rc)) { 6954 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6955 "6082 Failed to update nvme-sgl size " 6956 "and mapping: %d\n", rc); 6957 goto out_destroy_queue; 6958 } 6959 6960 cnt = phba->cfg_iocb_cnt * 1024; 6961 /* Initialize and populate the iocb list per host */ 6962 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6963 "2820 initialize iocb list %d 
total %d\n", 6964 phba->cfg_iocb_cnt, cnt); 6965 rc = lpfc_init_iocb_list(phba, cnt); 6966 if (rc) { 6967 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6968 "6301 Failed to init iocb list.\n"); 6969 goto out_destroy_queue; 6970 } 6971 } 6972 6973 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 6974 /* Post initial buffers to all RQs created */ 6975 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 6976 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 6977 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 6978 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 6979 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 6980 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 6981 rqbp->buffer_count = 0; 6982 6983 lpfc_post_rq_buffer( 6984 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 6985 phba->sli4_hba.nvmet_mrq_data[i], 6986 LPFC_NVMET_RQE_DEF_COUNT, i); 6987 } 6988 } 6989 6990 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 6991 /* register the allocated scsi sgl pool to the port */ 6992 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6993 if (unlikely(rc)) { 6994 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6995 "0383 Error %d during scsi sgl post " 6996 "operation\n", rc); 6997 /* Some Scsi buffers were moved to abort scsi list */ 6998 /* A pci function reset will repost them */ 6999 rc = -ENODEV; 7000 goto out_destroy_queue; 7001 } 7002 } 7003 7004 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 7005 (phba->nvmet_support == 0)) { 7006 7007 /* register the allocated nvme sgl pool to the port */ 7008 rc = lpfc_repost_nvme_sgl_list(phba); 7009 if (unlikely(rc)) { 7010 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7011 "6116 Error %d during nvme sgl post " 7012 "operation\n", rc); 7013 /* Some NVME buffers were moved to abort nvme list */ 7014 /* A pci function reset will repost them */ 7015 rc = -ENODEV; 7016 goto out_destroy_queue; 7017 } 7018 } 7019 7020 /* Post the rpi header region to the device. 
*/ 7021 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7022 if (unlikely(rc)) { 7023 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7024 "0393 Error %d during rpi post operation\n", 7025 rc); 7026 rc = -ENODEV; 7027 goto out_destroy_queue; 7028 } 7029 lpfc_sli4_node_prep(phba); 7030 7031 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7032 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7033 /* 7034 * The FC Port needs to register FCFI (index 0) 7035 */ 7036 lpfc_reg_fcfi(phba, mboxq); 7037 mboxq->vport = phba->pport; 7038 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7039 if (rc != MBX_SUCCESS) 7040 goto out_unset_queue; 7041 rc = 0; 7042 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7043 &mboxq->u.mqe.un.reg_fcfi); 7044 } else { 7045 /* We are a NVME Target mode with MRQ > 1 */ 7046 7047 /* First register the FCFI */ 7048 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7049 mboxq->vport = phba->pport; 7050 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7051 if (rc != MBX_SUCCESS) 7052 goto out_unset_queue; 7053 rc = 0; 7054 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7055 &mboxq->u.mqe.un.reg_fcfi_mrq); 7056 7057 /* Next register the MRQs */ 7058 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7059 mboxq->vport = phba->pport; 7060 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7061 if (rc != MBX_SUCCESS) 7062 goto out_unset_queue; 7063 rc = 0; 7064 } 7065 /* Check if the port is configured to be disabled */ 7066 lpfc_sli_read_link_ste(phba); 7067 } 7068 7069 /* Arm the CQs and then EQs on device */ 7070 lpfc_sli4_arm_cqeq_intr(phba); 7071 7072 /* Indicate device interrupt mode */ 7073 phba->sli4_hba.intr_enable = 1; 7074 7075 /* Allow asynchronous mailbox command to go through */ 7076 spin_lock_irq(&phba->hbalock); 7077 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7078 spin_unlock_irq(&phba->hbalock); 7079 7080 /* Post receive buffers to the device */ 7081 lpfc_sli4_rb_setup(phba); 7082 7083 /* Reset HBA FCF states after HBA reset */ 7084 phba->fcf.fcf_flag = 0; 7085 phba->fcf.current_rec.flag = 0; 7086 7087 /* Start the ELS watchdog timer */ 7088 mod_timer(&vport->els_tmofunc, 7089 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7090 7091 /* Start heart beat timer */ 7092 mod_timer(&phba->hb_tmofunc, 7093 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7094 phba->hb_outstanding = 0; 7095 phba->last_completion_time = jiffies; 7096 7097 /* Start error attention (ERATT) polling timer */ 7098 mod_timer(&phba->eratt_poll, 7099 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7100 7101 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7102 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7103 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7104 if (!rc) { 7105 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7106 "2829 This device supports " 7107 "Advanced Error Reporting (AER)\n"); 7108 spin_lock_irq(&phba->hbalock); 7109 phba->hba_flag |= HBA_AER_ENABLED; 7110 spin_unlock_irq(&phba->hbalock); 7111 } else { 7112 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7113 "2830 This device does not support " 7114 "Advanced Error Reporting (AER)\n"); 7115 phba->cfg_aer_support = 0; 7116 } 7117 rc = 0; 7118 } 7119 7120 /* 7121 * The port is ready, set the host's link state to LINK_DOWN 7122 * in preparation for link interrupts. 
7123 */ 7124 spin_lock_irq(&phba->hbalock); 7125 phba->link_state = LPFC_LINK_DOWN; 7126 spin_unlock_irq(&phba->hbalock); 7127 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7128 (phba->hba_flag & LINK_DISABLED)) { 7129 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7130 "3103 Adapter Link is disabled.\n"); 7131 lpfc_down_link(phba, mboxq); 7132 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7133 if (rc != MBX_SUCCESS) { 7134 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7135 "3104 Adapter failed to issue " 7136 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7137 goto out_unset_queue; 7138 } 7139 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7140 /* don't perform init_link on SLI4 FC port loopback test */ 7141 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7142 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7143 if (rc) 7144 goto out_unset_queue; 7145 } 7146 } 7147 mempool_free(mboxq, phba->mbox_mem_pool); 7148 return rc; 7149 out_unset_queue: 7150 /* Unset all the queues set up in this routine when error out */ 7151 lpfc_sli4_queue_unset(phba); 7152 out_destroy_queue: 7153 lpfc_free_iocb_list(phba); 7154 lpfc_sli4_queue_destroy(phba); 7155 out_stop_timers: 7156 lpfc_stop_hba_timers(phba); 7157 out_free_mbox: 7158 mempool_free(mboxq, phba->mbox_mem_pool); 7159 return rc; 7160 } 7161 7162 /** 7163 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7164 * @ptr: context object - pointer to hba structure. 7165 * 7166 * This is the callback function for mailbox timer. The mailbox 7167 * timer is armed when a new mailbox command is issued and the timer 7168 * is deleted when the mailbox complete. The function is called by 7169 * the kernel timer code when a mailbox does not complete within 7170 * expected time. This function wakes up the worker thread to 7171 * process the mailbox timeout and returns. All the processing is 7172 * done by the worker thread function lpfc_mbox_timeout_handler. 7173 **/ 7174 void 7175 lpfc_mbox_timeout(unsigned long ptr) 7176 { 7177 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 7178 unsigned long iflag; 7179 uint32_t tmo_posted; 7180 7181 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7182 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7183 if (!tmo_posted) 7184 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7185 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7186 7187 if (!tmo_posted) 7188 lpfc_worker_wake_up(phba); 7189 return; 7190 } 7191 7192 /** 7193 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7194 * are pending 7195 * @phba: Pointer to HBA context object. 7196 * 7197 * This function checks if any mailbox completions are present on the mailbox 7198 * completion queue. 
7199 **/ 7200 static bool 7201 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7202 { 7203 7204 uint32_t idx; 7205 struct lpfc_queue *mcq; 7206 struct lpfc_mcqe *mcqe; 7207 bool pending_completions = false; 7208 7209 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7210 return false; 7211 7212 /* Check for completions on mailbox completion queue */ 7213 7214 mcq = phba->sli4_hba.mbx_cq; 7215 idx = mcq->hba_index; 7216 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { 7217 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 7218 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7219 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7220 pending_completions = true; 7221 break; 7222 } 7223 idx = (idx + 1) % mcq->entry_count; 7224 if (mcq->hba_index == idx) 7225 break; 7226 } 7227 return pending_completions; 7228 7229 } 7230 7231 /** 7232 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7233 * that were missed. 7234 * @phba: Pointer to HBA context object. 7235 * 7236 * For sli4, it is possible to miss an interrupt. As such mbox completions 7237 * maybe missed causing erroneous mailbox timeouts to occur. This function 7238 * checks to see if mbox completions are on the mailbox completion queue 7239 * and will process all the completions associated with the eq for the 7240 * mailbox completion queue. 7241 **/ 7242 bool 7243 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7244 { 7245 7246 uint32_t eqidx; 7247 struct lpfc_queue *fpeq = NULL; 7248 struct lpfc_eqe *eqe; 7249 bool mbox_pending; 7250 7251 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7252 return false; 7253 7254 /* Find the eq associated with the mcq */ 7255 7256 if (phba->sli4_hba.hba_eq) 7257 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) 7258 if (phba->sli4_hba.hba_eq[eqidx]->queue_id == 7259 phba->sli4_hba.mbx_cq->assoc_qid) { 7260 fpeq = phba->sli4_hba.hba_eq[eqidx]; 7261 break; 7262 } 7263 if (!fpeq) 7264 return false; 7265 7266 /* Turn off interrupts from this EQ */ 7267 7268 lpfc_sli4_eq_clr_intr(fpeq); 7269 7270 /* Check to see if a mbox completion is pending */ 7271 7272 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7273 7274 /* 7275 * If a mbox completion is pending, process all the events on EQ 7276 * associated with the mbox completion queue (this could include 7277 * mailbox commands, async events, els commands, receive queue data 7278 * and fcp commands) 7279 */ 7280 7281 if (mbox_pending) 7282 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 7283 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 7284 fpeq->EQ_processed++; 7285 } 7286 7287 /* Always clear and re-arm the EQ */ 7288 7289 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 7290 7291 return mbox_pending; 7292 7293 } 7294 7295 /** 7296 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7297 * @phba: Pointer to HBA context object. 7298 * 7299 * This function is called from worker thread when a mailbox command times out. 7300 * The caller is not required to hold any locks. This function will reset the 7301 * HBA and recover all the pending commands. 
7302 **/ 7303 void 7304 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7305 { 7306 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7307 MAILBOX_t *mb = NULL; 7308 7309 struct lpfc_sli *psli = &phba->sli; 7310 7311 /* If the mailbox completed, process the completion and return */ 7312 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7313 return; 7314 7315 if (pmbox != NULL) 7316 mb = &pmbox->u.mb; 7317 /* Check the pmbox pointer first. There is a race condition 7318 * between the mbox timeout handler getting executed in the 7319 * worklist and the mailbox actually completing. When this 7320 * race condition occurs, the mbox_active will be NULL. 7321 */ 7322 spin_lock_irq(&phba->hbalock); 7323 if (pmbox == NULL) { 7324 lpfc_printf_log(phba, KERN_WARNING, 7325 LOG_MBOX | LOG_SLI, 7326 "0353 Active Mailbox cleared - mailbox timeout " 7327 "exiting\n"); 7328 spin_unlock_irq(&phba->hbalock); 7329 return; 7330 } 7331 7332 /* Mbox cmd <mbxCommand> timeout */ 7333 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7334 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7335 mb->mbxCommand, 7336 phba->pport->port_state, 7337 phba->sli.sli_flag, 7338 phba->sli.mbox_active); 7339 spin_unlock_irq(&phba->hbalock); 7340 7341 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7342 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7343 * it to fail all outstanding SCSI IO. 7344 */ 7345 spin_lock_irq(&phba->pport->work_port_lock); 7346 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7347 spin_unlock_irq(&phba->pport->work_port_lock); 7348 spin_lock_irq(&phba->hbalock); 7349 phba->link_state = LPFC_LINK_UNKNOWN; 7350 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7351 spin_unlock_irq(&phba->hbalock); 7352 7353 lpfc_sli_abort_fcp_rings(phba); 7354 7355 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7356 "0345 Resetting board due to mailbox timeout\n"); 7357 7358 /* Reset the HBA device */ 7359 lpfc_reset_hba(phba); 7360 } 7361 7362 /** 7363 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7364 * @phba: Pointer to HBA context object. 7365 * @pmbox: Pointer to mailbox object. 7366 * @flag: Flag indicating how the mailbox need to be processed. 7367 * 7368 * This function is called by discovery code and HBA management code 7369 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7370 * function gets the hbalock to protect the data structures. 7371 * The mailbox command can be submitted in polling mode, in which case 7372 * this function will wait in a polling loop for the completion of the 7373 * mailbox. 7374 * If the mailbox is submitted in no_wait mode (not polling) the 7375 * function will submit the command and returns immediately without waiting 7376 * for the mailbox completion. The no_wait is supported only when HBA 7377 * is in SLI2/SLI3 mode - interrupts are enabled. 7378 * The SLI interface allows only one mailbox pending at a time. If the 7379 * mailbox is issued in polling mode and there is already a mailbox 7380 * pending, then the function will return an error. If the mailbox is issued 7381 * in NO_WAIT mode and there is a mailbox pending already, the function 7382 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7383 * The sli layer owns the mailbox object until the completion of mailbox 7384 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7385 * return codes the caller owns the mailbox command after the return of 7386 * the function. 
7387 **/ 7388 static int 7389 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7390 uint32_t flag) 7391 { 7392 MAILBOX_t *mbx; 7393 struct lpfc_sli *psli = &phba->sli; 7394 uint32_t status, evtctr; 7395 uint32_t ha_copy, hc_copy; 7396 int i; 7397 unsigned long timeout; 7398 unsigned long drvr_flag = 0; 7399 uint32_t word0, ldata; 7400 void __iomem *to_slim; 7401 int processing_queue = 0; 7402 7403 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7404 if (!pmbox) { 7405 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7406 /* processing mbox queue from intr_handler */ 7407 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7408 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7409 return MBX_SUCCESS; 7410 } 7411 processing_queue = 1; 7412 pmbox = lpfc_mbox_get(phba); 7413 if (!pmbox) { 7414 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7415 return MBX_SUCCESS; 7416 } 7417 } 7418 7419 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 7420 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 7421 if(!pmbox->vport) { 7422 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7423 lpfc_printf_log(phba, KERN_ERR, 7424 LOG_MBOX | LOG_VPORT, 7425 "1806 Mbox x%x failed. No vport\n", 7426 pmbox->u.mb.mbxCommand); 7427 dump_stack(); 7428 goto out_not_finished; 7429 } 7430 } 7431 7432 /* If the PCI channel is in offline state, do not post mbox. */ 7433 if (unlikely(pci_channel_offline(phba->pcidev))) { 7434 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7435 goto out_not_finished; 7436 } 7437 7438 /* If HBA has a deferred error attention, fail the iocb. */ 7439 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7440 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7441 goto out_not_finished; 7442 } 7443 7444 psli = &phba->sli; 7445 7446 mbx = &pmbox->u.mb; 7447 status = MBX_SUCCESS; 7448 7449 if (phba->link_state == LPFC_HBA_ERROR) { 7450 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7451 7452 /* Mbox command <mbxCommand> cannot issue */ 7453 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7454 "(%d):0311 Mailbox command x%x cannot " 7455 "issue Data: x%x x%x\n", 7456 pmbox->vport ? pmbox->vport->vpi : 0, 7457 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7458 goto out_not_finished; 7459 } 7460 7461 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 7462 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 7463 !(hc_copy & HC_MBINT_ENA)) { 7464 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7465 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7466 "(%d):2528 Mailbox command x%x cannot " 7467 "issue Data: x%x x%x\n", 7468 pmbox->vport ? pmbox->vport->vpi : 0, 7469 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7470 goto out_not_finished; 7471 } 7472 } 7473 7474 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7475 /* Polling for a mbox command when another one is already active 7476 * is not allowed in SLI. Also, the driver must have established 7477 * SLI2 mode to queue and process multiple mbox commands. 7478 */ 7479 7480 if (flag & MBX_POLL) { 7481 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7482 7483 /* Mbox command <mbxCommand> cannot issue */ 7484 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7485 "(%d):2529 Mailbox command x%x " 7486 "cannot issue Data: x%x x%x\n", 7487 pmbox->vport ? 
pmbox->vport->vpi : 0, 7488 pmbox->u.mb.mbxCommand, 7489 psli->sli_flag, flag); 7490 goto out_not_finished; 7491 } 7492 7493 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 7494 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7495 /* Mbox command <mbxCommand> cannot issue */ 7496 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7497 "(%d):2530 Mailbox command x%x " 7498 "cannot issue Data: x%x x%x\n", 7499 pmbox->vport ? pmbox->vport->vpi : 0, 7500 pmbox->u.mb.mbxCommand, 7501 psli->sli_flag, flag); 7502 goto out_not_finished; 7503 } 7504 7505 /* Another mailbox command is still being processed, queue this 7506 * command to be processed later. 7507 */ 7508 lpfc_mbox_put(phba, pmbox); 7509 7510 /* Mbox cmd issue - BUSY */ 7511 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7512 "(%d):0308 Mbox cmd issue - BUSY Data: " 7513 "x%x x%x x%x x%x\n", 7514 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 7515 mbx->mbxCommand, phba->pport->port_state, 7516 psli->sli_flag, flag); 7517 7518 psli->slistat.mbox_busy++; 7519 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7520 7521 if (pmbox->vport) { 7522 lpfc_debugfs_disc_trc(pmbox->vport, 7523 LPFC_DISC_TRC_MBOX_VPORT, 7524 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 7525 (uint32_t)mbx->mbxCommand, 7526 mbx->un.varWords[0], mbx->un.varWords[1]); 7527 } 7528 else { 7529 lpfc_debugfs_disc_trc(phba->pport, 7530 LPFC_DISC_TRC_MBOX, 7531 "MBOX Bsy: cmd:x%x mb:x%x x%x", 7532 (uint32_t)mbx->mbxCommand, 7533 mbx->un.varWords[0], mbx->un.varWords[1]); 7534 } 7535 7536 return MBX_BUSY; 7537 } 7538 7539 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7540 7541 /* If we are not polling, we MUST be in SLI2 mode */ 7542 if (flag != MBX_POLL) { 7543 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 7544 (mbx->mbxCommand != MBX_KILL_BOARD)) { 7545 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7546 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7547 /* Mbox command <mbxCommand> cannot issue */ 7548 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7549 "(%d):2531 Mailbox command x%x " 7550 "cannot issue Data: x%x x%x\n", 7551 pmbox->vport ? pmbox->vport->vpi : 0, 7552 pmbox->u.mb.mbxCommand, 7553 psli->sli_flag, flag); 7554 goto out_not_finished; 7555 } 7556 /* timeout active mbox command */ 7557 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7558 1000); 7559 mod_timer(&psli->mbox_tmo, jiffies + timeout); 7560 } 7561 7562 /* Mailbox cmd <cmd> issue */ 7563 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7564 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 7565 "x%x\n", 7566 pmbox->vport ? pmbox->vport->vpi : 0, 7567 mbx->mbxCommand, phba->pport->port_state, 7568 psli->sli_flag, flag); 7569 7570 if (mbx->mbxCommand != MBX_HEARTBEAT) { 7571 if (pmbox->vport) { 7572 lpfc_debugfs_disc_trc(pmbox->vport, 7573 LPFC_DISC_TRC_MBOX_VPORT, 7574 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7575 (uint32_t)mbx->mbxCommand, 7576 mbx->un.varWords[0], mbx->un.varWords[1]); 7577 } 7578 else { 7579 lpfc_debugfs_disc_trc(phba->pport, 7580 LPFC_DISC_TRC_MBOX, 7581 "MBOX Send: cmd:x%x mb:x%x x%x", 7582 (uint32_t)mbx->mbxCommand, 7583 mbx->un.varWords[0], mbx->un.varWords[1]); 7584 } 7585 } 7586 7587 psli->slistat.mbox_cmd++; 7588 evtctr = psli->slistat.mbox_event; 7589 7590 /* next set own bit for the adapter and copy over command word */ 7591 mbx->mbxOwner = OWN_CHIP; 7592 7593 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7594 /* Populate mbox extension offset word. 
*/ 7595 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 7596 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7597 = (uint8_t *)phba->mbox_ext 7598 - (uint8_t *)phba->mbox; 7599 } 7600 7601 /* Copy the mailbox extension data */ 7602 if (pmbox->in_ext_byte_len && pmbox->context2) { 7603 lpfc_sli_pcimem_bcopy(pmbox->context2, 7604 (uint8_t *)phba->mbox_ext, 7605 pmbox->in_ext_byte_len); 7606 } 7607 /* Copy command data to host SLIM area */ 7608 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7609 } else { 7610 /* Populate mbox extension offset word. */ 7611 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 7612 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7613 = MAILBOX_HBA_EXT_OFFSET; 7614 7615 /* Copy the mailbox extension data */ 7616 if (pmbox->in_ext_byte_len && pmbox->context2) 7617 lpfc_memcpy_to_slim(phba->MBslimaddr + 7618 MAILBOX_HBA_EXT_OFFSET, 7619 pmbox->context2, pmbox->in_ext_byte_len); 7620 7621 if (mbx->mbxCommand == MBX_CONFIG_PORT) 7622 /* copy command data into host mbox for cmpl */ 7623 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 7624 MAILBOX_CMD_SIZE); 7625 7626 /* First copy mbox command data to HBA SLIM, skip past first 7627 word */ 7628 to_slim = phba->MBslimaddr + sizeof (uint32_t); 7629 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 7630 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 7631 7632 /* Next copy over first word, with mbxOwner set */ 7633 ldata = *((uint32_t *)mbx); 7634 to_slim = phba->MBslimaddr; 7635 writel(ldata, to_slim); 7636 readl(to_slim); /* flush */ 7637 7638 if (mbx->mbxCommand == MBX_CONFIG_PORT) 7639 /* switch over to host mailbox */ 7640 psli->sli_flag |= LPFC_SLI_ACTIVE; 7641 } 7642 7643 wmb(); 7644 7645 switch (flag) { 7646 case MBX_NOWAIT: 7647 /* Set up reference to mailbox command */ 7648 psli->mbox_active = pmbox; 7649 /* Interrupt board to do it */ 7650 writel(CA_MBATT, phba->CAregaddr); 7651 readl(phba->CAregaddr); /* flush */ 7652 /* Don't wait for it to finish, just return */ 7653 break; 7654 7655 case MBX_POLL: 7656 /* Set up null reference to mailbox command */ 7657 psli->mbox_active = NULL; 7658 /* Interrupt board to do it */ 7659 writel(CA_MBATT, phba->CAregaddr); 7660 readl(phba->CAregaddr); /* flush */ 7661 7662 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7663 /* First read mbox status word */ 7664 word0 = *((uint32_t *)phba->mbox); 7665 word0 = le32_to_cpu(word0); 7666 } else { 7667 /* First read mbox status word */ 7668 if (lpfc_readl(phba->MBslimaddr, &word0)) { 7669 spin_unlock_irqrestore(&phba->hbalock, 7670 drvr_flag); 7671 goto out_not_finished; 7672 } 7673 } 7674 7675 /* Read the HBA Host Attention Register */ 7676 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7677 spin_unlock_irqrestore(&phba->hbalock, 7678 drvr_flag); 7679 goto out_not_finished; 7680 } 7681 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7682 1000) + jiffies; 7683 i = 0; 7684 /* Wait for command to complete */ 7685 while (((word0 & OWN_CHIP) == OWN_CHIP) || 7686 (!(ha_copy & HA_MBATT) && 7687 (phba->link_state > LPFC_WARM_START))) { 7688 if (time_after(jiffies, timeout)) { 7689 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7690 spin_unlock_irqrestore(&phba->hbalock, 7691 drvr_flag); 7692 goto out_not_finished; 7693 } 7694 7695 /* Check if we took a mbox interrupt while we were 7696 polling */ 7697 if (((word0 & OWN_CHIP) != OWN_CHIP) 7698 && (evtctr != psli->slistat.mbox_event)) 7699 break; 7700 7701 if (i++ > 10) { 7702 spin_unlock_irqrestore(&phba->hbalock, 7703 drvr_flag); 7704 msleep(1); 7705 spin_lock_irqsave(&phba->hbalock, 
drvr_flag); 7706 } 7707 7708 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7709 /* First copy command data */ 7710 word0 = *((uint32_t *)phba->mbox); 7711 word0 = le32_to_cpu(word0); 7712 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7713 MAILBOX_t *slimmb; 7714 uint32_t slimword0; 7715 /* Check real SLIM for any errors */ 7716 slimword0 = readl(phba->MBslimaddr); 7717 slimmb = (MAILBOX_t *) & slimword0; 7718 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 7719 && slimmb->mbxStatus) { 7720 psli->sli_flag &= 7721 ~LPFC_SLI_ACTIVE; 7722 word0 = slimword0; 7723 } 7724 } 7725 } else { 7726 /* First copy command data */ 7727 word0 = readl(phba->MBslimaddr); 7728 } 7729 /* Read the HBA Host Attention Register */ 7730 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7731 spin_unlock_irqrestore(&phba->hbalock, 7732 drvr_flag); 7733 goto out_not_finished; 7734 } 7735 } 7736 7737 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7738 /* copy results back to user */ 7739 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 7740 MAILBOX_CMD_SIZE); 7741 /* Copy the mailbox extension data */ 7742 if (pmbox->out_ext_byte_len && pmbox->context2) { 7743 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 7744 pmbox->context2, 7745 pmbox->out_ext_byte_len); 7746 } 7747 } else { 7748 /* First copy command data */ 7749 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 7750 MAILBOX_CMD_SIZE); 7751 /* Copy the mailbox extension data */ 7752 if (pmbox->out_ext_byte_len && pmbox->context2) { 7753 lpfc_memcpy_from_slim(pmbox->context2, 7754 phba->MBslimaddr + 7755 MAILBOX_HBA_EXT_OFFSET, 7756 pmbox->out_ext_byte_len); 7757 } 7758 } 7759 7760 writel(HA_MBATT, phba->HAregaddr); 7761 readl(phba->HAregaddr); /* flush */ 7762 7763 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7764 status = mbx->mbxStatus; 7765 } 7766 7767 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7768 return status; 7769 7770 out_not_finished: 7771 if (processing_queue) { 7772 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 7773 lpfc_mbox_cmpl_put(phba, pmbox); 7774 } 7775 return MBX_NOT_FINISHED; 7776 } 7777 7778 /** 7779 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 7780 * @phba: Pointer to HBA context object. 7781 * 7782 * The function blocks the posting of SLI4 asynchronous mailbox commands from 7783 * the driver internal pending mailbox queue. It will then try to wait out the 7784 * possible outstanding mailbox command before return. 7785 * 7786 * Returns: 7787 * 0 - the outstanding mailbox command completed; otherwise, the wait for 7788 * the outstanding mailbox command timed out. 7789 **/ 7790 static int 7791 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 7792 { 7793 struct lpfc_sli *psli = &phba->sli; 7794 int rc = 0; 7795 unsigned long timeout = 0; 7796 7797 /* Mark the asynchronous mailbox command posting as blocked */ 7798 spin_lock_irq(&phba->hbalock); 7799 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7800 /* Determine how long we might wait for the active mailbox 7801 * command to be gracefully completed by firmware. 
7802 */
7803 if (phba->sli.mbox_active)
7804 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7805 phba->sli.mbox_active) *
7806 1000) + jiffies;
7807 spin_unlock_irq(&phba->hbalock);
7808 
7809 /* Make sure the mailbox is really active */
7810 if (timeout)
7811 lpfc_sli4_process_missed_mbox_completions(phba);
7812 
7813 /* Wait for the outstanding mailbox command to complete */
7814 while (phba->sli.mbox_active) {
7815 /* Check active mailbox complete status every 2ms */
7816 msleep(2);
7817 if (time_after(jiffies, timeout)) {
7818 /* Timeout, mark the outstanding cmd as not complete */
7819 rc = 1;
7820 break;
7821 }
7822 }
7823 
7824 /* Could not cleanly block async mailbox commands; undo the block and fail */
7825 if (rc) {
7826 spin_lock_irq(&phba->hbalock);
7827 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7828 spin_unlock_irq(&phba->hbalock);
7829 }
7830 return rc;
7831 }
7832 
7833 /**
7834 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox commands
7835 * @phba: Pointer to HBA context object.
7836 *
7837 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
7838 * commands from the driver internal pending mailbox queue. It makes sure
7839 * that there is no outstanding mailbox command before resuming posting
7840 * asynchronous mailbox commands. If, for any reason, there is an outstanding
7841 * mailbox command, it will try to wait it out before resuming asynchronous
7842 * mailbox command posting.
7843 **/
7844 static void
7845 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7846 {
7847 struct lpfc_sli *psli = &phba->sli;
7848 
7849 spin_lock_irq(&phba->hbalock);
7850 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7851 /* Asynchronous mailbox posting is not blocked, do nothing */
7852 spin_unlock_irq(&phba->hbalock);
7853 return;
7854 }
7855 
7856 /* The outstanding synchronous mailbox command is guaranteed to be done,
7857 * either successfully or by timeout; after a timeout the outstanding
7858 * mailbox command is always removed, so just unblock posting of async
7859 * mailbox commands and resume.
7860 */
7861 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7862 spin_unlock_irq(&phba->hbalock);
7863 
7864 /* wake up worker thread to post asynchronous mailbox commands */
7865 lpfc_worker_wake_up(phba);
7866 }
7867 
7868 /**
7869 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7870 * @phba: Pointer to HBA context object.
7871 * @mboxq: Pointer to mailbox object.
7872 *
7873 * The function waits for the bootstrap mailbox register ready bit from
7874 * the port for twice the regular mailbox command timeout value.
7875 *
7876 * 0 - no timeout on waiting for bootstrap mailbox register ready.
7877 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7878 **/
7879 static int
7880 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7881 {
7882 uint32_t db_ready;
7883 unsigned long timeout;
7884 struct lpfc_register bmbx_reg;
7885 
7886 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7887 * 1000) + jiffies;
7888 
7889 do {
7890 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7891 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7892 if (!db_ready)
7893 msleep(2);
7894 
7895 if (time_after(jiffies, timeout))
7896 return MBXERR_ERROR;
7897 } while (!db_ready);
7898 
7899 return 0;
7900 }
7901 
7902 /**
7903 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7904 * @phba: Pointer to HBA context object.
7905 * @mboxq: Pointer to mailbox object.
7906 *
7907 * The function posts a mailbox to the port.
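 *
 * (In outline, the bootstrap handshake performed below is, roughly:
 *
 *	lpfc_sli4_wait_bmbx_ready(phba, mboxq);			// port ready
 *	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
 *	lpfc_sli4_wait_bmbx_ready(phba, mboxq);			// hi address taken
 *	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
 *	lpfc_sli4_wait_bmbx_ready(phba, mboxq);			// low address taken
 *
 * after which the MQE and MCQE are copied back out of the bootstrap
 * region and the completion status is checked.)
 *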
The mailbox is expected
7908 * to be completely filled in and ready for the port to operate on it.
7909 * This routine executes a synchronous completion operation on the
7910 * mailbox by polling for its completion.
7911 *
7912 * The caller must not be holding any locks when calling this routine.
7913 *
7914 * Returns:
7915 * MBX_SUCCESS - mailbox posted successfully
7916 * Any of the MBX error values.
7917 **/
7918 static int
7919 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7920 {
7921 int rc = MBX_SUCCESS;
7922 unsigned long iflag;
7923 uint32_t mcqe_status;
7924 uint32_t mbx_cmnd;
7925 struct lpfc_sli *psli = &phba->sli;
7926 struct lpfc_mqe *mb = &mboxq->u.mqe;
7927 struct lpfc_bmbx_create *mbox_rgn;
7928 struct dma_address *dma_address;
7929 
7930 /*
7931 * Only one mailbox can be active to the bootstrap mailbox region
7932 * at a time and there is no queueing provided.
7933 */
7934 spin_lock_irqsave(&phba->hbalock, iflag);
7935 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7936 spin_unlock_irqrestore(&phba->hbalock, iflag);
7937 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7938 "(%d):2532 Mailbox command x%x (x%x/x%x) "
7939 "cannot issue Data: x%x x%x\n",
7940 mboxq->vport ? mboxq->vport->vpi : 0,
7941 mboxq->u.mb.mbxCommand,
7942 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7943 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7944 psli->sli_flag, MBX_POLL);
7945 return MBXERR_ERROR;
7946 }
7947 /* The server grabs the token and owns it until release */
7948 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7949 phba->sli.mbox_active = mboxq;
7950 spin_unlock_irqrestore(&phba->hbalock, iflag);
7951 
7952 /* wait for the bootstrap mbox register to become ready */
7953 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7954 if (rc)
7955 goto exit;
7956 
7957 /*
7958 * Initialize the bootstrap memory region to avoid stale data areas
7959 * in the mailbox post. Then copy the caller's mailbox contents to
7960 * the bmbx mailbox region.
7961 */
7962 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7963 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7964 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7965 sizeof(struct lpfc_mqe));
7966 
7967 /* Post the high mailbox dma address to the port and wait for ready. */
7968 dma_address = &phba->sli4_hba.bmbx.dma_address;
7969 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7970 
7971 /* wait for the bootstrap mbox register hi-address write to complete */
7972 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7973 if (rc)
7974 goto exit;
7975 
7976 /* Post the low mailbox dma address to the port. */
7977 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7978 
7979 /* wait for the bootstrap mbox register low-address write to complete */
7980 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7981 if (rc)
7982 goto exit;
7983 
7984 /*
7985 * Read the CQ to ensure the mailbox has completed.
7986 * If so, update the mailbox status so that the upper layers
7987 * can complete the request normally.
7988 */
7989 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7990 sizeof(struct lpfc_mqe));
7991 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7992 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7993 sizeof(struct lpfc_mcqe));
7994 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7995 /*
7996 * When the CQE status indicates a failure and the mailbox status
7997 * indicates success then copy the CQE status into the mailbox status
7998 * (and prefix it with x4000).
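 *
 * For example (illustrative values, assuming LPFC_MBX_ERROR_RANGE is the
 * 0x4000 prefix mentioned above): an MCQE status of 0x2 paired with an
 * MQE status of MBX_SUCCESS is reported back as 0x4000 | 0x2 = 0x4002 via
 *
 *	bf_set(lpfc_mqe_status, mb, (LPFC_MBX_ERROR_RANGE | mcqe_status));
 *
 * and the routine then returns MBXERR_ERROR.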
7999 */ 8000 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8001 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8002 bf_set(lpfc_mqe_status, mb, 8003 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8004 rc = MBXERR_ERROR; 8005 } else 8006 lpfc_sli4_swap_str(phba, mboxq); 8007 8008 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8009 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8010 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8011 " x%x x%x CQ: x%x x%x x%x x%x\n", 8012 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8013 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8014 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8015 bf_get(lpfc_mqe_status, mb), 8016 mb->un.mb_words[0], mb->un.mb_words[1], 8017 mb->un.mb_words[2], mb->un.mb_words[3], 8018 mb->un.mb_words[4], mb->un.mb_words[5], 8019 mb->un.mb_words[6], mb->un.mb_words[7], 8020 mb->un.mb_words[8], mb->un.mb_words[9], 8021 mb->un.mb_words[10], mb->un.mb_words[11], 8022 mb->un.mb_words[12], mboxq->mcqe.word0, 8023 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8024 mboxq->mcqe.trailer); 8025 exit: 8026 /* We are holding the token, no needed for lock when release */ 8027 spin_lock_irqsave(&phba->hbalock, iflag); 8028 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8029 phba->sli.mbox_active = NULL; 8030 spin_unlock_irqrestore(&phba->hbalock, iflag); 8031 return rc; 8032 } 8033 8034 /** 8035 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8036 * @phba: Pointer to HBA context object. 8037 * @pmbox: Pointer to mailbox object. 8038 * @flag: Flag indicating how the mailbox need to be processed. 8039 * 8040 * This function is called by discovery code and HBA management code to submit 8041 * a mailbox command to firmware with SLI-4 interface spec. 8042 * 8043 * Return codes the caller owns the mailbox command after the return of the 8044 * function. 8045 **/ 8046 static int 8047 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8048 uint32_t flag) 8049 { 8050 struct lpfc_sli *psli = &phba->sli; 8051 unsigned long iflags; 8052 int rc; 8053 8054 /* dump from issue mailbox command if setup */ 8055 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8056 8057 rc = lpfc_mbox_dev_check(phba); 8058 if (unlikely(rc)) { 8059 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8060 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8061 "cannot issue Data: x%x x%x\n", 8062 mboxq->vport ? mboxq->vport->vpi : 0, 8063 mboxq->u.mb.mbxCommand, 8064 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8065 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8066 psli->sli_flag, flag); 8067 goto out_not_finished; 8068 } 8069 8070 /* Detect polling mode and jump to a handler */ 8071 if (!phba->sli4_hba.intr_enable) { 8072 if (flag == MBX_POLL) 8073 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8074 else 8075 rc = -EIO; 8076 if (rc != MBX_SUCCESS) 8077 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8078 "(%d):2541 Mailbox command x%x " 8079 "(x%x/x%x) failure: " 8080 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8081 "Data: x%x x%x\n,", 8082 mboxq->vport ? 
mboxq->vport->vpi : 0, 8083 mboxq->u.mb.mbxCommand, 8084 lpfc_sli_config_mbox_subsys_get(phba, 8085 mboxq), 8086 lpfc_sli_config_mbox_opcode_get(phba, 8087 mboxq), 8088 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8089 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8090 bf_get(lpfc_mcqe_ext_status, 8091 &mboxq->mcqe), 8092 psli->sli_flag, flag); 8093 return rc; 8094 } else if (flag == MBX_POLL) { 8095 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8096 "(%d):2542 Try to issue mailbox command " 8097 "x%x (x%x/x%x) synchronously ahead of async" 8098 "mailbox command queue: x%x x%x\n", 8099 mboxq->vport ? mboxq->vport->vpi : 0, 8100 mboxq->u.mb.mbxCommand, 8101 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8102 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8103 psli->sli_flag, flag); 8104 /* Try to block the asynchronous mailbox posting */ 8105 rc = lpfc_sli4_async_mbox_block(phba); 8106 if (!rc) { 8107 /* Successfully blocked, now issue sync mbox cmd */ 8108 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8109 if (rc != MBX_SUCCESS) 8110 lpfc_printf_log(phba, KERN_WARNING, 8111 LOG_MBOX | LOG_SLI, 8112 "(%d):2597 Sync Mailbox command " 8113 "x%x (x%x/x%x) failure: " 8114 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8115 "Data: x%x x%x\n,", 8116 mboxq->vport ? mboxq->vport->vpi : 0, 8117 mboxq->u.mb.mbxCommand, 8118 lpfc_sli_config_mbox_subsys_get(phba, 8119 mboxq), 8120 lpfc_sli_config_mbox_opcode_get(phba, 8121 mboxq), 8122 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8123 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8124 bf_get(lpfc_mcqe_ext_status, 8125 &mboxq->mcqe), 8126 psli->sli_flag, flag); 8127 /* Unblock the async mailbox posting afterward */ 8128 lpfc_sli4_async_mbox_unblock(phba); 8129 } 8130 return rc; 8131 } 8132 8133 /* Now, interrupt mode asynchrous mailbox command */ 8134 rc = lpfc_mbox_cmd_check(phba, mboxq); 8135 if (rc) { 8136 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8137 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8138 "cannot issue Data: x%x x%x\n", 8139 mboxq->vport ? mboxq->vport->vpi : 0, 8140 mboxq->u.mb.mbxCommand, 8141 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8142 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8143 psli->sli_flag, flag); 8144 goto out_not_finished; 8145 } 8146 8147 /* Put the mailbox command to the driver internal FIFO */ 8148 psli->slistat.mbox_busy++; 8149 spin_lock_irqsave(&phba->hbalock, iflags); 8150 lpfc_mbox_put(phba, mboxq); 8151 spin_unlock_irqrestore(&phba->hbalock, iflags); 8152 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8153 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8154 "x%x (x%x/x%x) x%x x%x x%x\n", 8155 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8156 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8157 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8158 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8159 phba->pport->port_state, 8160 psli->sli_flag, MBX_NOWAIT); 8161 /* Wake up worker thread to transport mailbox command from head */ 8162 lpfc_worker_wake_up(phba); 8163 8164 return MBX_BUSY; 8165 8166 out_not_finished: 8167 return MBX_NOT_FINISHED; 8168 } 8169 8170 /** 8171 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8172 * @phba: Pointer to HBA context object. 8173 * 8174 * This function is called by worker thread to send a mailbox command to 8175 * SLI4 HBA firmware. 
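 *
 * A condensed sketch of the gating performed below before anything is
 * posted (in the real code each test is made separately under hbalock):
 *
 *	if (!phba->sli4_hba.intr_enable ||
 *	    (psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK) ||
 *	    (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) ||
 *	    phba->sli.mbox_active)
 *		return MBX_NOT_FINISHED;	// not ready or already busy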
8176 * 8177 **/ 8178 int 8179 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8180 { 8181 struct lpfc_sli *psli = &phba->sli; 8182 LPFC_MBOXQ_t *mboxq; 8183 int rc = MBX_SUCCESS; 8184 unsigned long iflags; 8185 struct lpfc_mqe *mqe; 8186 uint32_t mbx_cmnd; 8187 8188 /* Check interrupt mode before post async mailbox command */ 8189 if (unlikely(!phba->sli4_hba.intr_enable)) 8190 return MBX_NOT_FINISHED; 8191 8192 /* Check for mailbox command service token */ 8193 spin_lock_irqsave(&phba->hbalock, iflags); 8194 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8195 spin_unlock_irqrestore(&phba->hbalock, iflags); 8196 return MBX_NOT_FINISHED; 8197 } 8198 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8199 spin_unlock_irqrestore(&phba->hbalock, iflags); 8200 return MBX_NOT_FINISHED; 8201 } 8202 if (unlikely(phba->sli.mbox_active)) { 8203 spin_unlock_irqrestore(&phba->hbalock, iflags); 8204 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8205 "0384 There is pending active mailbox cmd\n"); 8206 return MBX_NOT_FINISHED; 8207 } 8208 /* Take the mailbox command service token */ 8209 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8210 8211 /* Get the next mailbox command from head of queue */ 8212 mboxq = lpfc_mbox_get(phba); 8213 8214 /* If no more mailbox command waiting for post, we're done */ 8215 if (!mboxq) { 8216 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8217 spin_unlock_irqrestore(&phba->hbalock, iflags); 8218 return MBX_SUCCESS; 8219 } 8220 phba->sli.mbox_active = mboxq; 8221 spin_unlock_irqrestore(&phba->hbalock, iflags); 8222 8223 /* Check device readiness for posting mailbox command */ 8224 rc = lpfc_mbox_dev_check(phba); 8225 if (unlikely(rc)) 8226 /* Driver clean routine will clean up pending mailbox */ 8227 goto out_not_finished; 8228 8229 /* Prepare the mbox command to be posted */ 8230 mqe = &mboxq->u.mqe; 8231 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8232 8233 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8234 mod_timer(&psli->mbox_tmo, (jiffies + 8235 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8236 8237 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8238 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8239 "x%x x%x\n", 8240 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8241 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8242 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8243 phba->pport->port_state, psli->sli_flag); 8244 8245 if (mbx_cmnd != MBX_HEARTBEAT) { 8246 if (mboxq->vport) { 8247 lpfc_debugfs_disc_trc(mboxq->vport, 8248 LPFC_DISC_TRC_MBOX_VPORT, 8249 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8250 mbx_cmnd, mqe->un.mb_words[0], 8251 mqe->un.mb_words[1]); 8252 } else { 8253 lpfc_debugfs_disc_trc(phba->pport, 8254 LPFC_DISC_TRC_MBOX, 8255 "MBOX Send: cmd:x%x mb:x%x x%x", 8256 mbx_cmnd, mqe->un.mb_words[0], 8257 mqe->un.mb_words[1]); 8258 } 8259 } 8260 psli->slistat.mbox_cmd++; 8261 8262 /* Post the mailbox command to the port */ 8263 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8264 if (rc != MBX_SUCCESS) { 8265 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8266 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8267 "cannot issue Data: x%x x%x\n", 8268 mboxq->vport ? 
mboxq->vport->vpi : 0, 8269 mboxq->u.mb.mbxCommand, 8270 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8271 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8272 psli->sli_flag, MBX_NOWAIT); 8273 goto out_not_finished; 8274 } 8275 8276 return rc; 8277 8278 out_not_finished: 8279 spin_lock_irqsave(&phba->hbalock, iflags); 8280 if (phba->sli.mbox_active) { 8281 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8282 __lpfc_mbox_cmpl_put(phba, mboxq); 8283 /* Release the token */ 8284 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8285 phba->sli.mbox_active = NULL; 8286 } 8287 spin_unlock_irqrestore(&phba->hbalock, iflags); 8288 8289 return MBX_NOT_FINISHED; 8290 } 8291 8292 /** 8293 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8294 * @phba: Pointer to HBA context object. 8295 * @pmbox: Pointer to mailbox object. 8296 * @flag: Flag indicating how the mailbox need to be processed. 8297 * 8298 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8299 * the API jump table function pointer from the lpfc_hba struct. 8300 * 8301 * Return codes the caller owns the mailbox command after the return of the 8302 * function. 8303 **/ 8304 int 8305 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8306 { 8307 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8308 } 8309 8310 /** 8311 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8312 * @phba: The hba struct for which this call is being executed. 8313 * @dev_grp: The HBA PCI-Device group number. 8314 * 8315 * This routine sets up the mbox interface API function jump table in @phba 8316 * struct. 8317 * Returns: 0 - success, -ENODEV - failure. 8318 **/ 8319 int 8320 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8321 { 8322 8323 switch (dev_grp) { 8324 case LPFC_PCI_DEV_LP: 8325 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8326 phba->lpfc_sli_handle_slow_ring_event = 8327 lpfc_sli_handle_slow_ring_event_s3; 8328 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8329 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8330 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8331 break; 8332 case LPFC_PCI_DEV_OC: 8333 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8334 phba->lpfc_sli_handle_slow_ring_event = 8335 lpfc_sli_handle_slow_ring_event_s4; 8336 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8337 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8338 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8339 break; 8340 default: 8341 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8342 "1420 Invalid HBA PCI-device group: 0x%x\n", 8343 dev_grp); 8344 return -ENODEV; 8345 break; 8346 } 8347 return 0; 8348 } 8349 8350 /** 8351 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8352 * @phba: Pointer to HBA context object. 8353 * @pring: Pointer to driver SLI ring object. 8354 * @piocb: Pointer to address of newly added command iocb. 8355 * 8356 * This function is called with hbalock held to add a command 8357 * iocb to the txq when SLI layer cannot submit the command iocb 8358 * to the ring. 8359 **/ 8360 void 8361 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8362 struct lpfc_iocbq *piocb) 8363 { 8364 lockdep_assert_held(&phba->hbalock); 8365 /* Insert the caller's iocb in the txq tail for later processing. */ 8366 list_add_tail(&piocb->list, &pring->txq); 8367 } 8368 8369 /** 8370 * lpfc_sli_next_iocb - Get the next iocb in the txq 8371 * @phba: Pointer to HBA context object. 
8372 * @pring: Pointer to driver SLI ring object. 8373 * @piocb: Pointer to address of newly added command iocb. 8374 * 8375 * This function is called with hbalock held before a new 8376 * iocb is submitted to the firmware. This function checks 8377 * txq to flush the iocbs in txq to Firmware before 8378 * submitting new iocbs to the Firmware. 8379 * If there are iocbs in the txq which need to be submitted 8380 * to firmware, lpfc_sli_next_iocb returns the first element 8381 * of the txq after dequeuing it from txq. 8382 * If there is no iocb in the txq then the function will return 8383 * *piocb and *piocb is set to NULL. Caller needs to check 8384 * *piocb to find if there are more commands in the txq. 8385 **/ 8386 static struct lpfc_iocbq * 8387 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8388 struct lpfc_iocbq **piocb) 8389 { 8390 struct lpfc_iocbq * nextiocb; 8391 8392 lockdep_assert_held(&phba->hbalock); 8393 8394 nextiocb = lpfc_sli_ringtx_get(phba, pring); 8395 if (!nextiocb) { 8396 nextiocb = *piocb; 8397 *piocb = NULL; 8398 } 8399 8400 return nextiocb; 8401 } 8402 8403 /** 8404 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 8405 * @phba: Pointer to HBA context object. 8406 * @ring_number: SLI ring number to issue iocb on. 8407 * @piocb: Pointer to command iocb. 8408 * @flag: Flag indicating if this command can be put into txq. 8409 * 8410 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 8411 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 8412 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 8413 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 8414 * this function allows only iocbs for posting buffers. This function finds 8415 * next available slot in the command ring and posts the command to the 8416 * available slot and writes the port attention register to request HBA start 8417 * processing new iocb. If there is no slot available in the ring and 8418 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 8419 * the function returns IOCB_BUSY. 8420 * 8421 * This function is called with hbalock held. The function will return success 8422 * after it successfully submit the iocb to firmware or after adding to the 8423 * txq. 8424 **/ 8425 static int 8426 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 8427 struct lpfc_iocbq *piocb, uint32_t flag) 8428 { 8429 struct lpfc_iocbq *nextiocb; 8430 IOCB_t *iocb; 8431 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 8432 8433 lockdep_assert_held(&phba->hbalock); 8434 8435 if (piocb->iocb_cmpl && (!piocb->vport) && 8436 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 8437 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 8438 lpfc_printf_log(phba, KERN_ERR, 8439 LOG_SLI | LOG_VPORT, 8440 "1807 IOCB x%x failed. No vport\n", 8441 piocb->iocb.ulpCommand); 8442 dump_stack(); 8443 return IOCB_ERROR; 8444 } 8445 8446 8447 /* If the PCI channel is in offline state, do not post iocbs. */ 8448 if (unlikely(pci_channel_offline(phba->pcidev))) 8449 return IOCB_ERROR; 8450 8451 /* If HBA has a deferred error attention, fail the iocb. 
*/ 8452 if (unlikely(phba->hba_flag & DEFER_ERATT)) 8453 return IOCB_ERROR; 8454 8455 /* 8456 * We should never get an IOCB if we are in a < LINK_DOWN state 8457 */ 8458 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8459 return IOCB_ERROR; 8460 8461 /* 8462 * Check to see if we are blocking IOCB processing because of a 8463 * outstanding event. 8464 */ 8465 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 8466 goto iocb_busy; 8467 8468 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 8469 /* 8470 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 8471 * can be issued if the link is not up. 8472 */ 8473 switch (piocb->iocb.ulpCommand) { 8474 case CMD_GEN_REQUEST64_CR: 8475 case CMD_GEN_REQUEST64_CX: 8476 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 8477 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 8478 FC_RCTL_DD_UNSOL_CMD) || 8479 (piocb->iocb.un.genreq64.w5.hcsw.Type != 8480 MENLO_TRANSPORT_TYPE)) 8481 8482 goto iocb_busy; 8483 break; 8484 case CMD_QUE_RING_BUF_CN: 8485 case CMD_QUE_RING_BUF64_CN: 8486 /* 8487 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 8488 * completion, iocb_cmpl MUST be 0. 8489 */ 8490 if (piocb->iocb_cmpl) 8491 piocb->iocb_cmpl = NULL; 8492 /*FALLTHROUGH*/ 8493 case CMD_CREATE_XRI_CR: 8494 case CMD_CLOSE_XRI_CN: 8495 case CMD_CLOSE_XRI_CX: 8496 break; 8497 default: 8498 goto iocb_busy; 8499 } 8500 8501 /* 8502 * For FCP commands, we must be in a state where we can process link 8503 * attention events. 8504 */ 8505 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 8506 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 8507 goto iocb_busy; 8508 } 8509 8510 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 8511 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 8512 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 8513 8514 if (iocb) 8515 lpfc_sli_update_ring(phba, pring); 8516 else 8517 lpfc_sli_update_full_ring(phba, pring); 8518 8519 if (!piocb) 8520 return IOCB_SUCCESS; 8521 8522 goto out_busy; 8523 8524 iocb_busy: 8525 pring->stats.iocb_cmd_delay++; 8526 8527 out_busy: 8528 8529 if (!(flag & SLI_IOCB_RET_IOCB)) { 8530 __lpfc_sli_ringtx_put(phba, pring, piocb); 8531 return IOCB_SUCCESS; 8532 } 8533 8534 return IOCB_BUSY; 8535 } 8536 8537 /** 8538 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 8539 * @phba: Pointer to HBA context object. 8540 * @piocb: Pointer to command iocb. 8541 * @sglq: Pointer to the scatter gather queue object. 8542 * 8543 * This routine converts the bpl or bde that is in the IOCB 8544 * to a sgl list for the sli4 hardware. The physical address 8545 * of the bpl/bde is converted back to a virtual address. 8546 * If the IOCB contains a BPL then the list of BDE's is 8547 * converted to sli4_sge's. If the IOCB contains a single 8548 * BDE then it is converted to a single sli_sge. 8549 * The IOCB is still in cpu endianess so the contents of 8550 * the bpl can be used without byte swapping. 8551 * 8552 * Returns valid XRI = Success, NO_XRI = Failure. 
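 *
 * A simplified sketch of the per-entry conversion done for a BPL
 * (illustration only; the real loop below also handles the word2
 * endian swaps and the request vs. reply offset accounting for
 * CMD_GEN_REQUEST64_CR):
 *
 *	for (i = 0; i < numBdes; i++, bpl++, sgl++) {
 *		sgl->addr_hi = bpl->addrHigh;	// already little endian
 *		sgl->addr_lo = bpl->addrLow;
 *		bde.tus.w    = le32_to_cpu(bpl->tus.w);
 *		sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *		bf_set(lpfc_sli4_sge_last, sgl, (i + 1) == numBdes);
 *	}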
8553 **/ 8554 static uint16_t 8555 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 8556 struct lpfc_sglq *sglq) 8557 { 8558 uint16_t xritag = NO_XRI; 8559 struct ulp_bde64 *bpl = NULL; 8560 struct ulp_bde64 bde; 8561 struct sli4_sge *sgl = NULL; 8562 struct lpfc_dmabuf *dmabuf; 8563 IOCB_t *icmd; 8564 int numBdes = 0; 8565 int i = 0; 8566 uint32_t offset = 0; /* accumulated offset in the sg request list */ 8567 int inbound = 0; /* number of sg reply entries inbound from firmware */ 8568 8569 if (!piocbq || !sglq) 8570 return xritag; 8571 8572 sgl = (struct sli4_sge *)sglq->sgl; 8573 icmd = &piocbq->iocb; 8574 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 8575 return sglq->sli4_xritag; 8576 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8577 numBdes = icmd->un.genreq64.bdl.bdeSize / 8578 sizeof(struct ulp_bde64); 8579 /* The addrHigh and addrLow fields within the IOCB 8580 * have not been byteswapped yet so there is no 8581 * need to swap them back. 8582 */ 8583 if (piocbq->context3) 8584 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 8585 else 8586 return xritag; 8587 8588 bpl = (struct ulp_bde64 *)dmabuf->virt; 8589 if (!bpl) 8590 return xritag; 8591 8592 for (i = 0; i < numBdes; i++) { 8593 /* Should already be byte swapped. */ 8594 sgl->addr_hi = bpl->addrHigh; 8595 sgl->addr_lo = bpl->addrLow; 8596 8597 sgl->word2 = le32_to_cpu(sgl->word2); 8598 if ((i+1) == numBdes) 8599 bf_set(lpfc_sli4_sge_last, sgl, 1); 8600 else 8601 bf_set(lpfc_sli4_sge_last, sgl, 0); 8602 /* swap the size field back to the cpu so we 8603 * can assign it to the sgl. 8604 */ 8605 bde.tus.w = le32_to_cpu(bpl->tus.w); 8606 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 8607 /* The offsets in the sgl need to be accumulated 8608 * separately for the request and reply lists. 8609 * The request is always first, the reply follows. 8610 */ 8611 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 8612 /* add up the reply sg entries */ 8613 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 8614 inbound++; 8615 /* first inbound? reset the offset */ 8616 if (inbound == 1) 8617 offset = 0; 8618 bf_set(lpfc_sli4_sge_offset, sgl, offset); 8619 bf_set(lpfc_sli4_sge_type, sgl, 8620 LPFC_SGE_TYPE_DATA); 8621 offset += bde.tus.f.bdeSize; 8622 } 8623 sgl->word2 = cpu_to_le32(sgl->word2); 8624 bpl++; 8625 sgl++; 8626 } 8627 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 8628 /* The addrHigh and addrLow fields of the BDE have not 8629 * been byteswapped yet so they need to be swapped 8630 * before putting them in the sgl. 8631 */ 8632 sgl->addr_hi = 8633 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 8634 sgl->addr_lo = 8635 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 8636 sgl->word2 = le32_to_cpu(sgl->word2); 8637 bf_set(lpfc_sli4_sge_last, sgl, 1); 8638 sgl->word2 = cpu_to_le32(sgl->word2); 8639 sgl->sge_len = 8640 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 8641 } 8642 return sglq->sli4_xritag; 8643 } 8644 8645 /** 8646 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 8647 * @phba: Pointer to HBA context object. 8648 * @piocb: Pointer to command iocb. 8649 * @wqe: Pointer to the work queue entry. 8650 * 8651 * This routine converts the iocb command to its Work Queue Entry 8652 * equivalent. The wqe pointer should not have any fields set when 8653 * this routine is called because it will memcpy over them. 8654 * This routine does not set the CQ_ID or the WQEC bits in the 8655 * wqe. 8656 * 8657 * Returns: 0 = Success, IOCB_ERROR = Failure. 
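 *
 * A minimal caller-side sketch (illustrative only; the WQE storage is
 * scratch that this routine overwrites):
 *
 *	union lpfc_wqe wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe) == IOCB_ERROR)
 *		return IOCB_ERROR;
 *	// caller sets CQ_ID/WQEC as needed, then posts &wqe to a work queue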
8658 **/ 8659 static int 8660 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 8661 union lpfc_wqe *wqe) 8662 { 8663 uint32_t xmit_len = 0, total_len = 0; 8664 uint8_t ct = 0; 8665 uint32_t fip; 8666 uint32_t abort_tag; 8667 uint8_t command_type = ELS_COMMAND_NON_FIP; 8668 uint8_t cmnd; 8669 uint16_t xritag; 8670 uint16_t abrt_iotag; 8671 struct lpfc_iocbq *abrtiocbq; 8672 struct ulp_bde64 *bpl = NULL; 8673 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 8674 int numBdes, i; 8675 struct ulp_bde64 bde; 8676 struct lpfc_nodelist *ndlp; 8677 uint32_t *pcmd; 8678 uint32_t if_type; 8679 8680 fip = phba->hba_flag & HBA_FIP_SUPPORT; 8681 /* The fcp commands will set command type */ 8682 if (iocbq->iocb_flag & LPFC_IO_FCP) 8683 command_type = FCP_COMMAND; 8684 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 8685 command_type = ELS_COMMAND_FIP; 8686 else 8687 command_type = ELS_COMMAND_NON_FIP; 8688 8689 if (phba->fcp_embed_io) 8690 memset(wqe, 0, sizeof(union lpfc_wqe128)); 8691 /* Some of the fields are in the right position already */ 8692 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8693 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 8694 /* The ct field has moved so reset */ 8695 wqe->generic.wqe_com.word7 = 0; 8696 wqe->generic.wqe_com.word10 = 0; 8697 } 8698 8699 abort_tag = (uint32_t) iocbq->iotag; 8700 xritag = iocbq->sli4_xritag; 8701 /* words0-2 bpl convert bde */ 8702 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8703 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8704 sizeof(struct ulp_bde64); 8705 bpl = (struct ulp_bde64 *) 8706 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 8707 if (!bpl) 8708 return IOCB_ERROR; 8709 8710 /* Should already be byte swapped. */ 8711 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 8712 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 8713 /* swap the size field back to the cpu so we 8714 * can assign it to the sgl. 
8715 */ 8716 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 8717 xmit_len = wqe->generic.bde.tus.f.bdeSize; 8718 total_len = 0; 8719 for (i = 0; i < numBdes; i++) { 8720 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8721 total_len += bde.tus.f.bdeSize; 8722 } 8723 } else 8724 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 8725 8726 iocbq->iocb.ulpIoTag = iocbq->iotag; 8727 cmnd = iocbq->iocb.ulpCommand; 8728 8729 switch (iocbq->iocb.ulpCommand) { 8730 case CMD_ELS_REQUEST64_CR: 8731 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 8732 ndlp = iocbq->context_un.ndlp; 8733 else 8734 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8735 if (!iocbq->iocb.ulpLe) { 8736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8737 "2007 Only Limited Edition cmd Format" 8738 " supported 0x%x\n", 8739 iocbq->iocb.ulpCommand); 8740 return IOCB_ERROR; 8741 } 8742 8743 wqe->els_req.payload_len = xmit_len; 8744 /* Els_reguest64 has a TMO */ 8745 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 8746 iocbq->iocb.ulpTimeout); 8747 /* Need a VF for word 4 set the vf bit*/ 8748 bf_set(els_req64_vf, &wqe->els_req, 0); 8749 /* And a VFID for word 12 */ 8750 bf_set(els_req64_vfid, &wqe->els_req, 0); 8751 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8752 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8753 iocbq->iocb.ulpContext); 8754 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 8755 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 8756 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 8757 if (command_type == ELS_COMMAND_FIP) 8758 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 8759 >> LPFC_FIP_ELS_ID_SHIFT); 8760 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8761 iocbq->context2)->virt); 8762 if_type = bf_get(lpfc_sli_intf_if_type, 8763 &phba->sli4_hba.sli_intf); 8764 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8765 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 8766 *pcmd == ELS_CMD_SCR || 8767 *pcmd == ELS_CMD_FDISC || 8768 *pcmd == ELS_CMD_LOGO || 8769 *pcmd == ELS_CMD_PLOGI)) { 8770 bf_set(els_req64_sp, &wqe->els_req, 1); 8771 bf_set(els_req64_sid, &wqe->els_req, 8772 iocbq->vport->fc_myDID); 8773 if ((*pcmd == ELS_CMD_FLOGI) && 8774 !(phba->fc_topology == 8775 LPFC_TOPOLOGY_LOOP)) 8776 bf_set(els_req64_sid, &wqe->els_req, 0); 8777 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 8778 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8779 phba->vpi_ids[iocbq->vport->vpi]); 8780 } else if (pcmd && iocbq->context1) { 8781 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 8782 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8783 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8784 } 8785 } 8786 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 8787 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8788 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 8789 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 8790 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 8791 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 8792 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8793 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 8794 wqe->els_req.max_response_payload_len = total_len - xmit_len; 8795 break; 8796 case CMD_XMIT_SEQUENCE64_CX: 8797 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 8798 iocbq->iocb.un.ulpWord[3]); 8799 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 8800 iocbq->iocb.unsli3.rcvsli3.ox_id); 8801 /* The entire sequence is transmitted for this IOCB */ 8802 xmit_len = total_len; 8803 cmnd = CMD_XMIT_SEQUENCE64_CR; 8804 if (phba->link_flag & LS_LOOPBACK_MODE) 8805 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 8806 case CMD_XMIT_SEQUENCE64_CR: 8807 /* word3 iocb=io_tag32 
wqe=reserved */ 8808 wqe->xmit_sequence.rsvd3 = 0; 8809 /* word4 relative_offset memcpy */ 8810 /* word5 r_ctl/df_ctl memcpy */ 8811 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 8812 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 8813 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 8814 LPFC_WQE_IOD_WRITE); 8815 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 8816 LPFC_WQE_LENLOC_WORD12); 8817 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 8818 wqe->xmit_sequence.xmit_len = xmit_len; 8819 command_type = OTHER_COMMAND; 8820 break; 8821 case CMD_XMIT_BCAST64_CN: 8822 /* word3 iocb=iotag32 wqe=seq_payload_len */ 8823 wqe->xmit_bcast64.seq_payload_len = xmit_len; 8824 /* word4 iocb=rsvd wqe=rsvd */ 8825 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 8826 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 8827 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 8828 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8829 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 8830 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 8831 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 8832 LPFC_WQE_LENLOC_WORD3); 8833 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 8834 break; 8835 case CMD_FCP_IWRITE64_CR: 8836 command_type = FCP_COMMAND_DATA_OUT; 8837 /* word3 iocb=iotag wqe=payload_offset_len */ 8838 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8839 bf_set(payload_offset_len, &wqe->fcp_iwrite, 8840 xmit_len + sizeof(struct fcp_rsp)); 8841 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 8842 0); 8843 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8844 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8845 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 8846 iocbq->iocb.ulpFCP2Rcvy); 8847 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 8848 /* Always open the exchange */ 8849 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 8850 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 8851 LPFC_WQE_LENLOC_WORD4); 8852 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8853 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8854 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8855 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 8856 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 8857 if (iocbq->priority) { 8858 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8859 (iocbq->priority << 1)); 8860 } else { 8861 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8862 (phba->cfg_XLanePriority << 1)); 8863 } 8864 } 8865 /* Note, word 10 is already initialized to 0 */ 8866 8867 if (phba->fcp_embed_io) { 8868 struct lpfc_scsi_buf *lpfc_cmd; 8869 struct sli4_sge *sgl; 8870 union lpfc_wqe128 *wqe128; 8871 struct fcp_cmnd *fcp_cmnd; 8872 uint32_t *ptr; 8873 8874 /* 128 byte wqe support here */ 8875 wqe128 = (union lpfc_wqe128 *)wqe; 8876 8877 lpfc_cmd = iocbq->context1; 8878 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8879 fcp_cmnd = lpfc_cmd->fcp_cmnd; 8880 8881 /* Word 0-2 - FCP_CMND */ 8882 wqe128->generic.bde.tus.f.bdeFlags = 8883 BUFF_TYPE_BDE_IMMED; 8884 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8885 wqe128->generic.bde.addrHigh = 0; 8886 wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8887 8888 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1); 8889 8890 /* Word 22-29 FCP CMND Payload */ 8891 ptr = &wqe128->words[22]; 8892 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8893 } 8894 break; 8895 case CMD_FCP_IREAD64_CR: 8896 /* word3 iocb=iotag wqe=payload_offset_len */ 8897 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8898 
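		/*
		 * As in the IWRITE case above, word 3 carries
		 * xmit_len + sizeof(struct fcp_rsp) and cmd_buff_len is
		 * left at zero for an initiator read.
		 */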
bf_set(payload_offset_len, &wqe->fcp_iread, 8899 xmit_len + sizeof(struct fcp_rsp)); 8900 bf_set(cmd_buff_len, &wqe->fcp_iread, 8901 0); 8902 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8903 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8904 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 8905 iocbq->iocb.ulpFCP2Rcvy); 8906 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8907 /* Always open the exchange */ 8908 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8909 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8910 LPFC_WQE_LENLOC_WORD4); 8911 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8912 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8913 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8914 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 8915 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 8916 if (iocbq->priority) { 8917 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8918 (iocbq->priority << 1)); 8919 } else { 8920 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8921 (phba->cfg_XLanePriority << 1)); 8922 } 8923 } 8924 /* Note, word 10 is already initialized to 0 */ 8925 8926 if (phba->fcp_embed_io) { 8927 struct lpfc_scsi_buf *lpfc_cmd; 8928 struct sli4_sge *sgl; 8929 union lpfc_wqe128 *wqe128; 8930 struct fcp_cmnd *fcp_cmnd; 8931 uint32_t *ptr; 8932 8933 /* 128 byte wqe support here */ 8934 wqe128 = (union lpfc_wqe128 *)wqe; 8935 8936 lpfc_cmd = iocbq->context1; 8937 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8938 fcp_cmnd = lpfc_cmd->fcp_cmnd; 8939 8940 /* Word 0-2 - FCP_CMND */ 8941 wqe128->generic.bde.tus.f.bdeFlags = 8942 BUFF_TYPE_BDE_IMMED; 8943 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8944 wqe128->generic.bde.addrHigh = 0; 8945 wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8946 8947 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1); 8948 8949 /* Word 22-29 FCP CMND Payload */ 8950 ptr = &wqe128->words[22]; 8951 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8952 } 8953 break; 8954 case CMD_FCP_ICMND64_CR: 8955 /* word3 iocb=iotag wqe=payload_offset_len */ 8956 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8957 bf_set(payload_offset_len, &wqe->fcp_icmd, 8958 xmit_len + sizeof(struct fcp_rsp)); 8959 bf_set(cmd_buff_len, &wqe->fcp_icmd, 8960 0); 8961 /* word3 iocb=IO_TAG wqe=reserved */ 8962 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8963 /* Always open the exchange */ 8964 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 8965 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8966 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8967 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8968 LPFC_WQE_LENLOC_NONE); 8969 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8970 iocbq->iocb.ulpFCP2Rcvy); 8971 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8972 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 8973 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 8974 if (iocbq->priority) { 8975 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8976 (iocbq->priority << 1)); 8977 } else { 8978 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8979 (phba->cfg_XLanePriority << 1)); 8980 } 8981 } 8982 /* Note, word 10 is already initialized to 0 */ 8983 8984 if (phba->fcp_embed_io) { 8985 struct lpfc_scsi_buf *lpfc_cmd; 8986 struct sli4_sge *sgl; 8987 union lpfc_wqe128 *wqe128; 8988 struct fcp_cmnd *fcp_cmnd; 8989 uint32_t *ptr; 8990 8991 /* 128 byte wqe support here */ 8992 wqe128 = (union lpfc_wqe128 *)wqe; 8993 8994 lpfc_cmd = iocbq->context1; 8995 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8996 fcp_cmnd = lpfc_cmd->fcp_cmnd; 8997 8998 /* Word 0-2 - FCP_CMND */ 8999 
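			/*
			 * The FCP_CMND is embedded in the WQE itself: the BDE
			 * below is marked immediate and addrLow is set to byte
			 * offset 88 (word 22), where the payload is copied.
			 */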
wqe128->generic.bde.tus.f.bdeFlags = 9000 BUFF_TYPE_BDE_IMMED; 9001 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 9002 wqe128->generic.bde.addrHigh = 0; 9003 wqe128->generic.bde.addrLow = 88; /* Word 22 */ 9004 9005 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1); 9006 9007 /* Word 22-29 FCP CMND Payload */ 9008 ptr = &wqe128->words[22]; 9009 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9010 } 9011 break; 9012 case CMD_GEN_REQUEST64_CR: 9013 /* For this command calculate the xmit length of the 9014 * request bde. 9015 */ 9016 xmit_len = 0; 9017 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9018 sizeof(struct ulp_bde64); 9019 for (i = 0; i < numBdes; i++) { 9020 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9021 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9022 break; 9023 xmit_len += bde.tus.f.bdeSize; 9024 } 9025 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9026 wqe->gen_req.request_payload_len = xmit_len; 9027 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9028 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9029 /* word6 context tag copied in memcpy */ 9030 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9031 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9033 "2015 Invalid CT %x command 0x%x\n", 9034 ct, iocbq->iocb.ulpCommand); 9035 return IOCB_ERROR; 9036 } 9037 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9038 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9039 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9040 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9041 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9042 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9043 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9044 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9045 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9046 command_type = OTHER_COMMAND; 9047 break; 9048 case CMD_XMIT_ELS_RSP64_CX: 9049 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9050 /* words0-2 BDE memcpy */ 9051 /* word3 iocb=iotag32 wqe=response_payload_len */ 9052 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9053 /* word4 */ 9054 wqe->xmit_els_rsp.word4 = 0; 9055 /* word5 iocb=rsvd wge=did */ 9056 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9057 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9058 9059 if_type = bf_get(lpfc_sli_intf_if_type, 9060 &phba->sli4_hba.sli_intf); 9061 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 9062 if (iocbq->vport->fc_flag & FC_PT2PT) { 9063 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9064 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9065 iocbq->vport->fc_myDID); 9066 if (iocbq->vport->fc_myDID == Fabric_DID) { 9067 bf_set(wqe_els_did, 9068 &wqe->xmit_els_rsp.wqe_dest, 0); 9069 } 9070 } 9071 } 9072 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9073 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9074 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9075 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9076 iocbq->iocb.unsli3.rcvsli3.ox_id); 9077 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9078 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9079 phba->vpi_ids[iocbq->vport->vpi]); 9080 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 9081 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9082 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9083 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9084 LPFC_WQE_LENLOC_WORD3); 9085 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9086 bf_set(wqe_rsp_temp_rpi, 
&wqe->xmit_els_rsp, 9087 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9088 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9089 iocbq->context2)->virt); 9090 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9091 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9092 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9093 iocbq->vport->fc_myDID); 9094 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9095 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9096 phba->vpi_ids[phba->pport->vpi]); 9097 } 9098 command_type = OTHER_COMMAND; 9099 break; 9100 case CMD_CLOSE_XRI_CN: 9101 case CMD_ABORT_XRI_CN: 9102 case CMD_ABORT_XRI_CX: 9103 /* words 0-2 memcpy should be 0 rserved */ 9104 /* port will send abts */ 9105 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9106 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9107 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9108 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9109 } else 9110 fip = 0; 9111 9112 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9113 /* 9114 * The link is down, or the command was ELS_FIP 9115 * so the fw does not need to send abts 9116 * on the wire. 9117 */ 9118 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9119 else 9120 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9121 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9122 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9123 wqe->abort_cmd.rsrvd5 = 0; 9124 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9125 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9126 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9127 /* 9128 * The abort handler will send us CMD_ABORT_XRI_CN or 9129 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9130 */ 9131 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9132 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9133 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9134 LPFC_WQE_LENLOC_NONE); 9135 cmnd = CMD_ABORT_XRI_CX; 9136 command_type = OTHER_COMMAND; 9137 xritag = 0; 9138 break; 9139 case CMD_XMIT_BLS_RSP64_CX: 9140 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9141 /* As BLS ABTS RSP WQE is very different from other WQEs, 9142 * we re-construct this WQE here based on information in 9143 * iocbq from scratch. 9144 */ 9145 memset(wqe, 0, sizeof(union lpfc_wqe)); 9146 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9147 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9148 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9149 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9150 LPFC_ABTS_UNSOL_INT) { 9151 /* ABTS sent by initiator to CT exchange, the 9152 * RX_ID field will be filled with the newly 9153 * allocated responder XRI. 9154 */ 9155 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9156 iocbq->sli4_xritag); 9157 } else { 9158 /* ABTS sent by responder to CT exchange, the 9159 * RX_ID field will be filled with the responder 9160 * RX_ID from ABTS. 
9161 */ 9162 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9163 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9164 } 9165 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9166 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9167 9168 /* Use CT=VPI */ 9169 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9170 ndlp->nlp_DID); 9171 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9172 iocbq->iocb.ulpContext); 9173 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9174 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9175 phba->vpi_ids[phba->pport->vpi]); 9176 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9177 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9178 LPFC_WQE_LENLOC_NONE); 9179 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9180 command_type = OTHER_COMMAND; 9181 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9182 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9183 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9184 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9185 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9186 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9187 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9188 } 9189 9190 break; 9191 case CMD_SEND_FRAME: 9192 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9193 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9194 return 0; 9195 case CMD_XRI_ABORTED_CX: 9196 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9197 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9198 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9199 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9200 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9201 default: 9202 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9203 "2014 Invalid command 0x%x\n", 9204 iocbq->iocb.ulpCommand); 9205 return IOCB_ERROR; 9206 break; 9207 } 9208 9209 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9210 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9211 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9212 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9213 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9214 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9215 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9216 LPFC_IO_DIF_INSERT); 9217 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9218 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9219 wqe->generic.wqe_com.abort_tag = abort_tag; 9220 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9221 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9222 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9223 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9224 return 0; 9225 } 9226 9227 /** 9228 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9229 * @phba: Pointer to HBA context object. 9230 * @ring_number: SLI ring number to issue iocb on. 9231 * @piocb: Pointer to command iocb. 9232 * @flag: Flag indicating if this command can be put into txq. 9233 * 9234 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9235 * an iocb command to an HBA with SLI-4 interface spec. 9236 * 9237 * This function is called with hbalock held. The function will return success 9238 * after it successfully submit the iocb to firmware or after adding to the 9239 * txq. 
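 *
 * Return codes:
 *   IOCB_SUCCESS - iocb posted to the work queue or queued on the txq
 *   IOCB_BUSY    - iocb could not be posted and SLI_IOCB_RET_IOCB
 *                  disallowed queuing it on the txq
 *   IOCB_ERROR   - sgl mapping, WQE conversion or work queue post failed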
9240 **/ 9241 static int 9242 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 9243 struct lpfc_iocbq *piocb, uint32_t flag) 9244 { 9245 struct lpfc_sglq *sglq; 9246 union lpfc_wqe *wqe; 9247 union lpfc_wqe128 wqe128; 9248 struct lpfc_queue *wq; 9249 struct lpfc_sli_ring *pring; 9250 9251 /* Get the WQ */ 9252 if ((piocb->iocb_flag & LPFC_IO_FCP) || 9253 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9254 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) 9255 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; 9256 else 9257 wq = phba->sli4_hba.oas_wq; 9258 } else { 9259 wq = phba->sli4_hba.els_wq; 9260 } 9261 9262 /* Get corresponding ring */ 9263 pring = wq->pring; 9264 9265 /* 9266 * The WQE can be either 64 or 128 bytes, 9267 * so allocate space on the stack assuming the largest. 9268 */ 9269 wqe = (union lpfc_wqe *)&wqe128; 9270 9271 lockdep_assert_held(&phba->hbalock); 9272 9273 if (piocb->sli4_xritag == NO_XRI) { 9274 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 9275 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 9276 sglq = NULL; 9277 else { 9278 if (!list_empty(&pring->txq)) { 9279 if (!(flag & SLI_IOCB_RET_IOCB)) { 9280 __lpfc_sli_ringtx_put(phba, 9281 pring, piocb); 9282 return IOCB_SUCCESS; 9283 } else { 9284 return IOCB_BUSY; 9285 } 9286 } else { 9287 sglq = __lpfc_sli_get_els_sglq(phba, piocb); 9288 if (!sglq) { 9289 if (!(flag & SLI_IOCB_RET_IOCB)) { 9290 __lpfc_sli_ringtx_put(phba, 9291 pring, 9292 piocb); 9293 return IOCB_SUCCESS; 9294 } else 9295 return IOCB_BUSY; 9296 } 9297 } 9298 } 9299 } else if (piocb->iocb_flag & LPFC_IO_FCP) 9300 /* These IO's already have an XRI and a mapped sgl. */ 9301 sglq = NULL; 9302 else { 9303 /* 9304 * This is a continuation of a commandi,(CX) so this 9305 * sglq is on the active list 9306 */ 9307 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 9308 if (!sglq) 9309 return IOCB_ERROR; 9310 } 9311 9312 if (sglq) { 9313 piocb->sli4_lxritag = sglq->sli4_lxritag; 9314 piocb->sli4_xritag = sglq->sli4_xritag; 9315 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 9316 return IOCB_ERROR; 9317 } 9318 9319 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) 9320 return IOCB_ERROR; 9321 9322 if (lpfc_sli4_wq_put(wq, wqe)) 9323 return IOCB_ERROR; 9324 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 9325 9326 return 0; 9327 } 9328 9329 /** 9330 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 9331 * 9332 * This routine wraps the actual lockless version for issusing IOCB function 9333 * pointer from the lpfc_hba struct. 9334 * 9335 * Return codes: 9336 * IOCB_ERROR - Error 9337 * IOCB_SUCCESS - Success 9338 * IOCB_BUSY - Busy 9339 **/ 9340 int 9341 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9342 struct lpfc_iocbq *piocb, uint32_t flag) 9343 { 9344 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9345 } 9346 9347 /** 9348 * lpfc_sli_api_table_setup - Set up sli api function jump table 9349 * @phba: The hba struct for which this call is being executed. 9350 * @dev_grp: The HBA PCI-Device group number. 9351 * 9352 * This routine sets up the SLI interface API function jump table in @phba 9353 * struct. 9354 * Returns: 0 - success, -ENODEV - failure. 
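 *
 * LPFC_PCI_DEV_LP selects the SLI-3 (_s3) handlers and LPFC_PCI_DEV_OC
 * selects the SLI-4 (_s4) handlers for iocb issue and iocbq release.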
9355 **/ 9356 int 9357 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 9358 { 9359 9360 switch (dev_grp) { 9361 case LPFC_PCI_DEV_LP: 9362 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 9363 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 9364 break; 9365 case LPFC_PCI_DEV_OC: 9366 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 9367 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 9368 break; 9369 default: 9370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9371 "1419 Invalid HBA PCI-device group: 0x%x\n", 9372 dev_grp); 9373 return -ENODEV; 9374 break; 9375 } 9376 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 9377 return 0; 9378 } 9379 9380 /** 9381 * lpfc_sli4_calc_ring - Calculates which ring to use 9382 * @phba: Pointer to HBA context object. 9383 * @piocb: Pointer to command iocb. 9384 * 9385 * For SLI4 only, FCP IO can deferred to one fo many WQs, based on 9386 * hba_wqidx, thus we need to calculate the corresponding ring. 9387 * Since ABORTS must go on the same WQ of the command they are 9388 * aborting, we use command's hba_wqidx. 9389 */ 9390 struct lpfc_sli_ring * 9391 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 9392 { 9393 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 9394 if (!(phba->cfg_fof) || 9395 (!(piocb->iocb_flag & LPFC_IO_FOF))) { 9396 if (unlikely(!phba->sli4_hba.fcp_wq)) 9397 return NULL; 9398 /* 9399 * for abort iocb hba_wqidx should already 9400 * be setup based on what work queue we used. 9401 */ 9402 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) 9403 piocb->hba_wqidx = 9404 lpfc_sli4_scmd_to_wqidx_distr(phba, 9405 piocb->context1); 9406 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; 9407 } else { 9408 if (unlikely(!phba->sli4_hba.oas_wq)) 9409 return NULL; 9410 piocb->hba_wqidx = 0; 9411 return phba->sli4_hba.oas_wq->pring; 9412 } 9413 } else { 9414 if (unlikely(!phba->sli4_hba.els_wq)) 9415 return NULL; 9416 piocb->hba_wqidx = 0; 9417 return phba->sli4_hba.els_wq->pring; 9418 } 9419 } 9420 9421 /** 9422 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 9423 * @phba: Pointer to HBA context object. 9424 * @pring: Pointer to driver SLI ring object. 9425 * @piocb: Pointer to command iocb. 9426 * @flag: Flag indicating if this command can be put into txq. 9427 * 9428 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 9429 * function. This function gets the hbalock and calls 9430 * __lpfc_sli_issue_iocb function and will return the error returned 9431 * by __lpfc_sli_issue_iocb function. This wrapper is used by 9432 * functions which do not hold hbalock. 
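 *
 * For SLI-4 ports the target ring is derived with lpfc_sli4_calc_ring()
 * and the per-ring ring_lock is taken; SLI-2/SLI-3 ports take the hbalock.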
9433 **/ 9434 int 9435 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9436 struct lpfc_iocbq *piocb, uint32_t flag) 9437 { 9438 struct lpfc_hba_eq_hdl *hba_eq_hdl; 9439 struct lpfc_sli_ring *pring; 9440 struct lpfc_queue *fpeq; 9441 struct lpfc_eqe *eqe; 9442 unsigned long iflags; 9443 int rc, idx; 9444 9445 if (phba->sli_rev == LPFC_SLI_REV4) { 9446 pring = lpfc_sli4_calc_ring(phba, piocb); 9447 if (unlikely(pring == NULL)) 9448 return IOCB_ERROR; 9449 9450 spin_lock_irqsave(&pring->ring_lock, iflags); 9451 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9452 spin_unlock_irqrestore(&pring->ring_lock, iflags); 9453 9454 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { 9455 idx = piocb->hba_wqidx; 9456 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; 9457 9458 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { 9459 9460 /* Get associated EQ with this index */ 9461 fpeq = phba->sli4_hba.hba_eq[idx]; 9462 9463 /* Turn off interrupts from this EQ */ 9464 lpfc_sli4_eq_clr_intr(fpeq); 9465 9466 /* 9467 * Process all the events on FCP EQ 9468 */ 9469 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 9470 lpfc_sli4_hba_handle_eqe(phba, 9471 eqe, idx); 9472 fpeq->EQ_processed++; 9473 } 9474 9475 /* Always clear and re-arm the EQ */ 9476 lpfc_sli4_eq_release(fpeq, 9477 LPFC_QUEUE_REARM); 9478 } 9479 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 9480 } 9481 } else { 9482 /* For now, SLI2/3 will still use hbalock */ 9483 spin_lock_irqsave(&phba->hbalock, iflags); 9484 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9485 spin_unlock_irqrestore(&phba->hbalock, iflags); 9486 } 9487 return rc; 9488 } 9489 9490 /** 9491 * lpfc_extra_ring_setup - Extra ring setup function 9492 * @phba: Pointer to HBA context object. 9493 * 9494 * This function is called while driver attaches with the 9495 * HBA to setup the extra ring. The extra ring is used 9496 * only when driver needs to support target mode functionality 9497 * or IP over FC functionalities. 9498 * 9499 * This function is called with no lock held. SLI3 only. 9500 **/ 9501 static int 9502 lpfc_extra_ring_setup( struct lpfc_hba *phba) 9503 { 9504 struct lpfc_sli *psli; 9505 struct lpfc_sli_ring *pring; 9506 9507 psli = &phba->sli; 9508 9509 /* Adjust cmd/rsp ring iocb entries more evenly */ 9510 9511 /* Take some away from the FCP ring */ 9512 pring = &psli->sli3_ring[LPFC_FCP_RING]; 9513 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9514 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9515 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9516 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9517 9518 /* and give them to the extra ring */ 9519 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 9520 9521 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9522 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9523 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9524 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9525 9526 /* Setup default profile for this ring */ 9527 pring->iotag_max = 4096; 9528 pring->num_mask = 1; 9529 pring->prt[0].profile = 0; /* Mask 0 */ 9530 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 9531 pring->prt[0].type = phba->cfg_multi_ring_type; 9532 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 9533 return 0; 9534 } 9535 9536 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 9537 * @phba: Pointer to HBA context object. 9538 * @iocbq: Pointer to iocb object. 
9539 * 9540 * The async_event handler calls this routine when it receives 9541 * an ASYNC_STATUS_CN event from the port. The port generates 9542 * this event when an Abort Sequence request to an rport fails 9543 * twice in succession. The abort could be originated by the 9544 * driver or by the port. The ABTS could have been for an ELS 9545 * or FCP IO. The port only generates this event when an ABTS 9546 * fails to complete after one retry. 9547 */ 9548 static void 9549 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 9550 struct lpfc_iocbq *iocbq) 9551 { 9552 struct lpfc_nodelist *ndlp = NULL; 9553 uint16_t rpi = 0, vpi = 0; 9554 struct lpfc_vport *vport = NULL; 9555 9556 /* The rpi in the ulpContext is vport-sensitive. */ 9557 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 9558 rpi = iocbq->iocb.ulpContext; 9559 9560 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9561 "3092 Port generated ABTS async event " 9562 "on vpi %d rpi %d status 0x%x\n", 9563 vpi, rpi, iocbq->iocb.ulpStatus); 9564 9565 vport = lpfc_find_vport_by_vpid(phba, vpi); 9566 if (!vport) 9567 goto err_exit; 9568 ndlp = lpfc_findnode_rpi(vport, rpi); 9569 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 9570 goto err_exit; 9571 9572 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 9573 lpfc_sli_abts_recover_port(vport, ndlp); 9574 return; 9575 9576 err_exit: 9577 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9578 "3095 Event Context not found, no " 9579 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 9580 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 9581 vpi, rpi); 9582 } 9583 9584 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 9585 * @phba: pointer to HBA context object. 9586 * @ndlp: nodelist pointer for the impacted rport. 9587 * @axri: pointer to the wcqe containing the failed exchange. 9588 * 9589 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 9590 * port. The port generates this event when an abort exchange request to an 9591 * rport fails twice in succession with no reply. The abort could be originated 9592 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 9593 */ 9594 void 9595 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 9596 struct lpfc_nodelist *ndlp, 9597 struct sli4_wcqe_xri_aborted *axri) 9598 { 9599 struct lpfc_vport *vport; 9600 uint32_t ext_status = 0; 9601 9602 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 9603 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9604 "3115 Node Context not found, driver " 9605 "ignoring abts err event\n"); 9606 return; 9607 } 9608 9609 vport = ndlp->vport; 9610 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9611 "3116 Port generated FCP XRI ABORT event on " 9612 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 9613 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 9614 bf_get(lpfc_wcqe_xa_xri, axri), 9615 bf_get(lpfc_wcqe_xa_status, axri), 9616 axri->parameter); 9617 9618 /* 9619 * Catch the ABTS protocol failure case. Older OCe FW releases returned 9620 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 9621 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 9622 */ 9623 ext_status = axri->parameter & IOERR_PARAM_MASK; 9624 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 9625 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 9626 lpfc_sli_abts_recover_port(vport, ndlp); 9627 } 9628 9629 /** 9630 * lpfc_sli_async_event_handler - ASYNC iocb handler function 9631 * @phba: Pointer to HBA context object. 
9632 * @pring: Pointer to driver SLI ring object. 9633 * @iocbq: Pointer to iocb object. 9634 * 9635 * This function is called by the slow ring event handler 9636 * function when there is an ASYNC event iocb in the ring. 9637 * This function is called with no lock held. 9638 * Currently this function handles only temperature related 9639 * ASYNC events. The function decodes the temperature sensor 9640 * event message and posts events for the management applications. 9641 **/ 9642 static void 9643 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 9644 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 9645 { 9646 IOCB_t *icmd; 9647 uint16_t evt_code; 9648 struct temp_event temp_event_data; 9649 struct Scsi_Host *shost; 9650 uint32_t *iocb_w; 9651 9652 icmd = &iocbq->iocb; 9653 evt_code = icmd->un.asyncstat.evt_code; 9654 9655 switch (evt_code) { 9656 case ASYNC_TEMP_WARN: 9657 case ASYNC_TEMP_SAFE: 9658 temp_event_data.data = (uint32_t) icmd->ulpContext; 9659 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 9660 if (evt_code == ASYNC_TEMP_WARN) { 9661 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 9662 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9663 "0347 Adapter is very hot, please take " 9664 "corrective action. temperature : %d Celsius\n", 9665 (uint32_t) icmd->ulpContext); 9666 } else { 9667 temp_event_data.event_code = LPFC_NORMAL_TEMP; 9668 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9669 "0340 Adapter temperature is OK now. " 9670 "temperature : %d Celsius\n", 9671 (uint32_t) icmd->ulpContext); 9672 } 9673 9674 /* Send temperature change event to applications */ 9675 shost = lpfc_shost_from_vport(phba->pport); 9676 fc_host_post_vendor_event(shost, fc_get_event_number(), 9677 sizeof(temp_event_data), (char *) &temp_event_data, 9678 LPFC_NL_VENDOR_ID); 9679 break; 9680 case ASYNC_STATUS_CN: 9681 lpfc_sli_abts_err_handler(phba, iocbq); 9682 break; 9683 default: 9684 iocb_w = (uint32_t *) icmd; 9685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9686 "0346 Ring %d handler: unexpected ASYNC_STATUS" 9687 " evt_code 0x%x\n" 9688 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 9689 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 9690 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 9691 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 9692 pring->ringno, icmd->un.asyncstat.evt_code, 9693 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 9694 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 9695 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 9696 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 9697 9698 break; 9699 } 9700 } 9701 9702 9703 /** 9704 * lpfc_sli4_setup - SLI ring setup function 9705 * @phba: Pointer to HBA context object. 9706 * 9707 * lpfc_sli_setup sets up rings of the SLI interface with 9708 * number of iocbs per ring and iotags. This function is 9709 * called while driver attach to the HBA and before the 9710 * interrupts are enabled. So there is no need for locking. 9711 * 9712 * This function always returns 0. 
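 *
 * Only the ELS work queue ring is given unsolicited event masks here,
 * covering ELS request/reply frames and CT (NameServer) frames.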
9713 **/ 9714 int 9715 lpfc_sli4_setup(struct lpfc_hba *phba) 9716 { 9717 struct lpfc_sli_ring *pring; 9718 9719 pring = phba->sli4_hba.els_wq->pring; 9720 pring->num_mask = LPFC_MAX_RING_MASK; 9721 pring->prt[0].profile = 0; /* Mask 0 */ 9722 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9723 pring->prt[0].type = FC_TYPE_ELS; 9724 pring->prt[0].lpfc_sli_rcv_unsol_event = 9725 lpfc_els_unsol_event; 9726 pring->prt[1].profile = 0; /* Mask 1 */ 9727 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9728 pring->prt[1].type = FC_TYPE_ELS; 9729 pring->prt[1].lpfc_sli_rcv_unsol_event = 9730 lpfc_els_unsol_event; 9731 pring->prt[2].profile = 0; /* Mask 2 */ 9732 /* NameServer Inquiry */ 9733 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9734 /* NameServer */ 9735 pring->prt[2].type = FC_TYPE_CT; 9736 pring->prt[2].lpfc_sli_rcv_unsol_event = 9737 lpfc_ct_unsol_event; 9738 pring->prt[3].profile = 0; /* Mask 3 */ 9739 /* NameServer response */ 9740 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9741 /* NameServer */ 9742 pring->prt[3].type = FC_TYPE_CT; 9743 pring->prt[3].lpfc_sli_rcv_unsol_event = 9744 lpfc_ct_unsol_event; 9745 return 0; 9746 } 9747 9748 /** 9749 * lpfc_sli_setup - SLI ring setup function 9750 * @phba: Pointer to HBA context object. 9751 * 9752 * lpfc_sli_setup sets up rings of the SLI interface with 9753 * number of iocbs per ring and iotags. This function is 9754 * called while driver attach to the HBA and before the 9755 * interrupts are enabled. So there is no need for locking. 9756 * 9757 * This function always returns 0. SLI3 only. 9758 **/ 9759 int 9760 lpfc_sli_setup(struct lpfc_hba *phba) 9761 { 9762 int i, totiocbsize = 0; 9763 struct lpfc_sli *psli = &phba->sli; 9764 struct lpfc_sli_ring *pring; 9765 9766 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 9767 psli->sli_flag = 0; 9768 9769 psli->iocbq_lookup = NULL; 9770 psli->iocbq_lookup_len = 0; 9771 psli->last_iotag = 0; 9772 9773 for (i = 0; i < psli->num_rings; i++) { 9774 pring = &psli->sli3_ring[i]; 9775 switch (i) { 9776 case LPFC_FCP_RING: /* ring 0 - FCP */ 9777 /* numCiocb and numRiocb are used in config_port */ 9778 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 9779 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 9780 pring->sli.sli3.numCiocb += 9781 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9782 pring->sli.sli3.numRiocb += 9783 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9784 pring->sli.sli3.numCiocb += 9785 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9786 pring->sli.sli3.numRiocb += 9787 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9788 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9789 SLI3_IOCB_CMD_SIZE : 9790 SLI2_IOCB_CMD_SIZE; 9791 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9792 SLI3_IOCB_RSP_SIZE : 9793 SLI2_IOCB_RSP_SIZE; 9794 pring->iotag_ctr = 0; 9795 pring->iotag_max = 9796 (phba->cfg_hba_queue_depth * 2); 9797 pring->fast_iotag = pring->iotag_max; 9798 pring->num_mask = 0; 9799 break; 9800 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 9801 /* numCiocb and numRiocb are used in config_port */ 9802 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 9803 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 9804 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9805 SLI3_IOCB_CMD_SIZE : 9806 SLI2_IOCB_CMD_SIZE; 9807 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
9808 SLI3_IOCB_RSP_SIZE : 9809 SLI2_IOCB_RSP_SIZE; 9810 pring->iotag_max = phba->cfg_hba_queue_depth; 9811 pring->num_mask = 0; 9812 break; 9813 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 9814 /* numCiocb and numRiocb are used in config_port */ 9815 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 9816 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 9817 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9818 SLI3_IOCB_CMD_SIZE : 9819 SLI2_IOCB_CMD_SIZE; 9820 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9821 SLI3_IOCB_RSP_SIZE : 9822 SLI2_IOCB_RSP_SIZE; 9823 pring->fast_iotag = 0; 9824 pring->iotag_ctr = 0; 9825 pring->iotag_max = 4096; 9826 pring->lpfc_sli_rcv_async_status = 9827 lpfc_sli_async_event_handler; 9828 pring->num_mask = LPFC_MAX_RING_MASK; 9829 pring->prt[0].profile = 0; /* Mask 0 */ 9830 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9831 pring->prt[0].type = FC_TYPE_ELS; 9832 pring->prt[0].lpfc_sli_rcv_unsol_event = 9833 lpfc_els_unsol_event; 9834 pring->prt[1].profile = 0; /* Mask 1 */ 9835 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9836 pring->prt[1].type = FC_TYPE_ELS; 9837 pring->prt[1].lpfc_sli_rcv_unsol_event = 9838 lpfc_els_unsol_event; 9839 pring->prt[2].profile = 0; /* Mask 2 */ 9840 /* NameServer Inquiry */ 9841 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9842 /* NameServer */ 9843 pring->prt[2].type = FC_TYPE_CT; 9844 pring->prt[2].lpfc_sli_rcv_unsol_event = 9845 lpfc_ct_unsol_event; 9846 pring->prt[3].profile = 0; /* Mask 3 */ 9847 /* NameServer response */ 9848 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9849 /* NameServer */ 9850 pring->prt[3].type = FC_TYPE_CT; 9851 pring->prt[3].lpfc_sli_rcv_unsol_event = 9852 lpfc_ct_unsol_event; 9853 break; 9854 } 9855 totiocbsize += (pring->sli.sli3.numCiocb * 9856 pring->sli.sli3.sizeCiocb) + 9857 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 9858 } 9859 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 9860 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 9861 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 9862 "SLI2 SLIM Data: x%x x%lx\n", 9863 phba->brd_no, totiocbsize, 9864 (unsigned long) MAX_SLIM_IOCB_SIZE); 9865 } 9866 if (phba->cfg_multi_ring_support == 2) 9867 lpfc_extra_ring_setup(phba); 9868 9869 return 0; 9870 } 9871 9872 /** 9873 * lpfc_sli4_queue_init - Queue initialization function 9874 * @phba: Pointer to HBA context object. 9875 * 9876 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each 9877 * ring. This function also initializes ring indices of each ring. 9878 * This function is called during the initialization of the SLI 9879 * interface of an HBA. 9880 * This function is called with no lock held and always returns 9881 * 1. 
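 *
 * Prings are initialized for the FCP and NVME I/O channel work queues,
 * the ELS queue, the NVME LS queue (when NVME channels are configured)
 * and the OAS queue (when cfg_fof is set).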
9882 **/ 9883 void 9884 lpfc_sli4_queue_init(struct lpfc_hba *phba) 9885 { 9886 struct lpfc_sli *psli; 9887 struct lpfc_sli_ring *pring; 9888 int i; 9889 9890 psli = &phba->sli; 9891 spin_lock_irq(&phba->hbalock); 9892 INIT_LIST_HEAD(&psli->mboxq); 9893 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9894 /* Initialize list headers for txq and txcmplq as double linked lists */ 9895 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 9896 pring = phba->sli4_hba.fcp_wq[i]->pring; 9897 pring->flag = 0; 9898 pring->ringno = LPFC_FCP_RING; 9899 INIT_LIST_HEAD(&pring->txq); 9900 INIT_LIST_HEAD(&pring->txcmplq); 9901 INIT_LIST_HEAD(&pring->iocb_continueq); 9902 spin_lock_init(&pring->ring_lock); 9903 } 9904 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 9905 pring = phba->sli4_hba.nvme_wq[i]->pring; 9906 pring->flag = 0; 9907 pring->ringno = LPFC_FCP_RING; 9908 INIT_LIST_HEAD(&pring->txq); 9909 INIT_LIST_HEAD(&pring->txcmplq); 9910 INIT_LIST_HEAD(&pring->iocb_continueq); 9911 spin_lock_init(&pring->ring_lock); 9912 } 9913 pring = phba->sli4_hba.els_wq->pring; 9914 pring->flag = 0; 9915 pring->ringno = LPFC_ELS_RING; 9916 INIT_LIST_HEAD(&pring->txq); 9917 INIT_LIST_HEAD(&pring->txcmplq); 9918 INIT_LIST_HEAD(&pring->iocb_continueq); 9919 spin_lock_init(&pring->ring_lock); 9920 9921 if (phba->cfg_nvme_io_channel) { 9922 pring = phba->sli4_hba.nvmels_wq->pring; 9923 pring->flag = 0; 9924 pring->ringno = LPFC_ELS_RING; 9925 INIT_LIST_HEAD(&pring->txq); 9926 INIT_LIST_HEAD(&pring->txcmplq); 9927 INIT_LIST_HEAD(&pring->iocb_continueq); 9928 spin_lock_init(&pring->ring_lock); 9929 } 9930 9931 if (phba->cfg_fof) { 9932 pring = phba->sli4_hba.oas_wq->pring; 9933 pring->flag = 0; 9934 pring->ringno = LPFC_FCP_RING; 9935 INIT_LIST_HEAD(&pring->txq); 9936 INIT_LIST_HEAD(&pring->txcmplq); 9937 INIT_LIST_HEAD(&pring->iocb_continueq); 9938 spin_lock_init(&pring->ring_lock); 9939 } 9940 9941 spin_unlock_irq(&phba->hbalock); 9942 } 9943 9944 /** 9945 * lpfc_sli_queue_init - Queue initialization function 9946 * @phba: Pointer to HBA context object. 9947 * 9948 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 9949 * ring. This function also initializes ring indices of each ring. 9950 * This function is called during the initialization of the SLI 9951 * interface of an HBA. 9952 * This function is called with no lock held and always returns 9953 * 1. 9954 **/ 9955 void 9956 lpfc_sli_queue_init(struct lpfc_hba *phba) 9957 { 9958 struct lpfc_sli *psli; 9959 struct lpfc_sli_ring *pring; 9960 int i; 9961 9962 psli = &phba->sli; 9963 spin_lock_irq(&phba->hbalock); 9964 INIT_LIST_HEAD(&psli->mboxq); 9965 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9966 /* Initialize list headers for txq and txcmplq as double linked lists */ 9967 for (i = 0; i < psli->num_rings; i++) { 9968 pring = &psli->sli3_ring[i]; 9969 pring->ringno = i; 9970 pring->sli.sli3.next_cmdidx = 0; 9971 pring->sli.sli3.local_getidx = 0; 9972 pring->sli.sli3.cmdidx = 0; 9973 INIT_LIST_HEAD(&pring->iocb_continueq); 9974 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 9975 INIT_LIST_HEAD(&pring->postbufq); 9976 pring->flag = 0; 9977 INIT_LIST_HEAD(&pring->txq); 9978 INIT_LIST_HEAD(&pring->txcmplq); 9979 spin_lock_init(&pring->ring_lock); 9980 } 9981 spin_unlock_irq(&phba->hbalock); 9982 } 9983 9984 /** 9985 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 9986 * @phba: Pointer to HBA context object. 9987 * 9988 * This routine flushes the mailbox command subsystem. 
It will unconditionally 9989 * flush all the mailbox commands in the three possible stages in the mailbox 9990 * command sub-system: pending mailbox command queue; the outstanding mailbox 9991 * command; and completed mailbox command queue. It is caller's responsibility 9992 * to make sure that the driver is in the proper state to flush the mailbox 9993 * command sub-system. Namely, the posting of mailbox commands into the 9994 * pending mailbox command queue from the various clients must be stopped; 9995 * either the HBA is in a state that it will never works on the outstanding 9996 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 9997 * mailbox command has been completed. 9998 **/ 9999 static void 10000 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10001 { 10002 LIST_HEAD(completions); 10003 struct lpfc_sli *psli = &phba->sli; 10004 LPFC_MBOXQ_t *pmb; 10005 unsigned long iflag; 10006 10007 /* Flush all the mailbox commands in the mbox system */ 10008 spin_lock_irqsave(&phba->hbalock, iflag); 10009 /* The pending mailbox command queue */ 10010 list_splice_init(&phba->sli.mboxq, &completions); 10011 /* The outstanding active mailbox command */ 10012 if (psli->mbox_active) { 10013 list_add_tail(&psli->mbox_active->list, &completions); 10014 psli->mbox_active = NULL; 10015 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10016 } 10017 /* The completed mailbox command queue */ 10018 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10019 spin_unlock_irqrestore(&phba->hbalock, iflag); 10020 10021 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10022 while (!list_empty(&completions)) { 10023 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10024 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10025 if (pmb->mbox_cmpl) 10026 pmb->mbox_cmpl(phba, pmb); 10027 } 10028 } 10029 10030 /** 10031 * lpfc_sli_host_down - Vport cleanup function 10032 * @vport: Pointer to virtual port object. 10033 * 10034 * lpfc_sli_host_down is called to clean up the resources 10035 * associated with a vport before destroying virtual 10036 * port data structures. 10037 * This function does following operations: 10038 * - Free discovery resources associated with this virtual 10039 * port. 10040 * - Free iocbs associated with this virtual port in 10041 * the txq. 10042 * - Send abort for all iocb commands associated with this 10043 * vport in txcmplq. 10044 * 10045 * This function is called with no lock held and always returns 1. 10046 **/ 10047 int 10048 lpfc_sli_host_down(struct lpfc_vport *vport) 10049 { 10050 LIST_HEAD(completions); 10051 struct lpfc_hba *phba = vport->phba; 10052 struct lpfc_sli *psli = &phba->sli; 10053 struct lpfc_queue *qp = NULL; 10054 struct lpfc_sli_ring *pring; 10055 struct lpfc_iocbq *iocb, *next_iocb; 10056 int i; 10057 unsigned long flags = 0; 10058 uint16_t prev_pring_flag; 10059 10060 lpfc_cleanup_discovery_resources(vport); 10061 10062 spin_lock_irqsave(&phba->hbalock, flags); 10063 10064 /* 10065 * Error everything on the txq since these iocbs 10066 * have not been given to the FW yet. 
10067 * Also issue ABTS for everything on the txcmplq 10068 */ 10069 if (phba->sli_rev != LPFC_SLI_REV4) { 10070 for (i = 0; i < psli->num_rings; i++) { 10071 pring = &psli->sli3_ring[i]; 10072 prev_pring_flag = pring->flag; 10073 /* Only slow rings */ 10074 if (pring->ringno == LPFC_ELS_RING) { 10075 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10076 /* Set the lpfc data pending flag */ 10077 set_bit(LPFC_DATA_READY, &phba->data_flags); 10078 } 10079 list_for_each_entry_safe(iocb, next_iocb, 10080 &pring->txq, list) { 10081 if (iocb->vport != vport) 10082 continue; 10083 list_move_tail(&iocb->list, &completions); 10084 } 10085 list_for_each_entry_safe(iocb, next_iocb, 10086 &pring->txcmplq, list) { 10087 if (iocb->vport != vport) 10088 continue; 10089 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10090 } 10091 pring->flag = prev_pring_flag; 10092 } 10093 } else { 10094 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10095 pring = qp->pring; 10096 if (!pring) 10097 continue; 10098 if (pring == phba->sli4_hba.els_wq->pring) { 10099 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10100 /* Set the lpfc data pending flag */ 10101 set_bit(LPFC_DATA_READY, &phba->data_flags); 10102 } 10103 prev_pring_flag = pring->flag; 10104 spin_lock_irq(&pring->ring_lock); 10105 list_for_each_entry_safe(iocb, next_iocb, 10106 &pring->txq, list) { 10107 if (iocb->vport != vport) 10108 continue; 10109 list_move_tail(&iocb->list, &completions); 10110 } 10111 spin_unlock_irq(&pring->ring_lock); 10112 list_for_each_entry_safe(iocb, next_iocb, 10113 &pring->txcmplq, list) { 10114 if (iocb->vport != vport) 10115 continue; 10116 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10117 } 10118 pring->flag = prev_pring_flag; 10119 } 10120 } 10121 spin_unlock_irqrestore(&phba->hbalock, flags); 10122 10123 /* Cancel all the IOCBs from the completions list */ 10124 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10125 IOERR_SLI_DOWN); 10126 return 1; 10127 } 10128 10129 /** 10130 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10131 * @phba: Pointer to HBA context object. 10132 * 10133 * This function cleans up all iocb, buffers, mailbox commands 10134 * while shutting down the HBA. This function is called with no 10135 * lock held and always returns 1. 10136 * This function does the following to cleanup driver resources: 10137 * - Free discovery resources for each virtual port 10138 * - Cleanup any pending fabric iocbs 10139 * - Iterate through the iocb txq and free each entry 10140 * in the list. 10141 * - Free up any buffer posted to the HBA 10142 * - Free mailbox commands in the mailbox queue. 10143 **/ 10144 int 10145 lpfc_sli_hba_down(struct lpfc_hba *phba) 10146 { 10147 LIST_HEAD(completions); 10148 struct lpfc_sli *psli = &phba->sli; 10149 struct lpfc_queue *qp = NULL; 10150 struct lpfc_sli_ring *pring; 10151 struct lpfc_dmabuf *buf_ptr; 10152 unsigned long flags = 0; 10153 int i; 10154 10155 /* Shutdown the mailbox command sub-system */ 10156 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10157 10158 lpfc_hba_down_prep(phba); 10159 10160 lpfc_fabric_abort_hba(phba); 10161 10162 spin_lock_irqsave(&phba->hbalock, flags); 10163 10164 /* 10165 * Error everything on the txq since these iocbs 10166 * have not been given to the FW yet. 
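	 * They are failed back below through lpfc_sli_cancel_iocbs() with
	 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN status.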
10167 */ 10168 if (phba->sli_rev != LPFC_SLI_REV4) { 10169 for (i = 0; i < psli->num_rings; i++) { 10170 pring = &psli->sli3_ring[i]; 10171 /* Only slow rings */ 10172 if (pring->ringno == LPFC_ELS_RING) { 10173 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10174 /* Set the lpfc data pending flag */ 10175 set_bit(LPFC_DATA_READY, &phba->data_flags); 10176 } 10177 list_splice_init(&pring->txq, &completions); 10178 } 10179 } else { 10180 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10181 pring = qp->pring; 10182 if (!pring) 10183 continue; 10184 spin_lock_irq(&pring->ring_lock); 10185 list_splice_init(&pring->txq, &completions); 10186 spin_unlock_irq(&pring->ring_lock); 10187 if (pring == phba->sli4_hba.els_wq->pring) { 10188 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10189 /* Set the lpfc data pending flag */ 10190 set_bit(LPFC_DATA_READY, &phba->data_flags); 10191 } 10192 } 10193 } 10194 spin_unlock_irqrestore(&phba->hbalock, flags); 10195 10196 /* Cancel all the IOCBs from the completions list */ 10197 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10198 IOERR_SLI_DOWN); 10199 10200 spin_lock_irqsave(&phba->hbalock, flags); 10201 list_splice_init(&phba->elsbuf, &completions); 10202 phba->elsbuf_cnt = 0; 10203 phba->elsbuf_prev_cnt = 0; 10204 spin_unlock_irqrestore(&phba->hbalock, flags); 10205 10206 while (!list_empty(&completions)) { 10207 list_remove_head(&completions, buf_ptr, 10208 struct lpfc_dmabuf, list); 10209 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10210 kfree(buf_ptr); 10211 } 10212 10213 /* Return any active mbox cmds */ 10214 del_timer_sync(&psli->mbox_tmo); 10215 10216 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10217 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10218 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10219 10220 return 1; 10221 } 10222 10223 /** 10224 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10225 * @srcp: Source memory pointer. 10226 * @destp: Destination memory pointer. 10227 * @cnt: Number of words required to be copied. 10228 * 10229 * This function is used for copying data between driver memory 10230 * and the SLI memory. This function also changes the endianness 10231 * of each word if native endianness is different from SLI 10232 * endianness. This function can be called with or without 10233 * lock. 10234 **/ 10235 void 10236 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10237 { 10238 uint32_t *src = srcp; 10239 uint32_t *dest = destp; 10240 uint32_t ldata; 10241 int i; 10242 10243 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10244 ldata = *src; 10245 ldata = le32_to_cpu(ldata); 10246 *dest = ldata; 10247 src++; 10248 dest++; 10249 } 10250 } 10251 10252 10253 /** 10254 * lpfc_sli_bemem_bcopy - SLI memory copy function 10255 * @srcp: Source memory pointer. 10256 * @destp: Destination memory pointer. 10257 * @cnt: Number of words required to be copied. 10258 * 10259 * This function is used for copying data between a data structure 10260 * with big endian representation to local endianness. 10261 * This function can be called with or without lock. 
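 *
 * Note: the copy loop steps @cnt in sizeof(uint32_t) byte increments, so
 * callers pass a byte count and whole 32-bit words are copied.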
10262 **/ 10263 void 10264 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 10265 { 10266 uint32_t *src = srcp; 10267 uint32_t *dest = destp; 10268 uint32_t ldata; 10269 int i; 10270 10271 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 10272 ldata = *src; 10273 ldata = be32_to_cpu(ldata); 10274 *dest = ldata; 10275 src++; 10276 dest++; 10277 } 10278 } 10279 10280 /** 10281 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 10282 * @phba: Pointer to HBA context object. 10283 * @pring: Pointer to driver SLI ring object. 10284 * @mp: Pointer to driver buffer object. 10285 * 10286 * This function is called with no lock held. 10287 * It always return zero after adding the buffer to the postbufq 10288 * buffer list. 10289 **/ 10290 int 10291 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10292 struct lpfc_dmabuf *mp) 10293 { 10294 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 10295 later */ 10296 spin_lock_irq(&phba->hbalock); 10297 list_add_tail(&mp->list, &pring->postbufq); 10298 pring->postbufq_cnt++; 10299 spin_unlock_irq(&phba->hbalock); 10300 return 0; 10301 } 10302 10303 /** 10304 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 10305 * @phba: Pointer to HBA context object. 10306 * 10307 * When HBQ is enabled, buffers are searched based on tags. This function 10308 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 10309 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 10310 * does not conflict with tags of buffer posted for unsolicited events. 10311 * The function returns the allocated tag. The function is called with 10312 * no locks held. 10313 **/ 10314 uint32_t 10315 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 10316 { 10317 spin_lock_irq(&phba->hbalock); 10318 phba->buffer_tag_count++; 10319 /* 10320 * Always set the QUE_BUFTAG_BIT to distiguish between 10321 * a tag assigned by HBQ. 10322 */ 10323 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 10324 spin_unlock_irq(&phba->hbalock); 10325 return phba->buffer_tag_count; 10326 } 10327 10328 /** 10329 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 10330 * @phba: Pointer to HBA context object. 10331 * @pring: Pointer to driver SLI ring object. 10332 * @tag: Buffer tag. 10333 * 10334 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 10335 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 10336 * iocb is posted to the response ring with the tag of the buffer. 10337 * This function searches the pring->postbufq list using the tag 10338 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 10339 * iocb. If the buffer is found then lpfc_dmabuf object of the 10340 * buffer is returned to the caller else NULL is returned. 10341 * This function is called with no lock held. 
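 * The postbufq is walked under the hbalock, which is taken and released
 * internally.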
10342 **/ 10343 struct lpfc_dmabuf * 10344 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10345 uint32_t tag) 10346 { 10347 struct lpfc_dmabuf *mp, *next_mp; 10348 struct list_head *slp = &pring->postbufq; 10349 10350 /* Search postbufq, from the beginning, looking for a match on tag */ 10351 spin_lock_irq(&phba->hbalock); 10352 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10353 if (mp->buffer_tag == tag) { 10354 list_del_init(&mp->list); 10355 pring->postbufq_cnt--; 10356 spin_unlock_irq(&phba->hbalock); 10357 return mp; 10358 } 10359 } 10360 10361 spin_unlock_irq(&phba->hbalock); 10362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10363 "0402 Cannot find virtual addr for buffer tag on " 10364 "ring %d Data x%lx x%p x%p x%x\n", 10365 pring->ringno, (unsigned long) tag, 10366 slp->next, slp->prev, pring->postbufq_cnt); 10367 10368 return NULL; 10369 } 10370 10371 /** 10372 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10373 * @phba: Pointer to HBA context object. 10374 * @pring: Pointer to driver SLI ring object. 10375 * @phys: DMA address of the buffer. 10376 * 10377 * This function searches the buffer list using the dma_address 10378 * of unsolicited event to find the driver's lpfc_dmabuf object 10379 * corresponding to the dma_address. The function returns the 10380 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10381 * This function is called by the ct and els unsolicited event 10382 * handlers to get the buffer associated with the unsolicited 10383 * event. 10384 * 10385 * This function is called with no lock held. 10386 **/ 10387 struct lpfc_dmabuf * 10388 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10389 dma_addr_t phys) 10390 { 10391 struct lpfc_dmabuf *mp, *next_mp; 10392 struct list_head *slp = &pring->postbufq; 10393 10394 /* Search postbufq, from the beginning, looking for a match on phys */ 10395 spin_lock_irq(&phba->hbalock); 10396 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10397 if (mp->phys == phys) { 10398 list_del_init(&mp->list); 10399 pring->postbufq_cnt--; 10400 spin_unlock_irq(&phba->hbalock); 10401 return mp; 10402 } 10403 } 10404 10405 spin_unlock_irq(&phba->hbalock); 10406 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10407 "0410 Cannot find virtual addr for mapped buf on " 10408 "ring %d Data x%llx x%p x%p x%x\n", 10409 pring->ringno, (unsigned long long)phys, 10410 slp->next, slp->prev, pring->postbufq_cnt); 10411 return NULL; 10412 } 10413 10414 /** 10415 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 10416 * @phba: Pointer to HBA context object. 10417 * @cmdiocb: Pointer to driver command iocb object. 10418 * @rspiocb: Pointer to driver response iocb object. 10419 * 10420 * This function is the completion handler for the abort iocbs for 10421 * ELS commands. This function is called from the ELS ring event 10422 * handler with no lock held. This function frees memory resources 10423 * associated with the abort iocb. 10424 **/ 10425 static void 10426 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10427 struct lpfc_iocbq *rspiocb) 10428 { 10429 IOCB_t *irsp = &rspiocb->iocb; 10430 uint16_t abort_iotag, abort_context; 10431 struct lpfc_iocbq *abort_iocb = NULL; 10432 10433 if (irsp->ulpStatus) { 10434 10435 /* 10436 * Assume that the port already completed and returned, or 10437 * will return the iocb. Just Log the message. 
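		 * The lookup below only enriches the log message; the aborted
		 * iocb is not completed from this handler.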
10438 */ 10439 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 10440 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 10441 10442 spin_lock_irq(&phba->hbalock); 10443 if (phba->sli_rev < LPFC_SLI_REV4) { 10444 if (abort_iotag != 0 && 10445 abort_iotag <= phba->sli.last_iotag) 10446 abort_iocb = 10447 phba->sli.iocbq_lookup[abort_iotag]; 10448 } else 10449 /* For sli4 the abort_tag is the XRI, 10450 * so the abort routine puts the iotag of the iocb 10451 * being aborted in the context field of the abort 10452 * IOCB. 10453 */ 10454 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 10455 10456 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 10457 "0327 Cannot abort els iocb %p " 10458 "with tag %x context %x, abort status %x, " 10459 "abort code %x\n", 10460 abort_iocb, abort_iotag, abort_context, 10461 irsp->ulpStatus, irsp->un.ulpWord[4]); 10462 10463 spin_unlock_irq(&phba->hbalock); 10464 } 10465 lpfc_sli_release_iocbq(phba, cmdiocb); 10466 return; 10467 } 10468 10469 /** 10470 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 10471 * @phba: Pointer to HBA context object. 10472 * @cmdiocb: Pointer to driver command iocb object. 10473 * @rspiocb: Pointer to driver response iocb object. 10474 * 10475 * The function is called from SLI ring event handler with no 10476 * lock held. This function is the completion handler for ELS commands 10477 * which are aborted. The function frees memory resources used for 10478 * the aborted ELS commands. 10479 **/ 10480 static void 10481 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10482 struct lpfc_iocbq *rspiocb) 10483 { 10484 IOCB_t *irsp = &rspiocb->iocb; 10485 10486 /* ELS cmd tag <ulpIoTag> completes */ 10487 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10488 "0139 Ignoring ELS cmd tag x%x completion Data: " 10489 "x%x x%x x%x\n", 10490 irsp->ulpIoTag, irsp->ulpStatus, 10491 irsp->un.ulpWord[4], irsp->ulpTimeout); 10492 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 10493 lpfc_ct_free_iocb(phba, cmdiocb); 10494 else 10495 lpfc_els_free_iocb(phba, cmdiocb); 10496 return; 10497 } 10498 10499 /** 10500 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 10501 * @phba: Pointer to HBA context object. 10502 * @pring: Pointer to driver SLI ring object. 10503 * @cmdiocb: Pointer to driver command iocb object. 10504 * 10505 * This function issues an abort iocb for the provided command iocb down to 10506 * the port. Other than the case the outstanding command iocb is an abort 10507 * request, this function issues abort out unconditionally. This function is 10508 * called with hbalock held. The function returns 0 when it fails due to 10509 * memory allocation failure or when the command iocb is an abort request. 10510 **/ 10511 static int 10512 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10513 struct lpfc_iocbq *cmdiocb) 10514 { 10515 struct lpfc_vport *vport = cmdiocb->vport; 10516 struct lpfc_iocbq *abtsiocbp; 10517 IOCB_t *icmd = NULL; 10518 IOCB_t *iabt = NULL; 10519 int retval; 10520 unsigned long iflags; 10521 10522 lockdep_assert_held(&phba->hbalock); 10523 10524 /* 10525 * There are certain command types we don't want to abort. And we 10526 * don't want to abort commands that are already in the process of 10527 * being aborted. 
10528 	 */
10529 	icmd = &cmdiocb->iocb;
10530 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10531 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10532 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10533 		return 0;
10534 
10535 	/* issue ABTS for this IOCB based on iotag */
10536 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
10537 	if (abtsiocbp == NULL)
10538 		return 0;
10539 
10540 	/* This signals the response to set the correct status
10541 	 * before calling the completion handler
10542 	 */
10543 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10544 
10545 	iabt = &abtsiocbp->iocb;
10546 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10547 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
10548 	if (phba->sli_rev == LPFC_SLI_REV4) {
10549 		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10550 		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10551 	}
10552 	else
10553 		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10554 	iabt->ulpLe = 1;
10555 	iabt->ulpClass = icmd->ulpClass;
10556 
10557 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
10558 	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10559 	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10560 		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10561 	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10562 		abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10563 
10564 	if (phba->link_state >= LPFC_LINK_UP)
10565 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
10566 	else
10567 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10568 
10569 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10570 	abtsiocbp->vport = vport;
10571 
10572 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10573 			 "0339 Abort xri x%x, original iotag x%x, "
10574 			 "abort cmd iotag x%x\n",
10575 			 iabt->un.acxri.abortIoTag,
10576 			 iabt->un.acxri.abortContextTag,
10577 			 abtsiocbp->iotag);
10578 
10579 	if (phba->sli_rev == LPFC_SLI_REV4) {
10580 		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10581 		if (unlikely(pring == NULL))
10582 			return 0;
10583 		/* Note: both hbalock and ring_lock need to be held here */
10584 		spin_lock_irqsave(&pring->ring_lock, iflags);
10585 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10586 			abtsiocbp, 0);
10587 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
10588 	} else {
10589 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10590 			abtsiocbp, 0);
10591 	}
10592 
10593 	if (retval)
10594 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
10595 
10596 	/*
10597 	 * Callers of this routine should check for IOCB_ERROR
10598 	 * and handle it properly. This routine no longer removes the
10599 	 * iocb from the txcmplq or calls the completion handler on IOCB_ERROR.
10600 	 */
10601 	return retval;
10602 }
10603 
10604 /**
10605  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10606  * @phba: Pointer to HBA context object.
10607  * @pring: Pointer to driver SLI ring object.
10608  * @cmdiocb: Pointer to driver command iocb object.
10609  *
10610  * This function issues an abort iocb for the provided command iocb. In case
10611  * of unloading, the abort iocb will not be issued to commands on the ELS
10612  * ring. Instead, the completion callback of those commands is changed
10613  * so that nothing happens when they finish. This function is called with
10614  * the hbalock held. The function returns 0 when the command iocb is an abort
10615  * request.
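 *
 * Illustrative call pattern (a sketch only, not lifted from a real caller;
 * error handling is trimmed and "iocb" is assumed to be a command iocb
 * already on the txcmplq):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *	if (ret == IOCB_ERROR)
 *		(recover here: the iocb is still on the txcmplq and its
 *		 completion handler has not been called)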
10616 **/ 10617 int 10618 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10619 struct lpfc_iocbq *cmdiocb) 10620 { 10621 struct lpfc_vport *vport = cmdiocb->vport; 10622 int retval = IOCB_ERROR; 10623 IOCB_t *icmd = NULL; 10624 10625 lockdep_assert_held(&phba->hbalock); 10626 10627 /* 10628 * There are certain command types we don't want to abort. And we 10629 * don't want to abort commands that are already in the process of 10630 * being aborted. 10631 */ 10632 icmd = &cmdiocb->iocb; 10633 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 10634 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 10635 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10636 return 0; 10637 10638 /* 10639 * If we're unloading, don't abort iocb on the ELS ring, but change 10640 * the callback so that nothing happens when it finishes. 10641 */ 10642 if ((vport->load_flag & FC_UNLOADING) && 10643 (pring->ringno == LPFC_ELS_RING)) { 10644 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 10645 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 10646 else 10647 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 10648 goto abort_iotag_exit; 10649 } 10650 10651 /* Now, we try to issue the abort to the cmdiocb out */ 10652 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 10653 10654 abort_iotag_exit: 10655 /* 10656 * Caller to this routine should check for IOCB_ERROR 10657 * and handle it properly. This routine no longer removes 10658 * iocb off txcmplq and call compl in case of IOCB_ERROR. 10659 */ 10660 return retval; 10661 } 10662 10663 /** 10664 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb 10665 * @phba: Pointer to HBA context object. 10666 * @pring: Pointer to driver SLI ring object. 10667 * @cmdiocb: Pointer to driver command iocb object. 10668 * 10669 * This function issues an abort iocb for the provided command iocb down to 10670 * the port. Other than the case the outstanding command iocb is an abort 10671 * request, this function issues abort out unconditionally. This function is 10672 * called with hbalock held. The function returns 0 when it fails due to 10673 * memory allocation failure or when the command iocb is an abort request. 10674 **/ 10675 static int 10676 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10677 struct lpfc_iocbq *cmdiocb) 10678 { 10679 struct lpfc_vport *vport = cmdiocb->vport; 10680 struct lpfc_iocbq *abtsiocbp; 10681 union lpfc_wqe *abts_wqe; 10682 int retval; 10683 10684 /* 10685 * There are certain command types we don't want to abort. And we 10686 * don't want to abort commands that are already in the process of 10687 * being aborted. 10688 */ 10689 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 10690 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 10691 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10692 return 0; 10693 10694 /* issue ABTS for this io based on iotag */ 10695 abtsiocbp = __lpfc_sli_get_iocbq(phba); 10696 if (abtsiocbp == NULL) 10697 return 0; 10698 10699 /* This signals the response to set the correct status 10700 * before calling the completion handler 10701 */ 10702 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 10703 10704 /* Complete prepping the abort wqe and issue to the FW. */ 10705 abts_wqe = &abtsiocbp->wqe; 10706 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); 10707 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 10708 10709 /* Explicitly set reserved fields to zero.*/ 10710 abts_wqe->abort_cmd.rsrvd4 = 0; 10711 abts_wqe->abort_cmd.rsrvd5 = 0; 10712 10713 /* WQE Common - word 6. 
Context is XRI tag. Set 0. */ 10714 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); 10715 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); 10716 10717 /* word 7 */ 10718 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 10719 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 10720 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, 10721 cmdiocb->iocb.ulpClass); 10722 10723 /* word 8 - tell the FW to abort the IO associated with this 10724 * outstanding exchange ID. 10725 */ 10726 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; 10727 10728 /* word 9 - this is the iotag for the abts_wqe completion. */ 10729 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 10730 abtsiocbp->iotag); 10731 10732 /* word 10 */ 10733 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); 10734 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 10735 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 10736 10737 /* word 11 */ 10738 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 10739 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 10740 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 10741 10742 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10743 abtsiocbp->iocb_flag |= LPFC_IO_NVME; 10744 abtsiocbp->vport = vport; 10745 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; 10746 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); 10747 if (retval == IOCB_ERROR) { 10748 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 10749 "6147 Failed abts issue_wqe with status x%x " 10750 "for oxid x%x\n", 10751 retval, cmdiocb->sli4_xritag); 10752 lpfc_sli_release_iocbq(phba, abtsiocbp); 10753 return retval; 10754 } 10755 10756 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 10757 "6148 Drv Abort NVME Request Issued for " 10758 "ox_id x%x on reqtag x%x\n", 10759 cmdiocb->sli4_xritag, 10760 abtsiocbp->iotag); 10761 10762 return retval; 10763 } 10764 10765 /** 10766 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 10767 * @phba: pointer to lpfc HBA data structure. 10768 * 10769 * This routine will abort all pending and outstanding iocbs to an HBA. 10770 **/ 10771 void 10772 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 10773 { 10774 struct lpfc_sli *psli = &phba->sli; 10775 struct lpfc_sli_ring *pring; 10776 struct lpfc_queue *qp = NULL; 10777 int i; 10778 10779 if (phba->sli_rev != LPFC_SLI_REV4) { 10780 for (i = 0; i < psli->num_rings; i++) { 10781 pring = &psli->sli3_ring[i]; 10782 lpfc_sli_abort_iocb_ring(phba, pring); 10783 } 10784 return; 10785 } 10786 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10787 pring = qp->pring; 10788 if (!pring) 10789 continue; 10790 lpfc_sli_abort_iocb_ring(phba, pring); 10791 } 10792 } 10793 10794 /** 10795 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 10796 * @iocbq: Pointer to driver iocb object. 10797 * @vport: Pointer to driver virtual port object. 10798 * @tgt_id: SCSI ID of the target. 10799 * @lun_id: LUN ID of the scsi device. 10800 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 10801 * 10802 * This function acts as an iocb filter for functions which abort or count 10803 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 10804 * 0 if the filtering criteria is met for the given iocb and will return 10805 * 1 if the filtering criteria is not met. 
10806 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 10807 * given iocb is for the SCSI device specified by vport, tgt_id and 10808 * lun_id parameter. 10809 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 10810 * given iocb is for the SCSI target specified by vport and tgt_id 10811 * parameters. 10812 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 10813 * given iocb is for the SCSI host associated with the given vport. 10814 * This function is called with no locks held. 10815 **/ 10816 static int 10817 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 10818 uint16_t tgt_id, uint64_t lun_id, 10819 lpfc_ctx_cmd ctx_cmd) 10820 { 10821 struct lpfc_scsi_buf *lpfc_cmd; 10822 int rc = 1; 10823 10824 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 10825 return rc; 10826 10827 if (iocbq->vport != vport) 10828 return rc; 10829 10830 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 10831 10832 if (lpfc_cmd->pCmd == NULL) 10833 return rc; 10834 10835 switch (ctx_cmd) { 10836 case LPFC_CTX_LUN: 10837 if ((lpfc_cmd->rdata->pnode) && 10838 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 10839 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 10840 rc = 0; 10841 break; 10842 case LPFC_CTX_TGT: 10843 if ((lpfc_cmd->rdata->pnode) && 10844 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 10845 rc = 0; 10846 break; 10847 case LPFC_CTX_HOST: 10848 rc = 0; 10849 break; 10850 default: 10851 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 10852 __func__, ctx_cmd); 10853 break; 10854 } 10855 10856 return rc; 10857 } 10858 10859 /** 10860 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 10861 * @vport: Pointer to virtual port. 10862 * @tgt_id: SCSI ID of the target. 10863 * @lun_id: LUN ID of the scsi device. 10864 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10865 * 10866 * This function returns number of FCP commands pending for the vport. 10867 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 10868 * commands pending on the vport associated with SCSI device specified 10869 * by tgt_id and lun_id parameters. 10870 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 10871 * commands pending on the vport associated with SCSI target specified 10872 * by tgt_id parameter. 10873 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 10874 * commands pending on the vport. 10875 * This function returns the number of iocbs which satisfy the filter. 10876 * This function is called without any lock held. 10877 **/ 10878 int 10879 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 10880 lpfc_ctx_cmd ctx_cmd) 10881 { 10882 struct lpfc_hba *phba = vport->phba; 10883 struct lpfc_iocbq *iocbq; 10884 int sum, i; 10885 10886 spin_lock_irq(&phba->hbalock); 10887 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 10888 iocbq = phba->sli.iocbq_lookup[i]; 10889 10890 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 10891 ctx_cmd) == 0) 10892 sum++; 10893 } 10894 spin_unlock_irq(&phba->hbalock); 10895 10896 return sum; 10897 } 10898 10899 /** 10900 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 10901 * @phba: Pointer to HBA context object 10902 * @cmdiocb: Pointer to command iocb object. 10903 * @rspiocb: Pointer to response iocb object. 10904 * 10905 * This function is called when an aborted FCP iocb completes. 
This 10906 * function is called by the ring event handler with no lock held. 10907 * This function frees the iocb. 10908 **/ 10909 void 10910 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10911 struct lpfc_iocbq *rspiocb) 10912 { 10913 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10914 "3096 ABORT_XRI_CN completing on rpi x%x " 10915 "original iotag x%x, abort cmd iotag x%x " 10916 "status 0x%x, reason 0x%x\n", 10917 cmdiocb->iocb.un.acxri.abortContextTag, 10918 cmdiocb->iocb.un.acxri.abortIoTag, 10919 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 10920 rspiocb->iocb.un.ulpWord[4]); 10921 lpfc_sli_release_iocbq(phba, cmdiocb); 10922 return; 10923 } 10924 10925 /** 10926 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 10927 * @vport: Pointer to virtual port. 10928 * @pring: Pointer to driver SLI ring object. 10929 * @tgt_id: SCSI ID of the target. 10930 * @lun_id: LUN ID of the scsi device. 10931 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10932 * 10933 * This function sends an abort command for every SCSI command 10934 * associated with the given virtual port pending on the ring 10935 * filtered by lpfc_sli_validate_fcp_iocb function. 10936 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 10937 * FCP iocbs associated with lun specified by tgt_id and lun_id 10938 * parameters 10939 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 10940 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 10941 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 10942 * FCP iocbs associated with virtual port. 10943 * This function returns number of iocbs it failed to abort. 10944 * This function is called with no locks held. 10945 **/ 10946 int 10947 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 10948 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 10949 { 10950 struct lpfc_hba *phba = vport->phba; 10951 struct lpfc_iocbq *iocbq; 10952 struct lpfc_iocbq *abtsiocb; 10953 IOCB_t *cmd = NULL; 10954 int errcnt = 0, ret_val = 0; 10955 int i; 10956 10957 for (i = 1; i <= phba->sli.last_iotag; i++) { 10958 iocbq = phba->sli.iocbq_lookup[i]; 10959 10960 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 10961 abort_cmd) != 0) 10962 continue; 10963 10964 /* 10965 * If the iocbq is already being aborted, don't take a second 10966 * action, but do count it. 10967 */ 10968 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 10969 continue; 10970 10971 /* issue ABTS for this IOCB based on iotag */ 10972 abtsiocb = lpfc_sli_get_iocbq(phba); 10973 if (abtsiocb == NULL) { 10974 errcnt++; 10975 continue; 10976 } 10977 10978 /* indicate the IO is being aborted by the driver. 
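 * Setting LPFC_DRIVER_ABORTED here also keeps a later pass over the
 * iocbq_lookup table from issuing a second abort for the same iocb.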
*/ 10979 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 10980 10981 cmd = &iocbq->iocb; 10982 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 10983 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 10984 if (phba->sli_rev == LPFC_SLI_REV4) 10985 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 10986 else 10987 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 10988 abtsiocb->iocb.ulpLe = 1; 10989 abtsiocb->iocb.ulpClass = cmd->ulpClass; 10990 abtsiocb->vport = vport; 10991 10992 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10993 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 10994 if (iocbq->iocb_flag & LPFC_IO_FCP) 10995 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 10996 if (iocbq->iocb_flag & LPFC_IO_FOF) 10997 abtsiocb->iocb_flag |= LPFC_IO_FOF; 10998 10999 if (lpfc_is_link_up(phba)) 11000 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11001 else 11002 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11003 11004 /* Setup callback routine and issue the command. */ 11005 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11006 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11007 abtsiocb, 0); 11008 if (ret_val == IOCB_ERROR) { 11009 lpfc_sli_release_iocbq(phba, abtsiocb); 11010 errcnt++; 11011 continue; 11012 } 11013 } 11014 11015 return errcnt; 11016 } 11017 11018 /** 11019 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11020 * @vport: Pointer to virtual port. 11021 * @pring: Pointer to driver SLI ring object. 11022 * @tgt_id: SCSI ID of the target. 11023 * @lun_id: LUN ID of the scsi device. 11024 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11025 * 11026 * This function sends an abort command for every SCSI command 11027 * associated with the given virtual port pending on the ring 11028 * filtered by lpfc_sli_validate_fcp_iocb function. 11029 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11030 * FCP iocbs associated with lun specified by tgt_id and lun_id 11031 * parameters 11032 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11033 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11034 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11035 * FCP iocbs associated with virtual port. 11036 * This function returns number of iocbs it aborted . 11037 * This function is called with no locks held right after a taskmgmt 11038 * command is sent. 11039 **/ 11040 int 11041 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11042 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11043 { 11044 struct lpfc_hba *phba = vport->phba; 11045 struct lpfc_scsi_buf *lpfc_cmd; 11046 struct lpfc_iocbq *abtsiocbq; 11047 struct lpfc_nodelist *ndlp; 11048 struct lpfc_iocbq *iocbq; 11049 IOCB_t *icmd; 11050 int sum, i, ret_val; 11051 unsigned long iflags; 11052 struct lpfc_sli_ring *pring_s4; 11053 11054 spin_lock_irq(&phba->hbalock); 11055 11056 /* all I/Os are in process of being flushed */ 11057 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11058 spin_unlock_irq(&phba->hbalock); 11059 return 0; 11060 } 11061 sum = 0; 11062 11063 for (i = 1; i <= phba->sli.last_iotag; i++) { 11064 iocbq = phba->sli.iocbq_lookup[i]; 11065 11066 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11067 cmd) != 0) 11068 continue; 11069 11070 /* 11071 * If the iocbq is already being aborted, don't take a second 11072 * action, but do count it. 
11073 */ 11074 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11075 continue; 11076 11077 /* issue ABTS for this IOCB based on iotag */ 11078 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11079 if (abtsiocbq == NULL) 11080 continue; 11081 11082 icmd = &iocbq->iocb; 11083 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11084 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11085 if (phba->sli_rev == LPFC_SLI_REV4) 11086 abtsiocbq->iocb.un.acxri.abortIoTag = 11087 iocbq->sli4_xritag; 11088 else 11089 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11090 abtsiocbq->iocb.ulpLe = 1; 11091 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11092 abtsiocbq->vport = vport; 11093 11094 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11095 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11096 if (iocbq->iocb_flag & LPFC_IO_FCP) 11097 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11098 if (iocbq->iocb_flag & LPFC_IO_FOF) 11099 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11100 11101 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 11102 ndlp = lpfc_cmd->rdata->pnode; 11103 11104 if (lpfc_is_link_up(phba) && 11105 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11106 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11107 else 11108 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11109 11110 /* Setup callback routine and issue the command. */ 11111 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11112 11113 /* 11114 * Indicate the IO is being aborted by the driver and set 11115 * the caller's flag into the aborted IO. 11116 */ 11117 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11118 11119 if (phba->sli_rev == LPFC_SLI_REV4) { 11120 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11121 if (pring_s4 == NULL) 11122 continue; 11123 /* Note: both hbalock and ring_lock must be set here */ 11124 spin_lock_irqsave(&pring_s4->ring_lock, iflags); 11125 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11126 abtsiocbq, 0); 11127 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 11128 } else { 11129 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11130 abtsiocbq, 0); 11131 } 11132 11133 11134 if (ret_val == IOCB_ERROR) 11135 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11136 else 11137 sum++; 11138 } 11139 spin_unlock_irq(&phba->hbalock); 11140 return sum; 11141 } 11142 11143 /** 11144 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11145 * @phba: Pointer to HBA context object. 11146 * @cmdiocbq: Pointer to command iocb. 11147 * @rspiocbq: Pointer to response iocb. 11148 * 11149 * This function is the completion handler for iocbs issued using 11150 * lpfc_sli_issue_iocb_wait function. This function is called by the 11151 * ring event handler function without any lock held. This function 11152 * can be called from both worker thread context and interrupt 11153 * context. This function also can be called from other thread which 11154 * cleans up the SLI layer objects. 11155 * This function copy the contents of the response iocb to the 11156 * response iocb memory object provided by the caller of 11157 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11158 * sleeps for the iocb completion. 
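 * If the issuing thread has already timed out (LPFC_IO_WAKE_TMO is set),
 * there is no longer a sleeper to wake; in that case the original
 * completion handler saved in wait_iocb_cmpl is invoked if one was
 * supplied, otherwise the iocb is simply released.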
11159 **/ 11160 static void 11161 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11162 struct lpfc_iocbq *cmdiocbq, 11163 struct lpfc_iocbq *rspiocbq) 11164 { 11165 wait_queue_head_t *pdone_q; 11166 unsigned long iflags; 11167 struct lpfc_scsi_buf *lpfc_cmd; 11168 11169 spin_lock_irqsave(&phba->hbalock, iflags); 11170 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11171 11172 /* 11173 * A time out has occurred for the iocb. If a time out 11174 * completion handler has been supplied, call it. Otherwise, 11175 * just free the iocbq. 11176 */ 11177 11178 spin_unlock_irqrestore(&phba->hbalock, iflags); 11179 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11180 cmdiocbq->wait_iocb_cmpl = NULL; 11181 if (cmdiocbq->iocb_cmpl) 11182 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11183 else 11184 lpfc_sli_release_iocbq(phba, cmdiocbq); 11185 return; 11186 } 11187 11188 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11189 if (cmdiocbq->context2 && rspiocbq) 11190 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11191 &rspiocbq->iocb, sizeof(IOCB_t)); 11192 11193 /* Set the exchange busy flag for task management commands */ 11194 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11195 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11196 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 11197 cur_iocbq); 11198 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11199 } 11200 11201 pdone_q = cmdiocbq->context_un.wait_queue; 11202 if (pdone_q) 11203 wake_up(pdone_q); 11204 spin_unlock_irqrestore(&phba->hbalock, iflags); 11205 return; 11206 } 11207 11208 /** 11209 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11210 * @phba: Pointer to HBA context object.. 11211 * @piocbq: Pointer to command iocb. 11212 * @flag: Flag to test. 11213 * 11214 * This routine grabs the hbalock and then test the iocb_flag to 11215 * see if the passed in flag is set. 11216 * Returns: 11217 * 1 if flag is set. 11218 * 0 if flag is not set. 11219 **/ 11220 static int 11221 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 11222 struct lpfc_iocbq *piocbq, uint32_t flag) 11223 { 11224 unsigned long iflags; 11225 int ret; 11226 11227 spin_lock_irqsave(&phba->hbalock, iflags); 11228 ret = piocbq->iocb_flag & flag; 11229 spin_unlock_irqrestore(&phba->hbalock, iflags); 11230 return ret; 11231 11232 } 11233 11234 /** 11235 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 11236 * @phba: Pointer to HBA context object.. 11237 * @pring: Pointer to sli ring. 11238 * @piocb: Pointer to command iocb. 11239 * @prspiocbq: Pointer to response iocb. 11240 * @timeout: Timeout in number of seconds. 11241 * 11242 * This function issues the iocb to firmware and waits for the 11243 * iocb to complete. The iocb_cmpl field of the shall be used 11244 * to handle iocbs which time out. If the field is NULL, the 11245 * function shall free the iocbq structure. If more clean up is 11246 * needed, the caller is expected to provide a completion function 11247 * that will provide the needed clean up. If the iocb command is 11248 * not completed within timeout seconds, the function will either 11249 * free the iocbq structure (if iocb_cmpl == NULL) or execute the 11250 * completion function set in the iocb_cmpl field and then return 11251 * a status of IOCB_TIMEDOUT. The caller should not free the iocb 11252 * resources if this function returns IOCB_TIMEDOUT. 11253 * The function waits for the iocb completion using an 11254 * non-interruptible wait. 11255 * This function will sleep while waiting for iocb completion. 
11256 * So, this function should not be called from any context which 11257 * does not allow sleeping. Due to the same reason, this function 11258 * cannot be called with interrupt disabled. 11259 * This function assumes that the iocb completions occur while 11260 * this function sleep. So, this function cannot be called from 11261 * the thread which process iocb completion for this ring. 11262 * This function clears the iocb_flag of the iocb object before 11263 * issuing the iocb and the iocb completion handler sets this 11264 * flag and wakes this thread when the iocb completes. 11265 * The contents of the response iocb will be copied to prspiocbq 11266 * by the completion handler when the command completes. 11267 * This function returns IOCB_SUCCESS when success. 11268 * This function is called with no lock held. 11269 **/ 11270 int 11271 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 11272 uint32_t ring_number, 11273 struct lpfc_iocbq *piocb, 11274 struct lpfc_iocbq *prspiocbq, 11275 uint32_t timeout) 11276 { 11277 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 11278 long timeleft, timeout_req = 0; 11279 int retval = IOCB_SUCCESS; 11280 uint32_t creg_val; 11281 struct lpfc_iocbq *iocb; 11282 int txq_cnt = 0; 11283 int txcmplq_cnt = 0; 11284 struct lpfc_sli_ring *pring; 11285 unsigned long iflags; 11286 bool iocb_completed = true; 11287 11288 if (phba->sli_rev >= LPFC_SLI_REV4) 11289 pring = lpfc_sli4_calc_ring(phba, piocb); 11290 else 11291 pring = &phba->sli.sli3_ring[ring_number]; 11292 /* 11293 * If the caller has provided a response iocbq buffer, then context2 11294 * is NULL or its an error. 11295 */ 11296 if (prspiocbq) { 11297 if (piocb->context2) 11298 return IOCB_ERROR; 11299 piocb->context2 = prspiocbq; 11300 } 11301 11302 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11303 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11304 piocb->context_un.wait_queue = &done_q; 11305 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11306 11307 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11308 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11309 return IOCB_ERROR; 11310 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11311 writel(creg_val, phba->HCregaddr); 11312 readl(phba->HCregaddr); /* flush */ 11313 } 11314 11315 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11316 SLI_IOCB_RET_IOCB); 11317 if (retval == IOCB_SUCCESS) { 11318 timeout_req = msecs_to_jiffies(timeout * 1000); 11319 timeleft = wait_event_timeout(done_q, 11320 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11321 timeout_req); 11322 spin_lock_irqsave(&phba->hbalock, iflags); 11323 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11324 11325 /* 11326 * IOCB timed out. Inform the wake iocb wait 11327 * completion function and set local status 11328 */ 11329 11330 iocb_completed = false; 11331 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11332 } 11333 spin_unlock_irqrestore(&phba->hbalock, iflags); 11334 if (iocb_completed) { 11335 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11336 "0331 IOCB wake signaled\n"); 11337 /* Note: we are not indicating if the IOCB has a success 11338 * status or not - that's for the caller to check. 11339 * IOCB_SUCCESS means just that the command was sent and 11340 * completed. Not that it completed successfully. 
11341 * */ 11342 } else if (timeleft == 0) { 11343 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11344 "0338 IOCB wait timeout error - no " 11345 "wake response Data x%x\n", timeout); 11346 retval = IOCB_TIMEDOUT; 11347 } else { 11348 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11349 "0330 IOCB wake NOT set, " 11350 "Data x%x x%lx\n", 11351 timeout, (timeleft / jiffies)); 11352 retval = IOCB_TIMEDOUT; 11353 } 11354 } else if (retval == IOCB_BUSY) { 11355 if (phba->cfg_log_verbose & LOG_SLI) { 11356 list_for_each_entry(iocb, &pring->txq, list) { 11357 txq_cnt++; 11358 } 11359 list_for_each_entry(iocb, &pring->txcmplq, list) { 11360 txcmplq_cnt++; 11361 } 11362 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11363 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11364 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11365 } 11366 return retval; 11367 } else { 11368 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11369 "0332 IOCB wait issue failed, Data x%x\n", 11370 retval); 11371 retval = IOCB_ERROR; 11372 } 11373 11374 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11375 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11376 return IOCB_ERROR; 11377 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11378 writel(creg_val, phba->HCregaddr); 11379 readl(phba->HCregaddr); /* flush */ 11380 } 11381 11382 if (prspiocbq) 11383 piocb->context2 = NULL; 11384 11385 piocb->context_un.wait_queue = NULL; 11386 piocb->iocb_cmpl = NULL; 11387 return retval; 11388 } 11389 11390 /** 11391 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11392 * @phba: Pointer to HBA context object. 11393 * @pmboxq: Pointer to driver mailbox object. 11394 * @timeout: Timeout in number of seconds. 11395 * 11396 * This function issues the mailbox to firmware and waits for the 11397 * mailbox command to complete. If the mailbox command is not 11398 * completed within timeout seconds, it returns MBX_TIMEOUT. 11399 * The function waits for the mailbox completion using an 11400 * interruptible wait. If the thread is woken up due to a 11401 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11402 * should not free the mailbox resources, if this function returns 11403 * MBX_TIMEOUT. 11404 * This function will sleep while waiting for mailbox completion. 11405 * So, this function should not be called from any context which 11406 * does not allow sleeping. Due to the same reason, this function 11407 * cannot be called with interrupt disabled. 11408 * This function assumes that the mailbox completion occurs while 11409 * this function sleep. So, this function cannot be called from 11410 * the worker thread which processes mailbox completion. 11411 * This function is called in the context of HBA management 11412 * applications. 11413 * This function returns MBX_SUCCESS when successful. 11414 * This function is called with no lock held. 
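 *
 * Minimal call sketch (illustrative only, error handling trimmed; the
 * READ_REV command is just an example of a mailbox prepared elsewhere):
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *	(on MBX_TIMEOUT the mailbox is still owned by the port and will be
 *	 released later by the default mailbox completion handler)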
11415 **/ 11416 int 11417 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 11418 uint32_t timeout) 11419 { 11420 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 11421 MAILBOX_t *mb = NULL; 11422 int retval; 11423 unsigned long flag; 11424 11425 /* The caller might set context1 for extended buffer */ 11426 if (pmboxq->context1) 11427 mb = (MAILBOX_t *)pmboxq->context1; 11428 11429 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 11430 /* setup wake call as IOCB callback */ 11431 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 11432 /* setup context field to pass wait_queue pointer to wake function */ 11433 pmboxq->context1 = &done_q; 11434 11435 /* now issue the command */ 11436 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 11437 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 11438 wait_event_interruptible_timeout(done_q, 11439 pmboxq->mbox_flag & LPFC_MBX_WAKE, 11440 msecs_to_jiffies(timeout * 1000)); 11441 11442 spin_lock_irqsave(&phba->hbalock, flag); 11443 /* restore the possible extended buffer for free resource */ 11444 pmboxq->context1 = (uint8_t *)mb; 11445 /* 11446 * if LPFC_MBX_WAKE flag is set the mailbox is completed 11447 * else do not free the resources. 11448 */ 11449 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 11450 retval = MBX_SUCCESS; 11451 } else { 11452 retval = MBX_TIMEOUT; 11453 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11454 } 11455 spin_unlock_irqrestore(&phba->hbalock, flag); 11456 } else { 11457 /* restore the possible extended buffer for free resource */ 11458 pmboxq->context1 = (uint8_t *)mb; 11459 } 11460 11461 return retval; 11462 } 11463 11464 /** 11465 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 11466 * @phba: Pointer to HBA context. 11467 * 11468 * This function is called to shutdown the driver's mailbox sub-system. 11469 * It first marks the mailbox sub-system is in a block state to prevent 11470 * the asynchronous mailbox command from issued off the pending mailbox 11471 * command queue. If the mailbox command sub-system shutdown is due to 11472 * HBA error conditions such as EEH or ERATT, this routine shall invoke 11473 * the mailbox sub-system flush routine to forcefully bring down the 11474 * mailbox sub-system. Otherwise, if it is due to normal condition (such 11475 * as with offline or HBA function reset), this routine will wait for the 11476 * outstanding mailbox command to complete before invoking the mailbox 11477 * sub-system flush routine to gracefully bring down mailbox sub-system. 11478 **/ 11479 void 11480 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 11481 { 11482 struct lpfc_sli *psli = &phba->sli; 11483 unsigned long timeout; 11484 11485 if (mbx_action == LPFC_MBX_NO_WAIT) { 11486 /* delay 100ms for port state */ 11487 msleep(100); 11488 lpfc_sli_mbox_sys_flush(phba); 11489 return; 11490 } 11491 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 11492 11493 spin_lock_irq(&phba->hbalock); 11494 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11495 11496 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 11497 /* Determine how long we might wait for the active mailbox 11498 * command to be gracefully completed by firmware. 
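 * lpfc_mbox_tmo_val() below returns that limit, in seconds, for the
 * particular mailbox command that is still active.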
11499 */ 11500 if (phba->sli.mbox_active) 11501 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 11502 phba->sli.mbox_active) * 11503 1000) + jiffies; 11504 spin_unlock_irq(&phba->hbalock); 11505 11506 while (phba->sli.mbox_active) { 11507 /* Check active mailbox complete status every 2ms */ 11508 msleep(2); 11509 if (time_after(jiffies, timeout)) 11510 /* Timeout, let the mailbox flush routine to 11511 * forcefully release active mailbox command 11512 */ 11513 break; 11514 } 11515 } else 11516 spin_unlock_irq(&phba->hbalock); 11517 11518 lpfc_sli_mbox_sys_flush(phba); 11519 } 11520 11521 /** 11522 * lpfc_sli_eratt_read - read sli-3 error attention events 11523 * @phba: Pointer to HBA context. 11524 * 11525 * This function is called to read the SLI3 device error attention registers 11526 * for possible error attention events. The caller must hold the hostlock 11527 * with spin_lock_irq(). 11528 * 11529 * This function returns 1 when there is Error Attention in the Host Attention 11530 * Register and returns 0 otherwise. 11531 **/ 11532 static int 11533 lpfc_sli_eratt_read(struct lpfc_hba *phba) 11534 { 11535 uint32_t ha_copy; 11536 11537 /* Read chip Host Attention (HA) register */ 11538 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11539 goto unplug_err; 11540 11541 if (ha_copy & HA_ERATT) { 11542 /* Read host status register to retrieve error event */ 11543 if (lpfc_sli_read_hs(phba)) 11544 goto unplug_err; 11545 11546 /* Check if there is a deferred error condition is active */ 11547 if ((HS_FFER1 & phba->work_hs) && 11548 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 11549 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 11550 phba->hba_flag |= DEFER_ERATT; 11551 /* Clear all interrupt enable conditions */ 11552 writel(0, phba->HCregaddr); 11553 readl(phba->HCregaddr); 11554 } 11555 11556 /* Set the driver HA work bitmap */ 11557 phba->work_ha |= HA_ERATT; 11558 /* Indicate polling handles this ERATT */ 11559 phba->hba_flag |= HBA_ERATT_HANDLED; 11560 return 1; 11561 } 11562 return 0; 11563 11564 unplug_err: 11565 /* Set the driver HS work bitmap */ 11566 phba->work_hs |= UNPLUG_ERR; 11567 /* Set the driver HA work bitmap */ 11568 phba->work_ha |= HA_ERATT; 11569 /* Indicate polling handles this ERATT */ 11570 phba->hba_flag |= HBA_ERATT_HANDLED; 11571 return 1; 11572 } 11573 11574 /** 11575 * lpfc_sli4_eratt_read - read sli-4 error attention events 11576 * @phba: Pointer to HBA context. 11577 * 11578 * This function is called to read the SLI4 device error attention registers 11579 * for possible error attention events. The caller must hold the hostlock 11580 * with spin_lock_irq(). 11581 * 11582 * This function returns 1 when there is Error Attention in the Host Attention 11583 * Register and returns 0 otherwise. 11584 **/ 11585 static int 11586 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 11587 { 11588 uint32_t uerr_sta_hi, uerr_sta_lo; 11589 uint32_t if_type, portsmphr; 11590 struct lpfc_register portstat_reg; 11591 11592 /* 11593 * For now, use the SLI4 device internal unrecoverable error 11594 * registers for error attention. This can be changed later. 
11595 */ 11596 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11597 switch (if_type) { 11598 case LPFC_SLI_INTF_IF_TYPE_0: 11599 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 11600 &uerr_sta_lo) || 11601 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 11602 &uerr_sta_hi)) { 11603 phba->work_hs |= UNPLUG_ERR; 11604 phba->work_ha |= HA_ERATT; 11605 phba->hba_flag |= HBA_ERATT_HANDLED; 11606 return 1; 11607 } 11608 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 11609 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 11610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11611 "1423 HBA Unrecoverable error: " 11612 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 11613 "ue_mask_lo_reg=0x%x, " 11614 "ue_mask_hi_reg=0x%x\n", 11615 uerr_sta_lo, uerr_sta_hi, 11616 phba->sli4_hba.ue_mask_lo, 11617 phba->sli4_hba.ue_mask_hi); 11618 phba->work_status[0] = uerr_sta_lo; 11619 phba->work_status[1] = uerr_sta_hi; 11620 phba->work_ha |= HA_ERATT; 11621 phba->hba_flag |= HBA_ERATT_HANDLED; 11622 return 1; 11623 } 11624 break; 11625 case LPFC_SLI_INTF_IF_TYPE_2: 11626 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 11627 &portstat_reg.word0) || 11628 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 11629 &portsmphr)){ 11630 phba->work_hs |= UNPLUG_ERR; 11631 phba->work_ha |= HA_ERATT; 11632 phba->hba_flag |= HBA_ERATT_HANDLED; 11633 return 1; 11634 } 11635 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 11636 phba->work_status[0] = 11637 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 11638 phba->work_status[1] = 11639 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 11640 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11641 "2885 Port Status Event: " 11642 "port status reg 0x%x, " 11643 "port smphr reg 0x%x, " 11644 "error 1=0x%x, error 2=0x%x\n", 11645 portstat_reg.word0, 11646 portsmphr, 11647 phba->work_status[0], 11648 phba->work_status[1]); 11649 phba->work_ha |= HA_ERATT; 11650 phba->hba_flag |= HBA_ERATT_HANDLED; 11651 return 1; 11652 } 11653 break; 11654 case LPFC_SLI_INTF_IF_TYPE_1: 11655 default: 11656 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11657 "2886 HBA Error Attention on unsupported " 11658 "if type %d.", if_type); 11659 return 1; 11660 } 11661 11662 return 0; 11663 } 11664 11665 /** 11666 * lpfc_sli_check_eratt - check error attention events 11667 * @phba: Pointer to HBA context. 11668 * 11669 * This function is called from timer soft interrupt context to check HBA's 11670 * error attention register bit for error attention events. 11671 * 11672 * This function returns 1 when there is Error Attention in the Host Attention 11673 * Register and returns 0 otherwise. 11674 **/ 11675 int 11676 lpfc_sli_check_eratt(struct lpfc_hba *phba) 11677 { 11678 uint32_t ha_copy; 11679 11680 /* If somebody is waiting to handle an eratt, don't process it 11681 * here. The brdkill function will do this. 
11682 */ 11683 if (phba->link_flag & LS_IGNORE_ERATT) 11684 return 0; 11685 11686 /* Check if interrupt handler handles this ERATT */ 11687 spin_lock_irq(&phba->hbalock); 11688 if (phba->hba_flag & HBA_ERATT_HANDLED) { 11689 /* Interrupt handler has handled ERATT */ 11690 spin_unlock_irq(&phba->hbalock); 11691 return 0; 11692 } 11693 11694 /* 11695 * If there is deferred error attention, do not check for error 11696 * attention 11697 */ 11698 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11699 spin_unlock_irq(&phba->hbalock); 11700 return 0; 11701 } 11702 11703 /* If PCI channel is offline, don't process it */ 11704 if (unlikely(pci_channel_offline(phba->pcidev))) { 11705 spin_unlock_irq(&phba->hbalock); 11706 return 0; 11707 } 11708 11709 switch (phba->sli_rev) { 11710 case LPFC_SLI_REV2: 11711 case LPFC_SLI_REV3: 11712 /* Read chip Host Attention (HA) register */ 11713 ha_copy = lpfc_sli_eratt_read(phba); 11714 break; 11715 case LPFC_SLI_REV4: 11716 /* Read device Uncoverable Error (UERR) registers */ 11717 ha_copy = lpfc_sli4_eratt_read(phba); 11718 break; 11719 default: 11720 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11721 "0299 Invalid SLI revision (%d)\n", 11722 phba->sli_rev); 11723 ha_copy = 0; 11724 break; 11725 } 11726 spin_unlock_irq(&phba->hbalock); 11727 11728 return ha_copy; 11729 } 11730 11731 /** 11732 * lpfc_intr_state_check - Check device state for interrupt handling 11733 * @phba: Pointer to HBA context. 11734 * 11735 * This inline routine checks whether a device or its PCI slot is in a state 11736 * that the interrupt should be handled. 11737 * 11738 * This function returns 0 if the device or the PCI slot is in a state that 11739 * interrupt should be handled, otherwise -EIO. 11740 */ 11741 static inline int 11742 lpfc_intr_state_check(struct lpfc_hba *phba) 11743 { 11744 /* If the pci channel is offline, ignore all the interrupts */ 11745 if (unlikely(pci_channel_offline(phba->pcidev))) 11746 return -EIO; 11747 11748 /* Update device level interrupt statistics */ 11749 phba->sli.slistat.sli_intr++; 11750 11751 /* Ignore all interrupts during initialization. */ 11752 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 11753 return -EIO; 11754 11755 return 0; 11756 } 11757 11758 /** 11759 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 11760 * @irq: Interrupt number. 11761 * @dev_id: The device context pointer. 11762 * 11763 * This function is directly called from the PCI layer as an interrupt 11764 * service routine when device with SLI-3 interface spec is enabled with 11765 * MSI-X multi-message interrupt mode and there are slow-path events in 11766 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 11767 * interrupt mode, this function is called as part of the device-level 11768 * interrupt handler. When the PCI slot is in error recovery or the HBA 11769 * is undergoing initialization, the interrupt handler will not process 11770 * the interrupt. The link attention and ELS ring attention events are 11771 * handled by the worker thread. The interrupt handler signals the worker 11772 * thread and returns for these events. This function is called without 11773 * any lock held. It gets the hbalock to access and update SLI data 11774 * structures. 11775 * 11776 * This function returns IRQ_HANDLED when interrupt is handled else it 11777 * returns IRQ_NONE. 
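 *
 * A hypothetical registration sketch for the MSI-X case (the real vector
 * set-up lives in lpfc_init.c and may differ in flags and naming):
 *
 *	rc = request_irq(sp_vector, lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc:sp", phba);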
11778 **/ 11779 irqreturn_t 11780 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 11781 { 11782 struct lpfc_hba *phba; 11783 uint32_t ha_copy, hc_copy; 11784 uint32_t work_ha_copy; 11785 unsigned long status; 11786 unsigned long iflag; 11787 uint32_t control; 11788 11789 MAILBOX_t *mbox, *pmbox; 11790 struct lpfc_vport *vport; 11791 struct lpfc_nodelist *ndlp; 11792 struct lpfc_dmabuf *mp; 11793 LPFC_MBOXQ_t *pmb; 11794 int rc; 11795 11796 /* 11797 * Get the driver's phba structure from the dev_id and 11798 * assume the HBA is not interrupting. 11799 */ 11800 phba = (struct lpfc_hba *)dev_id; 11801 11802 if (unlikely(!phba)) 11803 return IRQ_NONE; 11804 11805 /* 11806 * Stuff needs to be attented to when this function is invoked as an 11807 * individual interrupt handler in MSI-X multi-message interrupt mode 11808 */ 11809 if (phba->intr_type == MSIX) { 11810 /* Check device state for handling interrupt */ 11811 if (lpfc_intr_state_check(phba)) 11812 return IRQ_NONE; 11813 /* Need to read HA REG for slow-path events */ 11814 spin_lock_irqsave(&phba->hbalock, iflag); 11815 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11816 goto unplug_error; 11817 /* If somebody is waiting to handle an eratt don't process it 11818 * here. The brdkill function will do this. 11819 */ 11820 if (phba->link_flag & LS_IGNORE_ERATT) 11821 ha_copy &= ~HA_ERATT; 11822 /* Check the need for handling ERATT in interrupt handler */ 11823 if (ha_copy & HA_ERATT) { 11824 if (phba->hba_flag & HBA_ERATT_HANDLED) 11825 /* ERATT polling has handled ERATT */ 11826 ha_copy &= ~HA_ERATT; 11827 else 11828 /* Indicate interrupt handler handles ERATT */ 11829 phba->hba_flag |= HBA_ERATT_HANDLED; 11830 } 11831 11832 /* 11833 * If there is deferred error attention, do not check for any 11834 * interrupt. 11835 */ 11836 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11837 spin_unlock_irqrestore(&phba->hbalock, iflag); 11838 return IRQ_NONE; 11839 } 11840 11841 /* Clear up only attention source related to slow-path */ 11842 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 11843 goto unplug_error; 11844 11845 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 11846 HC_LAINT_ENA | HC_ERINT_ENA), 11847 phba->HCregaddr); 11848 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 11849 phba->HAregaddr); 11850 writel(hc_copy, phba->HCregaddr); 11851 readl(phba->HAregaddr); /* flush */ 11852 spin_unlock_irqrestore(&phba->hbalock, iflag); 11853 } else 11854 ha_copy = phba->ha_copy; 11855 11856 work_ha_copy = ha_copy & phba->work_ha_mask; 11857 11858 if (work_ha_copy) { 11859 if (work_ha_copy & HA_LATT) { 11860 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 11861 /* 11862 * Turn off Link Attention interrupts 11863 * until CLEAR_LA done 11864 */ 11865 spin_lock_irqsave(&phba->hbalock, iflag); 11866 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 11867 if (lpfc_readl(phba->HCregaddr, &control)) 11868 goto unplug_error; 11869 control &= ~HC_LAINT_ENA; 11870 writel(control, phba->HCregaddr); 11871 readl(phba->HCregaddr); /* flush */ 11872 spin_unlock_irqrestore(&phba->hbalock, iflag); 11873 } 11874 else 11875 work_ha_copy &= ~HA_LATT; 11876 } 11877 11878 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 11879 /* 11880 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 11881 * the only slow ring. 
11882 */ 11883 status = (work_ha_copy & 11884 (HA_RXMASK << (4*LPFC_ELS_RING))); 11885 status >>= (4*LPFC_ELS_RING); 11886 if (status & HA_RXMASK) { 11887 spin_lock_irqsave(&phba->hbalock, iflag); 11888 if (lpfc_readl(phba->HCregaddr, &control)) 11889 goto unplug_error; 11890 11891 lpfc_debugfs_slow_ring_trc(phba, 11892 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 11893 control, status, 11894 (uint32_t)phba->sli.slistat.sli_intr); 11895 11896 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 11897 lpfc_debugfs_slow_ring_trc(phba, 11898 "ISR Disable ring:" 11899 "pwork:x%x hawork:x%x wait:x%x", 11900 phba->work_ha, work_ha_copy, 11901 (uint32_t)((unsigned long) 11902 &phba->work_waitq)); 11903 11904 control &= 11905 ~(HC_R0INT_ENA << LPFC_ELS_RING); 11906 writel(control, phba->HCregaddr); 11907 readl(phba->HCregaddr); /* flush */ 11908 } 11909 else { 11910 lpfc_debugfs_slow_ring_trc(phba, 11911 "ISR slow ring: pwork:" 11912 "x%x hawork:x%x wait:x%x", 11913 phba->work_ha, work_ha_copy, 11914 (uint32_t)((unsigned long) 11915 &phba->work_waitq)); 11916 } 11917 spin_unlock_irqrestore(&phba->hbalock, iflag); 11918 } 11919 } 11920 spin_lock_irqsave(&phba->hbalock, iflag); 11921 if (work_ha_copy & HA_ERATT) { 11922 if (lpfc_sli_read_hs(phba)) 11923 goto unplug_error; 11924 /* 11925 * Check if there is a deferred error condition 11926 * is active 11927 */ 11928 if ((HS_FFER1 & phba->work_hs) && 11929 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 11930 HS_FFER6 | HS_FFER7 | HS_FFER8) & 11931 phba->work_hs)) { 11932 phba->hba_flag |= DEFER_ERATT; 11933 /* Clear all interrupt enable conditions */ 11934 writel(0, phba->HCregaddr); 11935 readl(phba->HCregaddr); 11936 } 11937 } 11938 11939 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 11940 pmb = phba->sli.mbox_active; 11941 pmbox = &pmb->u.mb; 11942 mbox = phba->mbox; 11943 vport = pmb->vport; 11944 11945 /* First check out the status word */ 11946 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 11947 if (pmbox->mbxOwner != OWN_HOST) { 11948 spin_unlock_irqrestore(&phba->hbalock, iflag); 11949 /* 11950 * Stray Mailbox Interrupt, mbxCommand <cmd> 11951 * mbxStatus <status> 11952 */ 11953 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11954 LOG_SLI, 11955 "(%d):0304 Stray Mailbox " 11956 "Interrupt mbxCommand x%x " 11957 "mbxStatus x%x\n", 11958 (vport ? vport->vpi : 0), 11959 pmbox->mbxCommand, 11960 pmbox->mbxStatus); 11961 /* clear mailbox attention bit */ 11962 work_ha_copy &= ~HA_MBATT; 11963 } else { 11964 phba->sli.mbox_active = NULL; 11965 spin_unlock_irqrestore(&phba->hbalock, iflag); 11966 phba->last_completion_time = jiffies; 11967 del_timer(&phba->sli.mbox_tmo); 11968 if (pmb->mbox_cmpl) { 11969 lpfc_sli_pcimem_bcopy(mbox, pmbox, 11970 MAILBOX_CMD_SIZE); 11971 if (pmb->out_ext_byte_len && 11972 pmb->context2) 11973 lpfc_sli_pcimem_bcopy( 11974 phba->mbox_ext, 11975 pmb->context2, 11976 pmb->out_ext_byte_len); 11977 } 11978 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 11979 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 11980 11981 lpfc_debugfs_disc_trc(vport, 11982 LPFC_DISC_TRC_MBOX_VPORT, 11983 "MBOX dflt rpi: : " 11984 "status:x%x rpi:x%x", 11985 (uint32_t)pmbox->mbxStatus, 11986 pmbox->un.varWords[0], 0); 11987 11988 if (!pmbox->mbxStatus) { 11989 mp = (struct lpfc_dmabuf *) 11990 (pmb->context1); 11991 ndlp = (struct lpfc_nodelist *) 11992 pmb->context2; 11993 11994 /* Reg_LOGIN of dflt RPI was 11995 * successful. new lets get 11996 * rid of the RPI using the 11997 * same mbox buffer. 
11998 */ 11999 lpfc_unreg_login(phba, 12000 vport->vpi, 12001 pmbox->un.varWords[0], 12002 pmb); 12003 pmb->mbox_cmpl = 12004 lpfc_mbx_cmpl_dflt_rpi; 12005 pmb->context1 = mp; 12006 pmb->context2 = ndlp; 12007 pmb->vport = vport; 12008 rc = lpfc_sli_issue_mbox(phba, 12009 pmb, 12010 MBX_NOWAIT); 12011 if (rc != MBX_BUSY) 12012 lpfc_printf_log(phba, 12013 KERN_ERR, 12014 LOG_MBOX | LOG_SLI, 12015 "0350 rc should have" 12016 "been MBX_BUSY\n"); 12017 if (rc != MBX_NOT_FINISHED) 12018 goto send_current_mbox; 12019 } 12020 } 12021 spin_lock_irqsave( 12022 &phba->pport->work_port_lock, 12023 iflag); 12024 phba->pport->work_port_events &= 12025 ~WORKER_MBOX_TMO; 12026 spin_unlock_irqrestore( 12027 &phba->pport->work_port_lock, 12028 iflag); 12029 lpfc_mbox_cmpl_put(phba, pmb); 12030 } 12031 } else 12032 spin_unlock_irqrestore(&phba->hbalock, iflag); 12033 12034 if ((work_ha_copy & HA_MBATT) && 12035 (phba->sli.mbox_active == NULL)) { 12036 send_current_mbox: 12037 /* Process next mailbox command if there is one */ 12038 do { 12039 rc = lpfc_sli_issue_mbox(phba, NULL, 12040 MBX_NOWAIT); 12041 } while (rc == MBX_NOT_FINISHED); 12042 if (rc != MBX_SUCCESS) 12043 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12044 LOG_SLI, "0349 rc should be " 12045 "MBX_SUCCESS\n"); 12046 } 12047 12048 spin_lock_irqsave(&phba->hbalock, iflag); 12049 phba->work_ha |= work_ha_copy; 12050 spin_unlock_irqrestore(&phba->hbalock, iflag); 12051 lpfc_worker_wake_up(phba); 12052 } 12053 return IRQ_HANDLED; 12054 unplug_error: 12055 spin_unlock_irqrestore(&phba->hbalock, iflag); 12056 return IRQ_HANDLED; 12057 12058 } /* lpfc_sli_sp_intr_handler */ 12059 12060 /** 12061 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12062 * @irq: Interrupt number. 12063 * @dev_id: The device context pointer. 12064 * 12065 * This function is directly called from the PCI layer as an interrupt 12066 * service routine when device with SLI-3 interface spec is enabled with 12067 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12068 * ring event in the HBA. However, when the device is enabled with either 12069 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12070 * device-level interrupt handler. When the PCI slot is in error recovery 12071 * or the HBA is undergoing initialization, the interrupt handler will not 12072 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12073 * the intrrupt context. This function is called without any lock held. 12074 * It gets the hbalock to access and update SLI data structures. 12075 * 12076 * This function returns IRQ_HANDLED when interrupt is handled else it 12077 * returns IRQ_NONE. 12078 **/ 12079 irqreturn_t 12080 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12081 { 12082 struct lpfc_hba *phba; 12083 uint32_t ha_copy; 12084 unsigned long status; 12085 unsigned long iflag; 12086 struct lpfc_sli_ring *pring; 12087 12088 /* Get the driver's phba structure from the dev_id and 12089 * assume the HBA is not interrupting. 
12090 */ 12091 phba = (struct lpfc_hba *) dev_id; 12092 12093 if (unlikely(!phba)) 12094 return IRQ_NONE; 12095 12096 /* 12097 * Stuff needs to be attented to when this function is invoked as an 12098 * individual interrupt handler in MSI-X multi-message interrupt mode 12099 */ 12100 if (phba->intr_type == MSIX) { 12101 /* Check device state for handling interrupt */ 12102 if (lpfc_intr_state_check(phba)) 12103 return IRQ_NONE; 12104 /* Need to read HA REG for FCP ring and other ring events */ 12105 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12106 return IRQ_HANDLED; 12107 /* Clear up only attention source related to fast-path */ 12108 spin_lock_irqsave(&phba->hbalock, iflag); 12109 /* 12110 * If there is deferred error attention, do not check for 12111 * any interrupt. 12112 */ 12113 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12114 spin_unlock_irqrestore(&phba->hbalock, iflag); 12115 return IRQ_NONE; 12116 } 12117 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12118 phba->HAregaddr); 12119 readl(phba->HAregaddr); /* flush */ 12120 spin_unlock_irqrestore(&phba->hbalock, iflag); 12121 } else 12122 ha_copy = phba->ha_copy; 12123 12124 /* 12125 * Process all events on FCP ring. Take the optimized path for FCP IO. 12126 */ 12127 ha_copy &= ~(phba->work_ha_mask); 12128 12129 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12130 status >>= (4*LPFC_FCP_RING); 12131 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12132 if (status & HA_RXMASK) 12133 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12134 12135 if (phba->cfg_multi_ring_support == 2) { 12136 /* 12137 * Process all events on extra ring. Take the optimized path 12138 * for extra ring IO. 12139 */ 12140 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12141 status >>= (4*LPFC_EXTRA_RING); 12142 if (status & HA_RXMASK) { 12143 lpfc_sli_handle_fast_ring_event(phba, 12144 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12145 status); 12146 } 12147 } 12148 return IRQ_HANDLED; 12149 } /* lpfc_sli_fp_intr_handler */ 12150 12151 /** 12152 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12153 * @irq: Interrupt number. 12154 * @dev_id: The device context pointer. 12155 * 12156 * This function is the HBA device-level interrupt handler to device with 12157 * SLI-3 interface spec, called from the PCI layer when either MSI or 12158 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12159 * requires driver attention. This function invokes the slow-path interrupt 12160 * attention handling function and fast-path interrupt attention handling 12161 * function in turn to process the relevant HBA attention events. This 12162 * function is called without any lock held. It gets the hbalock to access 12163 * and update SLI data structures. 12164 * 12165 * This function returns IRQ_HANDLED when interrupt is handled, else it 12166 * returns IRQ_NONE. 12167 **/ 12168 irqreturn_t 12169 lpfc_sli_intr_handler(int irq, void *dev_id) 12170 { 12171 struct lpfc_hba *phba; 12172 irqreturn_t sp_irq_rc, fp_irq_rc; 12173 unsigned long status1, status2; 12174 uint32_t hc_copy; 12175 12176 /* 12177 * Get the driver's phba structure from the dev_id and 12178 * assume the HBA is not interrupting. 
12179 */ 12180 phba = (struct lpfc_hba *) dev_id; 12181 12182 if (unlikely(!phba)) 12183 return IRQ_NONE; 12184 12185 /* Check device state for handling interrupt */ 12186 if (lpfc_intr_state_check(phba)) 12187 return IRQ_NONE; 12188 12189 spin_lock(&phba->hbalock); 12190 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12191 spin_unlock(&phba->hbalock); 12192 return IRQ_HANDLED; 12193 } 12194 12195 if (unlikely(!phba->ha_copy)) { 12196 spin_unlock(&phba->hbalock); 12197 return IRQ_NONE; 12198 } else if (phba->ha_copy & HA_ERATT) { 12199 if (phba->hba_flag & HBA_ERATT_HANDLED) 12200 /* ERATT polling has handled ERATT */ 12201 phba->ha_copy &= ~HA_ERATT; 12202 else 12203 /* Indicate interrupt handler handles ERATT */ 12204 phba->hba_flag |= HBA_ERATT_HANDLED; 12205 } 12206 12207 /* 12208 * If there is deferred error attention, do not check for any interrupt. 12209 */ 12210 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12211 spin_unlock(&phba->hbalock); 12212 return IRQ_NONE; 12213 } 12214 12215 /* Clear attention sources except link and error attentions */ 12216 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12217 spin_unlock(&phba->hbalock); 12218 return IRQ_HANDLED; 12219 } 12220 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12221 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12222 phba->HCregaddr); 12223 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12224 writel(hc_copy, phba->HCregaddr); 12225 readl(phba->HAregaddr); /* flush */ 12226 spin_unlock(&phba->hbalock); 12227 12228 /* 12229 * Invokes slow-path host attention interrupt handling as appropriate. 12230 */ 12231 12232 /* status of events with mailbox and link attention */ 12233 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12234 12235 /* status of events with ELS ring */ 12236 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12237 status2 >>= (4*LPFC_ELS_RING); 12238 12239 if (status1 || (status2 & HA_RXMASK)) 12240 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12241 else 12242 sp_irq_rc = IRQ_NONE; 12243 12244 /* 12245 * Invoke fast-path host attention interrupt handling as appropriate. 12246 */ 12247 12248 /* status of events with FCP ring */ 12249 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12250 status1 >>= (4*LPFC_FCP_RING); 12251 12252 /* status of events with extra ring */ 12253 if (phba->cfg_multi_ring_support == 2) { 12254 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12255 status2 >>= (4*LPFC_EXTRA_RING); 12256 } else 12257 status2 = 0; 12258 12259 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12260 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12261 else 12262 fp_irq_rc = IRQ_NONE; 12263 12264 /* Return device-level interrupt handling status */ 12265 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12266 } /* lpfc_sli_intr_handler */ 12267 12268 /** 12269 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 12270 * @phba: pointer to lpfc hba data structure. 12271 * 12272 * This routine is invoked by the worker thread to process all the pending 12273 * SLI4 FCP abort XRI events. 
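 *
 * A rough sketch of how this routine is reached (illustrative only; the
 * actual dispatch lives in the worker thread): the slow-path CQ handler
 * queues the aborted-XRI event and sets FCP_XRI_ABORT_EVENT, and the
 * worker later does something along the lines of:
 *
 *	if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *		lpfc_sli4_fcp_xri_abort_event_proc(phba);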
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 NVME abort XRI events.
 **/
void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the nvme xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the nvme xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for NVME work queue */
		if (phba->nvmet_support) {
			lpfc_sli4_nvmet_xri_aborted(phba,
						    &cq_event->cqe.wcqe_axri);
		} else {
			lpfc_sli4_nvme_xri_aborted(phba,
						   &cq_event->cqe.wcqe_axri);
		}
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 ELS abort XRI events.
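 *
 * All three *_xri_abort_event_proc routines follow the same pattern:
 * clear the per-type event flag under the hbalock, then drain the
 * corresponding aborted-XRI work queue, notifying the owner of each
 * aborted XRI and releasing the cq_event back to the free pool.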
12338 **/ 12339 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12340 { 12341 struct lpfc_cq_event *cq_event; 12342 12343 /* First, declare the els xri abort event has been handled */ 12344 spin_lock_irq(&phba->hbalock); 12345 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12346 spin_unlock_irq(&phba->hbalock); 12347 /* Now, handle all the els xri abort events */ 12348 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12349 /* Get the first event from the head of the event queue */ 12350 spin_lock_irq(&phba->hbalock); 12351 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12352 cq_event, struct lpfc_cq_event, list); 12353 spin_unlock_irq(&phba->hbalock); 12354 /* Notify aborted XRI for ELS work queue */ 12355 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12356 /* Free the event processed back to the free pool */ 12357 lpfc_sli4_cq_event_release(phba, cq_event); 12358 } 12359 } 12360 12361 /** 12362 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12363 * @phba: pointer to lpfc hba data structure 12364 * @pIocbIn: pointer to the rspiocbq 12365 * @pIocbOut: pointer to the cmdiocbq 12366 * @wcqe: pointer to the complete wcqe 12367 * 12368 * This routine transfers the fields of a command iocbq to a response iocbq 12369 * by copying all the IOCB fields from command iocbq and transferring the 12370 * completion status information from the complete wcqe. 12371 **/ 12372 static void 12373 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12374 struct lpfc_iocbq *pIocbIn, 12375 struct lpfc_iocbq *pIocbOut, 12376 struct lpfc_wcqe_complete *wcqe) 12377 { 12378 int numBdes, i; 12379 unsigned long iflags; 12380 uint32_t status, max_response; 12381 struct lpfc_dmabuf *dmabuf; 12382 struct ulp_bde64 *bpl, bde; 12383 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12384 12385 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12386 sizeof(struct lpfc_iocbq) - offset); 12387 /* Map WCQE parameters into irspiocb parameters */ 12388 status = bf_get(lpfc_wcqe_c_status, wcqe); 12389 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12390 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12391 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12392 pIocbIn->iocb.un.fcpi.fcpi_parm = 12393 pIocbOut->iocb.un.fcpi.fcpi_parm - 12394 wcqe->total_data_placed; 12395 else 12396 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12397 else { 12398 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12399 switch (pIocbOut->iocb.ulpCommand) { 12400 case CMD_ELS_REQUEST64_CR: 12401 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12402 bpl = (struct ulp_bde64 *)dmabuf->virt; 12403 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12404 max_response = bde.tus.f.bdeSize; 12405 break; 12406 case CMD_GEN_REQUEST64_CR: 12407 max_response = 0; 12408 if (!pIocbOut->context3) 12409 break; 12410 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12411 sizeof(struct ulp_bde64); 12412 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12413 bpl = (struct ulp_bde64 *)dmabuf->virt; 12414 for (i = 0; i < numBdes; i++) { 12415 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12416 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12417 max_response += bde.tus.f.bdeSize; 12418 } 12419 break; 12420 default: 12421 max_response = wcqe->total_data_placed; 12422 break; 12423 } 12424 if (max_response < wcqe->total_data_placed) 12425 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12426 else 12427 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12428 wcqe->total_data_placed; 12429 } 
12430 12431 /* Convert BG errors for completion status */ 12432 if (status == CQE_STATUS_DI_ERROR) { 12433 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 12434 12435 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 12436 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 12437 else 12438 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 12439 12440 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 12441 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 12442 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12443 BGS_GUARD_ERR_MASK; 12444 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 12445 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12446 BGS_APPTAG_ERR_MASK; 12447 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 12448 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12449 BGS_REFTAG_ERR_MASK; 12450 12451 /* Check to see if there was any good data before the error */ 12452 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 12453 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12454 BGS_HI_WATER_MARK_PRESENT_MASK; 12455 pIocbIn->iocb.unsli3.sli3_bg.bghm = 12456 wcqe->total_data_placed; 12457 } 12458 12459 /* 12460 * Set ALL the error bits to indicate we don't know what 12461 * type of error it is. 12462 */ 12463 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 12464 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12465 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 12466 BGS_GUARD_ERR_MASK); 12467 } 12468 12469 /* Pick up HBA exchange busy condition */ 12470 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 12471 spin_lock_irqsave(&phba->hbalock, iflags); 12472 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 12473 spin_unlock_irqrestore(&phba->hbalock, iflags); 12474 } 12475 } 12476 12477 /** 12478 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 12479 * @phba: Pointer to HBA context object. 12480 * @wcqe: Pointer to work-queue completion queue entry. 12481 * 12482 * This routine handles an ELS work-queue completion event and construct 12483 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 12484 * discovery engine to handle. 12485 * 12486 * Return: Pointer to the receive IOCBQ, NULL otherwise. 12487 **/ 12488 static struct lpfc_iocbq * 12489 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 12490 struct lpfc_iocbq *irspiocbq) 12491 { 12492 struct lpfc_sli_ring *pring; 12493 struct lpfc_iocbq *cmdiocbq; 12494 struct lpfc_wcqe_complete *wcqe; 12495 unsigned long iflags; 12496 12497 pring = lpfc_phba_elsring(phba); 12498 12499 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 12500 spin_lock_irqsave(&pring->ring_lock, iflags); 12501 pring->stats.iocb_event++; 12502 /* Look up the ELS command IOCB and create pseudo response IOCB */ 12503 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12504 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12505 /* Put the iocb back on the txcmplq */ 12506 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 12507 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12508 12509 if (unlikely(!cmdiocbq)) { 12510 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12511 "0386 ELS complete with no corresponding " 12512 "cmdiocb: iotag (%d)\n", 12513 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12514 lpfc_sli_release_iocbq(phba, irspiocbq); 12515 return NULL; 12516 } 12517 12518 /* Fake the irspiocbq and copy necessary response information */ 12519 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 12520 12521 return irspiocbq; 12522 } 12523 12524 /** 12525 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 12526 * @phba: Pointer to HBA context object. 
12527 * @cqe: Pointer to mailbox completion queue entry. 12528 * 12529 * This routine process a mailbox completion queue entry with asynchrous 12530 * event. 12531 * 12532 * Return: true if work posted to worker thread, otherwise false. 12533 **/ 12534 static bool 12535 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 12536 { 12537 struct lpfc_cq_event *cq_event; 12538 unsigned long iflags; 12539 12540 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12541 "0392 Async Event: word0:x%x, word1:x%x, " 12542 "word2:x%x, word3:x%x\n", mcqe->word0, 12543 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 12544 12545 /* Allocate a new internal CQ_EVENT entry */ 12546 cq_event = lpfc_sli4_cq_event_alloc(phba); 12547 if (!cq_event) { 12548 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12549 "0394 Failed to allocate CQ_EVENT entry\n"); 12550 return false; 12551 } 12552 12553 /* Move the CQE into an asynchronous event entry */ 12554 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 12555 spin_lock_irqsave(&phba->hbalock, iflags); 12556 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 12557 /* Set the async event flag */ 12558 phba->hba_flag |= ASYNC_EVENT; 12559 spin_unlock_irqrestore(&phba->hbalock, iflags); 12560 12561 return true; 12562 } 12563 12564 /** 12565 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 12566 * @phba: Pointer to HBA context object. 12567 * @cqe: Pointer to mailbox completion queue entry. 12568 * 12569 * This routine process a mailbox completion queue entry with mailbox 12570 * completion event. 12571 * 12572 * Return: true if work posted to worker thread, otherwise false. 12573 **/ 12574 static bool 12575 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 12576 { 12577 uint32_t mcqe_status; 12578 MAILBOX_t *mbox, *pmbox; 12579 struct lpfc_mqe *mqe; 12580 struct lpfc_vport *vport; 12581 struct lpfc_nodelist *ndlp; 12582 struct lpfc_dmabuf *mp; 12583 unsigned long iflags; 12584 LPFC_MBOXQ_t *pmb; 12585 bool workposted = false; 12586 int rc; 12587 12588 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 12589 if (!bf_get(lpfc_trailer_completed, mcqe)) 12590 goto out_no_mqe_complete; 12591 12592 /* Get the reference to the active mbox command */ 12593 spin_lock_irqsave(&phba->hbalock, iflags); 12594 pmb = phba->sli.mbox_active; 12595 if (unlikely(!pmb)) { 12596 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 12597 "1832 No pending MBOX command to handle\n"); 12598 spin_unlock_irqrestore(&phba->hbalock, iflags); 12599 goto out_no_mqe_complete; 12600 } 12601 spin_unlock_irqrestore(&phba->hbalock, iflags); 12602 mqe = &pmb->u.mqe; 12603 pmbox = (MAILBOX_t *)&pmb->u.mqe; 12604 mbox = phba->mbox; 12605 vport = pmb->vport; 12606 12607 /* Reset heartbeat timer */ 12608 phba->last_completion_time = jiffies; 12609 del_timer(&phba->sli.mbox_tmo); 12610 12611 /* Move mbox data to caller's mailbox region, do endian swapping */ 12612 if (pmb->mbox_cmpl && mbox) 12613 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 12614 12615 /* 12616 * For mcqe errors, conditionally move a modified error code to 12617 * the mbox so that the error will not be missed. 
12618 */ 12619 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 12620 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 12621 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 12622 bf_set(lpfc_mqe_status, mqe, 12623 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 12624 } 12625 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12626 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12627 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 12628 "MBOX dflt rpi: status:x%x rpi:x%x", 12629 mcqe_status, 12630 pmbox->un.varWords[0], 0); 12631 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 12632 mp = (struct lpfc_dmabuf *)(pmb->context1); 12633 ndlp = (struct lpfc_nodelist *)pmb->context2; 12634 /* Reg_LOGIN of dflt RPI was successful. Now lets get 12635 * RID of the PPI using the same mbox buffer. 12636 */ 12637 lpfc_unreg_login(phba, vport->vpi, 12638 pmbox->un.varWords[0], pmb); 12639 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 12640 pmb->context1 = mp; 12641 pmb->context2 = ndlp; 12642 pmb->vport = vport; 12643 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 12644 if (rc != MBX_BUSY) 12645 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12646 LOG_SLI, "0385 rc should " 12647 "have been MBX_BUSY\n"); 12648 if (rc != MBX_NOT_FINISHED) 12649 goto send_current_mbox; 12650 } 12651 } 12652 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 12653 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 12654 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 12655 12656 /* There is mailbox completion work to do */ 12657 spin_lock_irqsave(&phba->hbalock, iflags); 12658 __lpfc_mbox_cmpl_put(phba, pmb); 12659 phba->work_ha |= HA_MBATT; 12660 spin_unlock_irqrestore(&phba->hbalock, iflags); 12661 workposted = true; 12662 12663 send_current_mbox: 12664 spin_lock_irqsave(&phba->hbalock, iflags); 12665 /* Release the mailbox command posting token */ 12666 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 12667 /* Setting active mailbox pointer need to be in sync to flag clear */ 12668 phba->sli.mbox_active = NULL; 12669 spin_unlock_irqrestore(&phba->hbalock, iflags); 12670 /* Wake up worker thread to post the next pending mailbox command */ 12671 lpfc_worker_wake_up(phba); 12672 out_no_mqe_complete: 12673 if (bf_get(lpfc_trailer_consumed, mcqe)) 12674 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 12675 return workposted; 12676 } 12677 12678 /** 12679 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 12680 * @phba: Pointer to HBA context object. 12681 * @cqe: Pointer to mailbox completion queue entry. 12682 * 12683 * This routine process a mailbox completion queue entry, it invokes the 12684 * proper mailbox complete handling or asynchrous event handling routine 12685 * according to the MCQE's async bit. 12686 * 12687 * Return: true if work posted to worker thread, otherwise false. 12688 **/ 12689 static bool 12690 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 12691 { 12692 struct lpfc_mcqe mcqe; 12693 bool workposted; 12694 12695 /* Copy the mailbox MCQE and convert endian order as needed */ 12696 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 12697 12698 /* Invoke the proper event handling routine */ 12699 if (!bf_get(lpfc_trailer_async, &mcqe)) 12700 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 12701 else 12702 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 12703 return workposted; 12704 } 12705 12706 /** 12707 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 12708 * @phba: Pointer to HBA context object. 
12709 * @cq: Pointer to associated CQ 12710 * @wcqe: Pointer to work-queue completion queue entry. 12711 * 12712 * This routine handles an ELS work-queue completion event. 12713 * 12714 * Return: true if work posted to worker thread, otherwise false. 12715 **/ 12716 static bool 12717 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12718 struct lpfc_wcqe_complete *wcqe) 12719 { 12720 struct lpfc_iocbq *irspiocbq; 12721 unsigned long iflags; 12722 struct lpfc_sli_ring *pring = cq->pring; 12723 int txq_cnt = 0; 12724 int txcmplq_cnt = 0; 12725 int fcp_txcmplq_cnt = 0; 12726 12727 /* Get an irspiocbq for later ELS response processing use */ 12728 irspiocbq = lpfc_sli_get_iocbq(phba); 12729 if (!irspiocbq) { 12730 if (!list_empty(&pring->txq)) 12731 txq_cnt++; 12732 if (!list_empty(&pring->txcmplq)) 12733 txcmplq_cnt++; 12734 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12735 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 12736 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 12737 txq_cnt, phba->iocb_cnt, 12738 fcp_txcmplq_cnt, 12739 txcmplq_cnt); 12740 return false; 12741 } 12742 12743 /* Save off the slow-path queue event for work thread to process */ 12744 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 12745 spin_lock_irqsave(&phba->hbalock, iflags); 12746 list_add_tail(&irspiocbq->cq_event.list, 12747 &phba->sli4_hba.sp_queue_event); 12748 phba->hba_flag |= HBA_SP_QUEUE_EVT; 12749 spin_unlock_irqrestore(&phba->hbalock, iflags); 12750 12751 return true; 12752 } 12753 12754 /** 12755 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 12756 * @phba: Pointer to HBA context object. 12757 * @wcqe: Pointer to work-queue completion queue entry. 12758 * 12759 * This routine handles slow-path WQ entry consumed event by invoking the 12760 * proper WQ release routine to the slow-path WQ. 12761 **/ 12762 static void 12763 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 12764 struct lpfc_wcqe_release *wcqe) 12765 { 12766 /* sanity check on queue memory */ 12767 if (unlikely(!phba->sli4_hba.els_wq)) 12768 return; 12769 /* Check for the slow-path ELS work queue */ 12770 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 12771 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 12772 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 12773 else 12774 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12775 "2579 Slow-path wqe consume event carries " 12776 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 12777 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 12778 phba->sli4_hba.els_wq->queue_id); 12779 } 12780 12781 /** 12782 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 12783 * @phba: Pointer to HBA context object. 12784 * @cq: Pointer to a WQ completion queue. 12785 * @wcqe: Pointer to work-queue completion queue entry. 12786 * 12787 * This routine handles an XRI abort event. 12788 * 12789 * Return: true if work posted to worker thread, otherwise false. 
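 *
 * The event queued here is consumed later by the worker thread; the
 * switch on cq->subtype below effectively maps:
 *
 *	LPFC_FCP  -> sp_fcp_xri_aborted_work_queue  (FCP_XRI_ABORT_EVENT)
 *	LPFC_ELS  -> sp_els_xri_aborted_work_queue  (ELS_XRI_ABORT_EVENT)
 *	LPFC_NVME -> sp_nvme_xri_aborted_work_queue (NVME_XRI_ABORT_EVENT)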
12790 **/ 12791 static bool 12792 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 12793 struct lpfc_queue *cq, 12794 struct sli4_wcqe_xri_aborted *wcqe) 12795 { 12796 bool workposted = false; 12797 struct lpfc_cq_event *cq_event; 12798 unsigned long iflags; 12799 12800 /* Allocate a new internal CQ_EVENT entry */ 12801 cq_event = lpfc_sli4_cq_event_alloc(phba); 12802 if (!cq_event) { 12803 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12804 "0602 Failed to allocate CQ_EVENT entry\n"); 12805 return false; 12806 } 12807 12808 /* Move the CQE into the proper xri abort event list */ 12809 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 12810 switch (cq->subtype) { 12811 case LPFC_FCP: 12812 spin_lock_irqsave(&phba->hbalock, iflags); 12813 list_add_tail(&cq_event->list, 12814 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 12815 /* Set the fcp xri abort event flag */ 12816 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 12817 spin_unlock_irqrestore(&phba->hbalock, iflags); 12818 workposted = true; 12819 break; 12820 case LPFC_ELS: 12821 spin_lock_irqsave(&phba->hbalock, iflags); 12822 list_add_tail(&cq_event->list, 12823 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 12824 /* Set the els xri abort event flag */ 12825 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 12826 spin_unlock_irqrestore(&phba->hbalock, iflags); 12827 workposted = true; 12828 break; 12829 case LPFC_NVME: 12830 spin_lock_irqsave(&phba->hbalock, iflags); 12831 list_add_tail(&cq_event->list, 12832 &phba->sli4_hba.sp_nvme_xri_aborted_work_queue); 12833 /* Set the nvme xri abort event flag */ 12834 phba->hba_flag |= NVME_XRI_ABORT_EVENT; 12835 spin_unlock_irqrestore(&phba->hbalock, iflags); 12836 workposted = true; 12837 break; 12838 default: 12839 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12840 "0603 Invalid CQ subtype %d: " 12841 "%08x %08x %08x %08x\n", 12842 cq->subtype, wcqe->word0, wcqe->parameter, 12843 wcqe->word2, wcqe->word3); 12844 lpfc_sli4_cq_event_release(phba, cq_event); 12845 workposted = false; 12846 break; 12847 } 12848 return workposted; 12849 } 12850 12851 /** 12852 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 12853 * @phba: Pointer to HBA context object. 12854 * @rcqe: Pointer to receive-queue completion queue entry. 12855 * 12856 * This routine process a receive-queue completion queue entry. 12857 * 12858 * Return: true if work posted to worker thread, otherwise false. 
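 *
 * A frame accepted here is not handled in interrupt context; it is queued
 * on sp_queue_event and HBA_SP_QUEUE_EVT is set so the worker thread can
 * process it later.  Note that the FC_STATUS_RQ_BUF_LEN_EXCEEDED case
 * only logs the truncation and then falls through to the success path,
 * so the truncated frame is still consumed.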
12859 **/ 12860 static bool 12861 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 12862 { 12863 bool workposted = false; 12864 struct fc_frame_header *fc_hdr; 12865 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 12866 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 12867 struct lpfc_nvmet_tgtport *tgtp; 12868 struct hbq_dmabuf *dma_buf; 12869 uint32_t status, rq_id; 12870 unsigned long iflags; 12871 12872 /* sanity check on queue memory */ 12873 if (unlikely(!hrq) || unlikely(!drq)) 12874 return workposted; 12875 12876 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 12877 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 12878 else 12879 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 12880 if (rq_id != hrq->queue_id) 12881 goto out; 12882 12883 status = bf_get(lpfc_rcqe_status, rcqe); 12884 switch (status) { 12885 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 12886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12887 "2537 Receive Frame Truncated!!\n"); 12888 case FC_STATUS_RQ_SUCCESS: 12889 lpfc_sli4_rq_release(hrq, drq); 12890 spin_lock_irqsave(&phba->hbalock, iflags); 12891 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 12892 if (!dma_buf) { 12893 hrq->RQ_no_buf_found++; 12894 spin_unlock_irqrestore(&phba->hbalock, iflags); 12895 goto out; 12896 } 12897 hrq->RQ_rcv_buf++; 12898 hrq->RQ_buf_posted--; 12899 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 12900 12901 /* If a NVME LS event (type 0x28), treat it as Fast path */ 12902 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 12903 12904 /* save off the frame for the word thread to process */ 12905 list_add_tail(&dma_buf->cq_event.list, 12906 &phba->sli4_hba.sp_queue_event); 12907 /* Frame received */ 12908 phba->hba_flag |= HBA_SP_QUEUE_EVT; 12909 spin_unlock_irqrestore(&phba->hbalock, iflags); 12910 workposted = true; 12911 break; 12912 case FC_STATUS_INSUFF_BUF_FRM_DISC: 12913 if (phba->nvmet_support) { 12914 tgtp = phba->targetport->private; 12915 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 12916 "6402 RQE Error x%x, posted %d err_cnt " 12917 "%d: %x %x %x\n", 12918 status, hrq->RQ_buf_posted, 12919 hrq->RQ_no_posted_buf, 12920 atomic_read(&tgtp->rcv_fcp_cmd_in), 12921 atomic_read(&tgtp->rcv_fcp_cmd_out), 12922 atomic_read(&tgtp->xmt_fcp_release)); 12923 } 12924 /* fallthrough */ 12925 12926 case FC_STATUS_INSUFF_BUF_NEED_BUF: 12927 hrq->RQ_no_posted_buf++; 12928 /* Post more buffers if possible */ 12929 spin_lock_irqsave(&phba->hbalock, iflags); 12930 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 12931 spin_unlock_irqrestore(&phba->hbalock, iflags); 12932 workposted = true; 12933 break; 12934 } 12935 out: 12936 return workposted; 12937 } 12938 12939 /** 12940 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 12941 * @phba: Pointer to HBA context object. 12942 * @cq: Pointer to the completion queue. 12943 * @wcqe: Pointer to a completion queue entry. 12944 * 12945 * This routine process a slow-path work-queue or receive queue completion queue 12946 * entry. 12947 * 12948 * Return: true if work posted to worker thread, otherwise false. 
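 *
 * The dispatch below is keyed off the CQE code; roughly:
 *
 *	CQE_CODE_COMPL_WQE    -> lpfc_sli4_sp_handle_els_wcqe()
 *	CQE_CODE_RELEASE_WQE  -> lpfc_sli4_sp_handle_rel_wcqe()
 *	CQE_CODE_XRI_ABORTED  -> lpfc_sli4_sp_handle_abort_xri_wcqe()
 *	CQE_CODE_RECEIVE(_V1) -> lpfc_sli4_sp_handle_rcqe()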
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to slow-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event queue.
 * It checks the MajorCode and MinorCode to determine whether this is a
 * completion event on a completion queue; if not, an error is logged and the
 * routine simply returns. Otherwise, it finds the corresponding completion
 * queue, processes all the entries on that completion queue, rearms the
 * completion queue, and then returns.
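 *
 * The CQ is located by walking the child_list of the slow-path EQ for a
 * queue_id matching the EQE's resource id.  MCQ entries are then handled
 * by lpfc_sli4_sp_handle_mcqe(), while WCQ entries go to either the
 * fast-path or the slow-path CQE handler depending on the CQ subtype.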
13006 * 13007 **/ 13008 static void 13009 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13010 struct lpfc_queue *speq) 13011 { 13012 struct lpfc_queue *cq = NULL, *childq; 13013 struct lpfc_cqe *cqe; 13014 bool workposted = false; 13015 int ecount = 0; 13016 uint16_t cqid; 13017 13018 /* Get the reference to the corresponding CQ */ 13019 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13020 13021 list_for_each_entry(childq, &speq->child_list, list) { 13022 if (childq->queue_id == cqid) { 13023 cq = childq; 13024 break; 13025 } 13026 } 13027 if (unlikely(!cq)) { 13028 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13030 "0365 Slow-path CQ identifier " 13031 "(%d) does not exist\n", cqid); 13032 return; 13033 } 13034 13035 /* Save EQ associated with this CQ */ 13036 cq->assoc_qp = speq; 13037 13038 /* Process all the entries to the CQ */ 13039 switch (cq->type) { 13040 case LPFC_MCQ: 13041 while ((cqe = lpfc_sli4_cq_get(cq))) { 13042 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 13043 if (!(++ecount % cq->entry_repost)) 13044 break; 13045 cq->CQ_mbox++; 13046 } 13047 break; 13048 case LPFC_WCQ: 13049 while ((cqe = lpfc_sli4_cq_get(cq))) { 13050 if ((cq->subtype == LPFC_FCP) || 13051 (cq->subtype == LPFC_NVME)) 13052 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, 13053 cqe); 13054 else 13055 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 13056 cqe); 13057 if (!(++ecount % cq->entry_repost)) 13058 break; 13059 } 13060 13061 /* Track the max number of CQEs processed in 1 EQ */ 13062 if (ecount > cq->CQ_max_cqe) 13063 cq->CQ_max_cqe = ecount; 13064 break; 13065 default: 13066 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13067 "0370 Invalid completion queue type (%d)\n", 13068 cq->type); 13069 return; 13070 } 13071 13072 /* Catch the no cq entry condition, log an error */ 13073 if (unlikely(ecount == 0)) 13074 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13075 "0371 No entry from the CQ: identifier " 13076 "(x%x), type (%d)\n", cq->queue_id, cq->type); 13077 13078 /* In any case, flash and re-arm the RCQ */ 13079 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 13080 13081 /* wake up worker thread if there are works to be done */ 13082 if (workposted) 13083 lpfc_worker_wake_up(phba); 13084 } 13085 13086 /** 13087 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13088 * @phba: Pointer to HBA context object. 13089 * @cq: Pointer to associated CQ 13090 * @wcqe: Pointer to work-queue completion queue entry. 13091 * 13092 * This routine process a fast-path work queue completion entry from fast-path 13093 * event queue for FCP command response completion. 13094 **/ 13095 static void 13096 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13097 struct lpfc_wcqe_complete *wcqe) 13098 { 13099 struct lpfc_sli_ring *pring = cq->pring; 13100 struct lpfc_iocbq *cmdiocbq; 13101 struct lpfc_iocbq irspiocbq; 13102 unsigned long iflags; 13103 13104 /* Check for response status */ 13105 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13106 /* If resource errors reported from HBA, reduce queue 13107 * depth of the SCSI device. 
	 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		      IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	if (cq->assoc_qp)
		cmdiocbq->isr_timestamp =
			cq->assoc_qp->isr_timestamp;

	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the fast-path WQ.
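 *
 * The WQ to release is found by matching the WQ id carried in the WCQE
 * against the queue_id of each child WQ on cq->child_list; a miss is only
 * logged as a warning, not treated as a hard error.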
13184 **/ 13185 static void 13186 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13187 struct lpfc_wcqe_release *wcqe) 13188 { 13189 struct lpfc_queue *childwq; 13190 bool wqid_matched = false; 13191 uint16_t hba_wqid; 13192 13193 /* Check for fast-path FCP work queue release */ 13194 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13195 list_for_each_entry(childwq, &cq->child_list, list) { 13196 if (childwq->queue_id == hba_wqid) { 13197 lpfc_sli4_wq_release(childwq, 13198 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13199 wqid_matched = true; 13200 break; 13201 } 13202 } 13203 /* Report warning log message if no match found */ 13204 if (wqid_matched != true) 13205 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13206 "2580 Fast-path wqe consume event carries " 13207 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13208 } 13209 13210 /** 13211 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13212 * @phba: Pointer to HBA context object. 13213 * @rcqe: Pointer to receive-queue completion queue entry. 13214 * 13215 * This routine process a receive-queue completion queue entry. 13216 * 13217 * Return: true if work posted to worker thread, otherwise false. 13218 **/ 13219 static bool 13220 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13221 struct lpfc_rcqe *rcqe) 13222 { 13223 bool workposted = false; 13224 struct lpfc_queue *hrq; 13225 struct lpfc_queue *drq; 13226 struct rqb_dmabuf *dma_buf; 13227 struct fc_frame_header *fc_hdr; 13228 struct lpfc_nvmet_tgtport *tgtp; 13229 uint32_t status, rq_id; 13230 unsigned long iflags; 13231 uint32_t fctl, idx; 13232 13233 if ((phba->nvmet_support == 0) || 13234 (phba->sli4_hba.nvmet_cqset == NULL)) 13235 return workposted; 13236 13237 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13238 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13239 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13240 13241 /* sanity check on queue memory */ 13242 if (unlikely(!hrq) || unlikely(!drq)) 13243 return workposted; 13244 13245 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13246 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13247 else 13248 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13249 13250 if ((phba->nvmet_support == 0) || 13251 (rq_id != hrq->queue_id)) 13252 return workposted; 13253 13254 status = bf_get(lpfc_rcqe_status, rcqe); 13255 switch (status) { 13256 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13257 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13258 "6126 Receive Frame Truncated!!\n"); 13259 case FC_STATUS_RQ_SUCCESS: 13260 lpfc_sli4_rq_release(hrq, drq); 13261 spin_lock_irqsave(&phba->hbalock, iflags); 13262 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13263 if (!dma_buf) { 13264 hrq->RQ_no_buf_found++; 13265 spin_unlock_irqrestore(&phba->hbalock, iflags); 13266 goto out; 13267 } 13268 spin_unlock_irqrestore(&phba->hbalock, iflags); 13269 hrq->RQ_rcv_buf++; 13270 hrq->RQ_buf_posted--; 13271 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13272 13273 /* Just some basic sanity checks on FCP Command frame */ 13274 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13275 fc_hdr->fh_f_ctl[1] << 8 | 13276 fc_hdr->fh_f_ctl[2]); 13277 if (((fctl & 13278 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13279 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13280 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13281 goto drop; 13282 13283 if (fc_hdr->fh_type == FC_TYPE_FCP) { 13284 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13285 lpfc_nvmet_unsol_fcp_event( 13286 phba, 
phba->sli4_hba.els_wq->pring, dma_buf, 13287 cq->assoc_qp->isr_timestamp); 13288 return false; 13289 } 13290 drop: 13291 lpfc_in_buf_free(phba, &dma_buf->dbuf); 13292 break; 13293 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13294 if (phba->nvmet_support) { 13295 tgtp = phba->targetport->private; 13296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13297 "6401 RQE Error x%x, posted %d err_cnt " 13298 "%d: %x %x %x\n", 13299 status, hrq->RQ_buf_posted, 13300 hrq->RQ_no_posted_buf, 13301 atomic_read(&tgtp->rcv_fcp_cmd_in), 13302 atomic_read(&tgtp->rcv_fcp_cmd_out), 13303 atomic_read(&tgtp->xmt_fcp_release)); 13304 } 13305 /* fallthrough */ 13306 13307 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13308 hrq->RQ_no_posted_buf++; 13309 /* Post more buffers if possible */ 13310 break; 13311 } 13312 out: 13313 return workposted; 13314 } 13315 13316 /** 13317 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13318 * @cq: Pointer to the completion queue. 13319 * @eqe: Pointer to fast-path completion queue entry. 13320 * 13321 * This routine process a fast-path work queue completion entry from fast-path 13322 * event queue for FCP command response completion. 13323 **/ 13324 static int 13325 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13326 struct lpfc_cqe *cqe) 13327 { 13328 struct lpfc_wcqe_release wcqe; 13329 bool workposted = false; 13330 13331 /* Copy the work queue CQE and convert endian order if needed */ 13332 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13333 13334 /* Check and process for different type of WCQE and dispatch */ 13335 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13336 case CQE_CODE_COMPL_WQE: 13337 case CQE_CODE_NVME_ERSP: 13338 cq->CQ_wq++; 13339 /* Process the WQ complete event */ 13340 phba->last_completion_time = jiffies; 13341 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 13342 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13343 (struct lpfc_wcqe_complete *)&wcqe); 13344 if (cq->subtype == LPFC_NVME_LS) 13345 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13346 (struct lpfc_wcqe_complete *)&wcqe); 13347 break; 13348 case CQE_CODE_RELEASE_WQE: 13349 cq->CQ_release_wqe++; 13350 /* Process the WQ release event */ 13351 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 13352 (struct lpfc_wcqe_release *)&wcqe); 13353 break; 13354 case CQE_CODE_XRI_ABORTED: 13355 cq->CQ_xri_aborted++; 13356 /* Process the WQ XRI abort event */ 13357 phba->last_completion_time = jiffies; 13358 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13359 (struct sli4_wcqe_xri_aborted *)&wcqe); 13360 break; 13361 case CQE_CODE_RECEIVE_V1: 13362 case CQE_CODE_RECEIVE: 13363 phba->last_completion_time = jiffies; 13364 if (cq->subtype == LPFC_NVMET) { 13365 workposted = lpfc_sli4_nvmet_handle_rcqe( 13366 phba, cq, (struct lpfc_rcqe *)&wcqe); 13367 } 13368 break; 13369 default: 13370 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13371 "0144 Not a valid CQE code: x%x\n", 13372 bf_get(lpfc_wcqe_c_code, &wcqe)); 13373 break; 13374 } 13375 return workposted; 13376 } 13377 13378 /** 13379 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 13380 * @phba: Pointer to HBA context object. 13381 * @eqe: Pointer to fast-path event queue entry. 13382 * 13383 * This routine process a event queue entry from the fast-path event queue. 13384 * It will check the MajorCode and MinorCode to determine this is for a 13385 * completion event on a completion queue, if not, an error shall be logged 13386 * and just return. 
Otherwise, it will get to the corresponding completion 13387 * queue and process all the entries on the completion queue, rearm the 13388 * completion queue, and then return. 13389 **/ 13390 static void 13391 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13392 uint32_t qidx) 13393 { 13394 struct lpfc_queue *cq = NULL; 13395 struct lpfc_cqe *cqe; 13396 bool workposted = false; 13397 uint16_t cqid, id; 13398 int ecount = 0; 13399 13400 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13401 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13402 "0366 Not a valid completion " 13403 "event: majorcode=x%x, minorcode=x%x\n", 13404 bf_get_le32(lpfc_eqe_major_code, eqe), 13405 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13406 return; 13407 } 13408 13409 /* Get the reference to the corresponding CQ */ 13410 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13411 13412 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 13413 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 13414 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 13415 /* Process NVMET unsol rcv */ 13416 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 13417 goto process_cq; 13418 } 13419 } 13420 13421 if (phba->sli4_hba.nvme_cq_map && 13422 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { 13423 /* Process NVME / NVMET command completion */ 13424 cq = phba->sli4_hba.nvme_cq[qidx]; 13425 goto process_cq; 13426 } 13427 13428 if (phba->sli4_hba.fcp_cq_map && 13429 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { 13430 /* Process FCP command completion */ 13431 cq = phba->sli4_hba.fcp_cq[qidx]; 13432 goto process_cq; 13433 } 13434 13435 if (phba->sli4_hba.nvmels_cq && 13436 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 13437 /* Process NVME unsol rcv */ 13438 cq = phba->sli4_hba.nvmels_cq; 13439 } 13440 13441 /* Otherwise this is a Slow path event */ 13442 if (cq == NULL) { 13443 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); 13444 return; 13445 } 13446 13447 process_cq: 13448 if (unlikely(cqid != cq->queue_id)) { 13449 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13450 "0368 Miss-matched fast-path completion " 13451 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 13452 cqid, cq->queue_id); 13453 return; 13454 } 13455 13456 /* Save EQ associated with this CQ */ 13457 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; 13458 13459 /* Process all the entries to the CQ */ 13460 while ((cqe = lpfc_sli4_cq_get(cq))) { 13461 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13462 if (!(++ecount % cq->entry_repost)) 13463 break; 13464 } 13465 13466 /* Track the max number of CQEs processed in 1 EQ */ 13467 if (ecount > cq->CQ_max_cqe) 13468 cq->CQ_max_cqe = ecount; 13469 13470 /* Catch the no cq entry condition */ 13471 if (unlikely(ecount == 0)) 13472 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13473 "0369 No entry from fast-path completion " 13474 "queue fcpcqid=%d\n", cq->queue_id); 13475 13476 /* In any case, flash and re-arm the CQ */ 13477 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 13478 13479 /* wake up worker thread if there are works to be done */ 13480 if (workposted) 13481 lpfc_worker_wake_up(phba); 13482 } 13483 13484 static void 13485 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 13486 { 13487 struct lpfc_eqe *eqe; 13488 13489 /* walk all the EQ entries and drop on the floor */ 13490 while ((eqe = lpfc_sli4_eq_get(eq))) 13491 ; 13492 13493 /* Clear and re-arm the EQ */ 13494 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 13495 } 13496 13497 13498 /** 13499 * lpfc_sli4_fof_handle_eqe - 
Process a Flash Optimized Fabric event queue 13500 * entry 13501 * @phba: Pointer to HBA context object. 13502 * @eqe: Pointer to fast-path event queue entry. 13503 * 13504 * This routine process a event queue entry from the Flash Optimized Fabric 13505 * event queue. It will check the MajorCode and MinorCode to determine this 13506 * is for a completion event on a completion queue, if not, an error shall be 13507 * logged and just return. Otherwise, it will get to the corresponding 13508 * completion queue and process all the entries on the completion queue, rearm 13509 * the completion queue, and then return. 13510 **/ 13511 static void 13512 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 13513 { 13514 struct lpfc_queue *cq; 13515 struct lpfc_cqe *cqe; 13516 bool workposted = false; 13517 uint16_t cqid; 13518 int ecount = 0; 13519 13520 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13521 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13522 "9147 Not a valid completion " 13523 "event: majorcode=x%x, minorcode=x%x\n", 13524 bf_get_le32(lpfc_eqe_major_code, eqe), 13525 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13526 return; 13527 } 13528 13529 /* Get the reference to the corresponding CQ */ 13530 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13531 13532 /* Next check for OAS */ 13533 cq = phba->sli4_hba.oas_cq; 13534 if (unlikely(!cq)) { 13535 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13536 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13537 "9148 OAS completion queue " 13538 "does not exist\n"); 13539 return; 13540 } 13541 13542 if (unlikely(cqid != cq->queue_id)) { 13543 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13544 "9149 Miss-matched fast-path compl " 13545 "queue id: eqcqid=%d, fcpcqid=%d\n", 13546 cqid, cq->queue_id); 13547 return; 13548 } 13549 13550 /* Process all the entries to the OAS CQ */ 13551 while ((cqe = lpfc_sli4_cq_get(cq))) { 13552 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 13553 if (!(++ecount % cq->entry_repost)) 13554 break; 13555 } 13556 13557 /* Track the max number of CQEs processed in 1 EQ */ 13558 if (ecount > cq->CQ_max_cqe) 13559 cq->CQ_max_cqe = ecount; 13560 13561 /* Catch the no cq entry condition */ 13562 if (unlikely(ecount == 0)) 13563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13564 "9153 No entry from fast-path completion " 13565 "queue fcpcqid=%d\n", cq->queue_id); 13566 13567 /* In any case, flash and re-arm the CQ */ 13568 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 13569 13570 /* wake up worker thread if there are works to be done */ 13571 if (workposted) 13572 lpfc_worker_wake_up(phba); 13573 } 13574 13575 /** 13576 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device 13577 * @irq: Interrupt number. 13578 * @dev_id: The device context pointer. 13579 * 13580 * This function is directly called from the PCI layer as an interrupt 13581 * service routine when device with SLI-4 interface spec is enabled with 13582 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric 13583 * IOCB ring event in the HBA. However, when the device is enabled with either 13584 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13585 * device-level interrupt handler. When the PCI slot is in error recovery 13586 * or the HBA is undergoing initialization, the interrupt handler will not 13587 * process the interrupt. The Flash Optimized Fabric ring event are handled in 13588 * the intrrupt context. This function is called without any lock held. 
13589 * It gets the hbalock to access and update SLI data structures. Note that, 13590 * the EQ to CQ are one-to-one map such that the EQ index is 13591 * equal to that of CQ index. 13592 * 13593 * This function returns IRQ_HANDLED when interrupt is handled else it 13594 * returns IRQ_NONE. 13595 **/ 13596 irqreturn_t 13597 lpfc_sli4_fof_intr_handler(int irq, void *dev_id) 13598 { 13599 struct lpfc_hba *phba; 13600 struct lpfc_hba_eq_hdl *hba_eq_hdl; 13601 struct lpfc_queue *eq; 13602 struct lpfc_eqe *eqe; 13603 unsigned long iflag; 13604 int ecount = 0; 13605 13606 /* Get the driver's phba structure from the dev_id */ 13607 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 13608 phba = hba_eq_hdl->phba; 13609 13610 if (unlikely(!phba)) 13611 return IRQ_NONE; 13612 13613 /* Get to the EQ struct associated with this vector */ 13614 eq = phba->sli4_hba.fof_eq; 13615 if (unlikely(!eq)) 13616 return IRQ_NONE; 13617 13618 /* Check device state for handling interrupt */ 13619 if (unlikely(lpfc_intr_state_check(phba))) { 13620 eq->EQ_badstate++; 13621 /* Check again for link_state with lock held */ 13622 spin_lock_irqsave(&phba->hbalock, iflag); 13623 if (phba->link_state < LPFC_LINK_DOWN) 13624 /* Flush, clear interrupt, and rearm the EQ */ 13625 lpfc_sli4_eq_flush(phba, eq); 13626 spin_unlock_irqrestore(&phba->hbalock, iflag); 13627 return IRQ_NONE; 13628 } 13629 13630 /* 13631 * Process all the event on FCP fast-path EQ 13632 */ 13633 while ((eqe = lpfc_sli4_eq_get(eq))) { 13634 lpfc_sli4_fof_handle_eqe(phba, eqe); 13635 if (!(++ecount % eq->entry_repost)) 13636 break; 13637 eq->EQ_processed++; 13638 } 13639 13640 /* Track the max number of EQEs processed in 1 intr */ 13641 if (ecount > eq->EQ_max_eqe) 13642 eq->EQ_max_eqe = ecount; 13643 13644 13645 if (unlikely(ecount == 0)) { 13646 eq->EQ_no_entry++; 13647 13648 if (phba->intr_type == MSIX) 13649 /* MSI-X treated interrupt served as no EQ share INT */ 13650 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13651 "9145 MSI-X interrupt with no EQE\n"); 13652 else { 13653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13654 "9146 ISR interrupt with no EQE\n"); 13655 /* Non MSI-X treated on interrupt as EQ share INT */ 13656 return IRQ_NONE; 13657 } 13658 } 13659 /* Always clear and re-arm the fast-path EQ */ 13660 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 13661 return IRQ_HANDLED; 13662 } 13663 13664 /** 13665 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 13666 * @irq: Interrupt number. 13667 * @dev_id: The device context pointer. 13668 * 13669 * This function is directly called from the PCI layer as an interrupt 13670 * service routine when device with SLI-4 interface spec is enabled with 13671 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 13672 * ring event in the HBA. However, when the device is enabled with either 13673 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13674 * device-level interrupt handler. When the PCI slot is in error recovery 13675 * or the HBA is undergoing initialization, the interrupt handler will not 13676 * process the interrupt. The SCSI FCP fast-path ring event are handled in 13677 * the intrrupt context. This function is called without any lock held. 13678 * It gets the hbalock to access and update SLI data structures. Note that, 13679 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 13680 * equal to that of FCP CQ index. 13681 * 13682 * The link attention and ELS ring attention events are handled 13683 * by the worker thread. 
The interrupt handler signals the worker thread 13684 * and returns for these events. This function is called without any lock 13685 * held. It gets the hbalock to access and update SLI data structures. 13686 * 13687 * This function returns IRQ_HANDLED when interrupt is handled else it 13688 * returns IRQ_NONE. 13689 **/ 13690 irqreturn_t 13691 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 13692 { 13693 struct lpfc_hba *phba; 13694 struct lpfc_hba_eq_hdl *hba_eq_hdl; 13695 struct lpfc_queue *fpeq; 13696 struct lpfc_eqe *eqe; 13697 unsigned long iflag; 13698 int ecount = 0; 13699 int hba_eqidx; 13700 13701 /* Get the driver's phba structure from the dev_id */ 13702 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 13703 phba = hba_eq_hdl->phba; 13704 hba_eqidx = hba_eq_hdl->idx; 13705 13706 if (unlikely(!phba)) 13707 return IRQ_NONE; 13708 if (unlikely(!phba->sli4_hba.hba_eq)) 13709 return IRQ_NONE; 13710 13711 /* Get to the EQ struct associated with this vector */ 13712 fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; 13713 if (unlikely(!fpeq)) 13714 return IRQ_NONE; 13715 13716 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13717 if (phba->ktime_on) 13718 fpeq->isr_timestamp = ktime_get_ns(); 13719 #endif 13720 13721 if (lpfc_fcp_look_ahead) { 13722 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) 13723 lpfc_sli4_eq_clr_intr(fpeq); 13724 else { 13725 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13726 return IRQ_NONE; 13727 } 13728 } 13729 13730 /* Check device state for handling interrupt */ 13731 if (unlikely(lpfc_intr_state_check(phba))) { 13732 fpeq->EQ_badstate++; 13733 /* Check again for link_state with lock held */ 13734 spin_lock_irqsave(&phba->hbalock, iflag); 13735 if (phba->link_state < LPFC_LINK_DOWN) 13736 /* Flush, clear interrupt, and rearm the EQ */ 13737 lpfc_sli4_eq_flush(phba, fpeq); 13738 spin_unlock_irqrestore(&phba->hbalock, iflag); 13739 if (lpfc_fcp_look_ahead) 13740 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13741 return IRQ_NONE; 13742 } 13743 13744 /* 13745 * Process all the event on FCP fast-path EQ 13746 */ 13747 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 13748 if (eqe == NULL) 13749 break; 13750 13751 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); 13752 if (!(++ecount % fpeq->entry_repost)) 13753 break; 13754 fpeq->EQ_processed++; 13755 } 13756 13757 /* Track the max number of EQEs processed in 1 intr */ 13758 if (ecount > fpeq->EQ_max_eqe) 13759 fpeq->EQ_max_eqe = ecount; 13760 13761 /* Always clear and re-arm the fast-path EQ */ 13762 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 13763 13764 if (unlikely(ecount == 0)) { 13765 fpeq->EQ_no_entry++; 13766 13767 if (lpfc_fcp_look_ahead) { 13768 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13769 return IRQ_NONE; 13770 } 13771 13772 if (phba->intr_type == MSIX) 13773 /* MSI-X treated interrupt served as no EQ share INT */ 13774 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13775 "0358 MSI-X interrupt with no EQE\n"); 13776 else 13777 /* Non MSI-X treated on interrupt as EQ share INT */ 13778 return IRQ_NONE; 13779 } 13780 13781 if (lpfc_fcp_look_ahead) 13782 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13783 13784 return IRQ_HANDLED; 13785 } /* lpfc_sli4_fp_intr_handler */ 13786 13787 /** 13788 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 13789 * @irq: Interrupt number. 13790 * @dev_id: The device context pointer. 
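 *
 * As a usage sketch only (the actual registration lives in the driver's
 * initialization path and may differ by kernel version), this is the handler
 * that would be passed to request_irq() when running in MSI or INTx mode:
 *
 *   rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *                    IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * with @dev_id therefore being the phba pointer itself.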
13791 * 13792 * This function is the device-level interrupt handler to device with SLI-4 13793 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 13794 * interrupt mode is enabled and there is an event in the HBA which requires 13795 * driver attention. This function invokes the slow-path interrupt attention 13796 * handling function and fast-path interrupt attention handling function in 13797 * turn to process the relevant HBA attention events. This function is called 13798 * without any lock held. It gets the hbalock to access and update SLI data 13799 * structures. 13800 * 13801 * This function returns IRQ_HANDLED when interrupt is handled, else it 13802 * returns IRQ_NONE. 13803 **/ 13804 irqreturn_t 13805 lpfc_sli4_intr_handler(int irq, void *dev_id) 13806 { 13807 struct lpfc_hba *phba; 13808 irqreturn_t hba_irq_rc; 13809 bool hba_handled = false; 13810 int qidx; 13811 13812 /* Get the driver's phba structure from the dev_id */ 13813 phba = (struct lpfc_hba *)dev_id; 13814 13815 if (unlikely(!phba)) 13816 return IRQ_NONE; 13817 13818 /* 13819 * Invoke fast-path host attention interrupt handling as appropriate. 13820 */ 13821 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { 13822 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 13823 &phba->sli4_hba.hba_eq_hdl[qidx]); 13824 if (hba_irq_rc == IRQ_HANDLED) 13825 hba_handled |= true; 13826 } 13827 13828 if (phba->cfg_fof) { 13829 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, 13830 &phba->sli4_hba.hba_eq_hdl[qidx]); 13831 if (hba_irq_rc == IRQ_HANDLED) 13832 hba_handled |= true; 13833 } 13834 13835 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 13836 } /* lpfc_sli4_intr_handler */ 13837 13838 /** 13839 * lpfc_sli4_queue_free - free a queue structure and associated memory 13840 * @queue: The queue structure to free. 13841 * 13842 * This function frees a queue structure and the DMAable memory used for 13843 * the host resident queue. This function must be called after destroying the 13844 * queue on the HBA. 13845 **/ 13846 void 13847 lpfc_sli4_queue_free(struct lpfc_queue *queue) 13848 { 13849 struct lpfc_dmabuf *dmabuf; 13850 13851 if (!queue) 13852 return; 13853 13854 while (!list_empty(&queue->page_list)) { 13855 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 13856 list); 13857 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 13858 dmabuf->virt, dmabuf->phys); 13859 kfree(dmabuf); 13860 } 13861 if (queue->rqbp) { 13862 lpfc_free_rq_buffer(queue->phba, queue); 13863 kfree(queue->rqbp); 13864 } 13865 13866 if (!list_empty(&queue->wq_list)) 13867 list_del(&queue->wq_list); 13868 13869 kfree(queue); 13870 return; 13871 } 13872 13873 /** 13874 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 13875 * @phba: The HBA that this queue is being created on. 13876 * @entry_size: The size of each queue entry for this queue. 13877 * @entry count: The number of entries that this queue will handle. 13878 * 13879 * This function allocates a queue structure and the DMAable memory used for 13880 * the host resident queue. This function must be called before creating the 13881 * queue on the HBA. 
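 *
 * A minimal allocate/release sketch (illustrative only; the 64-byte entry
 * size and 1024-entry count are placeholder values, not requirements):
 *
 *   struct lpfc_queue *q;
 *
 *   q = lpfc_sli4_queue_alloc(phba, 64, 1024);
 *   if (!q)
 *       return -ENOMEM;
 *   ... create the queue on the HBA with the matching *_create routine ...
 *   lpfc_sli4_queue_free(q);   (only after the queue is destroyed on the HBA)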
13882 **/ 13883 struct lpfc_queue * 13884 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 13885 uint32_t entry_count) 13886 { 13887 struct lpfc_queue *queue; 13888 struct lpfc_dmabuf *dmabuf; 13889 int x, total_qe_count; 13890 void *dma_pointer; 13891 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13892 13893 if (!phba->sli4_hba.pc_sli4_params.supported) 13894 hw_page_size = SLI4_PAGE_SIZE; 13895 13896 queue = kzalloc(sizeof(struct lpfc_queue) + 13897 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 13898 if (!queue) 13899 return NULL; 13900 queue->page_count = (ALIGN(entry_size * entry_count, 13901 hw_page_size))/hw_page_size; 13902 13903 /* If needed, Adjust page count to match the max the adapter supports */ 13904 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) 13905 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; 13906 13907 INIT_LIST_HEAD(&queue->list); 13908 INIT_LIST_HEAD(&queue->wq_list); 13909 INIT_LIST_HEAD(&queue->page_list); 13910 INIT_LIST_HEAD(&queue->child_list); 13911 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 13912 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 13913 if (!dmabuf) 13914 goto out_fail; 13915 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 13916 hw_page_size, &dmabuf->phys, 13917 GFP_KERNEL); 13918 if (!dmabuf->virt) { 13919 kfree(dmabuf); 13920 goto out_fail; 13921 } 13922 dmabuf->buffer_tag = x; 13923 list_add_tail(&dmabuf->list, &queue->page_list); 13924 /* initialize queue's entry array */ 13925 dma_pointer = dmabuf->virt; 13926 for (; total_qe_count < entry_count && 13927 dma_pointer < (hw_page_size + dmabuf->virt); 13928 total_qe_count++, dma_pointer += entry_size) { 13929 queue->qe[total_qe_count].address = dma_pointer; 13930 } 13931 } 13932 queue->entry_size = entry_size; 13933 queue->entry_count = entry_count; 13934 queue->phba = phba; 13935 13936 /* entry_repost will be set during q creation */ 13937 13938 return queue; 13939 out_fail: 13940 lpfc_sli4_queue_free(queue); 13941 return NULL; 13942 } 13943 13944 /** 13945 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 13946 * @phba: HBA structure that indicates port to create a queue on. 13947 * @pci_barset: PCI BAR set flag. 13948 * 13949 * This function shall perform iomap of the specified PCI BAR address to host 13950 * memory address if not already done so and return it. The returned host 13951 * memory address can be NULL. 13952 */ 13953 static void __iomem * 13954 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 13955 { 13956 if (!phba->pcidev) 13957 return NULL; 13958 13959 switch (pci_barset) { 13960 case WQ_PCI_BAR_0_AND_1: 13961 return phba->pci_bar0_memmap_p; 13962 case WQ_PCI_BAR_2_AND_3: 13963 return phba->pci_bar2_memmap_p; 13964 case WQ_PCI_BAR_4_AND_5: 13965 return phba->pci_bar4_memmap_p; 13966 default: 13967 break; 13968 } 13969 return NULL; 13970 } 13971 13972 /** 13973 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs 13974 * @phba: HBA structure that indicates port to create a queue on. 13975 * @startq: The starting FCP EQ to modify 13976 * 13977 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 13978 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be 13979 * updated in one mailbox command. 13980 * 13981 * The @phba struct is used to send mailbox command to HBA. The @startq 13982 * is used to get the starting FCP EQ to change. 
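 *
 * An illustrative call (the actual trigger points, such as a change to
 * cfg_fcp_imax, live elsewhere in the driver):
 *
 *   rc = lpfc_modify_hba_eq_delay(phba, 0);
 *
 * which updates the EQs starting at index 0, at most
 * LPFC_MAX_EQ_DELAY_EQID_CNT of them in a single mailbox command.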
13983 * This function is asynchronous and will wait for the mailbox 13984 * command to finish before continuing. 13985 * 13986 * On success this function will return a zero. If unable to allocate enough 13987 * memory this function will return -ENOMEM. If the queue create mailbox command 13988 * fails this function will return -ENXIO. 13989 **/ 13990 int 13991 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq) 13992 { 13993 struct lpfc_mbx_modify_eq_delay *eq_delay; 13994 LPFC_MBOXQ_t *mbox; 13995 struct lpfc_queue *eq; 13996 int cnt, rc, length, status = 0; 13997 uint32_t shdr_status, shdr_add_status; 13998 uint32_t result; 13999 int qidx; 14000 union lpfc_sli4_cfg_shdr *shdr; 14001 uint16_t dmult; 14002 14003 if (startq >= phba->io_channel_irqs) 14004 return 0; 14005 14006 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14007 if (!mbox) 14008 return -ENOMEM; 14009 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14010 sizeof(struct lpfc_sli4_cfg_mhdr)); 14011 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14012 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14013 length, LPFC_SLI4_MBX_EMBED); 14014 eq_delay = &mbox->u.mqe.un.eq_delay; 14015 14016 /* Calculate delay multiper from maximum interrupt per second */ 14017 result = phba->cfg_fcp_imax / phba->io_channel_irqs; 14018 if (result > LPFC_DMULT_CONST || result == 0) 14019 dmult = 0; 14020 else 14021 dmult = LPFC_DMULT_CONST/result - 1; 14022 14023 cnt = 0; 14024 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { 14025 eq = phba->sli4_hba.hba_eq[qidx]; 14026 if (!eq) 14027 continue; 14028 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14029 eq_delay->u.request.eq[cnt].phase = 0; 14030 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14031 cnt++; 14032 if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT) 14033 break; 14034 } 14035 eq_delay->u.request.num_eq = cnt; 14036 14037 mbox->vport = phba->pport; 14038 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14039 mbox->context1 = NULL; 14040 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14041 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14042 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14043 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14044 if (shdr_status || shdr_add_status || rc) { 14045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14046 "2512 MODIFY_EQ_DELAY mailbox failed with " 14047 "status x%x add_status x%x, mbx status x%x\n", 14048 shdr_status, shdr_add_status, rc); 14049 status = -ENXIO; 14050 } 14051 mempool_free(mbox, phba->mbox_mem_pool); 14052 return status; 14053 } 14054 14055 /** 14056 * lpfc_eq_create - Create an Event Queue on the HBA 14057 * @phba: HBA structure that indicates port to create a queue on. 14058 * @eq: The queue structure to use to create the event queue. 14059 * @imax: The maximum interrupt per second limit. 14060 * 14061 * This function creates an event queue, as detailed in @eq, on a port, 14062 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 14063 * 14064 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14065 * is used to get the entry count and entry size that are necessary to 14066 * determine the number of pages to allocate and use for this queue. This 14067 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14068 * event queue. This function is asynchronous and will wait for the mailbox 14069 * command to finish before continuing. 14070 * 14071 * On success this function will return a zero. 
If unable to allocate enough 14072 * memory this function will return -ENOMEM. If the queue create mailbox command 14073 * fails this function will return -ENXIO. 14074 **/ 14075 int 14076 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14077 { 14078 struct lpfc_mbx_eq_create *eq_create; 14079 LPFC_MBOXQ_t *mbox; 14080 int rc, length, status = 0; 14081 struct lpfc_dmabuf *dmabuf; 14082 uint32_t shdr_status, shdr_add_status; 14083 union lpfc_sli4_cfg_shdr *shdr; 14084 uint16_t dmult; 14085 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14086 14087 /* sanity check on queue memory */ 14088 if (!eq) 14089 return -ENODEV; 14090 if (!phba->sli4_hba.pc_sli4_params.supported) 14091 hw_page_size = SLI4_PAGE_SIZE; 14092 14093 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14094 if (!mbox) 14095 return -ENOMEM; 14096 length = (sizeof(struct lpfc_mbx_eq_create) - 14097 sizeof(struct lpfc_sli4_cfg_mhdr)); 14098 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14099 LPFC_MBOX_OPCODE_EQ_CREATE, 14100 length, LPFC_SLI4_MBX_EMBED); 14101 eq_create = &mbox->u.mqe.un.eq_create; 14102 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14103 eq->page_count); 14104 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14105 LPFC_EQE_SIZE); 14106 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14107 /* don't setup delay multiplier using EQ_CREATE */ 14108 dmult = 0; 14109 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14110 dmult); 14111 switch (eq->entry_count) { 14112 default: 14113 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14114 "0360 Unsupported EQ count. (%d)\n", 14115 eq->entry_count); 14116 if (eq->entry_count < 256) 14117 return -EINVAL; 14118 /* otherwise default to smallest count (drop through) */ 14119 case 256: 14120 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14121 LPFC_EQ_CNT_256); 14122 break; 14123 case 512: 14124 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14125 LPFC_EQ_CNT_512); 14126 break; 14127 case 1024: 14128 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14129 LPFC_EQ_CNT_1024); 14130 break; 14131 case 2048: 14132 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14133 LPFC_EQ_CNT_2048); 14134 break; 14135 case 4096: 14136 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14137 LPFC_EQ_CNT_4096); 14138 break; 14139 } 14140 list_for_each_entry(dmabuf, &eq->page_list, list) { 14141 memset(dmabuf->virt, 0, hw_page_size); 14142 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14143 putPaddrLow(dmabuf->phys); 14144 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14145 putPaddrHigh(dmabuf->phys); 14146 } 14147 mbox->vport = phba->pport; 14148 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14149 mbox->context1 = NULL; 14150 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14151 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14152 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14153 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14154 if (shdr_status || shdr_add_status || rc) { 14155 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14156 "2500 EQ_CREATE mailbox failed with " 14157 "status x%x add_status x%x, mbx status x%x\n", 14158 shdr_status, shdr_add_status, rc); 14159 status = -ENXIO; 14160 } 14161 eq->type = LPFC_EQ; 14162 eq->subtype = LPFC_NONE; 14163 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14164 if (eq->queue_id == 
0xFFFF)
14165 status = -ENXIO;
14166 eq->host_index = 0;
14167 eq->hba_index = 0;
14168 eq->entry_repost = LPFC_EQ_REPOST;
14169
14170 mempool_free(mbox, phba->mbox_mem_pool);
14171 return status;
14172 }
14173
14174 /**
14175 * lpfc_cq_create - Create a Completion Queue on the HBA
14176 * @phba: HBA structure that indicates port to create a queue on.
14177 * @cq: The queue structure to use to create the completion queue.
14178 * @eq: The event queue to bind this completion queue to.
 * @type: The completion queue type.
 * @subtype: The completion queue subtype.
14179 *
14180 * This function creates a completion queue, as detailed in @cq, on a port,
14181 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14182 *
14183 * The @phba struct is used to send mailbox command to HBA. The @cq struct
14184 * is used to get the entry count and entry size that are necessary to
14185 * determine the number of pages to allocate and use for this queue. The @eq
14186 * is used to indicate which event queue to bind this completion queue to. This
14187 * function will send the CQ_CREATE mailbox command to the HBA to setup the
14188 * completion queue. This function is synchronous and will wait for the mailbox
14189 * command to finish before continuing.
14190 *
14191 * On success this function will return a zero. If unable to allocate enough
14192 * memory this function will return -ENOMEM. If the queue create mailbox command
14193 * fails this function will return -ENXIO.
14194 **/
14195 int
14196 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14197 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14198 {
14199 struct lpfc_mbx_cq_create *cq_create;
14200 struct lpfc_dmabuf *dmabuf;
14201 LPFC_MBOXQ_t *mbox;
14202 int rc, length, status = 0;
14203 uint32_t shdr_status, shdr_add_status;
14204 union lpfc_sli4_cfg_shdr *shdr;
14205 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14206
14207 /* sanity check on queue memory */
14208 if (!cq || !eq)
14209 return -ENODEV;
14210 if (!phba->sli4_hba.pc_sli4_params.supported)
14211 hw_page_size = SLI4_PAGE_SIZE;
14212
14213 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14214 if (!mbox)
14215 return -ENOMEM;
14216 length = (sizeof(struct lpfc_mbx_cq_create) -
14217 sizeof(struct lpfc_sli4_cfg_mhdr));
14218 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14219 LPFC_MBOX_OPCODE_CQ_CREATE,
14220 length, LPFC_SLI4_MBX_EMBED);
14221 cq_create = &mbox->u.mqe.un.cq_create;
14222 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14223 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14224 cq->page_count);
14225 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14226 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14227 bf_set(lpfc_mbox_hdr_version, &shdr->request,
14228 phba->sli4_hba.pc_sli4_params.cqv);
14229 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14230 /* FW only supports 1. 
Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 14231 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 14232 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14233 eq->queue_id); 14234 } else { 14235 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14236 eq->queue_id); 14237 } 14238 switch (cq->entry_count) { 14239 default: 14240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14241 "0361 Unsupported CQ count: " 14242 "entry cnt %d sz %d pg cnt %d\n", 14243 cq->entry_count, cq->entry_size, 14244 cq->page_count); 14245 if (cq->entry_count < 256) { 14246 status = -EINVAL; 14247 goto out; 14248 } 14249 /* otherwise default to smallest count (drop through) */ 14250 case 256: 14251 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14252 LPFC_CQ_CNT_256); 14253 break; 14254 case 512: 14255 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14256 LPFC_CQ_CNT_512); 14257 break; 14258 case 1024: 14259 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14260 LPFC_CQ_CNT_1024); 14261 break; 14262 } 14263 list_for_each_entry(dmabuf, &cq->page_list, list) { 14264 memset(dmabuf->virt, 0, hw_page_size); 14265 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14266 putPaddrLow(dmabuf->phys); 14267 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14268 putPaddrHigh(dmabuf->phys); 14269 } 14270 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14271 14272 /* The IOCTL status is embedded in the mailbox subheader. */ 14273 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14274 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14275 if (shdr_status || shdr_add_status || rc) { 14276 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14277 "2501 CQ_CREATE mailbox failed with " 14278 "status x%x add_status x%x, mbx status x%x\n", 14279 shdr_status, shdr_add_status, rc); 14280 status = -ENXIO; 14281 goto out; 14282 } 14283 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14284 if (cq->queue_id == 0xFFFF) { 14285 status = -ENXIO; 14286 goto out; 14287 } 14288 /* link the cq onto the parent eq child list */ 14289 list_add_tail(&cq->list, &eq->child_list); 14290 /* Set up completion queue's type and subtype */ 14291 cq->type = type; 14292 cq->subtype = subtype; 14293 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14294 cq->assoc_qid = eq->queue_id; 14295 cq->host_index = 0; 14296 cq->hba_index = 0; 14297 cq->entry_repost = LPFC_CQ_REPOST; 14298 14299 out: 14300 mempool_free(mbox, phba->mbox_mem_pool); 14301 return status; 14302 } 14303 14304 /** 14305 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ 14306 * @phba: HBA structure that indicates port to create a queue on. 14307 * @cqp: The queue structure array to use to create the completion queues. 14308 * @eqp: The event queue array to bind these completion queues to. 14309 * 14310 * This function creates a set of completion queue, s to support MRQ 14311 * as detailed in @cqp, on a port, 14312 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. 14313 * 14314 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14315 * is used to get the entry count and entry size that are necessary to 14316 * determine the number of pages to allocate and use for this queue. The @eq 14317 * is used to indicate which event queue to bind this completion queue to. This 14318 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the 14319 * completion queue. 
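 *
 * A hedged usage sketch (the array names below are illustrative; the real
 * caller passes its NVMET CQ set and the HBA EQ array, both sized by
 * phba->cfg_nvmet_mrq):
 *
 *   rc = lpfc_cq_create_set(phba, nvmet_cqset, hba_eqs,
 *                           LPFC_WCQ, LPFC_NVMET);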
This function is asynchronous and will wait for the mailbox 14320 * command to finish before continuing. 14321 * 14322 * On success this function will return a zero. If unable to allocate enough 14323 * memory this function will return -ENOMEM. If the queue create mailbox command 14324 * fails this function will return -ENXIO. 14325 **/ 14326 int 14327 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14328 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) 14329 { 14330 struct lpfc_queue *cq; 14331 struct lpfc_queue *eq; 14332 struct lpfc_mbx_cq_create_set *cq_set; 14333 struct lpfc_dmabuf *dmabuf; 14334 LPFC_MBOXQ_t *mbox; 14335 int rc, length, alloclen, status = 0; 14336 int cnt, idx, numcq, page_idx = 0; 14337 uint32_t shdr_status, shdr_add_status; 14338 union lpfc_sli4_cfg_shdr *shdr; 14339 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14340 14341 /* sanity check on queue memory */ 14342 numcq = phba->cfg_nvmet_mrq; 14343 if (!cqp || !eqp || !numcq) 14344 return -ENODEV; 14345 if (!phba->sli4_hba.pc_sli4_params.supported) 14346 hw_page_size = SLI4_PAGE_SIZE; 14347 14348 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14349 if (!mbox) 14350 return -ENOMEM; 14351 14352 length = sizeof(struct lpfc_mbx_cq_create_set); 14353 length += ((numcq * cqp[0]->page_count) * 14354 sizeof(struct dma_address)); 14355 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14356 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14357 LPFC_SLI4_MBX_NEMBED); 14358 if (alloclen < length) { 14359 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14360 "3098 Allocated DMA memory size (%d) is " 14361 "less than the requested DMA memory size " 14362 "(%d)\n", alloclen, length); 14363 status = -ENOMEM; 14364 goto out; 14365 } 14366 cq_set = mbox->sge_array->addr[0]; 14367 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14368 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14369 14370 for (idx = 0; idx < numcq; idx++) { 14371 cq = cqp[idx]; 14372 eq = eqp[idx]; 14373 if (!cq || !eq) { 14374 status = -ENOMEM; 14375 goto out; 14376 } 14377 14378 switch (idx) { 14379 case 0: 14380 bf_set(lpfc_mbx_cq_create_set_page_size, 14381 &cq_set->u.request, 14382 (hw_page_size / SLI4_PAGE_SIZE)); 14383 bf_set(lpfc_mbx_cq_create_set_num_pages, 14384 &cq_set->u.request, cq->page_count); 14385 bf_set(lpfc_mbx_cq_create_set_evt, 14386 &cq_set->u.request, 1); 14387 bf_set(lpfc_mbx_cq_create_set_valid, 14388 &cq_set->u.request, 1); 14389 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14390 &cq_set->u.request, 0); 14391 bf_set(lpfc_mbx_cq_create_set_num_cq, 14392 &cq_set->u.request, numcq); 14393 switch (cq->entry_count) { 14394 default: 14395 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14396 "3118 Bad CQ count. 
(%d)\n", 14397 cq->entry_count); 14398 if (cq->entry_count < 256) { 14399 status = -EINVAL; 14400 goto out; 14401 } 14402 /* otherwise default to smallest (drop thru) */ 14403 case 256: 14404 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14405 &cq_set->u.request, LPFC_CQ_CNT_256); 14406 break; 14407 case 512: 14408 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14409 &cq_set->u.request, LPFC_CQ_CNT_512); 14410 break; 14411 case 1024: 14412 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14413 &cq_set->u.request, LPFC_CQ_CNT_1024); 14414 break; 14415 } 14416 bf_set(lpfc_mbx_cq_create_set_eq_id0, 14417 &cq_set->u.request, eq->queue_id); 14418 break; 14419 case 1: 14420 bf_set(lpfc_mbx_cq_create_set_eq_id1, 14421 &cq_set->u.request, eq->queue_id); 14422 break; 14423 case 2: 14424 bf_set(lpfc_mbx_cq_create_set_eq_id2, 14425 &cq_set->u.request, eq->queue_id); 14426 break; 14427 case 3: 14428 bf_set(lpfc_mbx_cq_create_set_eq_id3, 14429 &cq_set->u.request, eq->queue_id); 14430 break; 14431 case 4: 14432 bf_set(lpfc_mbx_cq_create_set_eq_id4, 14433 &cq_set->u.request, eq->queue_id); 14434 break; 14435 case 5: 14436 bf_set(lpfc_mbx_cq_create_set_eq_id5, 14437 &cq_set->u.request, eq->queue_id); 14438 break; 14439 case 6: 14440 bf_set(lpfc_mbx_cq_create_set_eq_id6, 14441 &cq_set->u.request, eq->queue_id); 14442 break; 14443 case 7: 14444 bf_set(lpfc_mbx_cq_create_set_eq_id7, 14445 &cq_set->u.request, eq->queue_id); 14446 break; 14447 case 8: 14448 bf_set(lpfc_mbx_cq_create_set_eq_id8, 14449 &cq_set->u.request, eq->queue_id); 14450 break; 14451 case 9: 14452 bf_set(lpfc_mbx_cq_create_set_eq_id9, 14453 &cq_set->u.request, eq->queue_id); 14454 break; 14455 case 10: 14456 bf_set(lpfc_mbx_cq_create_set_eq_id10, 14457 &cq_set->u.request, eq->queue_id); 14458 break; 14459 case 11: 14460 bf_set(lpfc_mbx_cq_create_set_eq_id11, 14461 &cq_set->u.request, eq->queue_id); 14462 break; 14463 case 12: 14464 bf_set(lpfc_mbx_cq_create_set_eq_id12, 14465 &cq_set->u.request, eq->queue_id); 14466 break; 14467 case 13: 14468 bf_set(lpfc_mbx_cq_create_set_eq_id13, 14469 &cq_set->u.request, eq->queue_id); 14470 break; 14471 case 14: 14472 bf_set(lpfc_mbx_cq_create_set_eq_id14, 14473 &cq_set->u.request, eq->queue_id); 14474 break; 14475 case 15: 14476 bf_set(lpfc_mbx_cq_create_set_eq_id15, 14477 &cq_set->u.request, eq->queue_id); 14478 break; 14479 } 14480 14481 /* link the cq onto the parent eq child list */ 14482 list_add_tail(&cq->list, &eq->child_list); 14483 /* Set up completion queue's type and subtype */ 14484 cq->type = type; 14485 cq->subtype = subtype; 14486 cq->assoc_qid = eq->queue_id; 14487 cq->host_index = 0; 14488 cq->hba_index = 0; 14489 cq->entry_repost = LPFC_CQ_REPOST; 14490 14491 rc = 0; 14492 list_for_each_entry(dmabuf, &cq->page_list, list) { 14493 memset(dmabuf->virt, 0, hw_page_size); 14494 cnt = page_idx + dmabuf->buffer_tag; 14495 cq_set->u.request.page[cnt].addr_lo = 14496 putPaddrLow(dmabuf->phys); 14497 cq_set->u.request.page[cnt].addr_hi = 14498 putPaddrHigh(dmabuf->phys); 14499 rc++; 14500 } 14501 page_idx += rc; 14502 } 14503 14504 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14505 14506 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 14507 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14508 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14509 if (shdr_status || shdr_add_status || rc) { 14510 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14511 "3119 CQ_CREATE_SET mailbox failed with " 14512 "status x%x add_status x%x, mbx status x%x\n", 14513 shdr_status, shdr_add_status, rc); 14514 status = -ENXIO; 14515 goto out; 14516 } 14517 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 14518 if (rc == 0xFFFF) { 14519 status = -ENXIO; 14520 goto out; 14521 } 14522 14523 for (idx = 0; idx < numcq; idx++) { 14524 cq = cqp[idx]; 14525 cq->queue_id = rc + idx; 14526 } 14527 14528 out: 14529 lpfc_sli4_mbox_cmd_free(phba, mbox); 14530 return status; 14531 } 14532 14533 /** 14534 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 14535 * @phba: HBA structure that indicates port to create a queue on. 14536 * @mq: The queue structure to use to create the mailbox queue. 14537 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 14538 * @cq: The completion queue to associate with this cq. 14539 * 14540 * This function provides failback (fb) functionality when the 14541 * mq_create_ext fails on older FW generations. It's purpose is identical 14542 * to mq_create_ext otherwise. 14543 * 14544 * This routine cannot fail as all attributes were previously accessed and 14545 * initialized in mq_create_ext. 14546 **/ 14547 static void 14548 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 14549 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 14550 { 14551 struct lpfc_mbx_mq_create *mq_create; 14552 struct lpfc_dmabuf *dmabuf; 14553 int length; 14554 14555 length = (sizeof(struct lpfc_mbx_mq_create) - 14556 sizeof(struct lpfc_sli4_cfg_mhdr)); 14557 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14558 LPFC_MBOX_OPCODE_MQ_CREATE, 14559 length, LPFC_SLI4_MBX_EMBED); 14560 mq_create = &mbox->u.mqe.un.mq_create; 14561 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 14562 mq->page_count); 14563 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 14564 cq->queue_id); 14565 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 14566 switch (mq->entry_count) { 14567 case 16: 14568 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14569 LPFC_MQ_RING_SIZE_16); 14570 break; 14571 case 32: 14572 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14573 LPFC_MQ_RING_SIZE_32); 14574 break; 14575 case 64: 14576 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14577 LPFC_MQ_RING_SIZE_64); 14578 break; 14579 case 128: 14580 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14581 LPFC_MQ_RING_SIZE_128); 14582 break; 14583 } 14584 list_for_each_entry(dmabuf, &mq->page_list, list) { 14585 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14586 putPaddrLow(dmabuf->phys); 14587 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14588 putPaddrHigh(dmabuf->phys); 14589 } 14590 } 14591 14592 /** 14593 * lpfc_mq_create - Create a mailbox Queue on the HBA 14594 * @phba: HBA structure that indicates port to create a queue on. 14595 * @mq: The queue structure to use to create the mailbox queue. 14596 * @cq: The completion queue to associate with this cq. 14597 * @subtype: The queue's subtype. 14598 * 14599 * This function creates a mailbox queue, as detailed in @mq, on a port, 14600 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 
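 *
 * There is typically a single mailbox queue per port; an illustrative call
 * (the queue names are placeholders for the caller's MQ/CQ pair) would be:
 *
 *   rc = lpfc_mq_create(phba, mbx_mq, mbx_cq, LPFC_MBOX);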
14601 * 14602 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14603 * is used to get the entry count and entry size that are necessary to 14604 * determine the number of pages to allocate and use for this queue. This 14605 * function will send the MQ_CREATE mailbox command to the HBA to setup the 14606 * mailbox queue. This function is asynchronous and will wait for the mailbox 14607 * command to finish before continuing. 14608 * 14609 * On success this function will return a zero. If unable to allocate enough 14610 * memory this function will return -ENOMEM. If the queue create mailbox command 14611 * fails this function will return -ENXIO. 14612 **/ 14613 int32_t 14614 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 14615 struct lpfc_queue *cq, uint32_t subtype) 14616 { 14617 struct lpfc_mbx_mq_create *mq_create; 14618 struct lpfc_mbx_mq_create_ext *mq_create_ext; 14619 struct lpfc_dmabuf *dmabuf; 14620 LPFC_MBOXQ_t *mbox; 14621 int rc, length, status = 0; 14622 uint32_t shdr_status, shdr_add_status; 14623 union lpfc_sli4_cfg_shdr *shdr; 14624 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14625 14626 /* sanity check on queue memory */ 14627 if (!mq || !cq) 14628 return -ENODEV; 14629 if (!phba->sli4_hba.pc_sli4_params.supported) 14630 hw_page_size = SLI4_PAGE_SIZE; 14631 14632 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14633 if (!mbox) 14634 return -ENOMEM; 14635 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 14636 sizeof(struct lpfc_sli4_cfg_mhdr)); 14637 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14638 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 14639 length, LPFC_SLI4_MBX_EMBED); 14640 14641 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 14642 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 14643 bf_set(lpfc_mbx_mq_create_ext_num_pages, 14644 &mq_create_ext->u.request, mq->page_count); 14645 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 14646 &mq_create_ext->u.request, 1); 14647 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 14648 &mq_create_ext->u.request, 1); 14649 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 14650 &mq_create_ext->u.request, 1); 14651 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 14652 &mq_create_ext->u.request, 1); 14653 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 14654 &mq_create_ext->u.request, 1); 14655 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 14656 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14657 phba->sli4_hba.pc_sli4_params.mqv); 14658 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 14659 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 14660 cq->queue_id); 14661 else 14662 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 14663 cq->queue_id); 14664 switch (mq->entry_count) { 14665 default: 14666 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14667 "0362 Unsupported MQ count. 
(%d)\n", 14668 mq->entry_count); 14669 if (mq->entry_count < 16) { 14670 status = -EINVAL; 14671 goto out; 14672 } 14673 /* otherwise default to smallest count (drop through) */ 14674 case 16: 14675 bf_set(lpfc_mq_context_ring_size, 14676 &mq_create_ext->u.request.context, 14677 LPFC_MQ_RING_SIZE_16); 14678 break; 14679 case 32: 14680 bf_set(lpfc_mq_context_ring_size, 14681 &mq_create_ext->u.request.context, 14682 LPFC_MQ_RING_SIZE_32); 14683 break; 14684 case 64: 14685 bf_set(lpfc_mq_context_ring_size, 14686 &mq_create_ext->u.request.context, 14687 LPFC_MQ_RING_SIZE_64); 14688 break; 14689 case 128: 14690 bf_set(lpfc_mq_context_ring_size, 14691 &mq_create_ext->u.request.context, 14692 LPFC_MQ_RING_SIZE_128); 14693 break; 14694 } 14695 list_for_each_entry(dmabuf, &mq->page_list, list) { 14696 memset(dmabuf->virt, 0, hw_page_size); 14697 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 14698 putPaddrLow(dmabuf->phys); 14699 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 14700 putPaddrHigh(dmabuf->phys); 14701 } 14702 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14703 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 14704 &mq_create_ext->u.response); 14705 if (rc != MBX_SUCCESS) { 14706 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14707 "2795 MQ_CREATE_EXT failed with " 14708 "status x%x. Failback to MQ_CREATE.\n", 14709 rc); 14710 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 14711 mq_create = &mbox->u.mqe.un.mq_create; 14712 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14713 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 14714 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 14715 &mq_create->u.response); 14716 } 14717 14718 /* The IOCTL status is embedded in the mailbox subheader. */ 14719 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14720 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14721 if (shdr_status || shdr_add_status || rc) { 14722 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14723 "2502 MQ_CREATE mailbox failed with " 14724 "status x%x add_status x%x, mbx status x%x\n", 14725 shdr_status, shdr_add_status, rc); 14726 status = -ENXIO; 14727 goto out; 14728 } 14729 if (mq->queue_id == 0xFFFF) { 14730 status = -ENXIO; 14731 goto out; 14732 } 14733 mq->type = LPFC_MQ; 14734 mq->assoc_qid = cq->queue_id; 14735 mq->subtype = subtype; 14736 mq->host_index = 0; 14737 mq->hba_index = 0; 14738 mq->entry_repost = LPFC_MQ_REPOST; 14739 14740 /* link the mq onto the parent cq child list */ 14741 list_add_tail(&mq->list, &cq->child_list); 14742 out: 14743 mempool_free(mbox, phba->mbox_mem_pool); 14744 return status; 14745 } 14746 14747 /** 14748 * lpfc_wq_create - Create a Work Queue on the HBA 14749 * @phba: HBA structure that indicates port to create a queue on. 14750 * @wq: The queue structure to use to create the work queue. 14751 * @cq: The completion queue to bind this work queue to. 14752 * @subtype: The subtype of the work queue indicating its functionality. 14753 * 14754 * This function creates a work queue, as detailed in @wq, on a port, described 14755 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 14756 * 14757 * The @phba struct is used to send mailbox command to HBA. The @wq struct 14758 * is used to get the entry count and entry size that are necessary to 14759 * determine the number of pages to allocate and use for this queue. The @cq 14760 * is used to indicate which completion queue to bind this work queue to. 
This 14761 * function will send the WQ_CREATE mailbox command to the HBA to setup the 14762 * work queue. This function is asynchronous and will wait for the mailbox 14763 * command to finish before continuing. 14764 * 14765 * On success this function will return a zero. If unable to allocate enough 14766 * memory this function will return -ENOMEM. If the queue create mailbox command 14767 * fails this function will return -ENXIO. 14768 **/ 14769 int 14770 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 14771 struct lpfc_queue *cq, uint32_t subtype) 14772 { 14773 struct lpfc_mbx_wq_create *wq_create; 14774 struct lpfc_dmabuf *dmabuf; 14775 LPFC_MBOXQ_t *mbox; 14776 int rc, length, status = 0; 14777 uint32_t shdr_status, shdr_add_status; 14778 union lpfc_sli4_cfg_shdr *shdr; 14779 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14780 struct dma_address *page; 14781 void __iomem *bar_memmap_p; 14782 uint32_t db_offset; 14783 uint16_t pci_barset; 14784 14785 /* sanity check on queue memory */ 14786 if (!wq || !cq) 14787 return -ENODEV; 14788 if (!phba->sli4_hba.pc_sli4_params.supported) 14789 hw_page_size = SLI4_PAGE_SIZE; 14790 14791 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14792 if (!mbox) 14793 return -ENOMEM; 14794 length = (sizeof(struct lpfc_mbx_wq_create) - 14795 sizeof(struct lpfc_sli4_cfg_mhdr)); 14796 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14797 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 14798 length, LPFC_SLI4_MBX_EMBED); 14799 wq_create = &mbox->u.mqe.un.wq_create; 14800 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 14801 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 14802 wq->page_count); 14803 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 14804 cq->queue_id); 14805 14806 /* wqv is the earliest version supported, NOT the latest */ 14807 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14808 phba->sli4_hba.pc_sli4_params.wqv); 14809 14810 switch (phba->sli4_hba.pc_sli4_params.wqv) { 14811 case LPFC_Q_CREATE_VERSION_0: 14812 switch (wq->entry_size) { 14813 default: 14814 case 64: 14815 /* Nothing to do, version 0 ONLY supports 64 byte */ 14816 page = wq_create->u.request.page; 14817 break; 14818 case 128: 14819 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 14820 LPFC_WQ_SZ128_SUPPORT)) { 14821 status = -ERANGE; 14822 goto out; 14823 } 14824 /* If we get here the HBA MUST also support V1 and 14825 * we MUST use it 14826 */ 14827 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14828 LPFC_Q_CREATE_VERSION_1); 14829 14830 bf_set(lpfc_mbx_wq_create_wqe_count, 14831 &wq_create->u.request_1, wq->entry_count); 14832 bf_set(lpfc_mbx_wq_create_wqe_size, 14833 &wq_create->u.request_1, 14834 LPFC_WQ_WQE_SIZE_128); 14835 bf_set(lpfc_mbx_wq_create_page_size, 14836 &wq_create->u.request_1, 14837 LPFC_WQ_PAGE_SIZE_4096); 14838 page = wq_create->u.request_1.page; 14839 break; 14840 } 14841 break; 14842 case LPFC_Q_CREATE_VERSION_1: 14843 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 14844 wq->entry_count); 14845 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14846 LPFC_Q_CREATE_VERSION_1); 14847 14848 switch (wq->entry_size) { 14849 default: 14850 case 64: 14851 bf_set(lpfc_mbx_wq_create_wqe_size, 14852 &wq_create->u.request_1, 14853 LPFC_WQ_WQE_SIZE_64); 14854 break; 14855 case 128: 14856 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 14857 LPFC_WQ_SZ128_SUPPORT)) { 14858 status = -ERANGE; 14859 goto out; 14860 } 14861 bf_set(lpfc_mbx_wq_create_wqe_size, 14862 &wq_create->u.request_1, 14863 
LPFC_WQ_WQE_SIZE_128); 14864 break; 14865 } 14866 bf_set(lpfc_mbx_wq_create_page_size, 14867 &wq_create->u.request_1, 14868 LPFC_WQ_PAGE_SIZE_4096); 14869 page = wq_create->u.request_1.page; 14870 break; 14871 default: 14872 status = -ERANGE; 14873 goto out; 14874 } 14875 14876 list_for_each_entry(dmabuf, &wq->page_list, list) { 14877 memset(dmabuf->virt, 0, hw_page_size); 14878 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 14879 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 14880 } 14881 14882 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 14883 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 14884 14885 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14886 /* The IOCTL status is embedded in the mailbox subheader. */ 14887 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14888 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14889 if (shdr_status || shdr_add_status || rc) { 14890 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14891 "2503 WQ_CREATE mailbox failed with " 14892 "status x%x add_status x%x, mbx status x%x\n", 14893 shdr_status, shdr_add_status, rc); 14894 status = -ENXIO; 14895 goto out; 14896 } 14897 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 14898 if (wq->queue_id == 0xFFFF) { 14899 status = -ENXIO; 14900 goto out; 14901 } 14902 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 14903 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 14904 &wq_create->u.response); 14905 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 14906 (wq->db_format != LPFC_DB_RING_FORMAT)) { 14907 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14908 "3265 WQ[%d] doorbell format not " 14909 "supported: x%x\n", wq->queue_id, 14910 wq->db_format); 14911 status = -EINVAL; 14912 goto out; 14913 } 14914 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 14915 &wq_create->u.response); 14916 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 14917 if (!bar_memmap_p) { 14918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14919 "3263 WQ[%d] failed to memmap pci " 14920 "barset:x%x\n", wq->queue_id, 14921 pci_barset); 14922 status = -ENOMEM; 14923 goto out; 14924 } 14925 db_offset = wq_create->u.response.doorbell_offset; 14926 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 14927 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 14928 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14929 "3252 WQ[%d] doorbell offset not " 14930 "supported: x%x\n", wq->queue_id, 14931 db_offset); 14932 status = -EINVAL; 14933 goto out; 14934 } 14935 wq->db_regaddr = bar_memmap_p + db_offset; 14936 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14937 "3264 WQ[%d]: barset:x%x, offset:x%x, " 14938 "format:x%x\n", wq->queue_id, pci_barset, 14939 db_offset, wq->db_format); 14940 } else { 14941 wq->db_format = LPFC_DB_LIST_FORMAT; 14942 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 14943 } 14944 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 14945 if (wq->pring == NULL) { 14946 status = -ENOMEM; 14947 goto out; 14948 } 14949 wq->type = LPFC_WQ; 14950 wq->assoc_qid = cq->queue_id; 14951 wq->subtype = subtype; 14952 wq->host_index = 0; 14953 wq->hba_index = 0; 14954 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 14955 14956 /* link the wq onto the parent cq child list */ 14957 list_add_tail(&wq->list, &cq->child_list); 14958 out: 14959 mempool_free(mbox, phba->mbox_mem_pool); 14960 return status; 14961 } 14962 14963 /** 14964 * lpfc_rq_create - Create a Receive Queue on the HBA 14965 * @phba: HBA structure that indicates port to create a 
queue on. 14966 * @hrq: The queue structure to use to create the header receive queue. 14967 * @drq: The queue structure to use to create the data receive queue. 14968 * @cq: The completion queue to bind this work queue to. 14969 * 14970 * This function creates a receive buffer queue pair , as detailed in @hrq and 14971 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 14972 * to the HBA. 14973 * 14974 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 14975 * struct is used to get the entry count that is necessary to determine the 14976 * number of pages to use for this queue. The @cq is used to indicate which 14977 * completion queue to bind received buffers that are posted to these queues to. 14978 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 14979 * receive queue pair. This function is asynchronous and will wait for the 14980 * mailbox command to finish before continuing. 14981 * 14982 * On success this function will return a zero. If unable to allocate enough 14983 * memory this function will return -ENOMEM. If the queue create mailbox command 14984 * fails this function will return -ENXIO. 14985 **/ 14986 int 14987 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 14988 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 14989 { 14990 struct lpfc_mbx_rq_create *rq_create; 14991 struct lpfc_dmabuf *dmabuf; 14992 LPFC_MBOXQ_t *mbox; 14993 int rc, length, status = 0; 14994 uint32_t shdr_status, shdr_add_status; 14995 union lpfc_sli4_cfg_shdr *shdr; 14996 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14997 void __iomem *bar_memmap_p; 14998 uint32_t db_offset; 14999 uint16_t pci_barset; 15000 15001 /* sanity check on queue memory */ 15002 if (!hrq || !drq || !cq) 15003 return -ENODEV; 15004 if (!phba->sli4_hba.pc_sli4_params.supported) 15005 hw_page_size = SLI4_PAGE_SIZE; 15006 15007 if (hrq->entry_count != drq->entry_count) 15008 return -EINVAL; 15009 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15010 if (!mbox) 15011 return -ENOMEM; 15012 length = (sizeof(struct lpfc_mbx_rq_create) - 15013 sizeof(struct lpfc_sli4_cfg_mhdr)); 15014 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15015 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15016 length, LPFC_SLI4_MBX_EMBED); 15017 rq_create = &mbox->u.mqe.un.rq_create; 15018 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15019 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15020 phba->sli4_hba.pc_sli4_params.rqv); 15021 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15022 bf_set(lpfc_rq_context_rqe_count_1, 15023 &rq_create->u.request.context, 15024 hrq->entry_count); 15025 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15026 bf_set(lpfc_rq_context_rqe_size, 15027 &rq_create->u.request.context, 15028 LPFC_RQE_SIZE_8); 15029 bf_set(lpfc_rq_context_page_size, 15030 &rq_create->u.request.context, 15031 LPFC_RQ_PAGE_SIZE_4096); 15032 } else { 15033 switch (hrq->entry_count) { 15034 default: 15035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15036 "2535 Unsupported RQ count. 
(%d)\n", 15037 hrq->entry_count); 15038 if (hrq->entry_count < 512) { 15039 status = -EINVAL; 15040 goto out; 15041 } 15042 /* otherwise default to smallest count (drop through) */ 15043 case 512: 15044 bf_set(lpfc_rq_context_rqe_count, 15045 &rq_create->u.request.context, 15046 LPFC_RQ_RING_SIZE_512); 15047 break; 15048 case 1024: 15049 bf_set(lpfc_rq_context_rqe_count, 15050 &rq_create->u.request.context, 15051 LPFC_RQ_RING_SIZE_1024); 15052 break; 15053 case 2048: 15054 bf_set(lpfc_rq_context_rqe_count, 15055 &rq_create->u.request.context, 15056 LPFC_RQ_RING_SIZE_2048); 15057 break; 15058 case 4096: 15059 bf_set(lpfc_rq_context_rqe_count, 15060 &rq_create->u.request.context, 15061 LPFC_RQ_RING_SIZE_4096); 15062 break; 15063 } 15064 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15065 LPFC_HDR_BUF_SIZE); 15066 } 15067 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15068 cq->queue_id); 15069 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15070 hrq->page_count); 15071 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15072 memset(dmabuf->virt, 0, hw_page_size); 15073 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15074 putPaddrLow(dmabuf->phys); 15075 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15076 putPaddrHigh(dmabuf->phys); 15077 } 15078 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15079 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15080 15081 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15082 /* The IOCTL status is embedded in the mailbox subheader. */ 15083 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15084 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15085 if (shdr_status || shdr_add_status || rc) { 15086 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15087 "2504 RQ_CREATE mailbox failed with " 15088 "status x%x add_status x%x, mbx status x%x\n", 15089 shdr_status, shdr_add_status, rc); 15090 status = -ENXIO; 15091 goto out; 15092 } 15093 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15094 if (hrq->queue_id == 0xFFFF) { 15095 status = -ENXIO; 15096 goto out; 15097 } 15098 15099 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15100 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15101 &rq_create->u.response); 15102 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15103 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15104 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15105 "3262 RQ [%d] doorbell format not " 15106 "supported: x%x\n", hrq->queue_id, 15107 hrq->db_format); 15108 status = -EINVAL; 15109 goto out; 15110 } 15111 15112 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15113 &rq_create->u.response); 15114 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15115 if (!bar_memmap_p) { 15116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15117 "3269 RQ[%d] failed to memmap pci " 15118 "barset:x%x\n", hrq->queue_id, 15119 pci_barset); 15120 status = -ENOMEM; 15121 goto out; 15122 } 15123 15124 db_offset = rq_create->u.response.doorbell_offset; 15125 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15126 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15127 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15128 "3270 RQ[%d] doorbell offset not " 15129 "supported: x%x\n", hrq->queue_id, 15130 db_offset); 15131 status = -EINVAL; 15132 goto out; 15133 } 15134 hrq->db_regaddr = bar_memmap_p + db_offset; 15135 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15136 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15137 "format:x%x\n", hrq->queue_id, pci_barset, 15138 
db_offset, hrq->db_format); 15139 } else { 15140 hrq->db_format = LPFC_DB_RING_FORMAT; 15141 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15142 } 15143 hrq->type = LPFC_HRQ; 15144 hrq->assoc_qid = cq->queue_id; 15145 hrq->subtype = subtype; 15146 hrq->host_index = 0; 15147 hrq->hba_index = 0; 15148 hrq->entry_repost = LPFC_RQ_REPOST; 15149 15150 /* now create the data queue */ 15151 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15152 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15153 length, LPFC_SLI4_MBX_EMBED); 15154 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15155 phba->sli4_hba.pc_sli4_params.rqv); 15156 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15157 bf_set(lpfc_rq_context_rqe_count_1, 15158 &rq_create->u.request.context, hrq->entry_count); 15159 if (subtype == LPFC_NVMET) 15160 rq_create->u.request.context.buffer_size = 15161 LPFC_NVMET_DATA_BUF_SIZE; 15162 else 15163 rq_create->u.request.context.buffer_size = 15164 LPFC_DATA_BUF_SIZE; 15165 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15166 LPFC_RQE_SIZE_8); 15167 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15168 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15169 } else { 15170 switch (drq->entry_count) { 15171 default: 15172 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15173 "2536 Unsupported RQ count. (%d)\n", 15174 drq->entry_count); 15175 if (drq->entry_count < 512) { 15176 status = -EINVAL; 15177 goto out; 15178 } 15179 /* otherwise default to smallest count (drop through) */ 15180 case 512: 15181 bf_set(lpfc_rq_context_rqe_count, 15182 &rq_create->u.request.context, 15183 LPFC_RQ_RING_SIZE_512); 15184 break; 15185 case 1024: 15186 bf_set(lpfc_rq_context_rqe_count, 15187 &rq_create->u.request.context, 15188 LPFC_RQ_RING_SIZE_1024); 15189 break; 15190 case 2048: 15191 bf_set(lpfc_rq_context_rqe_count, 15192 &rq_create->u.request.context, 15193 LPFC_RQ_RING_SIZE_2048); 15194 break; 15195 case 4096: 15196 bf_set(lpfc_rq_context_rqe_count, 15197 &rq_create->u.request.context, 15198 LPFC_RQ_RING_SIZE_4096); 15199 break; 15200 } 15201 if (subtype == LPFC_NVMET) 15202 bf_set(lpfc_rq_context_buf_size, 15203 &rq_create->u.request.context, 15204 LPFC_NVMET_DATA_BUF_SIZE); 15205 else 15206 bf_set(lpfc_rq_context_buf_size, 15207 &rq_create->u.request.context, 15208 LPFC_DATA_BUF_SIZE); 15209 } 15210 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15211 cq->queue_id); 15212 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15213 drq->page_count); 15214 list_for_each_entry(dmabuf, &drq->page_list, list) { 15215 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15216 putPaddrLow(dmabuf->phys); 15217 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15218 putPaddrHigh(dmabuf->phys); 15219 } 15220 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15221 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15222 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15223 /* The IOCTL status is embedded in the mailbox subheader. 
*/
15224 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15225 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15226 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15227 if (shdr_status || shdr_add_status || rc) {
15228 status = -ENXIO;
15229 goto out;
15230 }
15231 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15232 if (drq->queue_id == 0xFFFF) {
15233 status = -ENXIO;
15234 goto out;
15235 }
15236 drq->type = LPFC_DRQ;
15237 drq->assoc_qid = cq->queue_id;
15238 drq->subtype = subtype;
15239 drq->host_index = 0;
15240 drq->hba_index = 0;
15241 drq->entry_repost = LPFC_RQ_REPOST;
15242
15243 /* link the header and data RQs onto the parent cq child list */
15244 list_add_tail(&hrq->list, &cq->child_list);
15245 list_add_tail(&drq->list, &cq->child_list);
15246
15247 out:
15248 mempool_free(mbox, phba->mbox_mem_pool);
15249 return status;
15250 }
15251
15252 /**
15253 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15254 * @phba: HBA structure that indicates port to create a queue on.
15255 * @hrqp: The queue structure array to use to create the header receive queues.
15256 * @drqp: The queue structure array to use to create the data receive queues.
15257 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The subtype of the receive queues.
15258 *
15259 * This function creates a set of receive buffer queue pairs, as detailed in
15260 * @hrqp and @drqp, on a port described by @phba, by sending an RQ_CREATE mailbox
15261 * command to the HBA.
15262 *
15263 * The @phba struct is used to send the mailbox command to the HBA. The @drqp and
15264 * @hrqp arrays are used to get the entry counts that are necessary to determine
15265 * the number of pages to use for each queue. The @cqp array indicates which
15266 * completion queue the received buffers posted to each queue pair are bound to.
15267 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15268 * receive queue pairs. This function is synchronous and will wait for the
15269 * mailbox command to finish before continuing.
15270 *
15271 * On success this function will return a zero. If unable to allocate enough
15272 * memory this function will return -ENOMEM. If the queue create mailbox command
15273 * fails this function will return -ENXIO.
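 *
 * A hedged usage sketch (the array names are illustrative; the real caller
 * passes per-MRQ header and data RQ arrays with the matching CQ set, all
 * sized by phba->cfg_nvmet_mrq):
 *
 *   rc = lpfc_mrq_create(phba, nvmet_mrq_hdr, nvmet_mrq_data,
 *                        nvmet_cqset, LPFC_NVMET);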
15274 **/ 15275 int 15276 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15277 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15278 uint32_t subtype) 15279 { 15280 struct lpfc_queue *hrq, *drq, *cq; 15281 struct lpfc_mbx_rq_create_v2 *rq_create; 15282 struct lpfc_dmabuf *dmabuf; 15283 LPFC_MBOXQ_t *mbox; 15284 int rc, length, alloclen, status = 0; 15285 int cnt, idx, numrq, page_idx = 0; 15286 uint32_t shdr_status, shdr_add_status; 15287 union lpfc_sli4_cfg_shdr *shdr; 15288 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15289 15290 numrq = phba->cfg_nvmet_mrq; 15291 /* sanity check on array memory */ 15292 if (!hrqp || !drqp || !cqp || !numrq) 15293 return -ENODEV; 15294 if (!phba->sli4_hba.pc_sli4_params.supported) 15295 hw_page_size = SLI4_PAGE_SIZE; 15296 15297 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15298 if (!mbox) 15299 return -ENOMEM; 15300 15301 length = sizeof(struct lpfc_mbx_rq_create_v2); 15302 length += ((2 * numrq * hrqp[0]->page_count) * 15303 sizeof(struct dma_address)); 15304 15305 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15306 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15307 LPFC_SLI4_MBX_NEMBED); 15308 if (alloclen < length) { 15309 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15310 "3099 Allocated DMA memory size (%d) is " 15311 "less than the requested DMA memory size " 15312 "(%d)\n", alloclen, length); 15313 status = -ENOMEM; 15314 goto out; 15315 } 15316 15317 15318 15319 rq_create = mbox->sge_array->addr[0]; 15320 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15321 15322 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15323 cnt = 0; 15324 15325 for (idx = 0; idx < numrq; idx++) { 15326 hrq = hrqp[idx]; 15327 drq = drqp[idx]; 15328 cq = cqp[idx]; 15329 15330 /* sanity check on queue memory */ 15331 if (!hrq || !drq || !cq) { 15332 status = -ENODEV; 15333 goto out; 15334 } 15335 15336 if (hrq->entry_count != drq->entry_count) { 15337 status = -EINVAL; 15338 goto out; 15339 } 15340 15341 if (idx == 0) { 15342 bf_set(lpfc_mbx_rq_create_num_pages, 15343 &rq_create->u.request, 15344 hrq->page_count); 15345 bf_set(lpfc_mbx_rq_create_rq_cnt, 15346 &rq_create->u.request, (numrq * 2)); 15347 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15348 1); 15349 bf_set(lpfc_rq_context_base_cq, 15350 &rq_create->u.request.context, 15351 cq->queue_id); 15352 bf_set(lpfc_rq_context_data_size, 15353 &rq_create->u.request.context, 15354 LPFC_NVMET_DATA_BUF_SIZE); 15355 bf_set(lpfc_rq_context_hdr_size, 15356 &rq_create->u.request.context, 15357 LPFC_HDR_BUF_SIZE); 15358 bf_set(lpfc_rq_context_rqe_count_1, 15359 &rq_create->u.request.context, 15360 hrq->entry_count); 15361 bf_set(lpfc_rq_context_rqe_size, 15362 &rq_create->u.request.context, 15363 LPFC_RQE_SIZE_8); 15364 bf_set(lpfc_rq_context_page_size, 15365 &rq_create->u.request.context, 15366 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15367 } 15368 rc = 0; 15369 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15370 memset(dmabuf->virt, 0, hw_page_size); 15371 cnt = page_idx + dmabuf->buffer_tag; 15372 rq_create->u.request.page[cnt].addr_lo = 15373 putPaddrLow(dmabuf->phys); 15374 rq_create->u.request.page[cnt].addr_hi = 15375 putPaddrHigh(dmabuf->phys); 15376 rc++; 15377 } 15378 page_idx += rc; 15379 15380 rc = 0; 15381 list_for_each_entry(dmabuf, &drq->page_list, list) { 15382 memset(dmabuf->virt, 0, hw_page_size); 15383 cnt = page_idx + dmabuf->buffer_tag; 15384 rq_create->u.request.page[cnt].addr_lo = 15385 
putPaddrLow(dmabuf->phys); 15386 rq_create->u.request.page[cnt].addr_hi = 15387 putPaddrHigh(dmabuf->phys); 15388 rc++; 15389 } 15390 page_idx += rc; 15391 15392 hrq->db_format = LPFC_DB_RING_FORMAT; 15393 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15394 hrq->type = LPFC_HRQ; 15395 hrq->assoc_qid = cq->queue_id; 15396 hrq->subtype = subtype; 15397 hrq->host_index = 0; 15398 hrq->hba_index = 0; 15399 hrq->entry_repost = LPFC_RQ_REPOST; 15400 15401 drq->db_format = LPFC_DB_RING_FORMAT; 15402 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15403 drq->type = LPFC_DRQ; 15404 drq->assoc_qid = cq->queue_id; 15405 drq->subtype = subtype; 15406 drq->host_index = 0; 15407 drq->hba_index = 0; 15408 drq->entry_repost = LPFC_RQ_REPOST; 15409 15410 list_add_tail(&hrq->list, &cq->child_list); 15411 list_add_tail(&drq->list, &cq->child_list); 15412 } 15413 15414 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15415 /* The IOCTL status is embedded in the mailbox subheader. */ 15416 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15417 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15418 if (shdr_status || shdr_add_status || rc) { 15419 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15420 "3120 RQ_CREATE mailbox failed with " 15421 "status x%x add_status x%x, mbx status x%x\n", 15422 shdr_status, shdr_add_status, rc); 15423 status = -ENXIO; 15424 goto out; 15425 } 15426 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15427 if (rc == 0xFFFF) { 15428 status = -ENXIO; 15429 goto out; 15430 } 15431 15432 /* Initialize all RQs with associated queue id */ 15433 for (idx = 0; idx < numrq; idx++) { 15434 hrq = hrqp[idx]; 15435 hrq->queue_id = rc + (2 * idx); 15436 drq = drqp[idx]; 15437 drq->queue_id = rc + (2 * idx) + 1; 15438 } 15439 15440 out: 15441 lpfc_sli4_mbox_cmd_free(phba, mbox); 15442 return status; 15443 } 15444 15445 /** 15446 * lpfc_eq_destroy - Destroy an event Queue on the HBA 15447 * @eq: The queue structure associated with the queue to destroy. 15448 * 15449 * This function destroys a queue, as detailed in @eq by sending an mailbox 15450 * command, specific to the type of queue, to the HBA. 15451 * 15452 * The @eq struct is used to get the queue ID of the queue to destroy. 15453 * 15454 * On success this function will return a zero. If the queue destroy mailbox 15455 * command fails this function will return -ENXIO. 15456 **/ 15457 int 15458 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 15459 { 15460 LPFC_MBOXQ_t *mbox; 15461 int rc, length, status = 0; 15462 uint32_t shdr_status, shdr_add_status; 15463 union lpfc_sli4_cfg_shdr *shdr; 15464 15465 /* sanity check on queue memory */ 15466 if (!eq) 15467 return -ENODEV; 15468 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 15469 if (!mbox) 15470 return -ENOMEM; 15471 length = (sizeof(struct lpfc_mbx_eq_destroy) - 15472 sizeof(struct lpfc_sli4_cfg_mhdr)); 15473 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15474 LPFC_MBOX_OPCODE_EQ_DESTROY, 15475 length, LPFC_SLI4_MBX_EMBED); 15476 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 15477 eq->queue_id); 15478 mbox->vport = eq->phba->pport; 15479 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15480 15481 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 15482 /* The IOCTL status is embedded in the mailbox subheader. 
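 * Both the mailbox return code (rc) and the shdr_status/shdr_add_status
 * fields extracted below must be zero for the EQ_DESTROY to be treated as
 * successful.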
*/ 15483 shdr = (union lpfc_sli4_cfg_shdr *) 15484 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 15485 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15486 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15487 if (shdr_status || shdr_add_status || rc) { 15488 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15489 "2505 EQ_DESTROY mailbox failed with " 15490 "status x%x add_status x%x, mbx status x%x\n", 15491 shdr_status, shdr_add_status, rc); 15492 status = -ENXIO; 15493 } 15494 15495 /* Remove eq from any list */ 15496 list_del_init(&eq->list); 15497 mempool_free(mbox, eq->phba->mbox_mem_pool); 15498 return status; 15499 } 15500 15501 /** 15502 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 15503 * @cq: The queue structure associated with the queue to destroy. 15504 * 15505 * This function destroys a queue, as detailed in @cq by sending an mailbox 15506 * command, specific to the type of queue, to the HBA. 15507 * 15508 * The @cq struct is used to get the queue ID of the queue to destroy. 15509 * 15510 * On success this function will return a zero. If the queue destroy mailbox 15511 * command fails this function will return -ENXIO. 15512 **/ 15513 int 15514 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 15515 { 15516 LPFC_MBOXQ_t *mbox; 15517 int rc, length, status = 0; 15518 uint32_t shdr_status, shdr_add_status; 15519 union lpfc_sli4_cfg_shdr *shdr; 15520 15521 /* sanity check on queue memory */ 15522 if (!cq) 15523 return -ENODEV; 15524 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 15525 if (!mbox) 15526 return -ENOMEM; 15527 length = (sizeof(struct lpfc_mbx_cq_destroy) - 15528 sizeof(struct lpfc_sli4_cfg_mhdr)); 15529 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15530 LPFC_MBOX_OPCODE_CQ_DESTROY, 15531 length, LPFC_SLI4_MBX_EMBED); 15532 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 15533 cq->queue_id); 15534 mbox->vport = cq->phba->pport; 15535 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15536 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 15537 /* The IOCTL status is embedded in the mailbox subheader. */ 15538 shdr = (union lpfc_sli4_cfg_shdr *) 15539 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 15540 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15541 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15542 if (shdr_status || shdr_add_status || rc) { 15543 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15544 "2506 CQ_DESTROY mailbox failed with " 15545 "status x%x add_status x%x, mbx status x%x\n", 15546 shdr_status, shdr_add_status, rc); 15547 status = -ENXIO; 15548 } 15549 /* Remove cq from any list */ 15550 list_del_init(&cq->list); 15551 mempool_free(mbox, cq->phba->mbox_mem_pool); 15552 return status; 15553 } 15554 15555 /** 15556 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 15557 * @qm: The queue structure associated with the queue to destroy. 15558 * 15559 * This function destroys a queue, as detailed in @mq by sending an mailbox 15560 * command, specific to the type of queue, to the HBA. 15561 * 15562 * The @mq struct is used to get the queue ID of the queue to destroy. 15563 * 15564 * On success this function will return a zero. If the queue destroy mailbox 15565 * command fails this function will return -ENXIO. 
15566 **/ 15567 int 15568 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 15569 { 15570 LPFC_MBOXQ_t *mbox; 15571 int rc, length, status = 0; 15572 uint32_t shdr_status, shdr_add_status; 15573 union lpfc_sli4_cfg_shdr *shdr; 15574 15575 /* sanity check on queue memory */ 15576 if (!mq) 15577 return -ENODEV; 15578 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 15579 if (!mbox) 15580 return -ENOMEM; 15581 length = (sizeof(struct lpfc_mbx_mq_destroy) - 15582 sizeof(struct lpfc_sli4_cfg_mhdr)); 15583 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15584 LPFC_MBOX_OPCODE_MQ_DESTROY, 15585 length, LPFC_SLI4_MBX_EMBED); 15586 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 15587 mq->queue_id); 15588 mbox->vport = mq->phba->pport; 15589 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15590 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 15591 /* The IOCTL status is embedded in the mailbox subheader. */ 15592 shdr = (union lpfc_sli4_cfg_shdr *) 15593 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 15594 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15595 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15596 if (shdr_status || shdr_add_status || rc) { 15597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15598 "2507 MQ_DESTROY mailbox failed with " 15599 "status x%x add_status x%x, mbx status x%x\n", 15600 shdr_status, shdr_add_status, rc); 15601 status = -ENXIO; 15602 } 15603 /* Remove mq from any list */ 15604 list_del_init(&mq->list); 15605 mempool_free(mbox, mq->phba->mbox_mem_pool); 15606 return status; 15607 } 15608 15609 /** 15610 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 15611 * @wq: The queue structure associated with the queue to destroy. 15612 * 15613 * This function destroys a queue, as detailed in @wq by sending an mailbox 15614 * command, specific to the type of queue, to the HBA. 15615 * 15616 * The @wq struct is used to get the queue ID of the queue to destroy. 15617 * 15618 * On success this function will return a zero. If the queue destroy mailbox 15619 * command fails this function will return -ENXIO. 
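 *
 * Besides issuing the WQ_DESTROY mailbox command, this routine also frees
 * the software ring attached to the work queue (wq->pring) and removes the
 * queue from any list it is linked on.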
15620 **/ 15621 int 15622 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 15623 { 15624 LPFC_MBOXQ_t *mbox; 15625 int rc, length, status = 0; 15626 uint32_t shdr_status, shdr_add_status; 15627 union lpfc_sli4_cfg_shdr *shdr; 15628 15629 /* sanity check on queue memory */ 15630 if (!wq) 15631 return -ENODEV; 15632 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 15633 if (!mbox) 15634 return -ENOMEM; 15635 length = (sizeof(struct lpfc_mbx_wq_destroy) - 15636 sizeof(struct lpfc_sli4_cfg_mhdr)); 15637 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15638 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 15639 length, LPFC_SLI4_MBX_EMBED); 15640 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 15641 wq->queue_id); 15642 mbox->vport = wq->phba->pport; 15643 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15644 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 15645 shdr = (union lpfc_sli4_cfg_shdr *) 15646 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 15647 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15648 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15649 if (shdr_status || shdr_add_status || rc) { 15650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15651 "2508 WQ_DESTROY mailbox failed with " 15652 "status x%x add_status x%x, mbx status x%x\n", 15653 shdr_status, shdr_add_status, rc); 15654 status = -ENXIO; 15655 } 15656 /* Remove wq from any list */ 15657 list_del_init(&wq->list); 15658 kfree(wq->pring); 15659 wq->pring = NULL; 15660 mempool_free(mbox, wq->phba->mbox_mem_pool); 15661 return status; 15662 } 15663 15664 /** 15665 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 15666 * @rq: The queue structure associated with the queue to destroy. 15667 * 15668 * This function destroys a queue, as detailed in @rq by sending an mailbox 15669 * command, specific to the type of queue, to the HBA. 15670 * 15671 * The @rq struct is used to get the queue ID of the queue to destroy. 15672 * 15673 * On success this function will return a zero. If the queue destroy mailbox 15674 * command fails this function will return -ENXIO. 15675 **/ 15676 int 15677 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15678 struct lpfc_queue *drq) 15679 { 15680 LPFC_MBOXQ_t *mbox; 15681 int rc, length, status = 0; 15682 uint32_t shdr_status, shdr_add_status; 15683 union lpfc_sli4_cfg_shdr *shdr; 15684 15685 /* sanity check on queue memory */ 15686 if (!hrq || !drq) 15687 return -ENODEV; 15688 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 15689 if (!mbox) 15690 return -ENOMEM; 15691 length = (sizeof(struct lpfc_mbx_rq_destroy) - 15692 sizeof(struct lpfc_sli4_cfg_mhdr)); 15693 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15694 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 15695 length, LPFC_SLI4_MBX_EMBED); 15696 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 15697 hrq->queue_id); 15698 mbox->vport = hrq->phba->pport; 15699 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15700 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 15701 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15702 shdr = (union lpfc_sli4_cfg_shdr *) 15703 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 15704 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15705 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15706 if (shdr_status || shdr_add_status || rc) { 15707 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15708 "2509 RQ_DESTROY mailbox failed with " 15709 "status x%x add_status x%x, mbx status x%x\n", 15710 shdr_status, shdr_add_status, rc); 15711 if (rc != MBX_TIMEOUT) 15712 mempool_free(mbox, hrq->phba->mbox_mem_pool); 15713 return -ENXIO; 15714 } 15715 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 15716 drq->queue_id); 15717 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 15718 shdr = (union lpfc_sli4_cfg_shdr *) 15719 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 15720 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15721 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15722 if (shdr_status || shdr_add_status || rc) { 15723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15724 "2510 RQ_DESTROY mailbox failed with " 15725 "status x%x add_status x%x, mbx status x%x\n", 15726 shdr_status, shdr_add_status, rc); 15727 status = -ENXIO; 15728 } 15729 list_del_init(&hrq->list); 15730 list_del_init(&drq->list); 15731 mempool_free(mbox, hrq->phba->mbox_mem_pool); 15732 return status; 15733 } 15734 15735 /** 15736 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 15737 * @phba: The virtual port for which this call being executed. 15738 * @pdma_phys_addr0: Physical address of the 1st SGL page. 15739 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 15740 * @xritag: the xritag that ties this io to the SGL pages. 15741 * 15742 * This routine will post the sgl pages for the IO that has the xritag 15743 * that is in the iocbq structure. The xritag is assigned during iocbq 15744 * creation and persists for as long as the driver is loaded. 15745 * if the caller has fewer than 256 scatter gather segments to map then 15746 * pdma_phys_addr1 should be 0. 15747 * If the caller needs to map more than 256 scatter gather segment then 15748 * pdma_phys_addr1 should be a valid physical address. 15749 * physical address for SGLs must be 64 byte aligned. 15750 * If you are going to map 2 SGL's then the first one must have 256 entries 15751 * the second sgl can have between 1 and 256 entries. 
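 *
 * A minimal illustrative call for an IO whose SGL fits in a single page
 * (sglq here is a hypothetical pointer to the caller's lpfc_sglq entry):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);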
15752 * 15753 * Return codes: 15754 * 0 - Success 15755 * -ENXIO, -ENOMEM - Failure 15756 **/ 15757 int 15758 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 15759 dma_addr_t pdma_phys_addr0, 15760 dma_addr_t pdma_phys_addr1, 15761 uint16_t xritag) 15762 { 15763 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 15764 LPFC_MBOXQ_t *mbox; 15765 int rc; 15766 uint32_t shdr_status, shdr_add_status; 15767 uint32_t mbox_tmo; 15768 union lpfc_sli4_cfg_shdr *shdr; 15769 15770 if (xritag == NO_XRI) { 15771 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15772 "0364 Invalid param:\n"); 15773 return -EINVAL; 15774 } 15775 15776 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15777 if (!mbox) 15778 return -ENOMEM; 15779 15780 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15781 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 15782 sizeof(struct lpfc_mbx_post_sgl_pages) - 15783 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 15784 15785 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 15786 &mbox->u.mqe.un.post_sgl_pages; 15787 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 15788 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 15789 15790 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 15791 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 15792 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 15793 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 15794 15795 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 15796 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 15797 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 15798 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 15799 if (!phba->sli4_hba.intr_enable) 15800 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15801 else { 15802 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 15803 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 15804 } 15805 /* The IOCTL status is embedded in the mailbox subheader. */ 15806 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 15807 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15808 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15809 if (rc != MBX_TIMEOUT) 15810 mempool_free(mbox, phba->mbox_mem_pool); 15811 if (shdr_status || shdr_add_status || rc) { 15812 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15813 "2511 POST_SGL mailbox failed with " 15814 "status x%x add_status x%x, mbx status x%x\n", 15815 shdr_status, shdr_add_status, rc); 15816 } 15817 return 0; 15818 } 15819 15820 /** 15821 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 15822 * @phba: pointer to lpfc hba data structure. 15823 * 15824 * This routine is invoked to post rpi header templates to the 15825 * HBA consistent with the SLI-4 interface spec. This routine 15826 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15827 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15828 * 15829 * Returns 15830 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 15831 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 15832 **/ 15833 static uint16_t 15834 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 15835 { 15836 unsigned long xri; 15837 15838 /* 15839 * Fetch the next logical xri. Because this index is logical, 15840 * the driver starts at 0 each time. 
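 * The bitmap search and set_bit below are done under hbalock so that two
 * callers cannot be handed the same logical xri.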
15841 */ 15842 spin_lock_irq(&phba->hbalock); 15843 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 15844 phba->sli4_hba.max_cfg_param.max_xri, 0); 15845 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 15846 spin_unlock_irq(&phba->hbalock); 15847 return NO_XRI; 15848 } else { 15849 set_bit(xri, phba->sli4_hba.xri_bmask); 15850 phba->sli4_hba.max_cfg_param.xri_used++; 15851 } 15852 spin_unlock_irq(&phba->hbalock); 15853 return xri; 15854 } 15855 15856 /** 15857 * lpfc_sli4_free_xri - Release an xri for reuse. 15858 * @phba: pointer to lpfc hba data structure. 15859 * 15860 * This routine is invoked to release an xri to the pool of 15861 * available rpis maintained by the driver. 15862 **/ 15863 static void 15864 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 15865 { 15866 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 15867 phba->sli4_hba.max_cfg_param.xri_used--; 15868 } 15869 } 15870 15871 /** 15872 * lpfc_sli4_free_xri - Release an xri for reuse. 15873 * @phba: pointer to lpfc hba data structure. 15874 * 15875 * This routine is invoked to release an xri to the pool of 15876 * available rpis maintained by the driver. 15877 **/ 15878 void 15879 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 15880 { 15881 spin_lock_irq(&phba->hbalock); 15882 __lpfc_sli4_free_xri(phba, xri); 15883 spin_unlock_irq(&phba->hbalock); 15884 } 15885 15886 /** 15887 * lpfc_sli4_next_xritag - Get an xritag for the io 15888 * @phba: Pointer to HBA context object. 15889 * 15890 * This function gets an xritag for the iocb. If there is no unused xritag 15891 * it will return 0xffff. 15892 * The function returns the allocated xritag if successful, else returns zero. 15893 * Zero is not a valid xritag. 15894 * The caller is not required to hold any lock. 15895 **/ 15896 uint16_t 15897 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 15898 { 15899 uint16_t xri_index; 15900 15901 xri_index = lpfc_sli4_alloc_xri(phba); 15902 if (xri_index == NO_XRI) 15903 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 15904 "2004 Failed to allocate XRI.last XRITAG is %d" 15905 " Max XRI is %d, Used XRI is %d\n", 15906 xri_index, 15907 phba->sli4_hba.max_cfg_param.max_xri, 15908 phba->sli4_hba.max_cfg_param.xri_used); 15909 return xri_index; 15910 } 15911 15912 /** 15913 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 15914 * @phba: pointer to lpfc hba data structure. 15915 * @post_sgl_list: pointer to els sgl entry list. 15916 * @count: number of els sgl entries on the list. 15917 * 15918 * This routine is invoked to post a block of driver's sgl pages to the 15919 * HBA using non-embedded mailbox command. No Lock is held. This routine 15920 * is only called when the driver is loading and after all IO has been 15921 * stopped. 
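 *
 * The whole request, including one sgl_page_pairs entry per posted sgl, must
 * fit in a single SLI4_PAGE_SIZE non-embedded mailbox page, which bounds the
 * @count that can be posted in one call.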
15922 **/ 15923 static int 15924 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 15925 struct list_head *post_sgl_list, 15926 int post_cnt) 15927 { 15928 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 15929 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 15930 struct sgl_page_pairs *sgl_pg_pairs; 15931 void *viraddr; 15932 LPFC_MBOXQ_t *mbox; 15933 uint32_t reqlen, alloclen, pg_pairs; 15934 uint32_t mbox_tmo; 15935 uint16_t xritag_start = 0; 15936 int rc = 0; 15937 uint32_t shdr_status, shdr_add_status; 15938 union lpfc_sli4_cfg_shdr *shdr; 15939 15940 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 15941 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 15942 if (reqlen > SLI4_PAGE_SIZE) { 15943 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15944 "2559 Block sgl registration required DMA " 15945 "size (%d) great than a page\n", reqlen); 15946 return -ENOMEM; 15947 } 15948 15949 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15950 if (!mbox) 15951 return -ENOMEM; 15952 15953 /* Allocate DMA memory and set up the non-embedded mailbox command */ 15954 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15955 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 15956 LPFC_SLI4_MBX_NEMBED); 15957 15958 if (alloclen < reqlen) { 15959 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15960 "0285 Allocated DMA memory size (%d) is " 15961 "less than the requested DMA memory " 15962 "size (%d)\n", alloclen, reqlen); 15963 lpfc_sli4_mbox_cmd_free(phba, mbox); 15964 return -ENOMEM; 15965 } 15966 /* Set up the SGL pages in the non-embedded DMA pages */ 15967 viraddr = mbox->sge_array->addr[0]; 15968 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 15969 sgl_pg_pairs = &sgl->sgl_pg_pairs; 15970 15971 pg_pairs = 0; 15972 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 15973 /* Set up the sge entry */ 15974 sgl_pg_pairs->sgl_pg0_addr_lo = 15975 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 15976 sgl_pg_pairs->sgl_pg0_addr_hi = 15977 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 15978 sgl_pg_pairs->sgl_pg1_addr_lo = 15979 cpu_to_le32(putPaddrLow(0)); 15980 sgl_pg_pairs->sgl_pg1_addr_hi = 15981 cpu_to_le32(putPaddrHigh(0)); 15982 15983 /* Keep the first xritag on the list */ 15984 if (pg_pairs == 0) 15985 xritag_start = sglq_entry->sli4_xritag; 15986 sgl_pg_pairs++; 15987 pg_pairs++; 15988 } 15989 15990 /* Complete initialization and perform endian conversion. */ 15991 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 15992 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 15993 sgl->word0 = cpu_to_le32(sgl->word0); 15994 15995 if (!phba->sli4_hba.intr_enable) 15996 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15997 else { 15998 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 15999 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16000 } 16001 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16002 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16003 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16004 if (rc != MBX_TIMEOUT) 16005 lpfc_sli4_mbox_cmd_free(phba, mbox); 16006 if (shdr_status || shdr_add_status || rc) { 16007 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16008 "2513 POST_SGL_BLOCK mailbox command failed " 16009 "status x%x add_status x%x mbx status x%x\n", 16010 shdr_status, shdr_add_status, rc); 16011 rc = -ENXIO; 16012 } 16013 return rc; 16014 } 16015 16016 /** 16017 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 16018 * @phba: pointer to lpfc hba data structure. 
16019 * @sblist: pointer to scsi buffer list. 16020 * @count: number of scsi buffers on the list. 16021 * 16022 * This routine is invoked to post a block of @count scsi sgl pages from a 16023 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 16024 * No Lock is held. 16025 * 16026 **/ 16027 int 16028 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 16029 struct list_head *sblist, 16030 int count) 16031 { 16032 struct lpfc_scsi_buf *psb; 16033 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16034 struct sgl_page_pairs *sgl_pg_pairs; 16035 void *viraddr; 16036 LPFC_MBOXQ_t *mbox; 16037 uint32_t reqlen, alloclen, pg_pairs; 16038 uint32_t mbox_tmo; 16039 uint16_t xritag_start = 0; 16040 int rc = 0; 16041 uint32_t shdr_status, shdr_add_status; 16042 dma_addr_t pdma_phys_bpl1; 16043 union lpfc_sli4_cfg_shdr *shdr; 16044 16045 /* Calculate the requested length of the dma memory */ 16046 reqlen = count * sizeof(struct sgl_page_pairs) + 16047 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16048 if (reqlen > SLI4_PAGE_SIZE) { 16049 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16050 "0217 Block sgl registration required DMA " 16051 "size (%d) great than a page\n", reqlen); 16052 return -ENOMEM; 16053 } 16054 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16055 if (!mbox) { 16056 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16057 "0283 Failed to allocate mbox cmd memory\n"); 16058 return -ENOMEM; 16059 } 16060 16061 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16062 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16063 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16064 LPFC_SLI4_MBX_NEMBED); 16065 16066 if (alloclen < reqlen) { 16067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16068 "2561 Allocated DMA memory size (%d) is " 16069 "less than the requested DMA memory " 16070 "size (%d)\n", alloclen, reqlen); 16071 lpfc_sli4_mbox_cmd_free(phba, mbox); 16072 return -ENOMEM; 16073 } 16074 16075 /* Get the first SGE entry from the non-embedded DMA memory */ 16076 viraddr = mbox->sge_array->addr[0]; 16077 16078 /* Set up the SGL pages in the non-embedded DMA pages */ 16079 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16080 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16081 16082 pg_pairs = 0; 16083 list_for_each_entry(psb, sblist, list) { 16084 /* Set up the sge entry */ 16085 sgl_pg_pairs->sgl_pg0_addr_lo = 16086 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 16087 sgl_pg_pairs->sgl_pg0_addr_hi = 16088 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 16089 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16090 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 16091 else 16092 pdma_phys_bpl1 = 0; 16093 sgl_pg_pairs->sgl_pg1_addr_lo = 16094 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16095 sgl_pg_pairs->sgl_pg1_addr_hi = 16096 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16097 /* Keep the first xritag on the list */ 16098 if (pg_pairs == 0) 16099 xritag_start = psb->cur_iocbq.sli4_xritag; 16100 sgl_pg_pairs++; 16101 pg_pairs++; 16102 } 16103 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16104 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16105 /* Perform endian conversion if necessary */ 16106 sgl->word0 = cpu_to_le32(sgl->word0); 16107 16108 if (!phba->sli4_hba.intr_enable) 16109 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16110 else { 16111 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16112 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16113 } 16114 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16115 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 16116 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16117 if (rc != MBX_TIMEOUT) 16118 lpfc_sli4_mbox_cmd_free(phba, mbox); 16119 if (shdr_status || shdr_add_status || rc) { 16120 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16121 "2564 POST_SGL_BLOCK mailbox command failed " 16122 "status x%x add_status x%x mbx status x%x\n", 16123 shdr_status, shdr_add_status, rc); 16124 rc = -ENXIO; 16125 } 16126 return rc; 16127 } 16128 16129 static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT; 16130 static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT; 16131 16132 /** 16133 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16134 * @phba: pointer to lpfc_hba struct that the frame was received on 16135 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16136 * 16137 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16138 * valid type of frame that the LPFC driver will handle. This function will 16139 * return a zero if the frame is a valid frame or a non zero value when the 16140 * frame does not pass the check. 16141 **/ 16142 static int 16143 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16144 { 16145 /* make rctl_names static to save stack space */ 16146 struct fc_vft_header *fc_vft_hdr; 16147 uint32_t *header = (uint32_t *) fc_hdr; 16148 16149 #define FC_RCTL_MDS_DIAGS 0xF4 16150 16151 switch (fc_hdr->fh_r_ctl) { 16152 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16153 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16154 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16155 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16156 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16157 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16158 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16159 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16160 case FC_RCTL_ELS_REQ: /* extended link services request */ 16161 case FC_RCTL_ELS_REP: /* extended link services reply */ 16162 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16163 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16164 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16165 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16166 case FC_RCTL_BA_RMC: /* remove connection */ 16167 case FC_RCTL_BA_ACC: /* basic accept */ 16168 case FC_RCTL_BA_RJT: /* basic reject */ 16169 case FC_RCTL_BA_PRMT: 16170 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16171 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16172 case FC_RCTL_P_RJT: /* port reject */ 16173 case FC_RCTL_F_RJT: /* fabric reject */ 16174 case FC_RCTL_P_BSY: /* port busy */ 16175 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16176 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16177 case FC_RCTL_LCR: /* link credit reset */ 16178 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16179 case FC_RCTL_END: /* end */ 16180 break; 16181 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16182 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16183 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16184 return lpfc_fc_frame_check(phba, fc_hdr); 16185 default: 16186 goto drop; 16187 } 16188 16189 #define FC_TYPE_VENDOR_UNIQUE 0xFF 16190 16191 switch (fc_hdr->fh_type) { 16192 case FC_TYPE_BLS: 16193 case FC_TYPE_ELS: 16194 case FC_TYPE_FCP: 16195 case FC_TYPE_CT: 16196 case FC_TYPE_NVME: 16197 case FC_TYPE_VENDOR_UNIQUE: 16198 break; 16199 case FC_TYPE_IP: 16200 case FC_TYPE_ILS: 16201 default: 16202 goto drop; 
16203 } 16204 16205 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16206 "2538 Received frame rctl:%s (x%x), type:%s (x%x), " 16207 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16208 (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" : 16209 lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, 16210 (fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ? 16211 "Vendor Unique" : lpfc_type_names[fc_hdr->fh_type], 16212 fc_hdr->fh_type, be32_to_cpu(header[0]), 16213 be32_to_cpu(header[1]), be32_to_cpu(header[2]), 16214 be32_to_cpu(header[3]), be32_to_cpu(header[4]), 16215 be32_to_cpu(header[5]), be32_to_cpu(header[6])); 16216 return 0; 16217 drop: 16218 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16219 "2539 Dropped frame rctl:%s type:%s\n", 16220 lpfc_rctl_names[fc_hdr->fh_r_ctl], 16221 lpfc_type_names[fc_hdr->fh_type]); 16222 return 1; 16223 } 16224 16225 /** 16226 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16227 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16228 * 16229 * This function processes the FC header to retrieve the VFI from the VF 16230 * header, if one exists. This function will return the VFI if one exists 16231 * or 0 if no VSAN Header exists. 16232 **/ 16233 static uint32_t 16234 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16235 { 16236 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16237 16238 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16239 return 0; 16240 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16241 } 16242 16243 /** 16244 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16245 * @phba: Pointer to the HBA structure to search for the vport on 16246 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16247 * @fcfi: The FC Fabric ID that the frame came from 16248 * 16249 * This function searches the @phba for a vport that matches the content of the 16250 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16251 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 16252 * returns the matching vport pointer or NULL if unable to match frame to a 16253 * vport. 16254 **/ 16255 static struct lpfc_vport * 16256 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 16257 uint16_t fcfi, uint32_t did) 16258 { 16259 struct lpfc_vport **vports; 16260 struct lpfc_vport *vport = NULL; 16261 int i; 16262 16263 if (did == Fabric_DID) 16264 return phba->pport; 16265 if ((phba->pport->fc_flag & FC_PT2PT) && 16266 !(phba->link_state == LPFC_HBA_READY)) 16267 return phba->pport; 16268 16269 vports = lpfc_create_vport_work_array(phba); 16270 if (vports != NULL) { 16271 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 16272 if (phba->fcf.fcfi == fcfi && 16273 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 16274 vports[i]->fc_myDID == did) { 16275 vport = vports[i]; 16276 break; 16277 } 16278 } 16279 } 16280 lpfc_destroy_vport_work_array(phba, vports); 16281 return vport; 16282 } 16283 16284 /** 16285 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 16286 * @vport: The vport to work on. 16287 * 16288 * This function updates the receive sequence time stamp for this vport. The 16289 * receive sequence time stamp indicates the time that the last frame of the 16290 * the sequence that has been idle for the longest amount of time was received. 16291 * the driver uses this time stamp to indicate if any received sequences have 16292 * timed out. 
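 *
 * Only the header buffer at the head of vport->rcv_buffer_list (the sequence
 * that has been idle the longest) is sampled; its time_stamp becomes the
 * vport's rcv_buffer_time_stamp.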
16293 **/ 16294 static void 16295 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 16296 { 16297 struct lpfc_dmabuf *h_buf; 16298 struct hbq_dmabuf *dmabuf = NULL; 16299 16300 /* get the oldest sequence on the rcv list */ 16301 h_buf = list_get_first(&vport->rcv_buffer_list, 16302 struct lpfc_dmabuf, list); 16303 if (!h_buf) 16304 return; 16305 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16306 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 16307 } 16308 16309 /** 16310 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 16311 * @vport: The vport that the received sequences were sent to. 16312 * 16313 * This function cleans up all outstanding received sequences. This is called 16314 * by the driver when a link event or user action invalidates all the received 16315 * sequences. 16316 **/ 16317 void 16318 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 16319 { 16320 struct lpfc_dmabuf *h_buf, *hnext; 16321 struct lpfc_dmabuf *d_buf, *dnext; 16322 struct hbq_dmabuf *dmabuf = NULL; 16323 16324 /* start with the oldest sequence on the rcv list */ 16325 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 16326 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16327 list_del_init(&dmabuf->hbuf.list); 16328 list_for_each_entry_safe(d_buf, dnext, 16329 &dmabuf->dbuf.list, list) { 16330 list_del_init(&d_buf->list); 16331 lpfc_in_buf_free(vport->phba, d_buf); 16332 } 16333 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 16334 } 16335 } 16336 16337 /** 16338 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 16339 * @vport: The vport that the received sequences were sent to. 16340 * 16341 * This function determines whether any received sequences have timed out by 16342 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 16343 * indicates that there is at least one timed out sequence this routine will 16344 * go through the received sequences one at a time from most inactive to most 16345 * active to determine which ones need to be cleaned up. Once it has determined 16346 * that a sequence needs to be cleaned up it will simply free up the resources 16347 * without sending an abort. 
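 *
 * A sequence is treated as timed out once jiffies has moved past
 * dmabuf->time_stamp + msecs_to_jiffies(vport->phba->fc_edtov) for the
 * header buffer that anchors it.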
16348 **/ 16349 void 16350 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 16351 { 16352 struct lpfc_dmabuf *h_buf, *hnext; 16353 struct lpfc_dmabuf *d_buf, *dnext; 16354 struct hbq_dmabuf *dmabuf = NULL; 16355 unsigned long timeout; 16356 int abort_count = 0; 16357 16358 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 16359 vport->rcv_buffer_time_stamp); 16360 if (list_empty(&vport->rcv_buffer_list) || 16361 time_before(jiffies, timeout)) 16362 return; 16363 /* start with the oldest sequence on the rcv list */ 16364 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 16365 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16366 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 16367 dmabuf->time_stamp); 16368 if (time_before(jiffies, timeout)) 16369 break; 16370 abort_count++; 16371 list_del_init(&dmabuf->hbuf.list); 16372 list_for_each_entry_safe(d_buf, dnext, 16373 &dmabuf->dbuf.list, list) { 16374 list_del_init(&d_buf->list); 16375 lpfc_in_buf_free(vport->phba, d_buf); 16376 } 16377 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 16378 } 16379 if (abort_count) 16380 lpfc_update_rcv_time_stamp(vport); 16381 } 16382 16383 /** 16384 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 16385 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 16386 * 16387 * This function searches through the existing incomplete sequences that have 16388 * been sent to this @vport. If the frame matches one of the incomplete 16389 * sequences then the dbuf in the @dmabuf is added to the list of frames that 16390 * make up that sequence. If no sequence is found that matches this frame then 16391 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 16392 * This function returns a pointer to the first dmabuf in the sequence list that 16393 * the frame was linked to. 16394 **/ 16395 static struct hbq_dmabuf * 16396 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 16397 { 16398 struct fc_frame_header *new_hdr; 16399 struct fc_frame_header *temp_hdr; 16400 struct lpfc_dmabuf *d_buf; 16401 struct lpfc_dmabuf *h_buf; 16402 struct hbq_dmabuf *seq_dmabuf = NULL; 16403 struct hbq_dmabuf *temp_dmabuf = NULL; 16404 uint8_t found = 0; 16405 16406 INIT_LIST_HEAD(&dmabuf->dbuf.list); 16407 dmabuf->time_stamp = jiffies; 16408 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16409 16410 /* Use the hdr_buf to find the sequence that this frame belongs to */ 16411 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 16412 temp_hdr = (struct fc_frame_header *)h_buf->virt; 16413 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 16414 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 16415 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 16416 continue; 16417 /* found a pending sequence that matches this frame */ 16418 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16419 break; 16420 } 16421 if (!seq_dmabuf) { 16422 /* 16423 * This indicates first frame received for this sequence. 16424 * Queue the buffer on the vport's rcv_buffer_list. 
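 * The header buffer then anchors the sequence; later frames are matched
 * to it by SEQ_ID, OX_ID and S_ID.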
16425 */ 16426 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 16427 lpfc_update_rcv_time_stamp(vport); 16428 return dmabuf; 16429 } 16430 temp_hdr = seq_dmabuf->hbuf.virt; 16431 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 16432 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 16433 list_del_init(&seq_dmabuf->hbuf.list); 16434 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 16435 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 16436 lpfc_update_rcv_time_stamp(vport); 16437 return dmabuf; 16438 } 16439 /* move this sequence to the tail to indicate a young sequence */ 16440 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 16441 seq_dmabuf->time_stamp = jiffies; 16442 lpfc_update_rcv_time_stamp(vport); 16443 if (list_empty(&seq_dmabuf->dbuf.list)) { 16444 temp_hdr = dmabuf->hbuf.virt; 16445 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 16446 return seq_dmabuf; 16447 } 16448 /* find the correct place in the sequence to insert this frame */ 16449 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 16450 while (!found) { 16451 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16452 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 16453 /* 16454 * If the frame's sequence count is greater than the frame on 16455 * the list then insert the frame right after this frame 16456 */ 16457 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 16458 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 16459 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 16460 found = 1; 16461 break; 16462 } 16463 16464 if (&d_buf->list == &seq_dmabuf->dbuf.list) 16465 break; 16466 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 16467 } 16468 16469 if (found) 16470 return seq_dmabuf; 16471 return NULL; 16472 } 16473 16474 /** 16475 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 16476 * @vport: pointer to a vitural port 16477 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16478 * 16479 * This function tries to abort from the partially assembed sequence, described 16480 * by the information from basic abbort @dmabuf. It checks to see whether such 16481 * partially assembled sequence held by the driver. If so, it shall free up all 16482 * the frames from the partially assembled sequence. 
16483 * 16484 * Return 16485 * true -- if there is matching partially assembled sequence present and all 16486 * the frames freed with the sequence; 16487 * false -- if there is no matching partially assembled sequence present so 16488 * nothing got aborted in the lower layer driver 16489 **/ 16490 static bool 16491 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 16492 struct hbq_dmabuf *dmabuf) 16493 { 16494 struct fc_frame_header *new_hdr; 16495 struct fc_frame_header *temp_hdr; 16496 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 16497 struct hbq_dmabuf *seq_dmabuf = NULL; 16498 16499 /* Use the hdr_buf to find the sequence that matches this frame */ 16500 INIT_LIST_HEAD(&dmabuf->dbuf.list); 16501 INIT_LIST_HEAD(&dmabuf->hbuf.list); 16502 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16503 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 16504 temp_hdr = (struct fc_frame_header *)h_buf->virt; 16505 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 16506 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 16507 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 16508 continue; 16509 /* found a pending sequence that matches this frame */ 16510 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16511 break; 16512 } 16513 16514 /* Free up all the frames from the partially assembled sequence */ 16515 if (seq_dmabuf) { 16516 list_for_each_entry_safe(d_buf, n_buf, 16517 &seq_dmabuf->dbuf.list, list) { 16518 list_del_init(&d_buf->list); 16519 lpfc_in_buf_free(vport->phba, d_buf); 16520 } 16521 return true; 16522 } 16523 return false; 16524 } 16525 16526 /** 16527 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 16528 * @vport: pointer to a vitural port 16529 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16530 * 16531 * This function tries to abort from the assembed sequence from upper level 16532 * protocol, described by the information from basic abbort @dmabuf. It 16533 * checks to see whether such pending context exists at upper level protocol. 16534 * If so, it shall clean up the pending context. 16535 * 16536 * Return 16537 * true -- if there is matching pending context of the sequence cleaned 16538 * at ulp; 16539 * false -- if there is no matching pending context of the sequence present 16540 * at ulp. 16541 **/ 16542 static bool 16543 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 16544 { 16545 struct lpfc_hba *phba = vport->phba; 16546 int handled; 16547 16548 /* Accepting abort at ulp with SLI4 only */ 16549 if (phba->sli_rev < LPFC_SLI_REV4) 16550 return false; 16551 16552 /* Register all caring upper level protocols to attend abort */ 16553 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 16554 if (handled) 16555 return true; 16556 16557 return false; 16558 } 16559 16560 /** 16561 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 16562 * @phba: Pointer to HBA context object. 16563 * @cmd_iocbq: pointer to the command iocbq structure. 16564 * @rsp_iocbq: pointer to the response iocbq structure. 16565 * 16566 * This function handles the sequence abort response iocb command complete 16567 * event. It properly releases the memory allocated to the sequence abort 16568 * accept iocb. 
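 * It also drops the reference taken on the remote node (ndlp) when the
 * response was issued and returns the command iocbq to the pool.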
16569 **/ 16570 static void 16571 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 16572 struct lpfc_iocbq *cmd_iocbq, 16573 struct lpfc_iocbq *rsp_iocbq) 16574 { 16575 struct lpfc_nodelist *ndlp; 16576 16577 if (cmd_iocbq) { 16578 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 16579 lpfc_nlp_put(ndlp); 16580 lpfc_nlp_not_used(ndlp); 16581 lpfc_sli_release_iocbq(phba, cmd_iocbq); 16582 } 16583 16584 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 16585 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 16586 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16587 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 16588 rsp_iocbq->iocb.ulpStatus, 16589 rsp_iocbq->iocb.un.ulpWord[4]); 16590 } 16591 16592 /** 16593 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 16594 * @phba: Pointer to HBA context object. 16595 * @xri: xri id in transaction. 16596 * 16597 * This function validates the xri maps to the known range of XRIs allocated an 16598 * used by the driver. 16599 **/ 16600 uint16_t 16601 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 16602 uint16_t xri) 16603 { 16604 uint16_t i; 16605 16606 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 16607 if (xri == phba->sli4_hba.xri_ids[i]) 16608 return i; 16609 } 16610 return NO_XRI; 16611 } 16612 16613 /** 16614 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 16615 * @phba: Pointer to HBA context object. 16616 * @fc_hdr: pointer to a FC frame header. 16617 * 16618 * This function sends a basic response to a previous unsol sequence abort 16619 * event after aborting the sequence handling. 16620 **/ 16621 void 16622 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 16623 struct fc_frame_header *fc_hdr, bool aborted) 16624 { 16625 struct lpfc_hba *phba = vport->phba; 16626 struct lpfc_iocbq *ctiocb = NULL; 16627 struct lpfc_nodelist *ndlp; 16628 uint16_t oxid, rxid, xri, lxri; 16629 uint32_t sid, fctl; 16630 IOCB_t *icmd; 16631 int rc; 16632 16633 if (!lpfc_is_link_up(phba)) 16634 return; 16635 16636 sid = sli4_sid_from_fc_hdr(fc_hdr); 16637 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 16638 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 16639 16640 ndlp = lpfc_findnode_did(vport, sid); 16641 if (!ndlp) { 16642 ndlp = lpfc_nlp_init(vport, sid); 16643 if (!ndlp) { 16644 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 16645 "1268 Failed to allocate ndlp for " 16646 "oxid:x%x SID:x%x\n", oxid, sid); 16647 return; 16648 } 16649 /* Put ndlp onto pport node list */ 16650 lpfc_enqueue_node(vport, ndlp); 16651 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 16652 /* re-setup ndlp without removing from node list */ 16653 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 16654 if (!ndlp) { 16655 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 16656 "3275 Failed to active ndlp found " 16657 "for oxid:x%x SID:x%x\n", oxid, sid); 16658 return; 16659 } 16660 } 16661 16662 /* Allocate buffer for rsp iocb */ 16663 ctiocb = lpfc_sli_get_iocbq(phba); 16664 if (!ctiocb) 16665 return; 16666 16667 /* Extract the F_CTL field from FC_HDR */ 16668 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 16669 16670 icmd = &ctiocb->iocb; 16671 icmd->un.xseq64.bdl.bdeSize = 0; 16672 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 16673 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 16674 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 16675 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 16676 16677 /* Fill in the rest of iocb fields */ 16678 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 16679 icmd->ulpBdeCount = 0; 16680 icmd->ulpLe = 1; 16681 icmd->ulpClass = CLASS3; 16682 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 16683 ctiocb->context1 = lpfc_nlp_get(ndlp); 16684 16685 ctiocb->iocb_cmpl = NULL; 16686 ctiocb->vport = phba->pport; 16687 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 16688 ctiocb->sli4_lxritag = NO_XRI; 16689 ctiocb->sli4_xritag = NO_XRI; 16690 16691 if (fctl & FC_FC_EX_CTX) 16692 /* Exchange responder sent the abort so we 16693 * own the oxid. 16694 */ 16695 xri = oxid; 16696 else 16697 xri = rxid; 16698 lxri = lpfc_sli4_xri_inrange(phba, xri); 16699 if (lxri != NO_XRI) 16700 lpfc_set_rrq_active(phba, ndlp, lxri, 16701 (xri == oxid) ? rxid : oxid, 0); 16702 /* For BA_ABTS from exchange responder, if the logical xri with 16703 * the oxid maps to the FCP XRI range, the port no longer has 16704 * that exchange context, send a BLS_RJT. Override the IOCB for 16705 * a BA_RJT. 16706 */ 16707 if ((fctl & FC_FC_EX_CTX) && 16708 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 16709 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 16710 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 16711 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 16712 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 16713 } 16714 16715 /* If BA_ABTS failed to abort a partially assembled receive sequence, 16716 * the driver no longer has that exchange, send a BLS_RJT. Override 16717 * the IOCB for a BA_RJT. 16718 */ 16719 if (aborted == false) { 16720 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 16721 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 16722 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 16723 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 16724 } 16725 16726 if (fctl & FC_FC_EX_CTX) { 16727 /* ABTS sent by responder to CT exchange, construction 16728 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 16729 * field and RX_ID from ABTS for RX_ID field. 16730 */ 16731 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 16732 } else { 16733 /* ABTS sent by initiator to CT exchange, construction 16734 * of BA_ACC will need to allocate a new XRI as for the 16735 * XRI_TAG field. 16736 */ 16737 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 16738 } 16739 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 16740 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 16741 16742 /* Xmit CT abts response on exchange <xid> */ 16743 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 16744 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 16745 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 16746 16747 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 16748 if (rc == IOCB_ERROR) { 16749 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 16750 "2925 Failed to issue CT ABTS RSP x%x on " 16751 "xri x%x, Data x%x\n", 16752 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 16753 phba->link_state); 16754 lpfc_nlp_put(ndlp); 16755 ctiocb->context1 = NULL; 16756 lpfc_sli_release_iocbq(phba, ctiocb); 16757 } 16758 } 16759 16760 /** 16761 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 16762 * @vport: Pointer to the vport on which this sequence was received 16763 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16764 * 16765 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 16766 * receive sequence is only partially assembed by the driver, it shall abort 16767 * the partially assembled frames for the sequence. 
Otherwise, if the 16768 * unsolicited receive sequence has been completely assembled and passed to 16769 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 16770 * unsolicited sequence has been aborted. After that, it will issue a basic 16771 * accept to accept the abort. 16772 **/ 16773 static void 16774 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 16775 struct hbq_dmabuf *dmabuf) 16776 { 16777 struct lpfc_hba *phba = vport->phba; 16778 struct fc_frame_header fc_hdr; 16779 uint32_t fctl; 16780 bool aborted; 16781 16782 /* Make a copy of fc_hdr before the dmabuf being released */ 16783 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 16784 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 16785 16786 if (fctl & FC_FC_EX_CTX) { 16787 /* ABTS by responder to exchange, no cleanup needed */ 16788 aborted = true; 16789 } else { 16790 /* ABTS by initiator to exchange, need to do cleanup */ 16791 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 16792 if (aborted == false) 16793 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 16794 } 16795 lpfc_in_buf_free(phba, &dmabuf->dbuf); 16796 16797 if (phba->nvmet_support) { 16798 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); 16799 return; 16800 } 16801 16802 /* Respond with BA_ACC or BA_RJT accordingly */ 16803 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 16804 } 16805 16806 /** 16807 * lpfc_seq_complete - Indicates if a sequence is complete 16808 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16809 * 16810 * This function checks the sequence, starting with the frame described by 16811 * @dmabuf, to see if all the frames associated with this sequence are present. 16812 * the frames associated with this sequence are linked to the @dmabuf using the 16813 * dbuf list. This function looks for two major things. 1) That the first frame 16814 * has a sequence count of zero. 2) There is a frame with last frame of sequence 16815 * set. 3) That there are no holes in the sequence count. The function will 16816 * return 1 when the sequence is complete, otherwise it will return 0. 16817 **/ 16818 static int 16819 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 16820 { 16821 struct fc_frame_header *hdr; 16822 struct lpfc_dmabuf *d_buf; 16823 struct hbq_dmabuf *seq_dmabuf; 16824 uint32_t fctl; 16825 int seq_count = 0; 16826 16827 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16828 /* make sure first fame of sequence has a sequence count of zero */ 16829 if (hdr->fh_seq_cnt != seq_count) 16830 return 0; 16831 fctl = (hdr->fh_f_ctl[0] << 16 | 16832 hdr->fh_f_ctl[1] << 8 | 16833 hdr->fh_f_ctl[2]); 16834 /* If last frame of sequence we can return success. */ 16835 if (fctl & FC_FC_END_SEQ) 16836 return 1; 16837 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 16838 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16839 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 16840 /* If there is a hole in the sequence count then fail. */ 16841 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 16842 return 0; 16843 fctl = (hdr->fh_f_ctl[0] << 16 | 16844 hdr->fh_f_ctl[1] << 8 | 16845 hdr->fh_f_ctl[2]); 16846 /* If last frame of sequence we can return success. 
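 * (the FC_FC_END_SEQ bit in the frame's F_CTL field marks the final frame
 * of the sequence).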
*/ 16847 if (fctl & FC_FC_END_SEQ) 16848 return 1; 16849 } 16850 return 0; 16851 } 16852 16853 /** 16854 * lpfc_prep_seq - Prep sequence for ULP processing 16855 * @vport: Pointer to the vport on which this sequence was received 16856 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16857 * 16858 * This function takes a sequence, described by a list of frames, and creates 16859 * a list of iocbq structures to describe the sequence. This iocbq list will be 16860 * used to issue to the generic unsolicited sequence handler. This routine 16861 * returns a pointer to the first iocbq in the list. If the function is unable 16862 * to allocate an iocbq then it throw out the received frames that were not 16863 * able to be described and return a pointer to the first iocbq. If unable to 16864 * allocate any iocbqs (including the first) this function will return NULL. 16865 **/ 16866 static struct lpfc_iocbq * 16867 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 16868 { 16869 struct hbq_dmabuf *hbq_buf; 16870 struct lpfc_dmabuf *d_buf, *n_buf; 16871 struct lpfc_iocbq *first_iocbq, *iocbq; 16872 struct fc_frame_header *fc_hdr; 16873 uint32_t sid; 16874 uint32_t len, tot_len; 16875 struct ulp_bde64 *pbde; 16876 16877 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 16878 /* remove from receive buffer list */ 16879 list_del_init(&seq_dmabuf->hbuf.list); 16880 lpfc_update_rcv_time_stamp(vport); 16881 /* get the Remote Port's SID */ 16882 sid = sli4_sid_from_fc_hdr(fc_hdr); 16883 tot_len = 0; 16884 /* Get an iocbq struct to fill in. */ 16885 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 16886 if (first_iocbq) { 16887 /* Initialize the first IOCB. */ 16888 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 16889 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 16890 first_iocbq->vport = vport; 16891 16892 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 16893 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 16894 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 16895 first_iocbq->iocb.un.rcvels.parmRo = 16896 sli4_did_from_fc_hdr(fc_hdr); 16897 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 16898 } else 16899 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 16900 first_iocbq->iocb.ulpContext = NO_XRI; 16901 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 16902 be16_to_cpu(fc_hdr->fh_ox_id); 16903 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 16904 first_iocbq->iocb.unsli3.rcvsli3.vpi = 16905 vport->phba->vpi_ids[vport->vpi]; 16906 /* put the first buffer into the first IOCBq */ 16907 tot_len = bf_get(lpfc_rcqe_length, 16908 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 16909 16910 first_iocbq->context2 = &seq_dmabuf->dbuf; 16911 first_iocbq->context3 = NULL; 16912 first_iocbq->iocb.ulpBdeCount = 1; 16913 if (tot_len > LPFC_DATA_BUF_SIZE) 16914 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 16915 LPFC_DATA_BUF_SIZE; 16916 else 16917 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 16918 16919 first_iocbq->iocb.un.rcvels.remoteID = sid; 16920 16921 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 16922 } 16923 iocbq = first_iocbq; 16924 /* 16925 * Each IOCBq can have two Buffers assigned, so go through the list 16926 * of buffers for this sequence and save two buffers in each IOCBq 16927 */ 16928 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 16929 if (!iocbq) { 16930 lpfc_in_buf_free(vport->phba, d_buf); 16931 continue; 16932 } 16933 if (!iocbq->context3) { 16934 iocbq->context3 = d_buf; 16935 iocbq->iocb.ulpBdeCount++; 16936 /* We need to get the size out of the right CQE */ 16937 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16938 len = bf_get(lpfc_rcqe_length, 16939 &hbq_buf->cq_event.cqe.rcqe_cmpl); 16940 pbde = (struct ulp_bde64 *) 16941 &iocbq->iocb.unsli3.sli3Words[4]; 16942 if (len > LPFC_DATA_BUF_SIZE) 16943 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 16944 else 16945 pbde->tus.f.bdeSize = len; 16946 16947 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 16948 tot_len += len; 16949 } else { 16950 iocbq = lpfc_sli_get_iocbq(vport->phba); 16951 if (!iocbq) { 16952 if (first_iocbq) { 16953 first_iocbq->iocb.ulpStatus = 16954 IOSTAT_FCP_RSP_ERROR; 16955 first_iocbq->iocb.un.ulpWord[4] = 16956 IOERR_NO_RESOURCES; 16957 } 16958 lpfc_in_buf_free(vport->phba, d_buf); 16959 continue; 16960 } 16961 /* We need to get the size out of the right CQE */ 16962 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16963 len = bf_get(lpfc_rcqe_length, 16964 &hbq_buf->cq_event.cqe.rcqe_cmpl); 16965 iocbq->context2 = d_buf; 16966 iocbq->context3 = NULL; 16967 iocbq->iocb.ulpBdeCount = 1; 16968 if (len > LPFC_DATA_BUF_SIZE) 16969 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 16970 LPFC_DATA_BUF_SIZE; 16971 else 16972 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 16973 16974 tot_len += len; 16975 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 16976 16977 iocbq->iocb.un.rcvels.remoteID = sid; 16978 list_add_tail(&iocbq->list, &first_iocbq->list); 16979 } 16980 } 16981 return first_iocbq; 16982 } 16983 16984 static void 16985 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 16986 struct hbq_dmabuf *seq_dmabuf) 16987 { 16988 struct fc_frame_header *fc_hdr; 16989 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 16990 struct lpfc_hba *phba = vport->phba; 16991 16992 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 16993 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 16994 if (!iocbq) { 16995 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16996 "2707 Ring %d handler: Failed to allocate " 16997 "iocb Rctl x%x Type x%x received\n", 16998 LPFC_ELS_RING, 16999 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17000 return; 17001 } 17002 if (!lpfc_complete_unsol_iocb(phba, 17003 phba->sli4_hba.els_wq->pring, 17004 iocbq, fc_hdr->fh_r_ctl, 17005 fc_hdr->fh_type)) 17006 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17007 "2540 Ring %d handler: unexpected Rctl " 17008 "x%x Type x%x received\n", 17009 LPFC_ELS_RING, 17010 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17011 17012 /* Free iocb created in lpfc_prep_seq */ 17013 list_for_each_entry_safe(curr_iocb, next_iocb, 17014 &iocbq->list, list) { 17015 list_del_init(&curr_iocb->list); 17016 lpfc_sli_release_iocbq(phba, curr_iocb); 17017 } 17018 lpfc_sli_release_iocbq(phba, iocbq); 17019 } 17020 17021 static void 17022 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17023 struct lpfc_iocbq *rspiocb) 17024 { 17025 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17026 17027 if (pcmd && pcmd->virt) 17028 pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17029 kfree(pcmd); 17030 lpfc_sli_release_iocbq(phba, cmdiocb); 17031 } 17032 17033 static void 17034 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17035 struct hbq_dmabuf *dmabuf) 17036 { 17037 struct fc_frame_header *fc_hdr; 17038 struct lpfc_hba *phba = vport->phba; 17039 struct lpfc_iocbq *iocbq = NULL; 17040 union lpfc_wqe *wqe; 17041 struct lpfc_dmabuf *pcmd = NULL; 17042 uint32_t frame_len; 17043 int rc; 17044 17045 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17046 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17047 17048 /* Send the received frame back */ 17049 iocbq = lpfc_sli_get_iocbq(phba); 17050 if (!iocbq) 17051 goto exit; 17052 17053 /* Allocate buffer for command payload */ 17054 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17055 if (pcmd) 17056 pcmd->virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17057 &pcmd->phys); 17058 if (!pcmd || !pcmd->virt) 17059 goto exit; 17060 17061 INIT_LIST_HEAD(&pcmd->list); 17062 17063 /* copyin the payload */ 17064 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17065 17066 /* fill in BDE's for command */ 17067 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17068 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17069 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17070 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17071 17072 iocbq->context2 = pcmd; 17073 iocbq->vport = vport; 17074 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17075 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17076 17077 /* 17078 * Setup rest of the iocb as though it were a WQE 17079 * Build the SEND_FRAME WQE 17080 */ 17081 wqe = (union lpfc_wqe *)&iocbq->iocb; 17082 17083 wqe->send_frame.frame_len = frame_len; 17084 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17085 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17086 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17087 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17088 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17089 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17090 17091 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17092 iocbq->iocb.ulpLe = 1; 17093 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17094 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17095 if (rc == IOCB_ERROR) 17096 goto exit; 17097 17098 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17099 return; 17100 17101 exit: 17102 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 17103 "2023 Unable to process MDS loopback frame\n"); 17104 if (pcmd && pcmd->virt) 17105 pci_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17106 kfree(pcmd); 17107 lpfc_sli_release_iocbq(phba, iocbq); 17108 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17109 } 17110 17111 /** 17112 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17113 * @phba: 
Pointer to HBA context object. 17114 * 17115 * This function is called with no lock held. This function processes all 17116 * the received buffers and gives it to upper layers when a received buffer 17117 * indicates that it is the final frame in the sequence. The interrupt 17118 * service routine processes received buffers at interrupt contexts. 17119 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17120 * appropriate receive function when the final frame in a sequence is received. 17121 **/ 17122 void 17123 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17124 struct hbq_dmabuf *dmabuf) 17125 { 17126 struct hbq_dmabuf *seq_dmabuf; 17127 struct fc_frame_header *fc_hdr; 17128 struct lpfc_vport *vport; 17129 uint32_t fcfi; 17130 uint32_t did; 17131 17132 /* Process each received buffer */ 17133 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17134 17135 /* check to see if this a valid type of frame */ 17136 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17137 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17138 return; 17139 } 17140 17141 if ((bf_get(lpfc_cqe_code, 17142 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17143 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17144 &dmabuf->cq_event.cqe.rcqe_cmpl); 17145 else 17146 fcfi = bf_get(lpfc_rcqe_fcf_id, 17147 &dmabuf->cq_event.cqe.rcqe_cmpl); 17148 17149 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 17150 vport = phba->pport; 17151 /* Handle MDS Loopback frames */ 17152 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17153 return; 17154 } 17155 17156 /* d_id this frame is directed to */ 17157 did = sli4_did_from_fc_hdr(fc_hdr); 17158 17159 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17160 if (!vport) { 17161 /* throw out the frame */ 17162 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17163 return; 17164 } 17165 17166 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17167 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17168 (did != Fabric_DID)) { 17169 /* 17170 * Throw out the frame if we are not pt2pt. 17171 * The pt2pt protocol allows for discovery frames 17172 * to be received without a registered VPI. 17173 */ 17174 if (!(vport->fc_flag & FC_PT2PT) || 17175 (phba->link_state == LPFC_HBA_READY)) { 17176 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17177 return; 17178 } 17179 } 17180 17181 /* Handle the basic abort sequence (BA_ABTS) event */ 17182 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17183 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17184 return; 17185 } 17186 17187 /* Link this frame */ 17188 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17189 if (!seq_dmabuf) { 17190 /* unable to add frame to vport - throw it out */ 17191 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17192 return; 17193 } 17194 /* If not last frame in sequence continue processing frames. */ 17195 if (!lpfc_seq_complete(seq_dmabuf)) 17196 return; 17197 17198 /* Send the complete sequence to the upper layer protocol */ 17199 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17200 } 17201 17202 /** 17203 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17204 * @phba: pointer to lpfc hba data structure. 17205 * 17206 * This routine is invoked to post rpi header templates to the 17207 * HBA consistent with the SLI-4 interface spec. This routine 17208 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17209 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17210 * 17211 * This routine does not require any locks. 
It's usage is expected 17212 * to be driver load or reset recovery when the driver is 17213 * sequential. 17214 * 17215 * Return codes 17216 * 0 - successful 17217 * -EIO - The mailbox failed to complete successfully. 17218 * When this error occurs, the driver is not guaranteed 17219 * to have any rpi regions posted to the device and 17220 * must either attempt to repost the regions or take a 17221 * fatal error. 17222 **/ 17223 int 17224 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17225 { 17226 struct lpfc_rpi_hdr *rpi_page; 17227 uint32_t rc = 0; 17228 uint16_t lrpi = 0; 17229 17230 /* SLI4 ports that support extents do not require RPI headers. */ 17231 if (!phba->sli4_hba.rpi_hdrs_in_use) 17232 goto exit; 17233 if (phba->sli4_hba.extents_in_use) 17234 return -EIO; 17235 17236 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17237 /* 17238 * Assign the rpi headers a physical rpi only if the driver 17239 * has not initialized those resources. A port reset only 17240 * needs the headers posted. 17241 */ 17242 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 17243 LPFC_RPI_RSRC_RDY) 17244 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17245 17246 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 17247 if (rc != MBX_SUCCESS) { 17248 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17249 "2008 Error %d posting all rpi " 17250 "headers\n", rc); 17251 rc = -EIO; 17252 break; 17253 } 17254 } 17255 17256 exit: 17257 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 17258 LPFC_RPI_RSRC_RDY); 17259 return rc; 17260 } 17261 17262 /** 17263 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 17264 * @phba: pointer to lpfc hba data structure. 17265 * @rpi_page: pointer to the rpi memory region. 17266 * 17267 * This routine is invoked to post a single rpi header to the 17268 * HBA consistent with the SLI-4 interface spec. This memory region 17269 * maps up to 64 rpi context regions. 17270 * 17271 * Return codes 17272 * 0 - successful 17273 * -ENOMEM - No available memory 17274 * -EIO - The mailbox failed to complete successfully. 17275 **/ 17276 int 17277 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 17278 { 17279 LPFC_MBOXQ_t *mboxq; 17280 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 17281 uint32_t rc = 0; 17282 uint32_t shdr_status, shdr_add_status; 17283 union lpfc_sli4_cfg_shdr *shdr; 17284 17285 /* SLI4 ports that support extents do not require RPI headers. */ 17286 if (!phba->sli4_hba.rpi_hdrs_in_use) 17287 return rc; 17288 if (phba->sli4_hba.extents_in_use) 17289 return -EIO; 17290 17291 /* The port is notified of the header region via a mailbox command. */ 17292 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17293 if (!mboxq) { 17294 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17295 "2001 Unable to allocate memory for issuing " 17296 "SLI_CONFIG_SPECIAL mailbox command\n"); 17297 return -ENOMEM; 17298 } 17299 17300 /* Post all rpi memory regions to the port. */ 17301 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 17302 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 17303 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 17304 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 17305 sizeof(struct lpfc_sli4_cfg_mhdr), 17306 LPFC_SLI4_MBX_EMBED); 17307 17308 17309 /* Post the physical rpi to the port for this rpi header. 
*/ 17310 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 17311 rpi_page->start_rpi); 17312 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 17313 hdr_tmpl, rpi_page->page_count); 17314 17315 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 17316 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 17317 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 17318 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 17319 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17320 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17321 if (rc != MBX_TIMEOUT) 17322 mempool_free(mboxq, phba->mbox_mem_pool); 17323 if (shdr_status || shdr_add_status || rc) { 17324 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17325 "2514 POST_RPI_HDR mailbox failed with " 17326 "status x%x add_status x%x, mbx status x%x\n", 17327 shdr_status, shdr_add_status, rc); 17328 rc = -ENXIO; 17329 } else { 17330 /* 17331 * The next_rpi stores the next logical module-64 rpi value used 17332 * to post physical rpis in subsequent rpi postings. 17333 */ 17334 spin_lock_irq(&phba->hbalock); 17335 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 17336 spin_unlock_irq(&phba->hbalock); 17337 } 17338 return rc; 17339 } 17340 17341 /** 17342 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 17343 * @phba: pointer to lpfc hba data structure. 17344 * 17345 * This routine is invoked to post rpi header templates to the 17346 * HBA consistent with the SLI-4 interface spec. This routine 17347 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17348 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17349 * 17350 * Returns 17351 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 17352 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 17353 **/ 17354 int 17355 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 17356 { 17357 unsigned long rpi; 17358 uint16_t max_rpi, rpi_limit; 17359 uint16_t rpi_remaining, lrpi = 0; 17360 struct lpfc_rpi_hdr *rpi_hdr; 17361 unsigned long iflag; 17362 17363 /* 17364 * Fetch the next logical rpi. Because this index is logical, 17365 * the driver starts at 0 each time. 17366 */ 17367 spin_lock_irqsave(&phba->hbalock, iflag); 17368 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 17369 rpi_limit = phba->sli4_hba.next_rpi; 17370 17371 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 17372 if (rpi >= rpi_limit) 17373 rpi = LPFC_RPI_ALLOC_ERROR; 17374 else { 17375 set_bit(rpi, phba->sli4_hba.rpi_bmask); 17376 phba->sli4_hba.max_cfg_param.rpi_used++; 17377 phba->sli4_hba.rpi_count++; 17378 } 17379 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 17380 "0001 rpi:%x max:%x lim:%x\n", 17381 (int) rpi, max_rpi, rpi_limit); 17382 17383 /* 17384 * Don't try to allocate more rpi header regions if the device limit 17385 * has been exhausted. 17386 */ 17387 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 17388 (phba->sli4_hba.rpi_count >= max_rpi)) { 17389 spin_unlock_irqrestore(&phba->hbalock, iflag); 17390 return rpi; 17391 } 17392 17393 /* 17394 * RPI header postings are not required for SLI4 ports capable of 17395 * extents. 17396 */ 17397 if (!phba->sli4_hba.rpi_hdrs_in_use) { 17398 spin_unlock_irqrestore(&phba->hbalock, iflag); 17399 return rpi; 17400 } 17401 17402 /* 17403 * If the driver is running low on rpi resources, allocate another 17404 * page now. Note that the next_rpi value is used because 17405 * it represents how many are actually in use whereas max_rpi notes 17406 * how many are supported max by the device. 
17407 */ 17408 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 17409 spin_unlock_irqrestore(&phba->hbalock, iflag); 17410 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 17411 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 17412 if (!rpi_hdr) { 17413 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17414 "2002 Error Could not grow rpi " 17415 "count\n"); 17416 } else { 17417 lrpi = rpi_hdr->start_rpi; 17418 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17419 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 17420 } 17421 } 17422 17423 return rpi; 17424 } 17425 17426 /** 17427 * lpfc_sli4_free_rpi - Release an rpi for reuse. 17428 * @phba: pointer to lpfc hba data structure. 17429 * 17430 * This routine is invoked to release an rpi to the pool of 17431 * available rpis maintained by the driver. 17432 **/ 17433 static void 17434 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 17435 { 17436 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 17437 phba->sli4_hba.rpi_count--; 17438 phba->sli4_hba.max_cfg_param.rpi_used--; 17439 } 17440 } 17441 17442 /** 17443 * lpfc_sli4_free_rpi - Release an rpi for reuse. 17444 * @phba: pointer to lpfc hba data structure. 17445 * 17446 * This routine is invoked to release an rpi to the pool of 17447 * available rpis maintained by the driver. 17448 **/ 17449 void 17450 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 17451 { 17452 spin_lock_irq(&phba->hbalock); 17453 __lpfc_sli4_free_rpi(phba, rpi); 17454 spin_unlock_irq(&phba->hbalock); 17455 } 17456 17457 /** 17458 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 17459 * @phba: pointer to lpfc hba data structure. 17460 * 17461 * This routine is invoked to remove the memory region that 17462 * provided rpi via a bitmask. 17463 **/ 17464 void 17465 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 17466 { 17467 kfree(phba->sli4_hba.rpi_bmask); 17468 kfree(phba->sli4_hba.rpi_ids); 17469 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 17470 } 17471 17472 /** 17473 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 17474 * @phba: pointer to lpfc hba data structure. 17475 * 17476 * This routine is invoked to remove the memory region that 17477 * provided rpi via a bitmask. 17478 **/ 17479 int 17480 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 17481 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 17482 { 17483 LPFC_MBOXQ_t *mboxq; 17484 struct lpfc_hba *phba = ndlp->phba; 17485 int rc; 17486 17487 /* The port is notified of the header region via a mailbox command. */ 17488 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17489 if (!mboxq) 17490 return -ENOMEM; 17491 17492 /* Post all rpi memory regions to the port. */ 17493 lpfc_resume_rpi(mboxq, ndlp); 17494 if (cmpl) { 17495 mboxq->mbox_cmpl = cmpl; 17496 mboxq->context1 = arg; 17497 mboxq->context2 = ndlp; 17498 } else 17499 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17500 mboxq->vport = ndlp->vport; 17501 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17502 if (rc == MBX_NOT_FINISHED) { 17503 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17504 "2010 Resume RPI Mailbox failed " 17505 "status %d, mbxStatus x%x\n", rc, 17506 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 17507 mempool_free(mboxq, phba->mbox_mem_pool); 17508 return -EIO; 17509 } 17510 return 0; 17511 } 17512 17513 /** 17514 * lpfc_sli4_init_vpi - Initialize a vpi with the port 17515 * @vport: Pointer to the vport for which the vpi is being initialized 17516 * 17517 * This routine is invoked to activate a vpi with the port. 
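 * The INIT_VPI mailbox command is issued synchronously through
 * lpfc_sli_issue_mbox_wait() using the standard mailbox timeout; the mailbox
 * is returned to the pool unless the command timed out.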
17518 * 17519 * Returns: 17520 * 0 success 17521 * -Evalue otherwise 17522 **/ 17523 int 17524 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 17525 { 17526 LPFC_MBOXQ_t *mboxq; 17527 int rc = 0; 17528 int retval = MBX_SUCCESS; 17529 uint32_t mbox_tmo; 17530 struct lpfc_hba *phba = vport->phba; 17531 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17532 if (!mboxq) 17533 return -ENOMEM; 17534 lpfc_init_vpi(phba, mboxq, vport->vpi); 17535 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 17536 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 17537 if (rc != MBX_SUCCESS) { 17538 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 17539 "2022 INIT VPI Mailbox failed " 17540 "status %d, mbxStatus x%x\n", rc, 17541 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 17542 retval = -EIO; 17543 } 17544 if (rc != MBX_TIMEOUT) 17545 mempool_free(mboxq, vport->phba->mbox_mem_pool); 17546 17547 return retval; 17548 } 17549 17550 /** 17551 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 17552 * @phba: pointer to lpfc hba data structure. 17553 * @mboxq: Pointer to mailbox object. 17554 * 17555 * This routine is invoked to manually add a single FCF record. The caller 17556 * must pass a completely initialized FCF_Record. This routine takes 17557 * care of the nonembedded mailbox operations. 17558 **/ 17559 static void 17560 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 17561 { 17562 void *virt_addr; 17563 union lpfc_sli4_cfg_shdr *shdr; 17564 uint32_t shdr_status, shdr_add_status; 17565 17566 virt_addr = mboxq->sge_array->addr[0]; 17567 /* The IOCTL status is embedded in the mailbox subheader. */ 17568 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 17569 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17570 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17571 17572 if ((shdr_status || shdr_add_status) && 17573 (shdr_status != STATUS_FCF_IN_USE)) 17574 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17575 "2558 ADD_FCF_RECORD mailbox failed with " 17576 "status x%x add_status x%x\n", 17577 shdr_status, shdr_add_status); 17578 17579 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17580 } 17581 17582 /** 17583 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 17584 * @phba: pointer to lpfc hba data structure. 17585 * @fcf_record: pointer to the initialized fcf record to add. 17586 * 17587 * This routine is invoked to manually add a single FCF record. The caller 17588 * must pass a completely initialized FCF_Record. This routine takes 17589 * care of the nonembedded mailbox operations. 
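 * The command is sent as a non-embedded SLI_CONFIG mailbox and completes in
 * lpfc_mbx_cmpl_add_fcf_record(), which treats a STATUS_FCF_IN_USE response
 * as benign rather than as an error.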
17590 **/ 17591 int 17592 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 17593 { 17594 int rc = 0; 17595 LPFC_MBOXQ_t *mboxq; 17596 uint8_t *bytep; 17597 void *virt_addr; 17598 struct lpfc_mbx_sge sge; 17599 uint32_t alloc_len, req_len; 17600 uint32_t fcfindex; 17601 17602 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17603 if (!mboxq) { 17604 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17605 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 17606 return -ENOMEM; 17607 } 17608 17609 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 17610 sizeof(uint32_t); 17611 17612 /* Allocate DMA memory and set up the non-embedded mailbox command */ 17613 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 17614 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 17615 req_len, LPFC_SLI4_MBX_NEMBED); 17616 if (alloc_len < req_len) { 17617 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17618 "2523 Allocated DMA memory size (x%x) is " 17619 "less than the requested DMA memory " 17620 "size (x%x)\n", alloc_len, req_len); 17621 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17622 return -ENOMEM; 17623 } 17624 17625 /* 17626 * Get the first SGE entry from the non-embedded DMA memory. This 17627 * routine only uses a single SGE. 17628 */ 17629 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 17630 virt_addr = mboxq->sge_array->addr[0]; 17631 /* 17632 * Configure the FCF record for FCFI 0. This is the driver's 17633 * hardcoded default and gets used in nonFIP mode. 17634 */ 17635 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 17636 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 17637 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 17638 17639 /* 17640 * Copy the fcf_index and the FCF Record Data. The data starts after 17641 * the FCoE header plus word10. The data copy needs to be endian 17642 * correct. 17643 */ 17644 bytep += sizeof(uint32_t); 17645 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 17646 mboxq->vport = phba->pport; 17647 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 17648 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17649 if (rc == MBX_NOT_FINISHED) { 17650 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17651 "2515 ADD_FCF_RECORD mailbox failed with " 17652 "status 0x%x\n", rc); 17653 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17654 rc = -EIO; 17655 } else 17656 rc = 0; 17657 17658 return rc; 17659 } 17660 17661 /** 17662 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 17663 * @phba: pointer to lpfc hba data structure. 17664 * @fcf_record: pointer to the fcf record to write the default data. 17665 * @fcf_index: FCF table entry index. 17666 * 17667 * This routine is invoked to build the driver's default FCF record. The 17668 * values used are hardcoded. This routine handles memory initialization. 
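 * A caller would typically build the record and then post it with
 * lpfc_sli4_add_fcf_record(), for example (illustrative sketch only; actual
 * call sites may differ):
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);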
17669 * 17670 **/ 17671 void 17672 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 17673 struct fcf_record *fcf_record, 17674 uint16_t fcf_index) 17675 { 17676 memset(fcf_record, 0, sizeof(struct fcf_record)); 17677 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 17678 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 17679 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 17680 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 17681 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 17682 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 17683 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 17684 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 17685 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 17686 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 17687 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 17688 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 17689 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 17690 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 17691 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 17692 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 17693 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 17694 /* Set the VLAN bit map */ 17695 if (phba->valid_vlan) { 17696 fcf_record->vlan_bitmap[phba->vlan_id / 8] 17697 = 1 << (phba->vlan_id % 8); 17698 } 17699 } 17700 17701 /** 17702 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 17703 * @phba: pointer to lpfc hba data structure. 17704 * @fcf_index: FCF table entry offset. 17705 * 17706 * This routine is invoked to scan the entire FCF table by reading FCF 17707 * records and processing them one at a time starting from the @fcf_index 17708 * for initial FCF discovery or fast FCF failover rediscovery. 17709 * 17710 * Return 0 if the mailbox command is submitted successfully, non-zero 17711 * otherwise.
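 * The READ_FCF_RECORD mailbox is issued with MBX_NOWAIT and completes in
 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(). FCF_TS_INPROG is set under the
 * hbalock before submission and is cleared here only if the submission
 * fails; a scan started from LPFC_FCOE_FCF_GET_FIRST also resets the
 * eligible FCF count.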
17712 **/ 17713 int 17714 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 17715 { 17716 int rc = 0, error; 17717 LPFC_MBOXQ_t *mboxq; 17718 17719 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 17720 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 17721 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17722 if (!mboxq) { 17723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17724 "2000 Failed to allocate mbox for " 17725 "READ_FCF cmd\n"); 17726 error = -ENOMEM; 17727 goto fail_fcf_scan; 17728 } 17729 /* Construct the read FCF record mailbox command */ 17730 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 17731 if (rc) { 17732 error = -EINVAL; 17733 goto fail_fcf_scan; 17734 } 17735 /* Issue the mailbox command asynchronously */ 17736 mboxq->vport = phba->pport; 17737 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 17738 17739 spin_lock_irq(&phba->hbalock); 17740 phba->hba_flag |= FCF_TS_INPROG; 17741 spin_unlock_irq(&phba->hbalock); 17742 17743 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17744 if (rc == MBX_NOT_FINISHED) 17745 error = -EIO; 17746 else { 17747 /* Reset eligible FCF count for new scan */ 17748 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 17749 phba->fcf.eligible_fcf_cnt = 0; 17750 error = 0; 17751 } 17752 fail_fcf_scan: 17753 if (error) { 17754 if (mboxq) 17755 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17756 /* FCF scan failed, clear FCF_TS_INPROG flag */ 17757 spin_lock_irq(&phba->hbalock); 17758 phba->hba_flag &= ~FCF_TS_INPROG; 17759 spin_unlock_irq(&phba->hbalock); 17760 } 17761 return error; 17762 } 17763 17764 /** 17765 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 17766 * @phba: pointer to lpfc hba data structure. 17767 * @fcf_index: FCF table entry offset. 17768 * 17769 * This routine is invoked to read an FCF record indicated by @fcf_index 17770 * and to use it for FLOGI roundrobin FCF failover. 17771 * 17772 * Return 0 if the mailbox command is submitted successfully, none 0 17773 * otherwise. 17774 **/ 17775 int 17776 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 17777 { 17778 int rc = 0, error; 17779 LPFC_MBOXQ_t *mboxq; 17780 17781 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17782 if (!mboxq) { 17783 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 17784 "2763 Failed to allocate mbox for " 17785 "READ_FCF cmd\n"); 17786 error = -ENOMEM; 17787 goto fail_fcf_read; 17788 } 17789 /* Construct the read FCF record mailbox command */ 17790 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 17791 if (rc) { 17792 error = -EINVAL; 17793 goto fail_fcf_read; 17794 } 17795 /* Issue the mailbox command asynchronously */ 17796 mboxq->vport = phba->pport; 17797 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 17798 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17799 if (rc == MBX_NOT_FINISHED) 17800 error = -EIO; 17801 else 17802 error = 0; 17803 17804 fail_fcf_read: 17805 if (error && mboxq) 17806 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17807 return error; 17808 } 17809 17810 /** 17811 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 17812 * @phba: pointer to lpfc hba data structure. 17813 * @fcf_index: FCF table entry offset. 17814 * 17815 * This routine is invoked to read an FCF record indicated by @fcf_index to 17816 * determine whether it's eligible for FLOGI roundrobin failover list. 17817 * 17818 * Return 0 if the mailbox command is submitted successfully, none 0 17819 * otherwise. 
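 * As with the scan and roundrobin variants above, the mailbox is issued
 * with MBX_NOWAIT; completion is routed to lpfc_mbx_cmpl_read_fcf_rec() and
 * the mailbox is freed here only when submission fails.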
17820 **/ 17821 int 17822 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 17823 { 17824 int rc = 0, error; 17825 LPFC_MBOXQ_t *mboxq; 17826 17827 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17828 if (!mboxq) { 17829 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 17830 "2758 Failed to allocate mbox for " 17831 "READ_FCF cmd\n"); 17832 error = -ENOMEM; 17833 goto fail_fcf_read; 17834 } 17835 /* Construct the read FCF record mailbox command */ 17836 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 17837 if (rc) { 17838 error = -EINVAL; 17839 goto fail_fcf_read; 17840 } 17841 /* Issue the mailbox command asynchronously */ 17842 mboxq->vport = phba->pport; 17843 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 17844 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17845 if (rc == MBX_NOT_FINISHED) 17846 error = -EIO; 17847 else 17848 error = 0; 17849 17850 fail_fcf_read: 17851 if (error && mboxq) 17852 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17853 return error; 17854 } 17855 17856 /** 17857 * lpfc_check_next_fcf_pri_level 17858 * phba pointer to the lpfc_hba struct for this port. 17859 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 17860 * routine when the rr_bmask is empty. The FCF indecies are put into the 17861 * rr_bmask based on their priority level. Starting from the highest priority 17862 * to the lowest. The most likely FCF candidate will be in the highest 17863 * priority group. When this routine is called it searches the fcf_pri list for 17864 * next lowest priority group and repopulates the rr_bmask with only those 17865 * fcf_indexes. 17866 * returns: 17867 * 1=success 0=failure 17868 **/ 17869 static int 17870 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 17871 { 17872 uint16_t next_fcf_pri; 17873 uint16_t last_index; 17874 struct lpfc_fcf_pri *fcf_pri; 17875 int rc; 17876 int ret = 0; 17877 17878 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 17879 LPFC_SLI4_FCF_TBL_INDX_MAX); 17880 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 17881 "3060 Last IDX %d\n", last_index); 17882 17883 /* Verify the priority list has 2 or more entries */ 17884 spin_lock_irq(&phba->hbalock); 17885 if (list_empty(&phba->fcf.fcf_pri_list) || 17886 list_is_singular(&phba->fcf.fcf_pri_list)) { 17887 spin_unlock_irq(&phba->hbalock); 17888 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 17889 "3061 Last IDX %d\n", last_index); 17890 return 0; /* Empty rr list */ 17891 } 17892 spin_unlock_irq(&phba->hbalock); 17893 17894 next_fcf_pri = 0; 17895 /* 17896 * Clear the rr_bmask and set all of the bits that are at this 17897 * priority. 17898 */ 17899 memset(phba->fcf.fcf_rr_bmask, 0, 17900 sizeof(*phba->fcf.fcf_rr_bmask)); 17901 spin_lock_irq(&phba->hbalock); 17902 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 17903 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 17904 continue; 17905 /* 17906 * the 1st priority that has not FLOGI failed 17907 * will be the highest. 17908 */ 17909 if (!next_fcf_pri) 17910 next_fcf_pri = fcf_pri->fcf_rec.priority; 17911 spin_unlock_irq(&phba->hbalock); 17912 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 17913 rc = lpfc_sli4_fcf_rr_index_set(phba, 17914 fcf_pri->fcf_rec.fcf_index); 17915 if (rc) 17916 return 0; 17917 } 17918 spin_lock_irq(&phba->hbalock); 17919 } 17920 /* 17921 * if next_fcf_pri was not set above and the list is not empty then 17922 * we have failed flogis on all of them. So reset flogi failed 17923 * and start at the beginning. 
17924 */ 17925 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 17926 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 17927 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 17928 /* 17929 * the 1st priority that has not FLOGI failed 17930 * will be the highest. 17931 */ 17932 if (!next_fcf_pri) 17933 next_fcf_pri = fcf_pri->fcf_rec.priority; 17934 spin_unlock_irq(&phba->hbalock); 17935 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 17936 rc = lpfc_sli4_fcf_rr_index_set(phba, 17937 fcf_pri->fcf_rec.fcf_index); 17938 if (rc) 17939 return 0; 17940 } 17941 spin_lock_irq(&phba->hbalock); 17942 } 17943 } else 17944 ret = 1; 17945 spin_unlock_irq(&phba->hbalock); 17946 17947 return ret; 17948 } 17949 /** 17950 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 17951 * @phba: pointer to lpfc hba data structure. 17952 * 17953 * This routine is to get the next eligible FCF record index in a round 17954 * robin fashion. If the next eligible FCF record index equals to the 17955 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 17956 * shall be returned, otherwise, the next eligible FCF record's index 17957 * shall be returned. 17958 **/ 17959 uint16_t 17960 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 17961 { 17962 uint16_t next_fcf_index; 17963 17964 initial_priority: 17965 /* Search start from next bit of currently registered FCF index */ 17966 next_fcf_index = phba->fcf.current_rec.fcf_indx; 17967 17968 next_priority: 17969 /* Determine the next fcf index to check */ 17970 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 17971 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 17972 LPFC_SLI4_FCF_TBL_INDX_MAX, 17973 next_fcf_index); 17974 17975 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 17976 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 17977 /* 17978 * If we have wrapped then we need to clear the bits that 17979 * have been tested so that we can detect when we should 17980 * change the priority level. 17981 */ 17982 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 17983 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 17984 } 17985 17986 17987 /* Check roundrobin failover list empty condition */ 17988 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 17989 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 17990 /* 17991 * If next fcf index is not found check if there are lower 17992 * Priority level fcf's in the fcf_priority list. 17993 * Set up the rr_bmask with all of the avaiable fcf bits 17994 * at that level and continue the selection process. 
17995 */ 17996 if (lpfc_check_next_fcf_pri_level(phba)) 17997 goto initial_priority; 17998 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 17999 "2844 No roundrobin failover FCF available\n"); 18000 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 18001 return LPFC_FCOE_FCF_NEXT_NONE; 18002 else { 18003 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18004 "3063 Only FCF available idx %d, flag %x\n", 18005 next_fcf_index, 18006 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 18007 return next_fcf_index; 18008 } 18009 } 18010 18011 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18012 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18013 LPFC_FCF_FLOGI_FAILED) { 18014 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18015 return LPFC_FCOE_FCF_NEXT_NONE; 18016 18017 goto next_priority; 18018 } 18019 18020 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18021 "2845 Get next roundrobin failover FCF (x%x)\n", 18022 next_fcf_index); 18023 18024 return next_fcf_index; 18025 } 18026 18027 /** 18028 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18029 * @phba: pointer to lpfc hba data structure. 18030 * 18031 * This routine sets the FCF record index in to the eligible bmask for 18032 * roundrobin failover search. It checks to make sure that the index 18033 * does not go beyond the range of the driver allocated bmask dimension 18034 * before setting the bit. 18035 * 18036 * Returns 0 if the index bit successfully set, otherwise, it returns 18037 * -EINVAL. 18038 **/ 18039 int 18040 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18041 { 18042 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18043 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18044 "2610 FCF (x%x) reached driver's book " 18045 "keeping dimension:x%x\n", 18046 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18047 return -EINVAL; 18048 } 18049 /* Set the eligible FCF record index bmask */ 18050 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18051 18052 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18053 "2790 Set FCF (x%x) to roundrobin FCF failover " 18054 "bmask\n", fcf_index); 18055 18056 return 0; 18057 } 18058 18059 /** 18060 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18061 * @phba: pointer to lpfc hba data structure. 18062 * 18063 * This routine clears the FCF record index from the eligible bmask for 18064 * roundrobin failover search. It checks to make sure that the index 18065 * does not go beyond the range of the driver allocated bmask dimension 18066 * before clearing the bit. 
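 * Besides clearing the bit in fcf_rr_bmask, the matching entry (if any) is
 * also removed from the fcf_pri_list under the hbalock.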
18067 **/ 18068 void 18069 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18070 { 18071 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18072 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18073 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18074 "2762 FCF (x%x) reached driver's book " 18075 "keeping dimension:x%x\n", 18076 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18077 return; 18078 } 18079 /* Clear the eligible FCF record index bmask */ 18080 spin_lock_irq(&phba->hbalock); 18081 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18082 list) { 18083 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18084 list_del_init(&fcf_pri->list); 18085 break; 18086 } 18087 } 18088 spin_unlock_irq(&phba->hbalock); 18089 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18090 18091 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18092 "2791 Clear FCF (x%x) from roundrobin failover " 18093 "bmask\n", fcf_index); 18094 } 18095 18096 /** 18097 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18098 * @phba: pointer to lpfc hba data structure. 18099 * 18100 * This routine is the completion routine for the rediscover FCF table mailbox 18101 * command. If the mailbox command returned failure, it will try to stop the 18102 * FCF rediscover wait timer. 18103 **/ 18104 static void 18105 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18106 { 18107 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18108 uint32_t shdr_status, shdr_add_status; 18109 18110 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18111 18112 shdr_status = bf_get(lpfc_mbox_hdr_status, 18113 &redisc_fcf->header.cfg_shdr.response); 18114 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18115 &redisc_fcf->header.cfg_shdr.response); 18116 if (shdr_status || shdr_add_status) { 18117 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18118 "2746 Requesting for FCF rediscovery failed " 18119 "status x%x add_status x%x\n", 18120 shdr_status, shdr_add_status); 18121 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18122 spin_lock_irq(&phba->hbalock); 18123 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18124 spin_unlock_irq(&phba->hbalock); 18125 /* 18126 * CVL event triggered FCF rediscover request failed, 18127 * last resort to re-try current registered FCF entry. 18128 */ 18129 lpfc_retry_pport_discovery(phba); 18130 } else { 18131 spin_lock_irq(&phba->hbalock); 18132 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18133 spin_unlock_irq(&phba->hbalock); 18134 /* 18135 * DEAD FCF event triggered FCF rediscover request 18136 * failed, last resort to fail over as a link down 18137 * to FCF registration. 18138 */ 18139 lpfc_sli4_fcf_dead_failthrough(phba); 18140 } 18141 } else { 18142 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18143 "2775 Start FCF rediscover quiescent timer\n"); 18144 /* 18145 * Start FCF rediscovery wait timer for pending FCF 18146 * before rescan FCF record table. 18147 */ 18148 lpfc_fcf_redisc_wait_start_timer(phba); 18149 } 18150 18151 mempool_free(mbox, phba->mbox_mem_pool); 18152 } 18153 18154 /** 18155 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18156 * @phba: pointer to lpfc hba data structure. 18157 * 18158 * This routine is invoked to request for rediscovery of the entire FCF table 18159 * by the port. 
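 * All vport retry delay timers are cancelled first, and the rediscover
 * count is set to zero so the port invalidates its entire FCF database.
 * The mailbox is issued with MBX_NOWAIT and completes in
 * lpfc_mbx_cmpl_redisc_fcf_table().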
18160 **/ 18161 int 18162 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18163 { 18164 LPFC_MBOXQ_t *mbox; 18165 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18166 int rc, length; 18167 18168 /* Cancel retry delay timers to all vports before FCF rediscover */ 18169 lpfc_cancel_all_vport_retry_delay_timer(phba); 18170 18171 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18172 if (!mbox) { 18173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18174 "2745 Failed to allocate mbox for " 18175 "requesting FCF rediscover.\n"); 18176 return -ENOMEM; 18177 } 18178 18179 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18180 sizeof(struct lpfc_sli4_cfg_mhdr)); 18181 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18182 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18183 length, LPFC_SLI4_MBX_EMBED); 18184 18185 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18186 /* Set count to 0 for invalidating the entire FCF database */ 18187 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18188 18189 /* Issue the mailbox command asynchronously */ 18190 mbox->vport = phba->pport; 18191 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18192 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18193 18194 if (rc == MBX_NOT_FINISHED) { 18195 mempool_free(mbox, phba->mbox_mem_pool); 18196 return -EIO; 18197 } 18198 return 0; 18199 } 18200 18201 /** 18202 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18203 * @phba: pointer to lpfc hba data structure. 18204 * 18205 * This function is the failover routine as a last resort to the FCF DEAD 18206 * event when driver failed to perform fast FCF failover. 18207 **/ 18208 void 18209 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18210 { 18211 uint32_t link_state; 18212 18213 /* 18214 * Last resort as FCF DEAD event failover will treat this as 18215 * a link down, but save the link state because we don't want 18216 * it to be changed to Link Down unless it is already down. 18217 */ 18218 link_state = phba->link_state; 18219 lpfc_linkdown(phba); 18220 phba->link_state = link_state; 18221 18222 /* Unregister FCF if no devices connected to it */ 18223 lpfc_unregister_unused_fcf(phba); 18224 } 18225 18226 /** 18227 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18228 * @phba: pointer to lpfc hba data structure. 18229 * @rgn23_data: pointer to configure region 23 data. 18230 * 18231 * This function gets SLI3 port configure region 23 data through memory dump 18232 * mailbox command. When it successfully retrieves data, the size of the data 18233 * will be returned, otherwise, 0 will be returned. 
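 * The region is read by repeatedly issuing a polled DUMP memory mailbox
 * command (built by lpfc_dump_mem() for DMP_REGION_23) and copying each
 * returned chunk into @rgn23_data until the port reports a zero word count
 * or DMP_RGN23_SIZE bytes have been gathered.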
18234 **/ 18235 static uint32_t 18236 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18237 { 18238 LPFC_MBOXQ_t *pmb = NULL; 18239 MAILBOX_t *mb; 18240 uint32_t offset = 0; 18241 int rc; 18242 18243 if (!rgn23_data) 18244 return 0; 18245 18246 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18247 if (!pmb) { 18248 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18249 "2600 failed to allocate mailbox memory\n"); 18250 return 0; 18251 } 18252 mb = &pmb->u.mb; 18253 18254 do { 18255 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 18256 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 18257 18258 if (rc != MBX_SUCCESS) { 18259 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 18260 "2601 failed to read config " 18261 "region 23, rc 0x%x Status 0x%x\n", 18262 rc, mb->mbxStatus); 18263 mb->un.varDmp.word_cnt = 0; 18264 } 18265 /* 18266 * dump mem may return a zero when finished or we got a 18267 * mailbox error, either way we are done. 18268 */ 18269 if (mb->un.varDmp.word_cnt == 0) 18270 break; 18271 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 18272 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 18273 18274 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 18275 rgn23_data + offset, 18276 mb->un.varDmp.word_cnt); 18277 offset += mb->un.varDmp.word_cnt; 18278 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 18279 18280 mempool_free(pmb, phba->mbox_mem_pool); 18281 return offset; 18282 } 18283 18284 /** 18285 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 18286 * @phba: pointer to lpfc hba data structure. 18287 * @rgn23_data: pointer to configure region 23 data. 18288 * 18289 * This function gets SLI4 port configure region 23 data through memory dump 18290 * mailbox command. When it successfully retrieves data, the size of the data 18291 * will be returned, otherwise, 0 will be returned. 18292 **/ 18293 static uint32_t 18294 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18295 { 18296 LPFC_MBOXQ_t *mboxq = NULL; 18297 struct lpfc_dmabuf *mp = NULL; 18298 struct lpfc_mqe *mqe; 18299 uint32_t data_length = 0; 18300 int rc; 18301 18302 if (!rgn23_data) 18303 return 0; 18304 18305 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18306 if (!mboxq) { 18307 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18308 "3105 failed to allocate mailbox memory\n"); 18309 return 0; 18310 } 18311 18312 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 18313 goto out; 18314 mqe = &mboxq->u.mqe; 18315 mp = (struct lpfc_dmabuf *) mboxq->context1; 18316 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 18317 if (rc) 18318 goto out; 18319 data_length = mqe->un.mb_words[5]; 18320 if (data_length == 0) 18321 goto out; 18322 if (data_length > DMP_RGN23_SIZE) { 18323 data_length = 0; 18324 goto out; 18325 } 18326 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 18327 out: 18328 mempool_free(mboxq, phba->mbox_mem_pool); 18329 if (mp) { 18330 lpfc_mbuf_free(phba, mp->virt, mp->phys); 18331 kfree(mp); 18332 } 18333 return data_length; 18334 } 18335 18336 /** 18337 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 18338 * @phba: pointer to lpfc hba data structure. 18339 * 18340 * This function read region 23 and parse TLV for port status to 18341 * decide if the user disaled the port. If the TLV indicates the 18342 * port is disabled, the hba_flag is set accordingly. 
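 * The region 23 data is validated against LPFC_REGION23_SIGNATURE and
 * LPFC_REGION23_VERSION, then scanned for a Linux driver-specific TLV that
 * carries a PORT_STE_TYPE sub-TLV; a zero port-state value there causes
 * LINK_DISABLED to be set in hba_flag.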
18343 **/ 18344 void 18345 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 18346 { 18347 uint8_t *rgn23_data = NULL; 18348 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 18349 uint32_t offset = 0; 18350 18351 /* Get adapter Region 23 data */ 18352 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 18353 if (!rgn23_data) 18354 goto out; 18355 18356 if (phba->sli_rev < LPFC_SLI_REV4) 18357 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 18358 else { 18359 if_type = bf_get(lpfc_sli_intf_if_type, 18360 &phba->sli4_hba.sli_intf); 18361 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 18362 goto out; 18363 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 18364 } 18365 18366 if (!data_size) 18367 goto out; 18368 18369 /* Check the region signature first */ 18370 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 18371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18372 "2619 Config region 23 has bad signature\n"); 18373 goto out; 18374 } 18375 offset += 4; 18376 18377 /* Check the data structure version */ 18378 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 18379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18380 "2620 Config region 23 has bad version\n"); 18381 goto out; 18382 } 18383 offset += 4; 18384 18385 /* Parse TLV entries in the region */ 18386 while (offset < data_size) { 18387 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 18388 break; 18389 /* 18390 * If the TLV is not driver specific TLV or driver id is 18391 * not linux driver id, skip the record. 18392 */ 18393 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 18394 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 18395 (rgn23_data[offset + 3] != 0)) { 18396 offset += rgn23_data[offset + 1] * 4 + 4; 18397 continue; 18398 } 18399 18400 /* Driver found a driver specific TLV in the config region */ 18401 sub_tlv_len = rgn23_data[offset + 1] * 4; 18402 offset += 4; 18403 tlv_offset = 0; 18404 18405 /* 18406 * Search for configured port state sub-TLV. 18407 */ 18408 while ((offset < data_size) && 18409 (tlv_offset < sub_tlv_len)) { 18410 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 18411 offset += 4; 18412 tlv_offset += 4; 18413 break; 18414 } 18415 if (rgn23_data[offset] != PORT_STE_TYPE) { 18416 offset += rgn23_data[offset + 1] * 4 + 4; 18417 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 18418 continue; 18419 } 18420 18421 /* This HBA contains PORT_STE configured */ 18422 if (!rgn23_data[offset + 2]) 18423 phba->hba_flag |= LINK_DISABLED; 18424 18425 goto out; 18426 } 18427 } 18428 18429 out: 18430 kfree(rgn23_data); 18431 return; 18432 } 18433 18434 /** 18435 * lpfc_wr_object - write an object to the firmware 18436 * @phba: HBA structure that indicates port to create a queue on. 18437 * @dmabuf_list: list of dmabufs to write to the port. 18438 * @size: the total byte value of the objects to write to the port. 18439 * @offset: the current offset to be used to start the transfer. 18440 * 18441 * This routine will create a wr_object mailbox command to send to the port. 18442 * the mailbox command will be constructed using the dma buffers described in 18443 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 18444 * BDEs that the imbedded mailbox can support. The @offset variable will be 18445 * used to indicate the starting offset of the transfer and will also return 18446 * the offset after the write object mailbox has completed. @size is used to 18447 * determine the end of the object and whether the eof bit should be set. 
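 * Each BDE describes at most SLI4_PAGE_SIZE bytes and no more than
 * LPFC_MBX_WR_CONFIG_MAX_BDE BDEs are placed in a single mailbox; the eof
 * bit is set on the request that writes the final bytes of @size.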
18448 * 18449 * Return 0 is successful and offset will contain the the new offset to use 18450 * for the next write. 18451 * Return negative value for error cases. 18452 **/ 18453 int 18454 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 18455 uint32_t size, uint32_t *offset) 18456 { 18457 struct lpfc_mbx_wr_object *wr_object; 18458 LPFC_MBOXQ_t *mbox; 18459 int rc = 0, i = 0; 18460 uint32_t shdr_status, shdr_add_status; 18461 uint32_t mbox_tmo; 18462 union lpfc_sli4_cfg_shdr *shdr; 18463 struct lpfc_dmabuf *dmabuf; 18464 uint32_t written = 0; 18465 18466 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18467 if (!mbox) 18468 return -ENOMEM; 18469 18470 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 18471 LPFC_MBOX_OPCODE_WRITE_OBJECT, 18472 sizeof(struct lpfc_mbx_wr_object) - 18473 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 18474 18475 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 18476 wr_object->u.request.write_offset = *offset; 18477 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 18478 wr_object->u.request.object_name[0] = 18479 cpu_to_le32(wr_object->u.request.object_name[0]); 18480 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 18481 list_for_each_entry(dmabuf, dmabuf_list, list) { 18482 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 18483 break; 18484 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 18485 wr_object->u.request.bde[i].addrHigh = 18486 putPaddrHigh(dmabuf->phys); 18487 if (written + SLI4_PAGE_SIZE >= size) { 18488 wr_object->u.request.bde[i].tus.f.bdeSize = 18489 (size - written); 18490 written += (size - written); 18491 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 18492 } else { 18493 wr_object->u.request.bde[i].tus.f.bdeSize = 18494 SLI4_PAGE_SIZE; 18495 written += SLI4_PAGE_SIZE; 18496 } 18497 i++; 18498 } 18499 wr_object->u.request.bde_count = i; 18500 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 18501 if (!phba->sli4_hba.intr_enable) 18502 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 18503 else { 18504 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 18505 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 18506 } 18507 /* The IOCTL status is embedded in the mailbox subheader. */ 18508 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr; 18509 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18510 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18511 if (rc != MBX_TIMEOUT) 18512 mempool_free(mbox, phba->mbox_mem_pool); 18513 if (shdr_status || shdr_add_status || rc) { 18514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18515 "3025 Write Object mailbox failed with " 18516 "status x%x add_status x%x, mbx status x%x\n", 18517 shdr_status, shdr_add_status, rc); 18518 rc = -ENXIO; 18519 } else 18520 *offset += wr_object->u.response.actual_write_length; 18521 return rc; 18522 } 18523 18524 /** 18525 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 18526 * @vport: pointer to vport data structure. 18527 * 18528 * This function iterate through the mailboxq and clean up all REG_LOGIN 18529 * and REG_VPI mailbox commands associated with the vport. This function 18530 * is called when driver want to restart discovery of the vport due to 18531 * a Clear Virtual Link event. 
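 * Three sets of commands are cleaned up: those still queued on sli.mboxq,
 * the currently active mailbox, and completions already on mboxq_cmpl that
 * have not yet been processed. REG_LOGIN64 commands are flagged
 * LPFC_MBX_IMED_UNREG so the RPI is unregistered when the mailbox finally
 * completes.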
18532  **/
18533 void
18534 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18535 {
18536 	struct lpfc_hba *phba = vport->phba;
18537 	LPFC_MBOXQ_t *mb, *nextmb;
18538 	struct lpfc_dmabuf *mp;
18539 	struct lpfc_nodelist *ndlp;
18540 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
18541 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
18542 	LIST_HEAD(mbox_cmd_list);
18543 	uint8_t restart_loop;
18544 
18545 	/* Clean up internally queued mailbox commands with the vport */
18546 	spin_lock_irq(&phba->hbalock);
18547 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18548 		if (mb->vport != vport)
18549 			continue;
18550 
18551 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18552 			(mb->u.mb.mbxCommand != MBX_REG_VPI))
18553 			continue;
18554 
18555 		list_del(&mb->list);
18556 		list_add_tail(&mb->list, &mbox_cmd_list);
18557 	}
18558 	/* Clean up active mailbox command with the vport */
18559 	mb = phba->sli.mbox_active;
18560 	if (mb && (mb->vport == vport)) {
18561 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
18562 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
18563 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18564 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18565 			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
18566 			/* Put reference count for delayed processing */
18567 			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
18568 			/* Unregister the RPI when mailbox complete */
18569 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18570 		}
18571 	}
18572 	/* Cleanup any mailbox completions which are not yet processed */
18573 	do {
18574 		restart_loop = 0;
18575 		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
18576 			/*
18577 			 * If this mailbox is already processed or it is
18578 			 * for another vport, ignore it.
18579 			 */
18580 			if ((mb->vport != vport) ||
18581 				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
18582 				continue;
18583 
18584 			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18585 				(mb->u.mb.mbxCommand != MBX_REG_VPI))
18586 				continue;
18587 
18588 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18589 			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18590 				ndlp = (struct lpfc_nodelist *)mb->context2;
18591 				/* Unregister the RPI when mailbox complete */
18592 				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18593 				restart_loop = 1;
18594 				spin_unlock_irq(&phba->hbalock);
18595 				spin_lock(shost->host_lock);
18596 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18597 				spin_unlock(shost->host_lock);
18598 				spin_lock_irq(&phba->hbalock);
18599 				break;
18600 			}
18601 		}
18602 	} while (restart_loop);
18603 
18604 	spin_unlock_irq(&phba->hbalock);
18605 
18606 	/* Release the cleaned-up mailbox commands */
18607 	while (!list_empty(&mbox_cmd_list)) {
18608 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
18609 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18610 			mp = (struct lpfc_dmabuf *) (mb->context1);
18611 			if (mp) {
18612 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
18613 				kfree(mp);
18614 			}
18615 			ndlp = (struct lpfc_nodelist *) mb->context2;
18616 			mb->context2 = NULL;
18617 			if (ndlp) {
18618 				spin_lock(shost->host_lock);
18619 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18620 				spin_unlock(shost->host_lock);
18621 				lpfc_nlp_put(ndlp);
18622 			}
18623 		}
18624 		mempool_free(mb, phba->mbox_mem_pool);
18625 	}
18626 
18627 	/* Release the ndlp with the cleaned-up active mailbox command */
18628 	if (act_mbx_ndlp) {
18629 		spin_lock(shost->host_lock);
18630 		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18631 		spin_unlock(shost->host_lock);
18632 		lpfc_nlp_put(act_mbx_ndlp);
18633 	}
18634 }
18635 
18636 /**
18637  * lpfc_drain_txq - Drain the txq
18638  * @phba: Pointer to HBA context object.
18639  *
18640  * This function attempts to submit IOCBs on the txq
18641  * to the adapter. For SLI4 adapters, the txq contains
18642  * ELS IOCBs that have been deferred because there are no available SGLs.
18643  * This congestion can occur with large vport counts during node discovery.
18644  * Returns the number of IOCBs left pending on the txq.
18645  **/
18646 
18647 uint32_t
18648 lpfc_drain_txq(struct lpfc_hba *phba)
18649 {
18650 	LIST_HEAD(completions);
18651 	struct lpfc_sli_ring *pring;
18652 	struct lpfc_iocbq *piocbq = NULL;
18653 	unsigned long iflags = 0;
18654 	char *fail_msg = NULL;
18655 	struct lpfc_sglq *sglq;
18656 	union lpfc_wqe128 wqe128;
18657 	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
18658 	uint32_t txq_cnt = 0;
18659 
18660 	pring = lpfc_phba_elsring(phba);
18661 
18662 	spin_lock_irqsave(&pring->ring_lock, iflags);
18663 	list_for_each_entry(piocbq, &pring->txq, list) {
18664 		txq_cnt++;
18665 	}
18666 
18667 	if (txq_cnt > pring->txq_max)
18668 		pring->txq_max = txq_cnt;
18669 
18670 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
18671 
18672 	while (!list_empty(&pring->txq)) {
18673 		spin_lock_irqsave(&pring->ring_lock, iflags);
18674 
18675 		piocbq = lpfc_sli_ringtx_get(phba, pring);
18676 		if (!piocbq) {
18677 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18678 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18679 				"2823 txq empty and txq_cnt is %d\n",
18680 				txq_cnt);
18681 			break;
18682 		}
18683 		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
18684 		if (!sglq) {
18685 			__lpfc_sli_ringtx_put(phba, pring, piocbq);
18686 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18687 			break;
18688 		}
18689 		txq_cnt--;
18690 
18691 		/* The xri and iocb resources are secured,
18692 		 * attempt to issue the request.
18693 		 */
18694 		piocbq->sli4_lxritag = sglq->sli4_lxritag;
18695 		piocbq->sli4_xritag = sglq->sli4_xritag;
18696 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
18697 			fail_msg = "to convert bpl to sgl";
18698 		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
18699 			fail_msg = "to convert iocb to wqe";
18700 		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
18701 			fail_msg = " - Wq is full";
18702 		else
18703 			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
18704 
18705 		if (fail_msg) {
18706 			/* Failed means we can't issue and need to cancel */
18707 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18708 					"2822 IOCB failed %s iotag 0x%x "
18709 					"xri 0x%x\n",
18710 					fail_msg,
18711 					piocbq->iotag, piocbq->sli4_xritag);
18712 			list_add_tail(&piocbq->list, &completions);
18713 		}
18714 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
18715 	}
18716 
18717 	/* Cancel all the IOCBs that cannot be issued */
18718 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
18719 			      IOERR_SLI_ABORTED);
18720 
18721 	return txq_cnt;
18722 }
18723 
18724 /**
18725  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
18726  * @phba: Pointer to HBA context object.
18727  * @pwqeq: Pointer to command WQE.
18728  * @sglq: Pointer to the scatter gather queue object.
18729  *
18730  * This routine converts the bpl or bde that is in the WQE
18731  * to a sgl list for the sli4 hardware. The physical address
18732  * of the bpl/bde is converted back to a virtual address.
18733  * If the WQE contains a BPL then the list of BDEs is
18734  * converted to sli4_sges. If the WQE contains a single
18735  * BDE then it is converted to a single sli4_sge.
18736  * The WQE is still in cpu endianness so the contents of
18737  * the bpl can be used without byte swapping.
18738  *
18739  * Returns valid XRI = Success, NO_XRI = Failure.
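 *
 * Illustrative example (a sketch of the offset handling below): for a
 * CMD_GEN_REQUEST64_WQE whose BPL carries two outbound BDEs of 1024 bytes
 * each followed by one inbound (BUFF_TYPE_BDE_64I) BDE, the request SGEs
 * receive offsets 0 and 1024, and the accumulated offset is reset to 0 for
 * the first reply SGE.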
18740 */ 18741 static uint16_t 18742 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 18743 struct lpfc_sglq *sglq) 18744 { 18745 uint16_t xritag = NO_XRI; 18746 struct ulp_bde64 *bpl = NULL; 18747 struct ulp_bde64 bde; 18748 struct sli4_sge *sgl = NULL; 18749 struct lpfc_dmabuf *dmabuf; 18750 union lpfc_wqe *wqe; 18751 int numBdes = 0; 18752 int i = 0; 18753 uint32_t offset = 0; /* accumulated offset in the sg request list */ 18754 int inbound = 0; /* number of sg reply entries inbound from firmware */ 18755 uint32_t cmd; 18756 18757 if (!pwqeq || !sglq) 18758 return xritag; 18759 18760 sgl = (struct sli4_sge *)sglq->sgl; 18761 wqe = &pwqeq->wqe; 18762 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 18763 18764 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 18765 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 18766 return sglq->sli4_xritag; 18767 numBdes = pwqeq->rsvd2; 18768 if (numBdes) { 18769 /* The addrHigh and addrLow fields within the WQE 18770 * have not been byteswapped yet so there is no 18771 * need to swap them back. 18772 */ 18773 if (pwqeq->context3) 18774 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 18775 else 18776 return xritag; 18777 18778 bpl = (struct ulp_bde64 *)dmabuf->virt; 18779 if (!bpl) 18780 return xritag; 18781 18782 for (i = 0; i < numBdes; i++) { 18783 /* Should already be byte swapped. */ 18784 sgl->addr_hi = bpl->addrHigh; 18785 sgl->addr_lo = bpl->addrLow; 18786 18787 sgl->word2 = le32_to_cpu(sgl->word2); 18788 if ((i+1) == numBdes) 18789 bf_set(lpfc_sli4_sge_last, sgl, 1); 18790 else 18791 bf_set(lpfc_sli4_sge_last, sgl, 0); 18792 /* swap the size field back to the cpu so we 18793 * can assign it to the sgl. 18794 */ 18795 bde.tus.w = le32_to_cpu(bpl->tus.w); 18796 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 18797 /* The offsets in the sgl need to be accumulated 18798 * separately for the request and reply lists. 18799 * The request is always first, the reply follows. 18800 */ 18801 switch (cmd) { 18802 case CMD_GEN_REQUEST64_WQE: 18803 /* add up the reply sg entries */ 18804 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 18805 inbound++; 18806 /* first inbound? reset the offset */ 18807 if (inbound == 1) 18808 offset = 0; 18809 bf_set(lpfc_sli4_sge_offset, sgl, offset); 18810 bf_set(lpfc_sli4_sge_type, sgl, 18811 LPFC_SGE_TYPE_DATA); 18812 offset += bde.tus.f.bdeSize; 18813 break; 18814 case CMD_FCP_TRSP64_WQE: 18815 bf_set(lpfc_sli4_sge_offset, sgl, 0); 18816 bf_set(lpfc_sli4_sge_type, sgl, 18817 LPFC_SGE_TYPE_DATA); 18818 break; 18819 case CMD_FCP_TSEND64_WQE: 18820 case CMD_FCP_TRECEIVE64_WQE: 18821 bf_set(lpfc_sli4_sge_type, sgl, 18822 bpl->tus.f.bdeFlags); 18823 if (i < 3) 18824 offset = 0; 18825 else 18826 offset += bde.tus.f.bdeSize; 18827 bf_set(lpfc_sli4_sge_offset, sgl, offset); 18828 break; 18829 } 18830 sgl->word2 = cpu_to_le32(sgl->word2); 18831 bpl++; 18832 sgl++; 18833 } 18834 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 18835 /* The addrHigh and addrLow fields of the BDE have not 18836 * been byteswapped yet so they need to be swapped 18837 * before putting them in the sgl. 
18838 */ 18839 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 18840 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 18841 sgl->word2 = le32_to_cpu(sgl->word2); 18842 bf_set(lpfc_sli4_sge_last, sgl, 1); 18843 sgl->word2 = cpu_to_le32(sgl->word2); 18844 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 18845 } 18846 return sglq->sli4_xritag; 18847 } 18848 18849 /** 18850 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 18851 * @phba: Pointer to HBA context object. 18852 * @ring_number: Base sli ring number 18853 * @pwqe: Pointer to command WQE. 18854 **/ 18855 int 18856 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, 18857 struct lpfc_iocbq *pwqe) 18858 { 18859 union lpfc_wqe *wqe = &pwqe->wqe; 18860 struct lpfc_nvmet_rcv_ctx *ctxp; 18861 struct lpfc_queue *wq; 18862 struct lpfc_sglq *sglq; 18863 struct lpfc_sli_ring *pring; 18864 unsigned long iflags; 18865 18866 /* NVME_LS and NVME_LS ABTS requests. */ 18867 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 18868 pring = phba->sli4_hba.nvmels_wq->pring; 18869 spin_lock_irqsave(&pring->ring_lock, iflags); 18870 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 18871 if (!sglq) { 18872 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18873 return WQE_BUSY; 18874 } 18875 pwqe->sli4_lxritag = sglq->sli4_lxritag; 18876 pwqe->sli4_xritag = sglq->sli4_xritag; 18877 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 18878 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18879 return WQE_ERROR; 18880 } 18881 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 18882 pwqe->sli4_xritag); 18883 if (lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe)) { 18884 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18885 return WQE_ERROR; 18886 } 18887 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 18888 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18889 return 0; 18890 } 18891 18892 /* NVME_FCREQ and NVME_ABTS requests */ 18893 if (pwqe->iocb_flag & LPFC_IO_NVME) { 18894 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 18895 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; 18896 18897 spin_lock_irqsave(&pring->ring_lock, iflags); 18898 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; 18899 bf_set(wqe_cqid, &wqe->generic.wqe_com, 18900 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); 18901 if (lpfc_sli4_wq_put(wq, wqe)) { 18902 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18903 return WQE_ERROR; 18904 } 18905 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 18906 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18907 return 0; 18908 } 18909 18910 /* NVMET requests */ 18911 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 18912 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 18913 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; 18914 18915 spin_lock_irqsave(&pring->ring_lock, iflags); 18916 ctxp = pwqe->context2; 18917 sglq = ctxp->ctxbuf->sglq; 18918 if (pwqe->sli4_xritag == NO_XRI) { 18919 pwqe->sli4_lxritag = sglq->sli4_lxritag; 18920 pwqe->sli4_xritag = sglq->sli4_xritag; 18921 } 18922 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 18923 pwqe->sli4_xritag); 18924 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; 18925 bf_set(wqe_cqid, &wqe->generic.wqe_com, 18926 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); 18927 if (lpfc_sli4_wq_put(wq, wqe)) { 18928 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18929 return WQE_ERROR; 18930 } 18931 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 18932 spin_unlock_irqrestore(&pring->ring_lock, iflags); 18933 return 0; 18934 } 18935 return WQE_ERROR; 18936 } 18937
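
/*
 * Illustrative usage of lpfc_sli4_issue_wqe() (a sketch only, not taken
 * verbatim from the driver): an NVME LS submission path would mark the
 * WQE-based iocbq and hand it off, treating WQE_BUSY/WQE_ERROR as a
 * failed submission that the caller must unwind:
 *
 *	pwqe->iocb_flag |= LPFC_IO_NVME_LS;
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, pwqe);
 *	if (rc)
 *		return -EIO;
 *
 * lpfc_sli4_issue_wqe() returns 0 on success, WQE_BUSY when no XRI/SGL is
 * available for an NVME_LS request, and WQE_ERROR otherwise.
 */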