1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/interrupt.h> 27 #include <linux/delay.h> 28 #include <linux/slab.h> 29 #include <linux/lockdep.h> 30 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_cmnd.h> 33 #include <scsi/scsi_device.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_transport_fc.h> 36 #include <scsi/fc/fc_fs.h> 37 #include <linux/aer.h> 38 #ifdef CONFIG_X86 39 #include <asm/set_memory.h> 40 #endif 41 42 #include <linux/nvme-fc-driver.h> 43 44 #include "lpfc_hw4.h" 45 #include "lpfc_hw.h" 46 #include "lpfc_sli.h" 47 #include "lpfc_sli4.h" 48 #include "lpfc_nl.h" 49 #include "lpfc_disc.h" 50 #include "lpfc.h" 51 #include "lpfc_scsi.h" 52 #include "lpfc_nvme.h" 53 #include "lpfc_nvmet.h" 54 #include "lpfc_crtn.h" 55 #include "lpfc_logmsg.h" 56 #include "lpfc_compat.h" 57 #include "lpfc_debugfs.h" 58 #include "lpfc_vport.h" 59 #include "lpfc_version.h" 60 61 /* There are only four IOCB completion types. */ 62 typedef enum _lpfc_iocb_type { 63 LPFC_UNKNOWN_IOCB, 64 LPFC_UNSOL_IOCB, 65 LPFC_SOL_IOCB, 66 LPFC_ABORT_IOCB 67 } lpfc_iocb_type; 68 69 70 /* Provide function prototypes local to this module. 
 */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
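 * For example, with an entry_count of 256, a host_index of 255 and an
 * hba_index of 0, the next slot is (255 + 1) % 256 == 0, which equals
 * hba_index, so the WQ is treated as full and -EBUSY is returned.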
136 **/ 137 static int 138 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) 139 { 140 union lpfc_wqe *temp_wqe; 141 struct lpfc_register doorbell; 142 uint32_t host_index; 143 uint32_t idx; 144 uint32_t i = 0; 145 uint8_t *tmp; 146 u32 if_type; 147 148 /* sanity check on queue memory */ 149 if (unlikely(!q)) 150 return -ENOMEM; 151 temp_wqe = lpfc_sli4_qe(q, q->host_index); 152 153 /* If the host has not yet processed the next entry then we are done */ 154 idx = ((q->host_index + 1) % q->entry_count); 155 if (idx == q->hba_index) { 156 q->WQ_overflow++; 157 return -EBUSY; 158 } 159 q->WQ_posted++; 160 /* set consumption flag every once in a while */ 161 if (!((q->host_index + 1) % q->notify_interval)) 162 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 163 else 164 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); 165 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 166 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 167 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 168 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 169 /* write to DPP aperture taking advatage of Combined Writes */ 170 tmp = (uint8_t *)temp_wqe; 171 #ifdef __raw_writeq 172 for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) 173 __raw_writeq(*((uint64_t *)(tmp + i)), 174 q->dpp_regaddr + i); 175 #else 176 for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) 177 __raw_writel(*((uint32_t *)(tmp + i)), 178 q->dpp_regaddr + i); 179 #endif 180 } 181 /* ensure WQE bcopy and DPP flushed before doorbell write */ 182 wmb(); 183 184 /* Update the host index before invoking device */ 185 host_index = q->host_index; 186 187 q->host_index = idx; 188 189 /* Ring Doorbell */ 190 doorbell.word0 = 0; 191 if (q->db_format == LPFC_DB_LIST_FORMAT) { 192 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 193 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); 194 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); 195 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, 196 q->dpp_id); 197 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, 198 q->queue_id); 199 } else { 200 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 201 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 202 203 /* Leave bits <23:16> clear for if_type 6 dpp */ 204 if_type = bf_get(lpfc_sli_intf_if_type, 205 &q->phba->sli4_hba.sli_intf); 206 if (if_type != LPFC_SLI_INTF_IF_TYPE_6) 207 bf_set(lpfc_wq_db_list_fm_index, &doorbell, 208 host_index); 209 } 210 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 211 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 212 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 213 } else { 214 return -EINVAL; 215 } 216 writel(doorbell.word0, q->db_regaddr); 217 218 return 0; 219 } 220 221 /** 222 * lpfc_sli4_wq_release - Updates internal hba index for WQ 223 * @q: The Work Queue to operate on. 224 * @index: The index to advance the hba index to. 225 * 226 * This routine will update the HBA index of a queue to reflect consumption of 227 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 228 * an entry the host calls this function to update the queue's internal 229 * pointers. This routine returns the number of entries that were consumed by 230 * the HBA. 
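 * For example, advancing hba_index from 10 to an @index of 13 walks three
 * entries and returns 3; if @index already equals the current hba_index,
 * 0 is returned.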
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the MQ Doorbell to signal the
 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
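 * An EQE is only reported as valid when its valid bit matches q->qe_valid;
 * the entry itself is not consumed here, that is done later by
 * __lpfc_sli4_consume_eqe() once the EQE has been handled.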
322 **/ 323 static struct lpfc_eqe * 324 lpfc_sli4_eq_get(struct lpfc_queue *q) 325 { 326 struct lpfc_eqe *eqe; 327 328 /* sanity check on queue memory */ 329 if (unlikely(!q)) 330 return NULL; 331 eqe = lpfc_sli4_qe(q, q->host_index); 332 333 /* If the next EQE is not valid then we are done */ 334 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) 335 return NULL; 336 337 /* 338 * insert barrier for instruction interlock : data from the hardware 339 * must have the valid bit checked before it can be copied and acted 340 * upon. Speculative instructions were allowing a bcopy at the start 341 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately 342 * after our return, to copy data before the valid bit check above 343 * was done. As such, some of the copied data was stale. The barrier 344 * ensures the check is before any data is copied. 345 */ 346 mb(); 347 return eqe; 348 } 349 350 /** 351 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ 352 * @q: The Event Queue to disable interrupts 353 * 354 **/ 355 void 356 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) 357 { 358 struct lpfc_register doorbell; 359 360 doorbell.word0 = 0; 361 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 362 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 363 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 364 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 365 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 366 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 367 } 368 369 /** 370 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ 371 * @q: The Event Queue to disable interrupts 372 * 373 **/ 374 void 375 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) 376 { 377 struct lpfc_register doorbell; 378 379 doorbell.word0 = 0; 380 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 381 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 382 } 383 384 /** 385 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state 386 * @phba: adapter with EQ 387 * @q: The Event Queue that the host has completed processing for. 388 * @count: Number of elements that have been consumed 389 * @arm: Indicates whether the host wants to arms this CQ. 390 * 391 * This routine will notify the HBA, by ringing the doorbell, that count 392 * number of EQEs have been processed. The @arm parameter indicates whether 393 * the queue should be rearmed when ringing the doorbell. 
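 * Calling this routine with a @count of zero and @arm false is a no-op; the
 * doorbell is only written when there are EQEs to release or the EQ is being
 * rearmed.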
394 **/ 395 void 396 lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 397 uint32_t count, bool arm) 398 { 399 struct lpfc_register doorbell; 400 401 /* sanity check on queue memory */ 402 if (unlikely(!q || (count == 0 && !arm))) 403 return; 404 405 /* ring doorbell for number popped */ 406 doorbell.word0 = 0; 407 if (arm) { 408 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 409 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 410 } 411 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); 412 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 413 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 414 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 415 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 416 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 417 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 418 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 419 readl(q->phba->sli4_hba.EQDBregaddr); 420 } 421 422 /** 423 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state 424 * @phba: adapter with EQ 425 * @q: The Event Queue that the host has completed processing for. 426 * @count: Number of elements that have been consumed 427 * @arm: Indicates whether the host wants to arms this CQ. 428 * 429 * This routine will notify the HBA, by ringing the doorbell, that count 430 * number of EQEs have been processed. The @arm parameter indicates whether 431 * the queue should be rearmed when ringing the doorbell. 432 **/ 433 void 434 lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 435 uint32_t count, bool arm) 436 { 437 struct lpfc_register doorbell; 438 439 /* sanity check on queue memory */ 440 if (unlikely(!q || (count == 0 && !arm))) 441 return; 442 443 /* ring doorbell for number popped */ 444 doorbell.word0 = 0; 445 if (arm) 446 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); 447 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); 448 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 449 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 450 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 451 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 452 readl(q->phba->sli4_hba.EQDBregaddr); 453 } 454 455 static void 456 __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 457 struct lpfc_eqe *eqe) 458 { 459 if (!phba->sli4_hba.pc_sli4_params.eqav) 460 bf_set_le32(lpfc_eqe_valid, eqe, 0); 461 462 eq->host_index = ((eq->host_index + 1) % eq->entry_count); 463 464 /* if the index wrapped around, toggle the valid bit */ 465 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) 466 eq->qe_valid = (eq->qe_valid) ? 
0 : 1; 467 } 468 469 static void 470 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 471 { 472 struct lpfc_eqe *eqe; 473 uint32_t count = 0; 474 475 /* walk all the EQ entries and drop on the floor */ 476 eqe = lpfc_sli4_eq_get(eq); 477 while (eqe) { 478 __lpfc_sli4_consume_eqe(phba, eq, eqe); 479 count++; 480 eqe = lpfc_sli4_eq_get(eq); 481 } 482 483 /* Clear and re-arm the EQ */ 484 phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM); 485 } 486 487 static int 488 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) 489 { 490 struct lpfc_eqe *eqe; 491 int count = 0, consumed = 0; 492 493 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) 494 goto rearm_and_exit; 495 496 eqe = lpfc_sli4_eq_get(eq); 497 while (eqe) { 498 lpfc_sli4_hba_handle_eqe(phba, eq, eqe); 499 __lpfc_sli4_consume_eqe(phba, eq, eqe); 500 501 consumed++; 502 if (!(++count % eq->max_proc_limit)) 503 break; 504 505 if (!(count % eq->notify_interval)) { 506 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, 507 LPFC_QUEUE_NOARM); 508 consumed = 0; 509 } 510 511 eqe = lpfc_sli4_eq_get(eq); 512 } 513 eq->EQ_processed += count; 514 515 /* Track the max number of EQEs processed in 1 intr */ 516 if (count > eq->EQ_max_eqe) 517 eq->EQ_max_eqe = count; 518 519 eq->queue_claimed = 0; 520 521 rearm_and_exit: 522 /* Always clear and re-arm the EQ */ 523 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM); 524 525 return count; 526 } 527 528 /** 529 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 530 * @q: The Completion Queue to get the first valid CQE from 531 * 532 * This routine will get the first valid Completion Queue Entry from @q, update 533 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 534 * the Queue (no more work to do), or the Queue is full of CQEs that have been 535 * processed, but not popped back to the HBA then this routine will return NULL. 536 **/ 537 static struct lpfc_cqe * 538 lpfc_sli4_cq_get(struct lpfc_queue *q) 539 { 540 struct lpfc_cqe *cqe; 541 542 /* sanity check on queue memory */ 543 if (unlikely(!q)) 544 return NULL; 545 cqe = lpfc_sli4_qe(q, q->host_index); 546 547 /* If the next CQE is not valid then we are done */ 548 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) 549 return NULL; 550 551 /* 552 * insert barrier for instruction interlock : data from the hardware 553 * must have the valid bit checked before it can be copied and acted 554 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative 555 * instructions allowing action on content before valid bit checked, 556 * add barrier here as well. May not be needed as "content" is a 557 * single 32-bit entity here (vs multi word structure for cq's). 558 */ 559 mb(); 560 return cqe; 561 } 562 563 static void 564 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 565 struct lpfc_cqe *cqe) 566 { 567 if (!phba->sli4_hba.pc_sli4_params.cqav) 568 bf_set_le32(lpfc_cqe_valid, cqe, 0); 569 570 cq->host_index = ((cq->host_index + 1) % cq->entry_count); 571 572 /* if the index wrapped around, toggle the valid bit */ 573 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) 574 cq->qe_valid = (cq->qe_valid) ? 0 : 1; 575 } 576 577 /** 578 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. 579 * @phba: the adapter with the CQ 580 * @q: The Completion Queue that the host has completed processing for. 
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Header queue.
 * @drqe: The Data Receive Queue Entry to put on the Data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
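 * The header and data queues are advanced in lock step; if their put indexes
 * ever differ the routine fails with -EINVAL. The doorbell is only rung once
 * every @hq->notify_interval postings.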
651 **/ 652 int 653 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 654 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 655 { 656 struct lpfc_rqe *temp_hrqe; 657 struct lpfc_rqe *temp_drqe; 658 struct lpfc_register doorbell; 659 int hq_put_index; 660 int dq_put_index; 661 662 /* sanity check on queue memory */ 663 if (unlikely(!hq) || unlikely(!dq)) 664 return -ENOMEM; 665 hq_put_index = hq->host_index; 666 dq_put_index = dq->host_index; 667 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index); 668 temp_drqe = lpfc_sli4_qe(dq, dq_put_index); 669 670 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 671 return -EINVAL; 672 if (hq_put_index != dq_put_index) 673 return -EINVAL; 674 /* If the host has not yet processed the next entry then we are done */ 675 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) 676 return -EBUSY; 677 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 678 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 679 680 /* Update the host index to point to the next slot */ 681 hq->host_index = ((hq_put_index + 1) % hq->entry_count); 682 dq->host_index = ((dq_put_index + 1) % dq->entry_count); 683 hq->RQ_buf_posted++; 684 685 /* Ring The Header Receive Queue Doorbell */ 686 if (!(hq->host_index % hq->notify_interval)) { 687 doorbell.word0 = 0; 688 if (hq->db_format == LPFC_DB_RING_FORMAT) { 689 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 690 hq->notify_interval); 691 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 692 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 693 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 694 hq->notify_interval); 695 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 696 hq->host_index); 697 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 698 } else { 699 return -EINVAL; 700 } 701 writel(doorbell.word0, hq->db_regaddr); 702 } 703 return hq_put_index; 704 } 705 706 /** 707 * lpfc_sli4_rq_release - Updates internal hba index for RQ 708 * @q: The Header Receive Queue to operate on. 709 * 710 * This routine will update the HBA index of a queue to reflect consumption of 711 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 712 * consumed an entry the host calls this function to update the queue's 713 * internal pointers. This routine returns the number of entries that were 714 * consumed by the HBA. 715 **/ 716 static uint32_t 717 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 718 { 719 /* sanity check on queue memory */ 720 if (unlikely(!hq) || unlikely(!dq)) 721 return 0; 722 723 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 724 return 0; 725 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 726 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 727 return 1; 728 } 729 730 /** 731 * lpfc_cmd_iocb - Get next command iocb entry in the ring 732 * @phba: Pointer to HBA context object. 733 * @pring: Pointer to driver SLI ring object. 734 * 735 * This function returns pointer to next command iocb entry 736 * in the command ring. The caller must hold hbalock to prevent 737 * other threads consume the next command iocb. 738 * SLI-2/SLI-3 provide different sized iocbs. 739 **/ 740 static inline IOCB_t * 741 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 742 { 743 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 744 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 745 } 746 747 /** 748 * lpfc_resp_iocb - Get next response iocb entry in the ring 749 * @phba: Pointer to HBA context object. 
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
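	 * In either case the rrq itself is always returned to phba->rrq_pool
	 * before this routine exits.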
852 */ 853 if ((!ndlp) && rrq->ndlp) 854 ndlp = rrq->ndlp; 855 856 if (!ndlp) 857 goto out; 858 859 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 860 rrq->send_rrq = 0; 861 rrq->xritag = 0; 862 rrq->rrq_stop_time = 0; 863 } 864 out: 865 mempool_free(rrq, phba->rrq_pool); 866 } 867 868 /** 869 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 870 * @phba: Pointer to HBA context object. 871 * 872 * This function is called with hbalock held. This function 873 * Checks if stop_time (ratov from setting rrq active) has 874 * been reached, if it has and the send_rrq flag is set then 875 * it will call lpfc_send_rrq. If the send_rrq flag is not set 876 * then it will just call the routine to clear the rrq and 877 * free the rrq resource. 878 * The timer is set to the next rrq that is going to expire before 879 * leaving the routine. 880 * 881 **/ 882 void 883 lpfc_handle_rrq_active(struct lpfc_hba *phba) 884 { 885 struct lpfc_node_rrq *rrq; 886 struct lpfc_node_rrq *nextrrq; 887 unsigned long next_time; 888 unsigned long iflags; 889 LIST_HEAD(send_rrq); 890 891 spin_lock_irqsave(&phba->hbalock, iflags); 892 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 893 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 894 list_for_each_entry_safe(rrq, nextrrq, 895 &phba->active_rrq_list, list) { 896 if (time_after(jiffies, rrq->rrq_stop_time)) 897 list_move(&rrq->list, &send_rrq); 898 else if (time_before(rrq->rrq_stop_time, next_time)) 899 next_time = rrq->rrq_stop_time; 900 } 901 spin_unlock_irqrestore(&phba->hbalock, iflags); 902 if ((!list_empty(&phba->active_rrq_list)) && 903 (!(phba->pport->load_flag & FC_UNLOADING))) 904 mod_timer(&phba->rrq_tmr, next_time); 905 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 906 list_del(&rrq->list); 907 if (!rrq->send_rrq) { 908 /* this call will free the rrq */ 909 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 910 } else if (lpfc_send_rrq(phba, rrq)) { 911 /* if we send the rrq then the completion handler 912 * will clear the bit in the xribitmap. 913 */ 914 lpfc_clr_rrq_active(phba, rrq->xritag, 915 rrq); 916 } 917 } 918 } 919 920 /** 921 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 922 * @vport: Pointer to vport context object. 923 * @xri: The xri used in the exchange. 924 * @did: The targets DID for this exchange. 925 * 926 * returns NULL = rrq not found in the phba->active_rrq_list. 927 * rrq = rrq for this xri and target. 928 **/ 929 struct lpfc_node_rrq * 930 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 931 { 932 struct lpfc_hba *phba = vport->phba; 933 struct lpfc_node_rrq *rrq; 934 struct lpfc_node_rrq *nextrrq; 935 unsigned long iflags; 936 937 if (phba->sli_rev != LPFC_SLI_REV4) 938 return NULL; 939 spin_lock_irqsave(&phba->hbalock, iflags); 940 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 941 if (rrq->vport == vport && rrq->xritag == xri && 942 rrq->nlp_DID == did){ 943 list_del(&rrq->list); 944 spin_unlock_irqrestore(&phba->hbalock, iflags); 945 return rrq; 946 } 947 } 948 spin_unlock_irqrestore(&phba->hbalock, iflags); 949 return NULL; 950 } 951 952 /** 953 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 954 * @vport: Pointer to vport context object. 955 * @ndlp: Pointer to the lpfc_node_list structure. 956 * If ndlp is NULL Remove all active RRQs for this vport from the 957 * phba->active_rrq_list and clear the rrq. 958 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
959 **/ 960 void 961 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 962 963 { 964 struct lpfc_hba *phba = vport->phba; 965 struct lpfc_node_rrq *rrq; 966 struct lpfc_node_rrq *nextrrq; 967 unsigned long iflags; 968 LIST_HEAD(rrq_list); 969 970 if (phba->sli_rev != LPFC_SLI_REV4) 971 return; 972 if (!ndlp) { 973 lpfc_sli4_vport_delete_els_xri_aborted(vport); 974 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 975 } 976 spin_lock_irqsave(&phba->hbalock, iflags); 977 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 978 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 979 list_move(&rrq->list, &rrq_list); 980 spin_unlock_irqrestore(&phba->hbalock, iflags); 981 982 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 983 list_del(&rrq->list); 984 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 985 } 986 } 987 988 /** 989 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 990 * @phba: Pointer to HBA context object. 991 * @ndlp: Targets nodelist pointer for this exchange. 992 * @xritag the xri in the bitmap to test. 993 * 994 * This function returns: 995 * 0 = rrq not active for this xri 996 * 1 = rrq is valid for this xri. 997 **/ 998 int 999 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1000 uint16_t xritag) 1001 { 1002 if (!ndlp) 1003 return 0; 1004 if (!ndlp->active_rrqs_xri_bitmap) 1005 return 0; 1006 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 1007 return 1; 1008 else 1009 return 0; 1010 } 1011 1012 /** 1013 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 1014 * @phba: Pointer to HBA context object. 1015 * @ndlp: nodelist pointer for this target. 1016 * @xritag: xri used in this exchange. 1017 * @rxid: Remote Exchange ID. 1018 * @send_rrq: Flag used to determine if we should send rrq els cmd. 1019 * 1020 * This function takes the hbalock. 1021 * The active bit is always set in the active rrq xri_bitmap even 1022 * if there is no slot avaiable for the other rrq information. 1023 * 1024 * returns 0 rrq actived for this xri 1025 * < 0 No memory or invalid ndlp. 1026 **/ 1027 int 1028 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 1029 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 1030 { 1031 unsigned long iflags; 1032 struct lpfc_node_rrq *rrq; 1033 int empty; 1034 1035 if (!ndlp) 1036 return -EINVAL; 1037 1038 if (!phba->cfg_enable_rrq) 1039 return -EINVAL; 1040 1041 spin_lock_irqsave(&phba->hbalock, iflags); 1042 if (phba->pport->load_flag & FC_UNLOADING) { 1043 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 1044 goto out; 1045 } 1046 1047 /* 1048 * set the active bit even if there is no mem available. 
1049 */ 1050 if (NLP_CHK_FREE_REQ(ndlp)) 1051 goto out; 1052 1053 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 1054 goto out; 1055 1056 if (!ndlp->active_rrqs_xri_bitmap) 1057 goto out; 1058 1059 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 1060 goto out; 1061 1062 spin_unlock_irqrestore(&phba->hbalock, iflags); 1063 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 1064 if (!rrq) { 1065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1066 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 1067 " DID:0x%x Send:%d\n", 1068 xritag, rxid, ndlp->nlp_DID, send_rrq); 1069 return -EINVAL; 1070 } 1071 if (phba->cfg_enable_rrq == 1) 1072 rrq->send_rrq = send_rrq; 1073 else 1074 rrq->send_rrq = 0; 1075 rrq->xritag = xritag; 1076 rrq->rrq_stop_time = jiffies + 1077 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 1078 rrq->ndlp = ndlp; 1079 rrq->nlp_DID = ndlp->nlp_DID; 1080 rrq->vport = ndlp->vport; 1081 rrq->rxid = rxid; 1082 spin_lock_irqsave(&phba->hbalock, iflags); 1083 empty = list_empty(&phba->active_rrq_list); 1084 list_add_tail(&rrq->list, &phba->active_rrq_list); 1085 phba->hba_flag |= HBA_RRQ_ACTIVE; 1086 if (empty) 1087 lpfc_worker_wake_up(phba); 1088 spin_unlock_irqrestore(&phba->hbalock, iflags); 1089 return 0; 1090 out: 1091 spin_unlock_irqrestore(&phba->hbalock, iflags); 1092 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1093 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 1094 " DID:0x%x Send:%d\n", 1095 xritag, rxid, ndlp->nlp_DID, send_rrq); 1096 return -EINVAL; 1097 } 1098 1099 /** 1100 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool 1101 * @phba: Pointer to HBA context object. 1102 * @piocb: Pointer to the iocbq. 1103 * 1104 * The driver calls this function with either the nvme ls ring lock 1105 * or the fc els ring lock held depending on the iocb usage. This function 1106 * gets a new driver sglq object from the sglq list. If the list is not empty 1107 * then it is successful, it returns pointer to the newly allocated sglq 1108 * object else it returns NULL. 1109 **/ 1110 static struct lpfc_sglq * 1111 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1112 { 1113 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; 1114 struct lpfc_sglq *sglq = NULL; 1115 struct lpfc_sglq *start_sglq = NULL; 1116 struct lpfc_io_buf *lpfc_cmd; 1117 struct lpfc_nodelist *ndlp; 1118 struct lpfc_sli_ring *pring = NULL; 1119 int found = 0; 1120 1121 if (piocbq->iocb_flag & LPFC_IO_NVME_LS) 1122 pring = phba->sli4_hba.nvmels_wq->pring; 1123 else 1124 pring = lpfc_phba_elsring(phba); 1125 1126 lockdep_assert_held(&pring->ring_lock); 1127 1128 if (piocbq->iocb_flag & LPFC_IO_FCP) { 1129 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; 1130 ndlp = lpfc_cmd->rdata->pnode; 1131 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 1132 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { 1133 ndlp = piocbq->context_un.ndlp; 1134 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { 1135 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) 1136 ndlp = NULL; 1137 else 1138 ndlp = piocbq->context_un.ndlp; 1139 } else { 1140 ndlp = piocbq->context1; 1141 } 1142 1143 spin_lock(&phba->sli4_hba.sgl_list_lock); 1144 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); 1145 start_sglq = sglq; 1146 while (!found) { 1147 if (!sglq) 1148 break; 1149 if (ndlp && ndlp->active_rrqs_xri_bitmap && 1150 test_bit(sglq->sli4_lxritag, 1151 ndlp->active_rrqs_xri_bitmap)) { 1152 /* This xri has an rrq outstanding for this DID. 
1153 * put it back in the list and get another xri. 1154 */ 1155 list_add_tail(&sglq->list, lpfc_els_sgl_list); 1156 sglq = NULL; 1157 list_remove_head(lpfc_els_sgl_list, sglq, 1158 struct lpfc_sglq, list); 1159 if (sglq == start_sglq) { 1160 list_add_tail(&sglq->list, lpfc_els_sgl_list); 1161 sglq = NULL; 1162 break; 1163 } else 1164 continue; 1165 } 1166 sglq->ndlp = ndlp; 1167 found = 1; 1168 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 1169 sglq->state = SGL_ALLOCATED; 1170 } 1171 spin_unlock(&phba->sli4_hba.sgl_list_lock); 1172 return sglq; 1173 } 1174 1175 /** 1176 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool 1177 * @phba: Pointer to HBA context object. 1178 * @piocb: Pointer to the iocbq. 1179 * 1180 * This function is called with the sgl_list lock held. This function 1181 * gets a new driver sglq object from the sglq list. If the 1182 * list is not empty then it is successful, it returns pointer to the newly 1183 * allocated sglq object else it returns NULL. 1184 **/ 1185 struct lpfc_sglq * 1186 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1187 { 1188 struct list_head *lpfc_nvmet_sgl_list; 1189 struct lpfc_sglq *sglq = NULL; 1190 1191 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; 1192 1193 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); 1194 1195 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); 1196 if (!sglq) 1197 return NULL; 1198 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 1199 sglq->state = SGL_ALLOCATED; 1200 return sglq; 1201 } 1202 1203 /** 1204 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 1205 * @phba: Pointer to HBA context object. 1206 * 1207 * This function is called with no lock held. This function 1208 * allocates a new driver iocb object from the iocb pool. If the 1209 * allocation is successful, it returns pointer to the newly 1210 * allocated iocb object else it returns NULL. 1211 **/ 1212 struct lpfc_iocbq * 1213 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 1214 { 1215 struct lpfc_iocbq * iocbq = NULL; 1216 unsigned long iflags; 1217 1218 spin_lock_irqsave(&phba->hbalock, iflags); 1219 iocbq = __lpfc_sli_get_iocbq(phba); 1220 spin_unlock_irqrestore(&phba->hbalock, iflags); 1221 return iocbq; 1222 } 1223 1224 /** 1225 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 1226 * @phba: Pointer to HBA context object. 1227 * @iocbq: Pointer to driver iocb object. 1228 * 1229 * This function is called with hbalock held to release driver 1230 * iocb object to the iocb pool. The iotag in the iocb object 1231 * does not change for each use of the iocb object. This function 1232 * clears all other fields of the iocb object when it is freed. 1233 * The sqlq structure that holds the xritag and phys and virtual 1234 * mappings for the scatter gather list is retrieved from the 1235 * active array of sglq. The get of the sglq pointer also clears 1236 * the entry in the array. If the status of the IO indiactes that 1237 * this IO was aborted then the sglq entry it put on the 1238 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 1239 * IO has good status or fails for any other reason then the sglq 1240 * entry is added to the free list (lpfc_els_sgl_list). 
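 * NVMET sglqs are instead returned to lpfc_nvmet_sgl_list, and whenever an
 * sglq is freed back to lpfc_els_sgl_list the worker thread is woken if the
 * ELS txq still has iocbs queued.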
1241 **/ 1242 static void 1243 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1244 { 1245 struct lpfc_sglq *sglq; 1246 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1247 unsigned long iflag = 0; 1248 struct lpfc_sli_ring *pring; 1249 1250 lockdep_assert_held(&phba->hbalock); 1251 1252 if (iocbq->sli4_xritag == NO_XRI) 1253 sglq = NULL; 1254 else 1255 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1256 1257 1258 if (sglq) { 1259 if (iocbq->iocb_flag & LPFC_IO_NVMET) { 1260 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1261 iflag); 1262 sglq->state = SGL_FREED; 1263 sglq->ndlp = NULL; 1264 list_add_tail(&sglq->list, 1265 &phba->sli4_hba.lpfc_nvmet_sgl_list); 1266 spin_unlock_irqrestore( 1267 &phba->sli4_hba.sgl_list_lock, iflag); 1268 goto out; 1269 } 1270 1271 pring = phba->sli4_hba.els_wq->pring; 1272 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1273 (sglq->state != SGL_XRI_ABORTED)) { 1274 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1275 iflag); 1276 list_add(&sglq->list, 1277 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1278 spin_unlock_irqrestore( 1279 &phba->sli4_hba.sgl_list_lock, iflag); 1280 } else { 1281 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1282 iflag); 1283 sglq->state = SGL_FREED; 1284 sglq->ndlp = NULL; 1285 list_add_tail(&sglq->list, 1286 &phba->sli4_hba.lpfc_els_sgl_list); 1287 spin_unlock_irqrestore( 1288 &phba->sli4_hba.sgl_list_lock, iflag); 1289 1290 /* Check if TXQ queue needs to be serviced */ 1291 if (!list_empty(&pring->txq)) 1292 lpfc_worker_wake_up(phba); 1293 } 1294 } 1295 1296 out: 1297 /* 1298 * Clean all volatile data fields, preserve iotag and node struct. 1299 */ 1300 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1301 iocbq->sli4_lxritag = NO_XRI; 1302 iocbq->sli4_xritag = NO_XRI; 1303 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | 1304 LPFC_IO_NVME_LS); 1305 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1306 } 1307 1308 1309 /** 1310 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1311 * @phba: Pointer to HBA context object. 1312 * @iocbq: Pointer to driver iocb object. 1313 * 1314 * This function is called with hbalock held to release driver 1315 * iocb object to the iocb pool. The iotag in the iocb object 1316 * does not change for each use of the iocb object. This function 1317 * clears all other fields of the iocb object when it is freed. 1318 **/ 1319 static void 1320 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1321 { 1322 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1323 1324 lockdep_assert_held(&phba->hbalock); 1325 1326 /* 1327 * Clean all volatile data fields, preserve iotag and node struct. 1328 */ 1329 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1330 iocbq->sli4_xritag = NO_XRI; 1331 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1332 } 1333 1334 /** 1335 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1336 * @phba: Pointer to HBA context object. 1337 * @iocbq: Pointer to driver iocb object. 1338 * 1339 * This function is called with hbalock held to release driver 1340 * iocb object to the iocb pool. The iotag in the iocb object 1341 * does not change for each use of the iocb object. This function 1342 * clears all other fields of the iocb object when it is freed. 
1343 **/ 1344 static void 1345 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1346 { 1347 lockdep_assert_held(&phba->hbalock); 1348 1349 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1350 phba->iocb_cnt--; 1351 } 1352 1353 /** 1354 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1355 * @phba: Pointer to HBA context object. 1356 * @iocbq: Pointer to driver iocb object. 1357 * 1358 * This function is called with no lock held to release the iocb to 1359 * iocb pool. 1360 **/ 1361 void 1362 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1363 { 1364 unsigned long iflags; 1365 1366 /* 1367 * Clean all volatile data fields, preserve iotag and node struct. 1368 */ 1369 spin_lock_irqsave(&phba->hbalock, iflags); 1370 __lpfc_sli_release_iocbq(phba, iocbq); 1371 spin_unlock_irqrestore(&phba->hbalock, iflags); 1372 } 1373 1374 /** 1375 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1376 * @phba: Pointer to HBA context object. 1377 * @iocblist: List of IOCBs. 1378 * @ulpstatus: ULP status in IOCB command field. 1379 * @ulpWord4: ULP word-4 in IOCB command field. 1380 * 1381 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1382 * on the list by invoking the complete callback function associated with the 1383 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1384 * fields. 1385 **/ 1386 void 1387 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1388 uint32_t ulpstatus, uint32_t ulpWord4) 1389 { 1390 struct lpfc_iocbq *piocb; 1391 1392 while (!list_empty(iocblist)) { 1393 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1394 if (!piocb->iocb_cmpl) 1395 lpfc_sli_release_iocbq(phba, piocb); 1396 else { 1397 piocb->iocb.ulpStatus = ulpstatus; 1398 piocb->iocb.un.ulpWord[4] = ulpWord4; 1399 (piocb->iocb_cmpl) (phba, piocb, piocb); 1400 } 1401 } 1402 return; 1403 } 1404 1405 /** 1406 * lpfc_sli_iocb_cmd_type - Get the iocb type 1407 * @iocb_cmnd: iocb command code. 1408 * 1409 * This function is called by ring event handler function to get the iocb type. 1410 * This function translates the iocb command to an iocb command type used to 1411 * decide the final disposition of each completed IOCB. 1412 * The function returns 1413 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1414 * LPFC_SOL_IOCB if it is a solicited iocb completion 1415 * LPFC_ABORT_IOCB if it is an abort iocb 1416 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1417 * 1418 * The caller is not required to hold any lock. 
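 * For example, CMD_ELS_REQUEST64_CR is classified as LPFC_SOL_IOCB,
 * CMD_ABORT_XRI_CN as LPFC_ABORT_IOCB and CMD_RCV_ELS_REQ64_CX as
 * LPFC_UNSOL_IOCB.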
1419 **/ 1420 static lpfc_iocb_type 1421 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1422 { 1423 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1424 1425 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1426 return 0; 1427 1428 switch (iocb_cmnd) { 1429 case CMD_XMIT_SEQUENCE_CR: 1430 case CMD_XMIT_SEQUENCE_CX: 1431 case CMD_XMIT_BCAST_CN: 1432 case CMD_XMIT_BCAST_CX: 1433 case CMD_ELS_REQUEST_CR: 1434 case CMD_ELS_REQUEST_CX: 1435 case CMD_CREATE_XRI_CR: 1436 case CMD_CREATE_XRI_CX: 1437 case CMD_GET_RPI_CN: 1438 case CMD_XMIT_ELS_RSP_CX: 1439 case CMD_GET_RPI_CR: 1440 case CMD_FCP_IWRITE_CR: 1441 case CMD_FCP_IWRITE_CX: 1442 case CMD_FCP_IREAD_CR: 1443 case CMD_FCP_IREAD_CX: 1444 case CMD_FCP_ICMND_CR: 1445 case CMD_FCP_ICMND_CX: 1446 case CMD_FCP_TSEND_CX: 1447 case CMD_FCP_TRSP_CX: 1448 case CMD_FCP_TRECEIVE_CX: 1449 case CMD_FCP_AUTO_TRSP_CX: 1450 case CMD_ADAPTER_MSG: 1451 case CMD_ADAPTER_DUMP: 1452 case CMD_XMIT_SEQUENCE64_CR: 1453 case CMD_XMIT_SEQUENCE64_CX: 1454 case CMD_XMIT_BCAST64_CN: 1455 case CMD_XMIT_BCAST64_CX: 1456 case CMD_ELS_REQUEST64_CR: 1457 case CMD_ELS_REQUEST64_CX: 1458 case CMD_FCP_IWRITE64_CR: 1459 case CMD_FCP_IWRITE64_CX: 1460 case CMD_FCP_IREAD64_CR: 1461 case CMD_FCP_IREAD64_CX: 1462 case CMD_FCP_ICMND64_CR: 1463 case CMD_FCP_ICMND64_CX: 1464 case CMD_FCP_TSEND64_CX: 1465 case CMD_FCP_TRSP64_CX: 1466 case CMD_FCP_TRECEIVE64_CX: 1467 case CMD_GEN_REQUEST64_CR: 1468 case CMD_GEN_REQUEST64_CX: 1469 case CMD_XMIT_ELS_RSP64_CX: 1470 case DSSCMD_IWRITE64_CR: 1471 case DSSCMD_IWRITE64_CX: 1472 case DSSCMD_IREAD64_CR: 1473 case DSSCMD_IREAD64_CX: 1474 type = LPFC_SOL_IOCB; 1475 break; 1476 case CMD_ABORT_XRI_CN: 1477 case CMD_ABORT_XRI_CX: 1478 case CMD_CLOSE_XRI_CN: 1479 case CMD_CLOSE_XRI_CX: 1480 case CMD_XRI_ABORTED_CX: 1481 case CMD_ABORT_MXRI64_CN: 1482 case CMD_XMIT_BLS_RSP64_CX: 1483 type = LPFC_ABORT_IOCB; 1484 break; 1485 case CMD_RCV_SEQUENCE_CX: 1486 case CMD_RCV_ELS_REQ_CX: 1487 case CMD_RCV_SEQUENCE64_CX: 1488 case CMD_RCV_ELS_REQ64_CX: 1489 case CMD_ASYNC_STATUS: 1490 case CMD_IOCB_RCV_SEQ64_CX: 1491 case CMD_IOCB_RCV_ELS64_CX: 1492 case CMD_IOCB_RCV_CONT64_CX: 1493 case CMD_IOCB_RET_XRI64_CX: 1494 type = LPFC_UNSOL_IOCB; 1495 break; 1496 case CMD_IOCB_XMIT_MSEQ64_CR: 1497 case CMD_IOCB_XMIT_MSEQ64_CX: 1498 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1499 case CMD_IOCB_RCV_ELS_LIST64_CX: 1500 case CMD_IOCB_CLOSE_EXTENDED_CN: 1501 case CMD_IOCB_ABORT_EXTENDED_CN: 1502 case CMD_IOCB_RET_HBQE64_CN: 1503 case CMD_IOCB_FCP_IBIDIR64_CR: 1504 case CMD_IOCB_FCP_IBIDIR64_CX: 1505 case CMD_IOCB_FCP_ITASKMGT64_CX: 1506 case CMD_IOCB_LOGENTRY_CN: 1507 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1508 printk("%s - Unhandled SLI-3 Command x%x\n", 1509 __func__, iocb_cmnd); 1510 type = LPFC_UNKNOWN_IOCB; 1511 break; 1512 default: 1513 type = LPFC_UNKNOWN_IOCB; 1514 break; 1515 } 1516 1517 return type; 1518 } 1519 1520 /** 1521 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1522 * @phba: Pointer to HBA context object. 1523 * 1524 * This function is called from SLI initialization code 1525 * to configure every ring of the HBA's SLI interface. The 1526 * caller is not required to hold any lock. This function issues 1527 * a config_ring mailbox command for each ring. 1528 * This function returns zero if successful else returns a negative 1529 * error code. 
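 * On any CONFIG_RING failure the link state is set to LPFC_HBA_ERROR and
 * -ENXIO is returned after the mailbox memory has been freed.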
1530 **/ 1531 static int 1532 lpfc_sli_ring_map(struct lpfc_hba *phba) 1533 { 1534 struct lpfc_sli *psli = &phba->sli; 1535 LPFC_MBOXQ_t *pmb; 1536 MAILBOX_t *pmbox; 1537 int i, rc, ret = 0; 1538 1539 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1540 if (!pmb) 1541 return -ENOMEM; 1542 pmbox = &pmb->u.mb; 1543 phba->link_state = LPFC_INIT_MBX_CMDS; 1544 for (i = 0; i < psli->num_rings; i++) { 1545 lpfc_config_ring(phba, i, pmb); 1546 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1547 if (rc != MBX_SUCCESS) { 1548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1549 "0446 Adapter failed to init (%d), " 1550 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1551 "ring %d\n", 1552 rc, pmbox->mbxCommand, 1553 pmbox->mbxStatus, i); 1554 phba->link_state = LPFC_HBA_ERROR; 1555 ret = -ENXIO; 1556 break; 1557 } 1558 } 1559 mempool_free(pmb, phba->mbox_mem_pool); 1560 return ret; 1561 } 1562 1563 /** 1564 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1565 * @phba: Pointer to HBA context object. 1566 * @pring: Pointer to driver SLI ring object. 1567 * @piocb: Pointer to the driver iocb object. 1568 * 1569 * The driver calls this function with the hbalock held for SLI3 ports or 1570 * the ring lock held for SLI4 ports. The function adds the 1571 * new iocb to txcmplq of the given ring. This function always returns 1572 * 0. If this function is called for ELS ring, this function checks if 1573 * there is a vport associated with the ELS command. This function also 1574 * starts els_tmofunc timer if this is an ELS command. 1575 **/ 1576 static int 1577 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1578 struct lpfc_iocbq *piocb) 1579 { 1580 if (phba->sli_rev == LPFC_SLI_REV4) 1581 lockdep_assert_held(&pring->ring_lock); 1582 else 1583 lockdep_assert_held(&phba->hbalock); 1584 1585 BUG_ON(!piocb); 1586 1587 list_add_tail(&piocb->list, &pring->txcmplq); 1588 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1589 pring->txcmplq_cnt++; 1590 1591 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1592 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1593 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1594 BUG_ON(!piocb->vport); 1595 if (!(piocb->vport->load_flag & FC_UNLOADING)) 1596 mod_timer(&piocb->vport->els_tmofunc, 1597 jiffies + 1598 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1599 } 1600 1601 return 0; 1602 } 1603 1604 /** 1605 * lpfc_sli_ringtx_get - Get first element of the txq 1606 * @phba: Pointer to HBA context object. 1607 * @pring: Pointer to driver SLI ring object. 1608 * 1609 * This function is called with hbalock held to get next 1610 * iocb in txq of the given ring. If there is any iocb in 1611 * the txq, the function returns first iocb in the list after 1612 * removing the iocb from the list, else it returns NULL. 1613 **/ 1614 struct lpfc_iocbq * 1615 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1616 { 1617 struct lpfc_iocbq *cmd_iocb; 1618 1619 lockdep_assert_held(&phba->hbalock); 1620 1621 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1622 return cmd_iocb; 1623 } 1624 1625 /** 1626 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1627 * @phba: Pointer to HBA context object. 1628 * @pring: Pointer to driver SLI ring object. 1629 * 1630 * This function is called with hbalock held and the caller must post the 1631 * iocb without releasing the lock. If the caller releases the lock, 1632 * iocb slot returned by the function is not guaranteed to be available. 
1633 * The function returns pointer to the next available iocb slot if there 1634 * is available slot in the ring, else it returns NULL. 1635 * If the get index of the ring is ahead of the put index, the function 1636 * will post an error attention event to the worker thread to take the 1637 * HBA to offline state. 1638 **/ 1639 static IOCB_t * 1640 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1641 { 1642 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1643 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1644 1645 lockdep_assert_held(&phba->hbalock); 1646 1647 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1648 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1649 pring->sli.sli3.next_cmdidx = 0; 1650 1651 if (unlikely(pring->sli.sli3.local_getidx == 1652 pring->sli.sli3.next_cmdidx)) { 1653 1654 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1655 1656 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1657 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1658 "0315 Ring %d issue: portCmdGet %d " 1659 "is bigger than cmd ring %d\n", 1660 pring->ringno, 1661 pring->sli.sli3.local_getidx, 1662 max_cmd_idx); 1663 1664 phba->link_state = LPFC_HBA_ERROR; 1665 /* 1666 * All error attention handlers are posted to 1667 * worker thread 1668 */ 1669 phba->work_ha |= HA_ERATT; 1670 phba->work_hs = HS_FFER3; 1671 1672 lpfc_worker_wake_up(phba); 1673 1674 return NULL; 1675 } 1676 1677 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1678 return NULL; 1679 } 1680 1681 return lpfc_cmd_iocb(phba, pring); 1682 } 1683 1684 /** 1685 * lpfc_sli_next_iotag - Get an iotag for the iocb 1686 * @phba: Pointer to HBA context object. 1687 * @iocbq: Pointer to driver iocb object. 1688 * 1689 * This function gets an iotag for the iocb. If there is no unused iotag and 1690 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1691 * array and assigns a new iotag. 1692 * The function returns the allocated iotag if successful, else returns zero. 1693 * Zero is not a valid iotag. 1694 * The caller is not required to hold any lock. 
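 * When the lookup array is exhausted it is grown by
 * LPFC_IOCBQ_LOOKUP_INCREMENT entries, keeping the table below the 0xffff
 * iotag limit; the hbalock is dropped around the kcalloc() of the new array.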
1695 **/ 1696 uint16_t 1697 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1698 { 1699 struct lpfc_iocbq **new_arr; 1700 struct lpfc_iocbq **old_arr; 1701 size_t new_len; 1702 struct lpfc_sli *psli = &phba->sli; 1703 uint16_t iotag; 1704 1705 spin_lock_irq(&phba->hbalock); 1706 iotag = psli->last_iotag; 1707 if(++iotag < psli->iocbq_lookup_len) { 1708 psli->last_iotag = iotag; 1709 psli->iocbq_lookup[iotag] = iocbq; 1710 spin_unlock_irq(&phba->hbalock); 1711 iocbq->iotag = iotag; 1712 return iotag; 1713 } else if (psli->iocbq_lookup_len < (0xffff 1714 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1715 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1716 spin_unlock_irq(&phba->hbalock); 1717 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1718 GFP_KERNEL); 1719 if (new_arr) { 1720 spin_lock_irq(&phba->hbalock); 1721 old_arr = psli->iocbq_lookup; 1722 if (new_len <= psli->iocbq_lookup_len) { 1723 /* highly unprobable case */ 1724 kfree(new_arr); 1725 iotag = psli->last_iotag; 1726 if(++iotag < psli->iocbq_lookup_len) { 1727 psli->last_iotag = iotag; 1728 psli->iocbq_lookup[iotag] = iocbq; 1729 spin_unlock_irq(&phba->hbalock); 1730 iocbq->iotag = iotag; 1731 return iotag; 1732 } 1733 spin_unlock_irq(&phba->hbalock); 1734 return 0; 1735 } 1736 if (psli->iocbq_lookup) 1737 memcpy(new_arr, old_arr, 1738 ((psli->last_iotag + 1) * 1739 sizeof (struct lpfc_iocbq *))); 1740 psli->iocbq_lookup = new_arr; 1741 psli->iocbq_lookup_len = new_len; 1742 psli->last_iotag = iotag; 1743 psli->iocbq_lookup[iotag] = iocbq; 1744 spin_unlock_irq(&phba->hbalock); 1745 iocbq->iotag = iotag; 1746 kfree(old_arr); 1747 return iotag; 1748 } 1749 } else 1750 spin_unlock_irq(&phba->hbalock); 1751 1752 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1753 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1754 psli->last_iotag); 1755 1756 return 0; 1757 } 1758 1759 /** 1760 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1761 * @phba: Pointer to HBA context object. 1762 * @pring: Pointer to driver SLI ring object. 1763 * @iocb: Pointer to iocb slot in the ring. 1764 * @nextiocb: Pointer to driver iocb object which need to be 1765 * posted to firmware. 1766 * 1767 * This function is called with hbalock held to post a new iocb to 1768 * the firmware. This function copies the new iocb to ring iocb slot and 1769 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1770 * a completion call back for this iocb else the function will free the 1771 * iocb object. 1772 **/ 1773 static void 1774 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1775 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1776 { 1777 lockdep_assert_held(&phba->hbalock); 1778 /* 1779 * Set up an iotag 1780 */ 1781 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1782 1783 1784 if (pring->ringno == LPFC_ELS_RING) { 1785 lpfc_debugfs_slow_ring_trc(phba, 1786 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1787 *(((uint32_t *) &nextiocb->iocb) + 4), 1788 *(((uint32_t *) &nextiocb->iocb) + 6), 1789 *(((uint32_t *) &nextiocb->iocb) + 7)); 1790 } 1791 1792 /* 1793 * Issue iocb command to adapter 1794 */ 1795 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1796 wmb(); 1797 pring->stats.iocb_cmd++; 1798 1799 /* 1800 * If there is no completion routine to call, we can release the 1801 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1802 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1803 */ 1804 if (nextiocb->iocb_cmpl) 1805 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1806 else 1807 __lpfc_sli_release_iocbq(phba, nextiocb); 1808 1809 /* 1810 * Let the HBA know what IOCB slot will be the next one the 1811 * driver will put a command into. 1812 */ 1813 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1814 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1815 } 1816 1817 /** 1818 * lpfc_sli_update_full_ring - Update the chip attention register 1819 * @phba: Pointer to HBA context object. 1820 * @pring: Pointer to driver SLI ring object. 1821 * 1822 * The caller is not required to hold any lock for calling this function. 1823 * This function updates the chip attention bits for the ring to inform firmware 1824 * that there are pending work to be done for this ring and requests an 1825 * interrupt when there is space available in the ring. This function is 1826 * called when the driver is unable to post more iocbs to the ring due 1827 * to unavailability of space in the ring. 1828 **/ 1829 static void 1830 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1831 { 1832 int ringno = pring->ringno; 1833 1834 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1835 1836 wmb(); 1837 1838 /* 1839 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1840 * The HBA will tell us when an IOCB entry is available. 1841 */ 1842 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1843 readl(phba->CAregaddr); /* flush */ 1844 1845 pring->stats.iocb_cmd_full++; 1846 } 1847 1848 /** 1849 * lpfc_sli_update_ring - Update chip attention register 1850 * @phba: Pointer to HBA context object. 1851 * @pring: Pointer to driver SLI ring object. 1852 * 1853 * This function updates the chip attention register bit for the 1854 * given ring to inform HBA that there is more work to be done 1855 * in this ring. The caller is not required to hold any lock. 1856 **/ 1857 static void 1858 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1859 { 1860 int ringno = pring->ringno; 1861 1862 /* 1863 * Tell the HBA that there is work to do in this ring. 1864 */ 1865 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1866 wmb(); 1867 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1868 readl(phba->CAregaddr); /* flush */ 1869 } 1870 } 1871 1872 /** 1873 * lpfc_sli_resume_iocb - Process iocbs in the txq 1874 * @phba: Pointer to HBA context object. 1875 * @pring: Pointer to driver SLI ring object. 1876 * 1877 * This function is called with hbalock held to post pending iocbs 1878 * in the txq to the firmware. This function is called when driver 1879 * detects space available in the ring. 1880 **/ 1881 static void 1882 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1883 { 1884 IOCB_t *iocb; 1885 struct lpfc_iocbq *nextiocb; 1886 1887 lockdep_assert_held(&phba->hbalock); 1888 1889 /* 1890 * Check to see if: 1891 * (a) there is anything on the txq to send 1892 * (b) link is up 1893 * (c) link attention events can be processed (fcp ring only) 1894 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1895 */ 1896 1897 if (lpfc_is_link_up(phba) && 1898 (!list_empty(&pring->txq)) && 1899 (pring->ringno != LPFC_FCP_RING || 1900 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1901 1902 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1903 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1904 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1905 1906 if (iocb) 1907 lpfc_sli_update_ring(phba, pring); 1908 else 1909 lpfc_sli_update_full_ring(phba, pring); 1910 } 1911 1912 return; 1913 } 1914 1915 /** 1916 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1917 * @phba: Pointer to HBA context object. 1918 * @hbqno: HBQ number. 1919 * 1920 * This function is called with hbalock held to get the next 1921 * available slot for the given HBQ. If there is free slot 1922 * available for the HBQ it will return pointer to the next available 1923 * HBQ entry else it will return NULL. 1924 **/ 1925 static struct lpfc_hbq_entry * 1926 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1927 { 1928 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1929 1930 lockdep_assert_held(&phba->hbalock); 1931 1932 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1933 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1934 hbqp->next_hbqPutIdx = 0; 1935 1936 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1937 uint32_t raw_index = phba->hbq_get[hbqno]; 1938 uint32_t getidx = le32_to_cpu(raw_index); 1939 1940 hbqp->local_hbqGetIdx = getidx; 1941 1942 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1943 lpfc_printf_log(phba, KERN_ERR, 1944 LOG_SLI | LOG_VPORT, 1945 "1802 HBQ %d: local_hbqGetIdx " 1946 "%u is > than hbqp->entry_count %u\n", 1947 hbqno, hbqp->local_hbqGetIdx, 1948 hbqp->entry_count); 1949 1950 phba->link_state = LPFC_HBA_ERROR; 1951 return NULL; 1952 } 1953 1954 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1955 return NULL; 1956 } 1957 1958 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1959 hbqp->hbqPutIdx; 1960 } 1961 1962 /** 1963 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1964 * @phba: Pointer to HBA context object. 1965 * 1966 * This function is called with no lock held to free all the 1967 * hbq buffers while uninitializing the SLI interface. It also 1968 * frees the HBQ buffers returned by the firmware but not yet 1969 * processed by the upper layers. 1970 **/ 1971 void 1972 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1973 { 1974 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1975 struct hbq_dmabuf *hbq_buf; 1976 unsigned long flags; 1977 int i, hbq_count; 1978 1979 hbq_count = lpfc_sli_hbq_count(); 1980 /* Return all memory used by all HBQs */ 1981 spin_lock_irqsave(&phba->hbalock, flags); 1982 for (i = 0; i < hbq_count; ++i) { 1983 list_for_each_entry_safe(dmabuf, next_dmabuf, 1984 &phba->hbqs[i].hbq_buffer_list, list) { 1985 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1986 list_del(&hbq_buf->dbuf.list); 1987 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1988 } 1989 phba->hbqs[i].buffer_count = 0; 1990 } 1991 1992 /* Mark the HBQs not in use */ 1993 phba->hbq_in_use = 0; 1994 spin_unlock_irqrestore(&phba->hbalock, flags); 1995 } 1996 1997 /** 1998 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1999 * @phba: Pointer to HBA context object. 2000 * @hbqno: HBQ number. 2001 * @hbq_buf: Pointer to HBQ buffer. 2002 * 2003 * This function is called with the hbalock held to post a 2004 * hbq buffer to the firmware. If the function finds an empty 2005 * slot in the HBQ, it will post the buffer. 
The function will return zero 2006 * if it successfully posts the buffer, otherwise it will 2007 * return an error. 2008 **/ 2009 static int 2010 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 2011 struct hbq_dmabuf *hbq_buf) 2012 { 2013 lockdep_assert_held(&phba->hbalock); 2014 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 2015 } 2016 2017 /** 2018 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 2019 * @phba: Pointer to HBA context object. 2020 * @hbqno: HBQ number. 2021 * @hbq_buf: Pointer to HBQ buffer. 2022 * 2023 * This function is called with the hbalock held to post a hbq buffer to the 2024 * firmware. If the function finds an empty slot in the HBQ, it will post the 2025 * buffer and place it on the hbq_buffer_list. The function will return zero if 2026 * it successfully posts the buffer, otherwise it will return an error. 2027 **/ 2028 static int 2029 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 2030 struct hbq_dmabuf *hbq_buf) 2031 { 2032 struct lpfc_hbq_entry *hbqe; 2033 dma_addr_t physaddr = hbq_buf->dbuf.phys; 2034 2035 lockdep_assert_held(&phba->hbalock); 2036 /* Get next HBQ entry slot to use */ 2037 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 2038 if (hbqe) { 2039 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2040 2041 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 2042 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 2043 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; 2044 hbqe->bde.tus.f.bdeFlags = 0; 2045 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 2046 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 2047 /* Sync SLIM */ 2048 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 2049 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 2050 /* flush */ 2051 readl(phba->hbq_put + hbqno); 2052 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 2053 return 0; 2054 } else 2055 return -ENOMEM; 2056 } 2057 2058 /** 2059 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 2060 * @phba: Pointer to HBA context object. 2061 * @hbqno: HBQ number. 2062 * @hbq_buf: Pointer to HBQ buffer. 2063 * 2064 * This function is called with the hbalock held to post an RQE to the SLI4 2065 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 2066 * the hbq_buffer_list and return zero, otherwise it will return an error. 2067 **/ 2068 static int 2069 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 2070 struct hbq_dmabuf *hbq_buf) 2071 { 2072 int rc; 2073 struct lpfc_rqe hrqe; 2074 struct lpfc_rqe drqe; 2075 struct lpfc_queue *hrq; 2076 struct lpfc_queue *drq; 2077 2078 if (hbqno != LPFC_ELS_HBQ) 2079 return 1; 2080 hrq = phba->sli4_hba.hdr_rq; 2081 drq = phba->sli4_hba.dat_rq; 2082 2083 lockdep_assert_held(&phba->hbalock); 2084 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 2085 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 2086 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 2087 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 2088 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 2089 if (rc < 0) 2090 return rc; 2091 hbq_buf->tag = (rc | (hbqno << 16)); 2092 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 2093 return 0; 2094 } 2095 2096 /* HBQ for ELS and CT traffic.
*/ 2097 static struct lpfc_hbq_init lpfc_els_hbq = { 2098 .rn = 1, 2099 .entry_count = 256, 2100 .mask_count = 0, 2101 .profile = 0, 2102 .ring_mask = (1 << LPFC_ELS_RING), 2103 .buffer_count = 0, 2104 .init_count = 40, 2105 .add_count = 40, 2106 }; 2107 2108 /* Array of HBQs */ 2109 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2110 &lpfc_els_hbq, 2111 }; 2112 2113 /** 2114 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2115 * @phba: Pointer to HBA context object. 2116 * @hbqno: HBQ number. 2117 * @count: Number of HBQ buffers to be posted. 2118 * 2119 * This function is called with no lock held to post more hbq buffers to the 2120 * given HBQ. The function returns the number of HBQ buffers successfully 2121 * posted. 2122 **/ 2123 static int 2124 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2125 { 2126 uint32_t i, posted = 0; 2127 unsigned long flags; 2128 struct hbq_dmabuf *hbq_buffer; 2129 LIST_HEAD(hbq_buf_list); 2130 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2131 return 0; 2132 2133 if ((phba->hbqs[hbqno].buffer_count + count) > 2134 lpfc_hbq_defs[hbqno]->entry_count) 2135 count = lpfc_hbq_defs[hbqno]->entry_count - 2136 phba->hbqs[hbqno].buffer_count; 2137 if (!count) 2138 return 0; 2139 /* Allocate HBQ entries */ 2140 for (i = 0; i < count; i++) { 2141 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2142 if (!hbq_buffer) 2143 break; 2144 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2145 } 2146 /* Check whether HBQ is still in use */ 2147 spin_lock_irqsave(&phba->hbalock, flags); 2148 if (!phba->hbq_in_use) 2149 goto err; 2150 while (!list_empty(&hbq_buf_list)) { 2151 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2152 dbuf.list); 2153 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2154 (hbqno << 16)); 2155 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2156 phba->hbqs[hbqno].buffer_count++; 2157 posted++; 2158 } else 2159 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2160 } 2161 spin_unlock_irqrestore(&phba->hbalock, flags); 2162 return posted; 2163 err: 2164 spin_unlock_irqrestore(&phba->hbalock, flags); 2165 while (!list_empty(&hbq_buf_list)) { 2166 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2167 dbuf.list); 2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2169 } 2170 return 0; 2171 } 2172 2173 /** 2174 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2175 * @phba: Pointer to HBA context object. 2176 * @qno: HBQ number. 2177 * 2178 * This function posts more buffers to the HBQ. This function 2179 * is called with no lock held. The function returns the number of HBQ entries 2180 * successfully allocated. 2181 **/ 2182 int 2183 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2184 { 2185 if (phba->sli_rev == LPFC_SLI_REV4) 2186 return 0; 2187 else 2188 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2189 lpfc_hbq_defs[qno]->add_count); 2190 } 2191 2192 /** 2193 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2194 * @phba: Pointer to HBA context object. 2195 * @qno: HBQ queue number. 2196 * 2197 * This function is called from SLI initialization code path with 2198 * no lock held to post initial HBQ buffers to firmware. The 2199 * function returns the number of HBQ entries successfully allocated. 
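 *
 * Illustrative sketch (assumed caller; the real call sites live in the SLI
 * setup paths of the driver): posting the initial ELS HBQ buffers would look
 * like
 *
 *	posted = lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
 *
 * where a return of zero means no buffers could be posted.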
2200 **/ 2201 static int 2202 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2203 { 2204 if (phba->sli_rev == LPFC_SLI_REV4) 2205 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2206 lpfc_hbq_defs[qno]->entry_count); 2207 else 2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2209 lpfc_hbq_defs[qno]->init_count); 2210 } 2211 2212 /** 2213 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2214 * @rb_list: Pointer to the hbq buffer list from which the first 2215 * buffer is to be removed. 2216 * 2217 * This function removes the first hbq buffer on an hbq list and returns a 2218 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2219 **/ 2220 static struct hbq_dmabuf * 2221 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2222 { 2223 struct lpfc_dmabuf *d_buf; 2224 2225 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2226 if (!d_buf) 2227 return NULL; 2228 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2229 } 2230 2231 /** 2232 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2233 * @phba: Pointer to HBA context object. 2234 * @hrq: Pointer to the SLI4 header receive queue whose buffer list is used. 2235 * 2236 * This function removes the first RQ buffer on an RQ buffer list and returns a 2237 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2238 **/ 2239 static struct rqb_dmabuf * 2240 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2241 { 2242 struct lpfc_dmabuf *h_buf; 2243 struct lpfc_rqb *rqbp; 2244 2245 rqbp = hrq->rqbp; 2246 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2247 struct lpfc_dmabuf, list); 2248 if (!h_buf) 2249 return NULL; 2250 rqbp->buffer_count--; 2251 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2252 } 2253 2254 /** 2255 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2256 * @phba: Pointer to HBA context object. 2257 * @tag: Tag of the hbq buffer. 2258 * 2259 * This function searches for the hbq buffer associated with the given tag in 2260 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2261 * otherwise it returns NULL. 2262 **/ 2263 static struct hbq_dmabuf * 2264 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2265 { 2266 struct lpfc_dmabuf *d_buf; 2267 struct hbq_dmabuf *hbq_buf; 2268 uint32_t hbqno; 2269 2270 hbqno = tag >> 16; 2271 if (hbqno >= LPFC_MAX_HBQS) 2272 return NULL; 2273 2274 spin_lock_irq(&phba->hbalock); 2275 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2276 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2277 if (hbq_buf->tag == tag) { 2278 spin_unlock_irq(&phba->hbalock); 2279 return hbq_buf; 2280 } 2281 } 2282 spin_unlock_irq(&phba->hbalock); 2283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2284 "1803 Bad hbq tag. Data: x%x x%x\n", 2285 tag, phba->hbqs[tag >> 16].buffer_count); 2286 return NULL; 2287 } 2288 2289 /** 2290 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2291 * @phba: Pointer to HBA context object. 2292 * @hbq_buffer: Pointer to HBQ buffer. 2293 * 2294 * This function is called with the hbalock held. This function gives back 2295 * the hbq buffer to firmware. If the HBQ does not have space to 2296 * post the buffer, it will free the buffer.
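 *
 * Illustrative sketch (assumed caller, not taken from this file): a layer
 * that is finished with a received hbq buffer hands it back while holding
 * the hbalock:
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_free_hbq(phba, hbq_buffer);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);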
2297 **/ 2298 void 2299 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2300 { 2301 uint32_t hbqno; 2302 2303 if (hbq_buffer) { 2304 hbqno = hbq_buffer->tag >> 16; 2305 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2306 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2307 } 2308 } 2309 2310 /** 2311 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2312 * @mbxCommand: mailbox command code. 2313 * 2314 * This function is called by the mailbox event handler function to verify 2315 * that the completed mailbox command is a legitimate mailbox command. If the 2316 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2317 * and the mailbox event handler will take the HBA offline. 2318 **/ 2319 static int 2320 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2321 { 2322 uint8_t ret; 2323 2324 switch (mbxCommand) { 2325 case MBX_LOAD_SM: 2326 case MBX_READ_NV: 2327 case MBX_WRITE_NV: 2328 case MBX_WRITE_VPARMS: 2329 case MBX_RUN_BIU_DIAG: 2330 case MBX_INIT_LINK: 2331 case MBX_DOWN_LINK: 2332 case MBX_CONFIG_LINK: 2333 case MBX_CONFIG_RING: 2334 case MBX_RESET_RING: 2335 case MBX_READ_CONFIG: 2336 case MBX_READ_RCONFIG: 2337 case MBX_READ_SPARM: 2338 case MBX_READ_STATUS: 2339 case MBX_READ_RPI: 2340 case MBX_READ_XRI: 2341 case MBX_READ_REV: 2342 case MBX_READ_LNK_STAT: 2343 case MBX_REG_LOGIN: 2344 case MBX_UNREG_LOGIN: 2345 case MBX_CLEAR_LA: 2346 case MBX_DUMP_MEMORY: 2347 case MBX_DUMP_CONTEXT: 2348 case MBX_RUN_DIAGS: 2349 case MBX_RESTART: 2350 case MBX_UPDATE_CFG: 2351 case MBX_DOWN_LOAD: 2352 case MBX_DEL_LD_ENTRY: 2353 case MBX_RUN_PROGRAM: 2354 case MBX_SET_MASK: 2355 case MBX_SET_VARIABLE: 2356 case MBX_UNREG_D_ID: 2357 case MBX_KILL_BOARD: 2358 case MBX_CONFIG_FARP: 2359 case MBX_BEACON: 2360 case MBX_LOAD_AREA: 2361 case MBX_RUN_BIU_DIAG64: 2362 case MBX_CONFIG_PORT: 2363 case MBX_READ_SPARM64: 2364 case MBX_READ_RPI64: 2365 case MBX_REG_LOGIN64: 2366 case MBX_READ_TOPOLOGY: 2367 case MBX_WRITE_WWN: 2368 case MBX_SET_DEBUG: 2369 case MBX_LOAD_EXP_ROM: 2370 case MBX_ASYNCEVT_ENABLE: 2371 case MBX_REG_VPI: 2372 case MBX_UNREG_VPI: 2373 case MBX_HEARTBEAT: 2374 case MBX_PORT_CAPABILITIES: 2375 case MBX_PORT_IOV_CONTROL: 2376 case MBX_SLI4_CONFIG: 2377 case MBX_SLI4_REQ_FTRS: 2378 case MBX_REG_FCFI: 2379 case MBX_UNREG_FCFI: 2380 case MBX_REG_VFI: 2381 case MBX_UNREG_VFI: 2382 case MBX_INIT_VPI: 2383 case MBX_INIT_VFI: 2384 case MBX_RESUME_RPI: 2385 case MBX_READ_EVENT_LOG_STATUS: 2386 case MBX_READ_EVENT_LOG: 2387 case MBX_SECURITY_MGMT: 2388 case MBX_AUTH_PORT: 2389 case MBX_ACCESS_VDATA: 2390 ret = mbxCommand; 2391 break; 2392 default: 2393 ret = MBX_SHUTDOWN; 2394 break; 2395 } 2396 return ret; 2397 } 2398 2399 /** 2400 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2401 * @phba: Pointer to HBA context object. 2402 * @pmboxq: Pointer to mailbox command. 2403 * 2404 * This is completion handler function for mailbox commands issued from 2405 * lpfc_sli_issue_mbox_wait function. This function is called by the 2406 * mailbox event handler function with no lock held. This function 2407 * will wake up thread waiting on the wait queue pointed by context1 2408 * of the mailbox. 2409 **/ 2410 void 2411 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2412 { 2413 unsigned long drvr_flag; 2414 struct completion *pmbox_done; 2415 2416 /* 2417 * If pmbox_done is empty, the driver thread gave up waiting and 2418 * continued running. 
2419 */ 2420 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2421 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2422 pmbox_done = (struct completion *)pmboxq->context3; 2423 if (pmbox_done) 2424 complete(pmbox_done); 2425 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2426 return; 2427 } 2428 2429 2430 /** 2431 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2432 * @phba: Pointer to HBA context object. 2433 * @pmb: Pointer to mailbox object. 2434 * 2435 * This function is the default mailbox completion handler. It 2436 * frees the memory resources associated with the completed mailbox 2437 * command. If the completed command is a REG_LOGIN mailbox command, 2438 * this function will issue a UREG_LOGIN to re-claim the RPI. 2439 **/ 2440 void 2441 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2442 { 2443 struct lpfc_vport *vport = pmb->vport; 2444 struct lpfc_dmabuf *mp; 2445 struct lpfc_nodelist *ndlp; 2446 struct Scsi_Host *shost; 2447 uint16_t rpi, vpi; 2448 int rc; 2449 2450 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 2451 2452 if (mp) { 2453 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2454 kfree(mp); 2455 } 2456 2457 /* 2458 * If a REG_LOGIN succeeded after node is destroyed or node 2459 * is in re-discovery driver need to cleanup the RPI. 2460 */ 2461 if (!(phba->pport->load_flag & FC_UNLOADING) && 2462 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2463 !pmb->u.mb.mbxStatus) { 2464 rpi = pmb->u.mb.un.varWords[0]; 2465 vpi = pmb->u.mb.un.varRegLogin.vpi; 2466 lpfc_unreg_login(phba, vpi, rpi, pmb); 2467 pmb->vport = vport; 2468 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2469 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2470 if (rc != MBX_NOT_FINISHED) 2471 return; 2472 } 2473 2474 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2475 !(phba->pport->load_flag & FC_UNLOADING) && 2476 !pmb->u.mb.mbxStatus) { 2477 shost = lpfc_shost_from_vport(vport); 2478 spin_lock_irq(shost->host_lock); 2479 vport->vpi_state |= LPFC_VPI_REGISTERED; 2480 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2481 spin_unlock_irq(shost->host_lock); 2482 } 2483 2484 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2486 lpfc_nlp_put(ndlp); 2487 pmb->ctx_buf = NULL; 2488 pmb->ctx_ndlp = NULL; 2489 } 2490 2491 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2492 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2493 2494 /* Check to see if there are any deferred events to process */ 2495 if (ndlp) { 2496 lpfc_printf_vlog( 2497 vport, 2498 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2499 "1438 UNREG cmpl deferred mbox x%x " 2500 "on NPort x%x Data: x%x x%x %p\n", 2501 ndlp->nlp_rpi, ndlp->nlp_DID, 2502 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2503 2504 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2505 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 2506 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2507 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2508 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2509 } else { 2510 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2511 } 2512 pmb->ctx_ndlp = NULL; 2513 } 2514 } 2515 2516 /* Check security permission status on INIT_LINK mailbox command */ 2517 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2518 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2520 "2860 SLI authentication is required " 2521 "for INIT_LINK but has not done yet\n"); 2522 2523 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2524 lpfc_sli4_mbox_cmd_free(phba, pmb); 2525 else 2526 mempool_free(pmb, 
phba->mbox_mem_pool); 2527 } 2528 /** 2529 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2530 * @phba: Pointer to HBA context object. 2531 * @pmb: Pointer to mailbox object. 2532 * 2533 * This function is the unreg rpi mailbox completion handler. It 2534 * frees the memory resources associated with the completed mailbox 2535 * command. An additional refrenece is put on the ndlp to prevent 2536 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2537 * the unreg mailbox command completes, this routine puts the 2538 * reference back. 2539 * 2540 **/ 2541 void 2542 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2543 { 2544 struct lpfc_vport *vport = pmb->vport; 2545 struct lpfc_nodelist *ndlp; 2546 2547 ndlp = pmb->ctx_ndlp; 2548 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2549 if (phba->sli_rev == LPFC_SLI_REV4 && 2550 (bf_get(lpfc_sli_intf_if_type, 2551 &phba->sli4_hba.sli_intf) >= 2552 LPFC_SLI_INTF_IF_TYPE_2)) { 2553 if (ndlp) { 2554 lpfc_printf_vlog( 2555 vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2556 "0010 UNREG_LOGIN vpi:%x " 2557 "rpi:%x DID:%x defer x%x flg x%x " 2558 "map:%x %p\n", 2559 vport->vpi, ndlp->nlp_rpi, 2560 ndlp->nlp_DID, ndlp->nlp_defer_did, 2561 ndlp->nlp_flag, 2562 ndlp->nlp_usg_map, ndlp); 2563 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2564 lpfc_nlp_put(ndlp); 2565 2566 /* Check to see if there are any deferred 2567 * events to process 2568 */ 2569 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2570 (ndlp->nlp_defer_did != 2571 NLP_EVT_NOTHING_PENDING)) { 2572 lpfc_printf_vlog( 2573 vport, KERN_INFO, LOG_DISCOVERY, 2574 "4111 UNREG cmpl deferred " 2575 "clr x%x on " 2576 "NPort x%x Data: x%x %p\n", 2577 ndlp->nlp_rpi, ndlp->nlp_DID, 2578 ndlp->nlp_defer_did, ndlp); 2579 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2580 ndlp->nlp_defer_did = 2581 NLP_EVT_NOTHING_PENDING; 2582 lpfc_issue_els_plogi( 2583 vport, ndlp->nlp_DID, 0); 2584 } else { 2585 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2586 } 2587 } 2588 } 2589 } 2590 2591 mempool_free(pmb, phba->mbox_mem_pool); 2592 } 2593 2594 /** 2595 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2596 * @phba: Pointer to HBA context object. 2597 * 2598 * This function is called with no lock held. This function processes all 2599 * the completed mailbox commands and gives it to upper layers. The interrupt 2600 * service routine processes mailbox completion interrupt and adds completed 2601 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2602 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2603 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2604 * function returns the mailbox commands to the upper layer by calling the 2605 * completion handler function of each mailbox. 
2606 **/ 2607 int 2608 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2609 { 2610 MAILBOX_t *pmbox; 2611 LPFC_MBOXQ_t *pmb; 2612 int rc; 2613 LIST_HEAD(cmplq); 2614 2615 phba->sli.slistat.mbox_event++; 2616 2617 /* Get all completed mailboxe buffers into the cmplq */ 2618 spin_lock_irq(&phba->hbalock); 2619 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2620 spin_unlock_irq(&phba->hbalock); 2621 2622 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2623 do { 2624 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2625 if (pmb == NULL) 2626 break; 2627 2628 pmbox = &pmb->u.mb; 2629 2630 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2631 if (pmb->vport) { 2632 lpfc_debugfs_disc_trc(pmb->vport, 2633 LPFC_DISC_TRC_MBOX_VPORT, 2634 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2635 (uint32_t)pmbox->mbxCommand, 2636 pmbox->un.varWords[0], 2637 pmbox->un.varWords[1]); 2638 } 2639 else { 2640 lpfc_debugfs_disc_trc(phba->pport, 2641 LPFC_DISC_TRC_MBOX, 2642 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2643 (uint32_t)pmbox->mbxCommand, 2644 pmbox->un.varWords[0], 2645 pmbox->un.varWords[1]); 2646 } 2647 } 2648 2649 /* 2650 * It is a fatal error if unknown mbox command completion. 2651 */ 2652 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2653 MBX_SHUTDOWN) { 2654 /* Unknown mailbox command compl */ 2655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2656 "(%d):0323 Unknown Mailbox command " 2657 "x%x (x%x/x%x) Cmpl\n", 2658 pmb->vport ? pmb->vport->vpi : 0, 2659 pmbox->mbxCommand, 2660 lpfc_sli_config_mbox_subsys_get(phba, 2661 pmb), 2662 lpfc_sli_config_mbox_opcode_get(phba, 2663 pmb)); 2664 phba->link_state = LPFC_HBA_ERROR; 2665 phba->work_hs = HS_FFER3; 2666 lpfc_handle_eratt(phba); 2667 continue; 2668 } 2669 2670 if (pmbox->mbxStatus) { 2671 phba->sli.slistat.mbox_stat_err++; 2672 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2673 /* Mbox cmd cmpl error - RETRYing */ 2674 lpfc_printf_log(phba, KERN_INFO, 2675 LOG_MBOX | LOG_SLI, 2676 "(%d):0305 Mbox cmd cmpl " 2677 "error - RETRYing Data: x%x " 2678 "(x%x/x%x) x%x x%x x%x\n", 2679 pmb->vport ? pmb->vport->vpi : 0, 2680 pmbox->mbxCommand, 2681 lpfc_sli_config_mbox_subsys_get(phba, 2682 pmb), 2683 lpfc_sli_config_mbox_opcode_get(phba, 2684 pmb), 2685 pmbox->mbxStatus, 2686 pmbox->un.varWords[0], 2687 pmb->vport->port_state); 2688 pmbox->mbxStatus = 0; 2689 pmbox->mbxOwner = OWN_HOST; 2690 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2691 if (rc != MBX_NOT_FINISHED) 2692 continue; 2693 } 2694 } 2695 2696 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2697 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2698 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2699 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2700 "x%x x%x x%x\n", 2701 pmb->vport ? pmb->vport->vpi : 0, 2702 pmbox->mbxCommand, 2703 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2704 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2705 pmb->mbox_cmpl, 2706 *((uint32_t *) pmbox), 2707 pmbox->un.varWords[0], 2708 pmbox->un.varWords[1], 2709 pmbox->un.varWords[2], 2710 pmbox->un.varWords[3], 2711 pmbox->un.varWords[4], 2712 pmbox->un.varWords[5], 2713 pmbox->un.varWords[6], 2714 pmbox->un.varWords[7], 2715 pmbox->un.varWords[8], 2716 pmbox->un.varWords[9], 2717 pmbox->un.varWords[10]); 2718 2719 if (pmb->mbox_cmpl) 2720 pmb->mbox_cmpl(phba,pmb); 2721 } while (1); 2722 return 0; 2723 } 2724 2725 /** 2726 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2727 * @phba: Pointer to HBA context object. 2728 * @pring: Pointer to driver SLI ring object. 2729 * @tag: buffer tag. 
2730 * 2731 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2732 * is set in the tag the buffer is posted for a particular exchange, 2733 * the function will return the buffer without replacing the buffer. 2734 * If the buffer is for unsolicited ELS or CT traffic, this function 2735 * returns the buffer and also posts another buffer to the firmware. 2736 **/ 2737 static struct lpfc_dmabuf * 2738 lpfc_sli_get_buff(struct lpfc_hba *phba, 2739 struct lpfc_sli_ring *pring, 2740 uint32_t tag) 2741 { 2742 struct hbq_dmabuf *hbq_entry; 2743 2744 if (tag & QUE_BUFTAG_BIT) 2745 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2746 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2747 if (!hbq_entry) 2748 return NULL; 2749 return &hbq_entry->dbuf; 2750 } 2751 2752 /** 2753 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2754 * @phba: Pointer to HBA context object. 2755 * @pring: Pointer to driver SLI ring object. 2756 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2757 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2758 * @fch_type: the type for the first frame of the sequence. 2759 * 2760 * This function is called with no lock held. This function uses the r_ctl and 2761 * type of the received sequence to find the correct callback function to call 2762 * to process the sequence. 2763 **/ 2764 static int 2765 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2766 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2767 uint32_t fch_type) 2768 { 2769 int i; 2770 2771 switch (fch_type) { 2772 case FC_TYPE_NVME: 2773 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2774 return 1; 2775 default: 2776 break; 2777 } 2778 2779 /* unSolicited Responses */ 2780 if (pring->prt[0].profile) { 2781 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2782 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2783 saveq); 2784 return 1; 2785 } 2786 /* We must search, based on rctl / type 2787 for the right routine */ 2788 for (i = 0; i < pring->num_mask; i++) { 2789 if ((pring->prt[i].rctl == fch_r_ctl) && 2790 (pring->prt[i].type == fch_type)) { 2791 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2792 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2793 (phba, pring, saveq); 2794 return 1; 2795 } 2796 } 2797 return 0; 2798 } 2799 2800 /** 2801 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2802 * @phba: Pointer to HBA context object. 2803 * @pring: Pointer to driver SLI ring object. 2804 * @saveq: Pointer to the unsolicited iocb. 2805 * 2806 * This function is called with no lock held by the ring event handler 2807 * when there is an unsolicited iocb posted to the response ring by the 2808 * firmware. This function gets the buffer associated with the iocbs 2809 * and calls the event handler for the ring. This function handles both 2810 * qring buffers and hbq buffers. 2811 * When the function returns 1 the caller can free the iocb object otherwise 2812 * upper layer functions will free the iocb objects. 
2813 **/ 2814 static int 2815 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2816 struct lpfc_iocbq *saveq) 2817 { 2818 IOCB_t * irsp; 2819 WORD5 * w5p; 2820 uint32_t Rctl, Type; 2821 struct lpfc_iocbq *iocbq; 2822 struct lpfc_dmabuf *dmzbuf; 2823 2824 irsp = &(saveq->iocb); 2825 2826 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2827 if (pring->lpfc_sli_rcv_async_status) 2828 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2829 else 2830 lpfc_printf_log(phba, 2831 KERN_WARNING, 2832 LOG_SLI, 2833 "0316 Ring %d handler: unexpected " 2834 "ASYNC_STATUS iocb received evt_code " 2835 "0x%x\n", 2836 pring->ringno, 2837 irsp->un.asyncstat.evt_code); 2838 return 1; 2839 } 2840 2841 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2842 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2843 if (irsp->ulpBdeCount > 0) { 2844 dmzbuf = lpfc_sli_get_buff(phba, pring, 2845 irsp->un.ulpWord[3]); 2846 lpfc_in_buf_free(phba, dmzbuf); 2847 } 2848 2849 if (irsp->ulpBdeCount > 1) { 2850 dmzbuf = lpfc_sli_get_buff(phba, pring, 2851 irsp->unsli3.sli3Words[3]); 2852 lpfc_in_buf_free(phba, dmzbuf); 2853 } 2854 2855 if (irsp->ulpBdeCount > 2) { 2856 dmzbuf = lpfc_sli_get_buff(phba, pring, 2857 irsp->unsli3.sli3Words[7]); 2858 lpfc_in_buf_free(phba, dmzbuf); 2859 } 2860 2861 return 1; 2862 } 2863 2864 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2865 if (irsp->ulpBdeCount != 0) { 2866 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2867 irsp->un.ulpWord[3]); 2868 if (!saveq->context2) 2869 lpfc_printf_log(phba, 2870 KERN_ERR, 2871 LOG_SLI, 2872 "0341 Ring %d Cannot find buffer for " 2873 "an unsolicited iocb. tag 0x%x\n", 2874 pring->ringno, 2875 irsp->un.ulpWord[3]); 2876 } 2877 if (irsp->ulpBdeCount == 2) { 2878 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2879 irsp->unsli3.sli3Words[7]); 2880 if (!saveq->context3) 2881 lpfc_printf_log(phba, 2882 KERN_ERR, 2883 LOG_SLI, 2884 "0342 Ring %d Cannot find buffer for an" 2885 " unsolicited iocb. tag 0x%x\n", 2886 pring->ringno, 2887 irsp->unsli3.sli3Words[7]); 2888 } 2889 list_for_each_entry(iocbq, &saveq->list, list) { 2890 irsp = &(iocbq->iocb); 2891 if (irsp->ulpBdeCount != 0) { 2892 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2893 irsp->un.ulpWord[3]); 2894 if (!iocbq->context2) 2895 lpfc_printf_log(phba, 2896 KERN_ERR, 2897 LOG_SLI, 2898 "0343 Ring %d Cannot find " 2899 "buffer for an unsolicited iocb" 2900 ". tag 0x%x\n", pring->ringno, 2901 irsp->un.ulpWord[3]); 2902 } 2903 if (irsp->ulpBdeCount == 2) { 2904 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2905 irsp->unsli3.sli3Words[7]); 2906 if (!iocbq->context3) 2907 lpfc_printf_log(phba, 2908 KERN_ERR, 2909 LOG_SLI, 2910 "0344 Ring %d Cannot find " 2911 "buffer for an unsolicited " 2912 "iocb. 
tag 0x%x\n", 2913 pring->ringno, 2914 irsp->unsli3.sli3Words[7]); 2915 } 2916 } 2917 } 2918 if (irsp->ulpBdeCount != 0 && 2919 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2920 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2921 int found = 0; 2922 2923 /* search continue save q for same XRI */ 2924 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2925 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2926 saveq->iocb.unsli3.rcvsli3.ox_id) { 2927 list_add_tail(&saveq->list, &iocbq->list); 2928 found = 1; 2929 break; 2930 } 2931 } 2932 if (!found) 2933 list_add_tail(&saveq->clist, 2934 &pring->iocb_continue_saveq); 2935 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2936 list_del_init(&iocbq->clist); 2937 saveq = iocbq; 2938 irsp = &(saveq->iocb); 2939 } else 2940 return 0; 2941 } 2942 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2943 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2944 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2945 Rctl = FC_RCTL_ELS_REQ; 2946 Type = FC_TYPE_ELS; 2947 } else { 2948 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2949 Rctl = w5p->hcsw.Rctl; 2950 Type = w5p->hcsw.Type; 2951 2952 /* Firmware Workaround */ 2953 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2954 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2955 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2956 Rctl = FC_RCTL_ELS_REQ; 2957 Type = FC_TYPE_ELS; 2958 w5p->hcsw.Rctl = Rctl; 2959 w5p->hcsw.Type = Type; 2960 } 2961 } 2962 2963 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2964 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2965 "0313 Ring %d handler: unexpected Rctl x%x " 2966 "Type x%x received\n", 2967 pring->ringno, Rctl, Type); 2968 2969 return 1; 2970 } 2971 2972 /** 2973 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2974 * @phba: Pointer to HBA context object. 2975 * @pring: Pointer to driver SLI ring object. 2976 * @prspiocb: Pointer to response iocb object. 2977 * 2978 * This function looks up the iocb_lookup table to get the command iocb 2979 * corresponding to the given response iocb using the iotag of the 2980 * response iocb. The driver calls this function with the hbalock held 2981 * for SLI3 ports or the ring lock held for SLI4 ports. 2982 * This function returns the command iocb object if it finds the command 2983 * iocb else returns NULL. 
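 *
 * Illustrative sketch mirroring how the response-ring handlers in this file
 * use the lookup (local variable names are assumptions):
 *
 *	cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, rspiocbq);
 *	if (cmdiocbq && cmdiocbq->iocb_cmpl)
 *		cmdiocbq->iocb_cmpl(phba, cmdiocbq, rspiocbq);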
2984 **/ 2985 static struct lpfc_iocbq * 2986 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2987 struct lpfc_sli_ring *pring, 2988 struct lpfc_iocbq *prspiocb) 2989 { 2990 struct lpfc_iocbq *cmd_iocb = NULL; 2991 uint16_t iotag; 2992 spinlock_t *temp_lock = NULL; 2993 unsigned long iflag = 0; 2994 2995 if (phba->sli_rev == LPFC_SLI_REV4) 2996 temp_lock = &pring->ring_lock; 2997 else 2998 temp_lock = &phba->hbalock; 2999 3000 spin_lock_irqsave(temp_lock, iflag); 3001 iotag = prspiocb->iocb.ulpIoTag; 3002 3003 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3004 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3005 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3006 /* remove from txcmpl queue list */ 3007 list_del_init(&cmd_iocb->list); 3008 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3009 pring->txcmplq_cnt--; 3010 spin_unlock_irqrestore(temp_lock, iflag); 3011 return cmd_iocb; 3012 } 3013 } 3014 3015 spin_unlock_irqrestore(temp_lock, iflag); 3016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3017 "0317 iotag x%x is out of " 3018 "range: max iotag x%x wd0 x%x\n", 3019 iotag, phba->sli.last_iotag, 3020 *(((uint32_t *) &prspiocb->iocb) + 7)); 3021 return NULL; 3022 } 3023 3024 /** 3025 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3026 * @phba: Pointer to HBA context object. 3027 * @pring: Pointer to driver SLI ring object. 3028 * @iotag: IOCB tag. 3029 * 3030 * This function looks up the iocb_lookup table to get the command iocb 3031 * corresponding to the given iotag. The driver calls this function with 3032 * the ring lock held because this function is an SLI4 port only helper. 3033 * This function returns the command iocb object if it finds the command 3034 * iocb else returns NULL. 3035 **/ 3036 static struct lpfc_iocbq * 3037 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3038 struct lpfc_sli_ring *pring, uint16_t iotag) 3039 { 3040 struct lpfc_iocbq *cmd_iocb = NULL; 3041 spinlock_t *temp_lock = NULL; 3042 unsigned long iflag = 0; 3043 3044 if (phba->sli_rev == LPFC_SLI_REV4) 3045 temp_lock = &pring->ring_lock; 3046 else 3047 temp_lock = &phba->hbalock; 3048 3049 spin_lock_irqsave(temp_lock, iflag); 3050 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3051 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3052 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3053 /* remove from txcmpl queue list */ 3054 list_del_init(&cmd_iocb->list); 3055 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3056 pring->txcmplq_cnt--; 3057 spin_unlock_irqrestore(temp_lock, iflag); 3058 return cmd_iocb; 3059 } 3060 } 3061 3062 spin_unlock_irqrestore(temp_lock, iflag); 3063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3064 "0372 iotag x%x lookup error: max iotag (x%x) " 3065 "iocb_flag x%x\n", 3066 iotag, phba->sli.last_iotag, 3067 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3068 return NULL; 3069 } 3070 3071 /** 3072 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3073 * @phba: Pointer to HBA context object. 3074 * @pring: Pointer to driver SLI ring object. 3075 * @saveq: Pointer to the response iocb to be processed. 3076 * 3077 * This function is called by the ring event handler for non-fcp 3078 * rings when there is a new response iocb in the response ring. 3079 * The caller is not required to hold any locks. This function 3080 * gets the command iocb associated with the response iocb and 3081 * calls the completion handler for the command iocb. If there 3082 * is no completion handler, the function will free the resources 3083 * associated with command iocb. 
If the response iocb is for 3084 * an already aborted command iocb, the status of the completion 3085 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3086 * This function always returns 1. 3087 **/ 3088 static int 3089 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3090 struct lpfc_iocbq *saveq) 3091 { 3092 struct lpfc_iocbq *cmdiocbp; 3093 int rc = 1; 3094 unsigned long iflag; 3095 3096 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3097 if (cmdiocbp) { 3098 if (cmdiocbp->iocb_cmpl) { 3099 /* 3100 * If an ELS command failed send an event to mgmt 3101 * application. 3102 */ 3103 if (saveq->iocb.ulpStatus && 3104 (pring->ringno == LPFC_ELS_RING) && 3105 (cmdiocbp->iocb.ulpCommand == 3106 CMD_ELS_REQUEST64_CR)) 3107 lpfc_send_els_failure_event(phba, 3108 cmdiocbp, saveq); 3109 3110 /* 3111 * Post all ELS completions to the worker thread. 3112 * All other are passed to the completion callback. 3113 */ 3114 if (pring->ringno == LPFC_ELS_RING) { 3115 if ((phba->sli_rev < LPFC_SLI_REV4) && 3116 (cmdiocbp->iocb_flag & 3117 LPFC_DRIVER_ABORTED)) { 3118 spin_lock_irqsave(&phba->hbalock, 3119 iflag); 3120 cmdiocbp->iocb_flag &= 3121 ~LPFC_DRIVER_ABORTED; 3122 spin_unlock_irqrestore(&phba->hbalock, 3123 iflag); 3124 saveq->iocb.ulpStatus = 3125 IOSTAT_LOCAL_REJECT; 3126 saveq->iocb.un.ulpWord[4] = 3127 IOERR_SLI_ABORTED; 3128 3129 /* Firmware could still be in progress 3130 * of DMAing payload, so don't free data 3131 * buffer till after a hbeat. 3132 */ 3133 spin_lock_irqsave(&phba->hbalock, 3134 iflag); 3135 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3136 spin_unlock_irqrestore(&phba->hbalock, 3137 iflag); 3138 } 3139 if (phba->sli_rev == LPFC_SLI_REV4) { 3140 if (saveq->iocb_flag & 3141 LPFC_EXCHANGE_BUSY) { 3142 /* Set cmdiocb flag for the 3143 * exchange busy so sgl (xri) 3144 * will not be released until 3145 * the abort xri is received 3146 * from hba. 3147 */ 3148 spin_lock_irqsave( 3149 &phba->hbalock, iflag); 3150 cmdiocbp->iocb_flag |= 3151 LPFC_EXCHANGE_BUSY; 3152 spin_unlock_irqrestore( 3153 &phba->hbalock, iflag); 3154 } 3155 if (cmdiocbp->iocb_flag & 3156 LPFC_DRIVER_ABORTED) { 3157 /* 3158 * Clear LPFC_DRIVER_ABORTED 3159 * bit in case it was driver 3160 * initiated abort. 3161 */ 3162 spin_lock_irqsave( 3163 &phba->hbalock, iflag); 3164 cmdiocbp->iocb_flag &= 3165 ~LPFC_DRIVER_ABORTED; 3166 spin_unlock_irqrestore( 3167 &phba->hbalock, iflag); 3168 cmdiocbp->iocb.ulpStatus = 3169 IOSTAT_LOCAL_REJECT; 3170 cmdiocbp->iocb.un.ulpWord[4] = 3171 IOERR_ABORT_REQUESTED; 3172 /* 3173 * For SLI4, irsiocb contains 3174 * NO_XRI in sli_xritag, it 3175 * shall not affect releasing 3176 * sgl (xri) process. 3177 */ 3178 saveq->iocb.ulpStatus = 3179 IOSTAT_LOCAL_REJECT; 3180 saveq->iocb.un.ulpWord[4] = 3181 IOERR_SLI_ABORTED; 3182 spin_lock_irqsave( 3183 &phba->hbalock, iflag); 3184 saveq->iocb_flag |= 3185 LPFC_DELAY_MEM_FREE; 3186 spin_unlock_irqrestore( 3187 &phba->hbalock, iflag); 3188 } 3189 } 3190 } 3191 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3192 } else 3193 lpfc_sli_release_iocbq(phba, cmdiocbp); 3194 } else { 3195 /* 3196 * Unknown initiating command based on the response iotag. 3197 * This could be the case on the ELS ring because of 3198 * lpfc_els_abort(). 
3199 */ 3200 if (pring->ringno != LPFC_ELS_RING) { 3201 /* 3202 * Ring <ringno> handler: unexpected completion IoTag 3203 * <IoTag> 3204 */ 3205 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3206 "0322 Ring %d handler: " 3207 "unexpected completion IoTag x%x " 3208 "Data: x%x x%x x%x x%x\n", 3209 pring->ringno, 3210 saveq->iocb.ulpIoTag, 3211 saveq->iocb.ulpStatus, 3212 saveq->iocb.un.ulpWord[4], 3213 saveq->iocb.ulpCommand, 3214 saveq->iocb.ulpContext); 3215 } 3216 } 3217 3218 return rc; 3219 } 3220 3221 /** 3222 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3223 * @phba: Pointer to HBA context object. 3224 * @pring: Pointer to driver SLI ring object. 3225 * 3226 * This function is called from the iocb ring event handlers when 3227 * put pointer is ahead of the get pointer for a ring. This function signal 3228 * an error attention condition to the worker thread and the worker 3229 * thread will transition the HBA to offline state. 3230 **/ 3231 static void 3232 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3233 { 3234 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3235 /* 3236 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3237 * rsp ring <portRspMax> 3238 */ 3239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3240 "0312 Ring %d handler: portRspPut %d " 3241 "is bigger than rsp ring %d\n", 3242 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3243 pring->sli.sli3.numRiocb); 3244 3245 phba->link_state = LPFC_HBA_ERROR; 3246 3247 /* 3248 * All error attention handlers are posted to 3249 * worker thread 3250 */ 3251 phba->work_ha |= HA_ERATT; 3252 phba->work_hs = HS_FFER3; 3253 3254 lpfc_worker_wake_up(phba); 3255 3256 return; 3257 } 3258 3259 /** 3260 * lpfc_poll_eratt - Error attention polling timer timeout handler 3261 * @ptr: Pointer to address of HBA context object. 3262 * 3263 * This function is invoked by the Error Attention polling timer when the 3264 * timer times out. It will check the SLI Error Attention register for 3265 * possible attention events. If so, it will post an Error Attention event 3266 * and wake up worker thread to process it. Otherwise, it will set up the 3267 * Error Attention polling timer for the next poll. 3268 **/ 3269 void lpfc_poll_eratt(struct timer_list *t) 3270 { 3271 struct lpfc_hba *phba; 3272 uint32_t eratt = 0; 3273 uint64_t sli_intr, cnt; 3274 3275 phba = from_timer(phba, t, eratt_poll); 3276 3277 /* Here we will also keep track of interrupts per sec of the hba */ 3278 sli_intr = phba->sli.slistat.sli_intr; 3279 3280 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3281 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3282 sli_intr); 3283 else 3284 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3285 3286 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3287 do_div(cnt, phba->eratt_poll_interval); 3288 phba->sli.slistat.sli_ips = cnt; 3289 3290 phba->sli.slistat.sli_prev_intr = sli_intr; 3291 3292 /* Check chip HA register for error event */ 3293 eratt = lpfc_sli_check_eratt(phba); 3294 3295 if (eratt) 3296 /* Tell the worker thread there is work to do */ 3297 lpfc_worker_wake_up(phba); 3298 else 3299 /* Restart the timer for next eratt poll */ 3300 mod_timer(&phba->eratt_poll, 3301 jiffies + 3302 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3303 return; 3304 } 3305 3306 3307 /** 3308 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3309 * @phba: Pointer to HBA context object. 
3310 * @pring: Pointer to driver SLI ring object. 3311 * @mask: Host attention register mask for this ring. 3312 * 3313 * This function is called from the interrupt context when there is a ring 3314 * event for the fcp ring. The caller does not hold any lock. 3315 * The function processes each response iocb in the response ring until it 3316 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3317 * LE bit set. The function will call the completion handler of the command iocb 3318 * if the response iocb indicates a completion for a command iocb or it is 3319 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3320 * function if this is an unsolicited iocb. 3321 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3322 * to check it explicitly. 3323 */ 3324 int 3325 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3326 struct lpfc_sli_ring *pring, uint32_t mask) 3327 { 3328 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3329 IOCB_t *irsp = NULL; 3330 IOCB_t *entry = NULL; 3331 struct lpfc_iocbq *cmdiocbq = NULL; 3332 struct lpfc_iocbq rspiocbq; 3333 uint32_t status; 3334 uint32_t portRspPut, portRspMax; 3335 int rc = 1; 3336 lpfc_iocb_type type; 3337 unsigned long iflag; 3338 uint32_t rsp_cmpl = 0; 3339 3340 spin_lock_irqsave(&phba->hbalock, iflag); 3341 pring->stats.iocb_event++; 3342 3343 /* 3344 * The next available response entry should never exceed the maximum 3345 * entries. If it does, treat it as an adapter hardware error. 3346 */ 3347 portRspMax = pring->sli.sli3.numRiocb; 3348 portRspPut = le32_to_cpu(pgp->rspPutInx); 3349 if (unlikely(portRspPut >= portRspMax)) { 3350 lpfc_sli_rsp_pointers_error(phba, pring); 3351 spin_unlock_irqrestore(&phba->hbalock, iflag); 3352 return 1; 3353 } 3354 if (phba->fcp_ring_in_use) { 3355 spin_unlock_irqrestore(&phba->hbalock, iflag); 3356 return 1; 3357 } else 3358 phba->fcp_ring_in_use = 1; 3359 3360 rmb(); 3361 while (pring->sli.sli3.rspidx != portRspPut) { 3362 /* 3363 * Fetch an entry off the ring and copy it into a local data 3364 * structure. The copy involves a byte-swap since the 3365 * network byte order and pci byte orders are different. 3366 */ 3367 entry = lpfc_resp_iocb(phba, pring); 3368 phba->last_completion_time = jiffies; 3369 3370 if (++pring->sli.sli3.rspidx >= portRspMax) 3371 pring->sli.sli3.rspidx = 0; 3372 3373 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3374 (uint32_t *) &rspiocbq.iocb, 3375 phba->iocb_rsp_size); 3376 INIT_LIST_HEAD(&(rspiocbq.list)); 3377 irsp = &rspiocbq.iocb; 3378 3379 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3380 pring->stats.iocb_rsp++; 3381 rsp_cmpl++; 3382 3383 if (unlikely(irsp->ulpStatus)) { 3384 /* 3385 * If resource errors reported from HBA, reduce 3386 * queuedepths of the SCSI device. 
3387 */ 3388 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3389 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3390 IOERR_NO_RESOURCES)) { 3391 spin_unlock_irqrestore(&phba->hbalock, iflag); 3392 phba->lpfc_rampdown_queue_depth(phba); 3393 spin_lock_irqsave(&phba->hbalock, iflag); 3394 } 3395 3396 /* Rsp ring <ringno> error: IOCB */ 3397 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3398 "0336 Rsp Ring %d error: IOCB Data: " 3399 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3400 pring->ringno, 3401 irsp->un.ulpWord[0], 3402 irsp->un.ulpWord[1], 3403 irsp->un.ulpWord[2], 3404 irsp->un.ulpWord[3], 3405 irsp->un.ulpWord[4], 3406 irsp->un.ulpWord[5], 3407 *(uint32_t *)&irsp->un1, 3408 *((uint32_t *)&irsp->un1 + 1)); 3409 } 3410 3411 switch (type) { 3412 case LPFC_ABORT_IOCB: 3413 case LPFC_SOL_IOCB: 3414 /* 3415 * Idle exchange closed via ABTS from port. No iocb 3416 * resources need to be recovered. 3417 */ 3418 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3419 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3420 "0333 IOCB cmd 0x%x" 3421 " processed. Skipping" 3422 " completion\n", 3423 irsp->ulpCommand); 3424 break; 3425 } 3426 3427 spin_unlock_irqrestore(&phba->hbalock, iflag); 3428 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3429 &rspiocbq); 3430 spin_lock_irqsave(&phba->hbalock, iflag); 3431 if (unlikely(!cmdiocbq)) 3432 break; 3433 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3434 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3435 if (cmdiocbq->iocb_cmpl) { 3436 spin_unlock_irqrestore(&phba->hbalock, iflag); 3437 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3438 &rspiocbq); 3439 spin_lock_irqsave(&phba->hbalock, iflag); 3440 } 3441 break; 3442 case LPFC_UNSOL_IOCB: 3443 spin_unlock_irqrestore(&phba->hbalock, iflag); 3444 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3445 spin_lock_irqsave(&phba->hbalock, iflag); 3446 break; 3447 default: 3448 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3449 char adaptermsg[LPFC_MAX_ADPTMSG]; 3450 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3451 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3452 MAX_MSG_DATA); 3453 dev_warn(&((phba->pcidev)->dev), 3454 "lpfc%d: %s\n", 3455 phba->brd_no, adaptermsg); 3456 } else { 3457 /* Unknown IOCB command */ 3458 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3459 "0334 Unknown IOCB command " 3460 "Data: x%x, x%x x%x x%x x%x\n", 3461 type, irsp->ulpCommand, 3462 irsp->ulpStatus, 3463 irsp->ulpIoTag, 3464 irsp->ulpContext); 3465 } 3466 break; 3467 } 3468 3469 /* 3470 * The response IOCB has been processed. Update the ring 3471 * pointer in SLIM. If the port response put pointer has not 3472 * been updated, sync the pgp->rspPutInx and fetch the new port 3473 * response put pointer. 
3474 */ 3475 writel(pring->sli.sli3.rspidx, 3476 &phba->host_gp[pring->ringno].rspGetInx); 3477 3478 if (pring->sli.sli3.rspidx == portRspPut) 3479 portRspPut = le32_to_cpu(pgp->rspPutInx); 3480 } 3481 3482 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3483 pring->stats.iocb_rsp_full++; 3484 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3485 writel(status, phba->CAregaddr); 3486 readl(phba->CAregaddr); 3487 } 3488 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3489 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3490 pring->stats.iocb_cmd_empty++; 3491 3492 /* Force update of the local copy of cmdGetInx */ 3493 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3494 lpfc_sli_resume_iocb(phba, pring); 3495 3496 if ((pring->lpfc_sli_cmd_available)) 3497 (pring->lpfc_sli_cmd_available) (phba, pring); 3498 3499 } 3500 3501 phba->fcp_ring_in_use = 0; 3502 spin_unlock_irqrestore(&phba->hbalock, iflag); 3503 return rc; 3504 } 3505 3506 /** 3507 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3508 * @phba: Pointer to HBA context object. 3509 * @pring: Pointer to driver SLI ring object. 3510 * @rspiocbp: Pointer to driver response IOCB object. 3511 * 3512 * This function is called from the worker thread when there is a slow-path 3513 * response IOCB to process. This function chains all the response iocbs until 3514 * seeing the iocb with the LE bit set. The function will call 3515 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3516 * completion of a command iocb. The function will call the 3517 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3518 * The function frees the resources or calls the completion handler if this 3519 * iocb is an abort completion. The function returns NULL when the response 3520 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3521 * this function shall chain the iocb on to the iocb_continueq and return the 3522 * response iocb passed in. 3523 **/ 3524 static struct lpfc_iocbq * 3525 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3526 struct lpfc_iocbq *rspiocbp) 3527 { 3528 struct lpfc_iocbq *saveq; 3529 struct lpfc_iocbq *cmdiocbp; 3530 struct lpfc_iocbq *next_iocb; 3531 IOCB_t *irsp = NULL; 3532 uint32_t free_saveq; 3533 uint8_t iocb_cmd_type; 3534 lpfc_iocb_type type; 3535 unsigned long iflag; 3536 int rc; 3537 3538 spin_lock_irqsave(&phba->hbalock, iflag); 3539 /* First add the response iocb to the countinueq list */ 3540 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3541 pring->iocb_continueq_cnt++; 3542 3543 /* Now, determine whether the list is completed for processing */ 3544 irsp = &rspiocbp->iocb; 3545 if (irsp->ulpLe) { 3546 /* 3547 * By default, the driver expects to free all resources 3548 * associated with this iocb completion. 3549 */ 3550 free_saveq = 1; 3551 saveq = list_get_first(&pring->iocb_continueq, 3552 struct lpfc_iocbq, list); 3553 irsp = &(saveq->iocb); 3554 list_del_init(&pring->iocb_continueq); 3555 pring->iocb_continueq_cnt = 0; 3556 3557 pring->stats.iocb_rsp++; 3558 3559 /* 3560 * If resource errors reported from HBA, reduce 3561 * queuedepths of the SCSI device. 
3562 */ 3563 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3564 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3565 IOERR_NO_RESOURCES)) { 3566 spin_unlock_irqrestore(&phba->hbalock, iflag); 3567 phba->lpfc_rampdown_queue_depth(phba); 3568 spin_lock_irqsave(&phba->hbalock, iflag); 3569 } 3570 3571 if (irsp->ulpStatus) { 3572 /* Rsp ring <ringno> error: IOCB */ 3573 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3574 "0328 Rsp Ring %d error: " 3575 "IOCB Data: " 3576 "x%x x%x x%x x%x " 3577 "x%x x%x x%x x%x " 3578 "x%x x%x x%x x%x " 3579 "x%x x%x x%x x%x\n", 3580 pring->ringno, 3581 irsp->un.ulpWord[0], 3582 irsp->un.ulpWord[1], 3583 irsp->un.ulpWord[2], 3584 irsp->un.ulpWord[3], 3585 irsp->un.ulpWord[4], 3586 irsp->un.ulpWord[5], 3587 *(((uint32_t *) irsp) + 6), 3588 *(((uint32_t *) irsp) + 7), 3589 *(((uint32_t *) irsp) + 8), 3590 *(((uint32_t *) irsp) + 9), 3591 *(((uint32_t *) irsp) + 10), 3592 *(((uint32_t *) irsp) + 11), 3593 *(((uint32_t *) irsp) + 12), 3594 *(((uint32_t *) irsp) + 13), 3595 *(((uint32_t *) irsp) + 14), 3596 *(((uint32_t *) irsp) + 15)); 3597 } 3598 3599 /* 3600 * Fetch the IOCB command type and call the correct completion 3601 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3602 * get freed back to the lpfc_iocb_list by the discovery 3603 * kernel thread. 3604 */ 3605 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3606 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3607 switch (type) { 3608 case LPFC_SOL_IOCB: 3609 spin_unlock_irqrestore(&phba->hbalock, iflag); 3610 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3611 spin_lock_irqsave(&phba->hbalock, iflag); 3612 break; 3613 3614 case LPFC_UNSOL_IOCB: 3615 spin_unlock_irqrestore(&phba->hbalock, iflag); 3616 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3617 spin_lock_irqsave(&phba->hbalock, iflag); 3618 if (!rc) 3619 free_saveq = 0; 3620 break; 3621 3622 case LPFC_ABORT_IOCB: 3623 cmdiocbp = NULL; 3624 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { 3625 spin_unlock_irqrestore(&phba->hbalock, iflag); 3626 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3627 saveq); 3628 spin_lock_irqsave(&phba->hbalock, iflag); 3629 } 3630 if (cmdiocbp) { 3631 /* Call the specified completion routine */ 3632 if (cmdiocbp->iocb_cmpl) { 3633 spin_unlock_irqrestore(&phba->hbalock, 3634 iflag); 3635 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3636 saveq); 3637 spin_lock_irqsave(&phba->hbalock, 3638 iflag); 3639 } else 3640 __lpfc_sli_release_iocbq(phba, 3641 cmdiocbp); 3642 } 3643 break; 3644 3645 case LPFC_UNKNOWN_IOCB: 3646 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3647 char adaptermsg[LPFC_MAX_ADPTMSG]; 3648 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3649 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3650 MAX_MSG_DATA); 3651 dev_warn(&((phba->pcidev)->dev), 3652 "lpfc%d: %s\n", 3653 phba->brd_no, adaptermsg); 3654 } else { 3655 /* Unknown IOCB command */ 3656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3657 "0335 Unknown IOCB " 3658 "command Data: x%x " 3659 "x%x x%x x%x\n", 3660 irsp->ulpCommand, 3661 irsp->ulpStatus, 3662 irsp->ulpIoTag, 3663 irsp->ulpContext); 3664 } 3665 break; 3666 } 3667 3668 if (free_saveq) { 3669 list_for_each_entry_safe(rspiocbp, next_iocb, 3670 &saveq->list, list) { 3671 list_del_init(&rspiocbp->list); 3672 __lpfc_sli_release_iocbq(phba, rspiocbp); 3673 } 3674 __lpfc_sli_release_iocbq(phba, saveq); 3675 } 3676 rspiocbp = NULL; 3677 } 3678 spin_unlock_irqrestore(&phba->hbalock, iflag); 3679 return rspiocbp; 3680 } 3681 3682 /** 3683 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path 
iocbs 3684 * @phba: Pointer to HBA context object. 3685 * @pring: Pointer to driver SLI ring object. 3686 * @mask: Host attention register mask for this ring. 3687 * 3688 * This routine wraps the actual slow_ring event process routine from the 3689 * API jump table function pointer from the lpfc_hba struct. 3690 **/ 3691 void 3692 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3693 struct lpfc_sli_ring *pring, uint32_t mask) 3694 { 3695 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3696 } 3697 3698 /** 3699 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3700 * @phba: Pointer to HBA context object. 3701 * @pring: Pointer to driver SLI ring object. 3702 * @mask: Host attention register mask for this ring. 3703 * 3704 * This function is called from the worker thread when there is a ring event 3705 * for non-fcp rings. The caller does not hold any lock. The function will 3706 * remove each response iocb in the response ring and calls the handle 3707 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3708 **/ 3709 static void 3710 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3711 struct lpfc_sli_ring *pring, uint32_t mask) 3712 { 3713 struct lpfc_pgp *pgp; 3714 IOCB_t *entry; 3715 IOCB_t *irsp = NULL; 3716 struct lpfc_iocbq *rspiocbp = NULL; 3717 uint32_t portRspPut, portRspMax; 3718 unsigned long iflag; 3719 uint32_t status; 3720 3721 pgp = &phba->port_gp[pring->ringno]; 3722 spin_lock_irqsave(&phba->hbalock, iflag); 3723 pring->stats.iocb_event++; 3724 3725 /* 3726 * The next available response entry should never exceed the maximum 3727 * entries. If it does, treat it as an adapter hardware error. 3728 */ 3729 portRspMax = pring->sli.sli3.numRiocb; 3730 portRspPut = le32_to_cpu(pgp->rspPutInx); 3731 if (portRspPut >= portRspMax) { 3732 /* 3733 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3734 * rsp ring <portRspMax> 3735 */ 3736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3737 "0303 Ring %d handler: portRspPut %d " 3738 "is bigger than rsp ring %d\n", 3739 pring->ringno, portRspPut, portRspMax); 3740 3741 phba->link_state = LPFC_HBA_ERROR; 3742 spin_unlock_irqrestore(&phba->hbalock, iflag); 3743 3744 phba->work_hs = HS_FFER3; 3745 lpfc_handle_eratt(phba); 3746 3747 return; 3748 } 3749 3750 rmb(); 3751 while (pring->sli.sli3.rspidx != portRspPut) { 3752 /* 3753 * Build a completion list and call the appropriate handler. 3754 * The process is to get the next available response iocb, get 3755 * a free iocb from the list, copy the response data into the 3756 * free iocb, insert to the continuation list, and update the 3757 * next response index to slim. This process makes response 3758 * iocb's in the ring available to DMA as fast as possible but 3759 * pays a penalty for a copy operation. Since the iocb is 3760 * only 32 bytes, this penalty is considered small relative to 3761 * the PCI reads for register values and a slim write. When 3762 * the ulpLe field is set, the entire Command has been 3763 * received. 3764 */ 3765 entry = lpfc_resp_iocb(phba, pring); 3766 3767 phba->last_completion_time = jiffies; 3768 rspiocbp = __lpfc_sli_get_iocbq(phba); 3769 if (rspiocbp == NULL) { 3770 printk(KERN_ERR "%s: out of buffers! 
Failing " 3771 "completion.\n", __func__); 3772 break; 3773 } 3774 3775 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3776 phba->iocb_rsp_size); 3777 irsp = &rspiocbp->iocb; 3778 3779 if (++pring->sli.sli3.rspidx >= portRspMax) 3780 pring->sli.sli3.rspidx = 0; 3781 3782 if (pring->ringno == LPFC_ELS_RING) { 3783 lpfc_debugfs_slow_ring_trc(phba, 3784 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3785 *(((uint32_t *) irsp) + 4), 3786 *(((uint32_t *) irsp) + 6), 3787 *(((uint32_t *) irsp) + 7)); 3788 } 3789 3790 writel(pring->sli.sli3.rspidx, 3791 &phba->host_gp[pring->ringno].rspGetInx); 3792 3793 spin_unlock_irqrestore(&phba->hbalock, iflag); 3794 /* Handle the response IOCB */ 3795 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3796 spin_lock_irqsave(&phba->hbalock, iflag); 3797 3798 /* 3799 * If the port response put pointer has not been updated, sync 3800 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3801 * response put pointer. 3802 */ 3803 if (pring->sli.sli3.rspidx == portRspPut) { 3804 portRspPut = le32_to_cpu(pgp->rspPutInx); 3805 } 3806 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3807 3808 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3809 /* At least one response entry has been freed */ 3810 pring->stats.iocb_rsp_full++; 3811 /* SET RxRE_RSP in Chip Att register */ 3812 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3813 writel(status, phba->CAregaddr); 3814 readl(phba->CAregaddr); /* flush */ 3815 } 3816 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3817 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3818 pring->stats.iocb_cmd_empty++; 3819 3820 /* Force update of the local copy of cmdGetInx */ 3821 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3822 lpfc_sli_resume_iocb(phba, pring); 3823 3824 if ((pring->lpfc_sli_cmd_available)) 3825 (pring->lpfc_sli_cmd_available) (phba, pring); 3826 3827 } 3828 3829 spin_unlock_irqrestore(&phba->hbalock, iflag); 3830 return; 3831 } 3832 3833 /** 3834 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3835 * @phba: Pointer to HBA context object. 3836 * @pring: Pointer to driver SLI ring object. 3837 * @mask: Host attention register mask for this ring. 3838 * 3839 * This function is called from the worker thread when there is a pending 3840 * ELS response iocb on the driver internal slow-path response iocb worker 3841 * queue. The caller does not hold any lock. The function will remove each 3842 * response iocb from the response worker queue and calls the handle 3843 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
3844 **/ 3845 static void 3846 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3847 struct lpfc_sli_ring *pring, uint32_t mask) 3848 { 3849 struct lpfc_iocbq *irspiocbq; 3850 struct hbq_dmabuf *dmabuf; 3851 struct lpfc_cq_event *cq_event; 3852 unsigned long iflag; 3853 int count = 0; 3854 3855 spin_lock_irqsave(&phba->hbalock, iflag); 3856 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3857 spin_unlock_irqrestore(&phba->hbalock, iflag); 3858 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3859 /* Get the response iocb from the head of work queue */ 3860 spin_lock_irqsave(&phba->hbalock, iflag); 3861 list_remove_head(&phba->sli4_hba.sp_queue_event, 3862 cq_event, struct lpfc_cq_event, list); 3863 spin_unlock_irqrestore(&phba->hbalock, iflag); 3864 3865 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3866 case CQE_CODE_COMPL_WQE: 3867 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3868 cq_event); 3869 /* Translate ELS WCQE to response IOCBQ */ 3870 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3871 irspiocbq); 3872 if (irspiocbq) 3873 lpfc_sli_sp_handle_rspiocb(phba, pring, 3874 irspiocbq); 3875 count++; 3876 break; 3877 case CQE_CODE_RECEIVE: 3878 case CQE_CODE_RECEIVE_V1: 3879 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3880 cq_event); 3881 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3882 count++; 3883 break; 3884 default: 3885 break; 3886 } 3887 3888 /* Limit the number of events to 64 to avoid soft lockups */ 3889 if (count == 64) 3890 break; 3891 } 3892 } 3893 3894 /** 3895 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3896 * @phba: Pointer to HBA context object. 3897 * @pring: Pointer to driver SLI ring object. 3898 * 3899 * This function aborts all iocbs in the given ring and frees all the iocb 3900 * objects in txq. This function issues an abort iocb for all the iocb commands 3901 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3902 * the return of this function. The caller is not required to hold any locks. 3903 **/ 3904 void 3905 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3906 { 3907 LIST_HEAD(completions); 3908 struct lpfc_iocbq *iocb, *next_iocb; 3909 3910 if (pring->ringno == LPFC_ELS_RING) { 3911 lpfc_fabric_abort_hba(phba); 3912 } 3913 3914 /* Error everything on txq and txcmplq 3915 * First do the txq. 3916 */ 3917 if (phba->sli_rev >= LPFC_SLI_REV4) { 3918 spin_lock_irq(&pring->ring_lock); 3919 list_splice_init(&pring->txq, &completions); 3920 pring->txq_cnt = 0; 3921 spin_unlock_irq(&pring->ring_lock); 3922 3923 spin_lock_irq(&phba->hbalock); 3924 /* Next issue ABTS for everything on the txcmplq */ 3925 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3926 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3927 spin_unlock_irq(&phba->hbalock); 3928 } else { 3929 spin_lock_irq(&phba->hbalock); 3930 list_splice_init(&pring->txq, &completions); 3931 pring->txq_cnt = 0; 3932 3933 /* Next issue ABTS for everything on the txcmplq */ 3934 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3935 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3936 spin_unlock_irq(&phba->hbalock); 3937 } 3938 3939 /* Cancel all the IOCBs from the completions list */ 3940 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3941 IOERR_SLI_ABORTED); 3942 } 3943 3944 /** 3945 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3946 * @phba: Pointer to HBA context object. 3947 * @pring: Pointer to driver SLI ring object. 
3948 * 3949 * This function aborts all iocbs in FCP rings and frees all the iocb 3950 * objects in txq. This function issues an abort iocb for all the iocb commands 3951 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3952 * the return of this function. The caller is not required to hold any locks. 3953 **/ 3954 void 3955 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3956 { 3957 struct lpfc_sli *psli = &phba->sli; 3958 struct lpfc_sli_ring *pring; 3959 uint32_t i; 3960 3961 /* Look on all the FCP Rings for the iotag */ 3962 if (phba->sli_rev >= LPFC_SLI_REV4) { 3963 for (i = 0; i < phba->cfg_hdw_queue; i++) { 3964 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 3965 lpfc_sli_abort_iocb_ring(phba, pring); 3966 } 3967 } else { 3968 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3969 lpfc_sli_abort_iocb_ring(phba, pring); 3970 } 3971 } 3972 3973 /** 3974 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3975 * @phba: Pointer to HBA context object. 3976 * 3977 * This function flushes all iocbs in the fcp ring and frees all the iocb 3978 * objects in txq and txcmplq. This function will not issue abort iocbs 3979 * for all the iocb commands in txcmplq, they will just be returned with 3980 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3981 * slot has been permanently disabled. 3982 **/ 3983 void 3984 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3985 { 3986 LIST_HEAD(txq); 3987 LIST_HEAD(txcmplq); 3988 struct lpfc_sli *psli = &phba->sli; 3989 struct lpfc_sli_ring *pring; 3990 uint32_t i; 3991 struct lpfc_iocbq *piocb, *next_iocb; 3992 3993 spin_lock_irq(&phba->hbalock); 3994 /* Indicate the I/O queues are flushed */ 3995 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3996 spin_unlock_irq(&phba->hbalock); 3997 3998 /* Look on all the FCP Rings for the iotag */ 3999 if (phba->sli_rev >= LPFC_SLI_REV4) { 4000 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4001 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 4002 4003 spin_lock_irq(&pring->ring_lock); 4004 /* Retrieve everything on txq */ 4005 list_splice_init(&pring->txq, &txq); 4006 list_for_each_entry_safe(piocb, next_iocb, 4007 &pring->txcmplq, list) 4008 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4009 /* Retrieve everything on the txcmplq */ 4010 list_splice_init(&pring->txcmplq, &txcmplq); 4011 pring->txq_cnt = 0; 4012 pring->txcmplq_cnt = 0; 4013 spin_unlock_irq(&pring->ring_lock); 4014 4015 /* Flush the txq */ 4016 lpfc_sli_cancel_iocbs(phba, &txq, 4017 IOSTAT_LOCAL_REJECT, 4018 IOERR_SLI_DOWN); 4019 /* Flush the txcmpq */ 4020 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4021 IOSTAT_LOCAL_REJECT, 4022 IOERR_SLI_DOWN); 4023 } 4024 } else { 4025 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4026 4027 spin_lock_irq(&phba->hbalock); 4028 /* Retrieve everything on txq */ 4029 list_splice_init(&pring->txq, &txq); 4030 list_for_each_entry_safe(piocb, next_iocb, 4031 &pring->txcmplq, list) 4032 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4033 /* Retrieve everything on the txcmplq */ 4034 list_splice_init(&pring->txcmplq, &txcmplq); 4035 pring->txq_cnt = 0; 4036 pring->txcmplq_cnt = 0; 4037 spin_unlock_irq(&phba->hbalock); 4038 4039 /* Flush the txq */ 4040 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4041 IOERR_SLI_DOWN); 4042 /* Flush the txcmpq */ 4043 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4044 IOERR_SLI_DOWN); 4045 } 4046 } 4047 4048 /** 4049 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 4050 * @phba: Pointer to HBA context object. 
4051 *
4052 * This function flushes all wqes in the nvme rings and frees all resources
4053 * in the txcmplq. This function does not issue abort wqes for the IO
4054 * commands in txcmplq, they will just be returned with
4055 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4056 * slot has been permanently disabled.
4057 **/
4058 void
4059 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4060 {
4061 LIST_HEAD(txcmplq);
4062 struct lpfc_sli_ring *pring;
4063 uint32_t i;
4064 struct lpfc_iocbq *piocb, *next_iocb;
4065 
4066 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4067 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4068 return;
4069 
4070 /* Hint to other driver operations that a flush is in progress. */
4071 spin_lock_irq(&phba->hbalock);
4072 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4073 spin_unlock_irq(&phba->hbalock);
4074 
4075 /* Cycle through all NVME rings and complete each IO with
4076 * a local driver reason code. This is a flush so no
4077 * abort exchange to FW.
4078 */
4079 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4080 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4081 
4082 spin_lock_irq(&pring->ring_lock);
4083 list_for_each_entry_safe(piocb, next_iocb,
4084 &pring->txcmplq, list)
4085 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4086 /* Retrieve everything on the txcmplq */
4087 list_splice_init(&pring->txcmplq, &txcmplq);
4088 pring->txcmplq_cnt = 0;
4089 spin_unlock_irq(&pring->ring_lock);
4090 
4091 /* Flush the txcmplq */
4092 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4093 IOSTAT_LOCAL_REJECT,
4094 IOERR_SLI_DOWN);
4095 }
4096 }
4097 
4098 /**
4099 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4100 * @phba: Pointer to HBA context object.
4101 * @mask: Bit mask to be checked.
4102 *
4103 * This function reads the host status register and compares
4104 * with the provided bit mask to check if HBA completed
4105 * the restart. This function will wait in a loop for the
4106 * HBA to complete restart. If the HBA does not restart within
4107 * 15 iterations, the function will reset the HBA again. The
4108 * function returns 1 when the HBA fails to restart, otherwise it returns
4109 * zero.
4110 **/
4111 static int
4112 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4113 {
4114 uint32_t status;
4115 int i = 0;
4116 int retval = 0;
4117 
4118 /* Read the HBA Host Status Register */
4119 if (lpfc_readl(phba->HSregaddr, &status))
4120 return 1;
4121 
4122 /*
4123 * Check status register every 10ms for 5 retries, then every
4124 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4125 * check every 2.5 sec for 5 more.
4126 * Break out of the loop if errors occurred during init.
4127 */
4128 while (((status & mask) != mask) &&
4129 !(status & HS_FFERM) &&
4130 i++ < 20) {
4131 
4132 if (i <= 5)
4133 msleep(10);
4134 else if (i <= 10)
4135 msleep(500);
4136 else
4137 msleep(2500);
4138 
4139 if (i == 15) {
4140 /* Do post */
4141 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4142 lpfc_sli_brdrestart(phba);
4143 }
4144 /* Read the HBA Host Status Register */
4145 if (lpfc_readl(phba->HSregaddr, &status)) {
4146 retval = 1;
4147 break;
4148 }
4149 }
4150 
4151 /* Check to see if any errors occurred during init */
4152 if ((status & HS_FFERM) || (i >= 20)) {
4153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4154 "2751 Adapter failed to restart, "
4155 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4156 status,
4157 readl(phba->MBslimaddr + 0xa8),
4158 readl(phba->MBslimaddr + 0xac));
4159 phba->link_state = LPFC_HBA_ERROR;
4160 retval = 1;
4161 }
4162 
4163 return retval;
4164 }
4165 
4166 /**
4167 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4168 * @phba: Pointer to HBA context object.
4169 * @mask: Bit mask to be checked.
4170 *
4171 * This function checks the host status register to check if the HBA is
4172 * ready. This function will wait in a loop for the HBA to be ready.
4173 * If the HBA is not ready, the function will reset the HBA PCI
4174 * function again. The function returns 1 when the HBA fails to be ready,
4175 * otherwise it returns zero.
4176 **/
4177 static int
4178 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4179 {
4180 uint32_t status;
4181 int retval = 0;
4182 
4183 /* Read the HBA Host Status Register */
4184 status = lpfc_sli4_post_status_check(phba);
4185 
4186 if (status) {
4187 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4188 lpfc_sli_brdrestart(phba);
4189 status = lpfc_sli4_post_status_check(phba);
4190 }
4191 
4192 /* Check to see if any errors occurred during init */
4193 if (status) {
4194 phba->link_state = LPFC_HBA_ERROR;
4195 retval = 1;
4196 } else
4197 phba->sli4_hba.intr_enable = 0;
4198 
4199 return retval;
4200 }
4201 
4202 /**
4203 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4204 * @phba: Pointer to HBA context object.
4205 * @mask: Bit mask to be checked.
4206 *
4207 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4208 * from the API jump table function pointer from the lpfc_hba struct.
4209 **/
4210 int
4211 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4212 {
4213 return phba->lpfc_sli_brdready(phba, mask);
4214 }
4215 
4216 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4217 
4218 /**
4219 * lpfc_reset_barrier - Make HBA ready for HBA reset
4220 * @phba: Pointer to HBA context object.
4221 *
4222 * This function is called before resetting an HBA. This function is called
4223 * with hbalock held and requests HBA to quiesce DMAs before a reset.
4224 **/
4225 void lpfc_reset_barrier(struct lpfc_hba *phba)
4226 {
4227 uint32_t __iomem *resp_buf;
4228 uint32_t __iomem *mbox_buf;
4229 volatile uint32_t mbox;
4230 uint32_t hc_copy, ha_copy, resp_data;
4231 int i;
4232 uint8_t hdrtype;
4233 
4234 lockdep_assert_held(&phba->hbalock);
4235 
4236 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4237 if (hdrtype != 0x80 ||
4238 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4239 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4240 return;
4241 
4242 /*
4243 * Tell the other part of the chip to suspend temporarily all
4244 * its DMA activity.
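 * This is done by writing a chip-owned MBX_KILL_BOARD mailbox word to
 * SLIM and then polling for the BARRIER_TEST_PATTERN written at
 * resp_buf + 1 to be complemented by the chip.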
4245 */ 4246 resp_buf = phba->MBslimaddr; 4247 4248 /* Disable the error attention */ 4249 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4250 return; 4251 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4252 readl(phba->HCregaddr); /* flush */ 4253 phba->link_flag |= LS_IGNORE_ERATT; 4254 4255 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4256 return; 4257 if (ha_copy & HA_ERATT) { 4258 /* Clear Chip error bit */ 4259 writel(HA_ERATT, phba->HAregaddr); 4260 phba->pport->stopped = 1; 4261 } 4262 4263 mbox = 0; 4264 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4265 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4266 4267 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4268 mbox_buf = phba->MBslimaddr; 4269 writel(mbox, mbox_buf); 4270 4271 for (i = 0; i < 50; i++) { 4272 if (lpfc_readl((resp_buf + 1), &resp_data)) 4273 return; 4274 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4275 mdelay(1); 4276 else 4277 break; 4278 } 4279 resp_data = 0; 4280 if (lpfc_readl((resp_buf + 1), &resp_data)) 4281 return; 4282 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4283 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4284 phba->pport->stopped) 4285 goto restore_hc; 4286 else 4287 goto clear_errat; 4288 } 4289 4290 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4291 resp_data = 0; 4292 for (i = 0; i < 500; i++) { 4293 if (lpfc_readl(resp_buf, &resp_data)) 4294 return; 4295 if (resp_data != mbox) 4296 mdelay(1); 4297 else 4298 break; 4299 } 4300 4301 clear_errat: 4302 4303 while (++i < 500) { 4304 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4305 return; 4306 if (!(ha_copy & HA_ERATT)) 4307 mdelay(1); 4308 else 4309 break; 4310 } 4311 4312 if (readl(phba->HAregaddr) & HA_ERATT) { 4313 writel(HA_ERATT, phba->HAregaddr); 4314 phba->pport->stopped = 1; 4315 } 4316 4317 restore_hc: 4318 phba->link_flag &= ~LS_IGNORE_ERATT; 4319 writel(hc_copy, phba->HCregaddr); 4320 readl(phba->HCregaddr); /* flush */ 4321 } 4322 4323 /** 4324 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4325 * @phba: Pointer to HBA context object. 4326 * 4327 * This function issues a kill_board mailbox command and waits for 4328 * the error attention interrupt. This function is called for stopping 4329 * the firmware processing. The caller is not required to hold any 4330 * locks. This function calls lpfc_hba_down_post function to free 4331 * any pending commands after the kill. The function will return 1 when it 4332 * fails to kill the board else will return 0. 
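 * There is no mailbox completion for KILL_BOARD, so the routine polls the
 * host attention register for ERATT for up to 3 seconds before declaring
 * the board state undefined.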
4333 **/ 4334 int 4335 lpfc_sli_brdkill(struct lpfc_hba *phba) 4336 { 4337 struct lpfc_sli *psli; 4338 LPFC_MBOXQ_t *pmb; 4339 uint32_t status; 4340 uint32_t ha_copy; 4341 int retval; 4342 int i = 0; 4343 4344 psli = &phba->sli; 4345 4346 /* Kill HBA */ 4347 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4348 "0329 Kill HBA Data: x%x x%x\n", 4349 phba->pport->port_state, psli->sli_flag); 4350 4351 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4352 if (!pmb) 4353 return 1; 4354 4355 /* Disable the error attention */ 4356 spin_lock_irq(&phba->hbalock); 4357 if (lpfc_readl(phba->HCregaddr, &status)) { 4358 spin_unlock_irq(&phba->hbalock); 4359 mempool_free(pmb, phba->mbox_mem_pool); 4360 return 1; 4361 } 4362 status &= ~HC_ERINT_ENA; 4363 writel(status, phba->HCregaddr); 4364 readl(phba->HCregaddr); /* flush */ 4365 phba->link_flag |= LS_IGNORE_ERATT; 4366 spin_unlock_irq(&phba->hbalock); 4367 4368 lpfc_kill_board(phba, pmb); 4369 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4370 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4371 4372 if (retval != MBX_SUCCESS) { 4373 if (retval != MBX_BUSY) 4374 mempool_free(pmb, phba->mbox_mem_pool); 4375 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4376 "2752 KILL_BOARD command failed retval %d\n", 4377 retval); 4378 spin_lock_irq(&phba->hbalock); 4379 phba->link_flag &= ~LS_IGNORE_ERATT; 4380 spin_unlock_irq(&phba->hbalock); 4381 return 1; 4382 } 4383 4384 spin_lock_irq(&phba->hbalock); 4385 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4386 spin_unlock_irq(&phba->hbalock); 4387 4388 mempool_free(pmb, phba->mbox_mem_pool); 4389 4390 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4391 * attention every 100ms for 3 seconds. If we don't get ERATT after 4392 * 3 seconds we still set HBA_ERROR state because the status of the 4393 * board is now undefined. 4394 */ 4395 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4396 return 1; 4397 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4398 mdelay(100); 4399 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4400 return 1; 4401 } 4402 4403 del_timer_sync(&psli->mbox_tmo); 4404 if (ha_copy & HA_ERATT) { 4405 writel(HA_ERATT, phba->HAregaddr); 4406 phba->pport->stopped = 1; 4407 } 4408 spin_lock_irq(&phba->hbalock); 4409 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4410 psli->mbox_active = NULL; 4411 phba->link_flag &= ~LS_IGNORE_ERATT; 4412 spin_unlock_irq(&phba->hbalock); 4413 4414 lpfc_hba_down_post(phba); 4415 phba->link_state = LPFC_HBA_ERROR; 4416 4417 return ha_copy & HA_ERATT ? 0 : 1; 4418 } 4419 4420 /** 4421 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4422 * @phba: Pointer to HBA context object. 4423 * 4424 * This function resets the HBA by writing HC_INITFF to the control 4425 * register. After the HBA resets, this function resets all the iocb ring 4426 * indices. This function disables PCI layer parity checking during 4427 * the reset. 4428 * This function returns 0 always. 4429 * The caller is not required to hold any locks. 4430 **/ 4431 int 4432 lpfc_sli_brdreset(struct lpfc_hba *phba) 4433 { 4434 struct lpfc_sli *psli; 4435 struct lpfc_sli_ring *pring; 4436 uint16_t cfg_value; 4437 int i; 4438 4439 psli = &phba->sli; 4440 4441 /* Reset HBA */ 4442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4443 "0325 Reset HBA Data: x%x x%x\n", 4444 (phba->pport) ? 
phba->pport->port_state : 0, 4445 psli->sli_flag); 4446 4447 /* perform board reset */ 4448 phba->fc_eventTag = 0; 4449 phba->link_events = 0; 4450 if (phba->pport) { 4451 phba->pport->fc_myDID = 0; 4452 phba->pport->fc_prevDID = 0; 4453 } 4454 4455 /* Turn off parity checking and serr during the physical reset */ 4456 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) 4457 return -EIO; 4458 4459 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4460 (cfg_value & 4461 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4462 4463 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4464 4465 /* Now toggle INITFF bit in the Host Control Register */ 4466 writel(HC_INITFF, phba->HCregaddr); 4467 mdelay(1); 4468 readl(phba->HCregaddr); /* flush */ 4469 writel(0, phba->HCregaddr); 4470 readl(phba->HCregaddr); /* flush */ 4471 4472 /* Restore PCI cmd register */ 4473 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4474 4475 /* Initialize relevant SLI info */ 4476 for (i = 0; i < psli->num_rings; i++) { 4477 pring = &psli->sli3_ring[i]; 4478 pring->flag = 0; 4479 pring->sli.sli3.rspidx = 0; 4480 pring->sli.sli3.next_cmdidx = 0; 4481 pring->sli.sli3.local_getidx = 0; 4482 pring->sli.sli3.cmdidx = 0; 4483 pring->missbufcnt = 0; 4484 } 4485 4486 phba->link_state = LPFC_WARM_START; 4487 return 0; 4488 } 4489 4490 /** 4491 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4492 * @phba: Pointer to HBA context object. 4493 * 4494 * This function resets a SLI4 HBA. This function disables PCI layer parity 4495 * checking during resets the device. The caller is not required to hold 4496 * any locks. 4497 * 4498 * This function returns 0 always. 4499 **/ 4500 int 4501 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4502 { 4503 struct lpfc_sli *psli = &phba->sli; 4504 uint16_t cfg_value; 4505 int rc = 0; 4506 4507 /* Reset HBA */ 4508 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4509 "0295 Reset HBA Data: x%x x%x x%x\n", 4510 phba->pport->port_state, psli->sli_flag, 4511 phba->hba_flag); 4512 4513 /* perform board reset */ 4514 phba->fc_eventTag = 0; 4515 phba->link_events = 0; 4516 phba->pport->fc_myDID = 0; 4517 phba->pport->fc_prevDID = 0; 4518 4519 spin_lock_irq(&phba->hbalock); 4520 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4521 phba->fcf.fcf_flag = 0; 4522 spin_unlock_irq(&phba->hbalock); 4523 4524 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4525 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4526 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4527 return rc; 4528 } 4529 4530 /* Now physically reset the device */ 4531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4532 "0389 Performing PCI function reset!\n"); 4533 4534 /* Turn off parity checking and serr during the physical reset */ 4535 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { 4536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4537 "3205 PCI read Config failed\n"); 4538 return -EIO; 4539 } 4540 4541 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4542 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4543 4544 /* Perform FCoE PCI function reset before freeing queue memory */ 4545 rc = lpfc_pci_function_reset(phba); 4546 4547 /* Restore PCI cmd register */ 4548 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4549 4550 return rc; 4551 } 4552 4553 /** 4554 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4555 * @phba: Pointer to HBA context object. 4556 * 4557 * This function is called in the SLI initialization code path to 4558 * restart the HBA. The caller is not required to hold any lock. 
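 * It is invoked through the lpfc_sli_brdrestart wrapper for SLI-2 and
 * SLI-3 HBAs.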
4559 * This function writes MBX_RESTART mailbox command to the SLIM and 4560 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4561 * function to free any pending commands. The function enables 4562 * POST only during the first initialization. The function returns zero. 4563 * The function does not guarantee completion of MBX_RESTART mailbox 4564 * command before the return of this function. 4565 **/ 4566 static int 4567 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4568 { 4569 MAILBOX_t *mb; 4570 struct lpfc_sli *psli; 4571 volatile uint32_t word0; 4572 void __iomem *to_slim; 4573 uint32_t hba_aer_enabled; 4574 4575 spin_lock_irq(&phba->hbalock); 4576 4577 /* Take PCIe device Advanced Error Reporting (AER) state */ 4578 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4579 4580 psli = &phba->sli; 4581 4582 /* Restart HBA */ 4583 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4584 "0337 Restart HBA Data: x%x x%x\n", 4585 (phba->pport) ? phba->pport->port_state : 0, 4586 psli->sli_flag); 4587 4588 word0 = 0; 4589 mb = (MAILBOX_t *) &word0; 4590 mb->mbxCommand = MBX_RESTART; 4591 mb->mbxHc = 1; 4592 4593 lpfc_reset_barrier(phba); 4594 4595 to_slim = phba->MBslimaddr; 4596 writel(*(uint32_t *) mb, to_slim); 4597 readl(to_slim); /* flush */ 4598 4599 /* Only skip post after fc_ffinit is completed */ 4600 if (phba->pport && phba->pport->port_state) 4601 word0 = 1; /* This is really setting up word1 */ 4602 else 4603 word0 = 0; /* This is really setting up word1 */ 4604 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4605 writel(*(uint32_t *) mb, to_slim); 4606 readl(to_slim); /* flush */ 4607 4608 lpfc_sli_brdreset(phba); 4609 if (phba->pport) 4610 phba->pport->stopped = 0; 4611 phba->link_state = LPFC_INIT_START; 4612 phba->hba_flag = 0; 4613 spin_unlock_irq(&phba->hbalock); 4614 4615 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4616 psli->stats_start = ktime_get_seconds(); 4617 4618 /* Give the INITFF and Post time to settle. */ 4619 mdelay(100); 4620 4621 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4622 if (hba_aer_enabled) 4623 pci_disable_pcie_error_reporting(phba->pcidev); 4624 4625 lpfc_hba_down_post(phba); 4626 4627 return 0; 4628 } 4629 4630 /** 4631 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4632 * @phba: Pointer to HBA context object. 4633 * 4634 * This function is called in the SLI initialization code path to restart 4635 * a SLI4 HBA. The caller is not required to hold any lock. 4636 * At the end of the function, it calls lpfc_hba_down_post function to 4637 * free any pending commands. 
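 * After the board reset completes, the SLI4 queues are also torn down via
 * lpfc_sli4_queue_destroy in addition to the lpfc_hba_down_post cleanup.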
4638 **/
4639 static int
4640 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4641 {
4642 struct lpfc_sli *psli = &phba->sli;
4643 uint32_t hba_aer_enabled;
4644 int rc;
4645 
4646 /* Restart HBA */
4647 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4648 "0296 Restart HBA Data: x%x x%x\n",
4649 phba->pport->port_state, psli->sli_flag);
4650 
4651 /* Take PCIe device Advanced Error Reporting (AER) state */
4652 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4653 
4654 rc = lpfc_sli4_brdreset(phba);
4655 if (rc)
4656 return rc;
4657 
4658 spin_lock_irq(&phba->hbalock);
4659 phba->pport->stopped = 0;
4660 phba->link_state = LPFC_INIT_START;
4661 phba->hba_flag = 0;
4662 spin_unlock_irq(&phba->hbalock);
4663 
4664 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4665 psli->stats_start = ktime_get_seconds();
4666 
4667 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4668 if (hba_aer_enabled)
4669 pci_disable_pcie_error_reporting(phba->pcidev);
4670 
4671 lpfc_hba_down_post(phba);
4672 lpfc_sli4_queue_destroy(phba);
4673 
4674 return rc;
4675 }
4676 
4677 /**
4678 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4679 * @phba: Pointer to HBA context object.
4680 *
4681 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4682 * API jump table function pointer from the lpfc_hba struct.
4683 **/
4684 int
4685 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4686 {
4687 return phba->lpfc_sli_brdrestart(phba);
4688 }
4689 
4690 /**
4691 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4692 * @phba: Pointer to HBA context object.
4693 *
4694 * This function is called after a HBA restart to wait for successful
4695 * restart of the HBA. Successful restart of the HBA is indicated by
4696 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
4697 * iterations, the function will restart the HBA again. The function returns
4698 * zero if the HBA successfully restarted, else returns a negative error code.
4699 **/
4700 int
4701 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4702 {
4703 uint32_t status, i = 0;
4704 
4705 /* Read the HBA Host Status Register */
4706 if (lpfc_readl(phba->HSregaddr, &status))
4707 return -EIO;
4708 
4709 /* Check status register to see what current state is */
4710 i = 0;
4711 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4712 
4713 /* Check every 10ms for 10 retries, then every 100ms for 90
4714 * retries, then every 1 sec for 50 retries, for a total of
4715 * ~60 seconds before resetting the board again and checking
4716 * every 1 sec for 50 more retries. Up to 60 seconds before the
4717 * board is ready is required for the Falcon FIPS zeroization to
4718 * complete, and any board reset in between would cause a
4719 * restart of zeroization, further delaying board readiness.
4720 */ 4721 if (i++ >= 200) { 4722 /* Adapter failed to init, timeout, status reg 4723 <status> */ 4724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4725 "0436 Adapter failed to init, " 4726 "timeout, status reg x%x, " 4727 "FW Data: A8 x%x AC x%x\n", status, 4728 readl(phba->MBslimaddr + 0xa8), 4729 readl(phba->MBslimaddr + 0xac)); 4730 phba->link_state = LPFC_HBA_ERROR; 4731 return -ETIMEDOUT; 4732 } 4733 4734 /* Check to see if any errors occurred during init */ 4735 if (status & HS_FFERM) { 4736 /* ERROR: During chipset initialization */ 4737 /* Adapter failed to init, chipset, status reg 4738 <status> */ 4739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4740 "0437 Adapter failed to init, " 4741 "chipset, status reg x%x, " 4742 "FW Data: A8 x%x AC x%x\n", status, 4743 readl(phba->MBslimaddr + 0xa8), 4744 readl(phba->MBslimaddr + 0xac)); 4745 phba->link_state = LPFC_HBA_ERROR; 4746 return -EIO; 4747 } 4748 4749 if (i <= 10) 4750 msleep(10); 4751 else if (i <= 100) 4752 msleep(100); 4753 else 4754 msleep(1000); 4755 4756 if (i == 150) { 4757 /* Do post */ 4758 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4759 lpfc_sli_brdrestart(phba); 4760 } 4761 /* Read the HBA Host Status Register */ 4762 if (lpfc_readl(phba->HSregaddr, &status)) 4763 return -EIO; 4764 } 4765 4766 /* Check to see if any errors occurred during init */ 4767 if (status & HS_FFERM) { 4768 /* ERROR: During chipset initialization */ 4769 /* Adapter failed to init, chipset, status reg <status> */ 4770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4771 "0438 Adapter failed to init, chipset, " 4772 "status reg x%x, " 4773 "FW Data: A8 x%x AC x%x\n", status, 4774 readl(phba->MBslimaddr + 0xa8), 4775 readl(phba->MBslimaddr + 0xac)); 4776 phba->link_state = LPFC_HBA_ERROR; 4777 return -EIO; 4778 } 4779 4780 /* Clear all interrupt enable conditions */ 4781 writel(0, phba->HCregaddr); 4782 readl(phba->HCregaddr); /* flush */ 4783 4784 /* setup host attn register */ 4785 writel(0xffffffff, phba->HAregaddr); 4786 readl(phba->HAregaddr); /* flush */ 4787 return 0; 4788 } 4789 4790 /** 4791 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4792 * 4793 * This function calculates and returns the number of HBQs required to be 4794 * configured. 4795 **/ 4796 int 4797 lpfc_sli_hbq_count(void) 4798 { 4799 return ARRAY_SIZE(lpfc_hbq_defs); 4800 } 4801 4802 /** 4803 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4804 * 4805 * This function adds the number of hbq entries in every HBQ to get 4806 * the total number of hbq entries required for the HBA and returns 4807 * the total count. 4808 **/ 4809 static int 4810 lpfc_sli_hbq_entry_count(void) 4811 { 4812 int hbq_count = lpfc_sli_hbq_count(); 4813 int count = 0; 4814 int i; 4815 4816 for (i = 0; i < hbq_count; ++i) 4817 count += lpfc_hbq_defs[i]->entry_count; 4818 return count; 4819 } 4820 4821 /** 4822 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4823 * 4824 * This function calculates amount of memory required for all hbq entries 4825 * to be configured and returns the total memory required. 4826 **/ 4827 int 4828 lpfc_sli_hbq_size(void) 4829 { 4830 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4831 } 4832 4833 /** 4834 * lpfc_sli_hbq_setup - configure and initialize HBQs 4835 * @phba: Pointer to HBA context object. 4836 * 4837 * This function is called during the SLI initialization to configure 4838 * all the HBQs and post buffers to the HBQ. The caller is not 4839 * required to hold any locks. 
This function will return zero if successful 4840 * else it will return negative error code. 4841 **/ 4842 static int 4843 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4844 { 4845 int hbq_count = lpfc_sli_hbq_count(); 4846 LPFC_MBOXQ_t *pmb; 4847 MAILBOX_t *pmbox; 4848 uint32_t hbqno; 4849 uint32_t hbq_entry_index; 4850 4851 /* Get a Mailbox buffer to setup mailbox 4852 * commands for HBA initialization 4853 */ 4854 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4855 4856 if (!pmb) 4857 return -ENOMEM; 4858 4859 pmbox = &pmb->u.mb; 4860 4861 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4862 phba->link_state = LPFC_INIT_MBX_CMDS; 4863 phba->hbq_in_use = 1; 4864 4865 hbq_entry_index = 0; 4866 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4867 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4868 phba->hbqs[hbqno].hbqPutIdx = 0; 4869 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4870 phba->hbqs[hbqno].entry_count = 4871 lpfc_hbq_defs[hbqno]->entry_count; 4872 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4873 hbq_entry_index, pmb); 4874 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4875 4876 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4877 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4878 mbxStatus <status>, ring <num> */ 4879 4880 lpfc_printf_log(phba, KERN_ERR, 4881 LOG_SLI | LOG_VPORT, 4882 "1805 Adapter failed to init. " 4883 "Data: x%x x%x x%x\n", 4884 pmbox->mbxCommand, 4885 pmbox->mbxStatus, hbqno); 4886 4887 phba->link_state = LPFC_HBA_ERROR; 4888 mempool_free(pmb, phba->mbox_mem_pool); 4889 return -ENXIO; 4890 } 4891 } 4892 phba->hbq_count = hbq_count; 4893 4894 mempool_free(pmb, phba->mbox_mem_pool); 4895 4896 /* Initially populate or replenish the HBQs */ 4897 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4898 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4899 return 0; 4900 } 4901 4902 /** 4903 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4904 * @phba: Pointer to HBA context object. 4905 * 4906 * This function is called during the SLI initialization to configure 4907 * all the HBQs and post buffers to the HBQ. The caller is not 4908 * required to hold any locks. This function will return zero if successful 4909 * else it will return negative error code. 4910 **/ 4911 static int 4912 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4913 { 4914 phba->hbq_in_use = 1; 4915 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4916 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4917 phba->hbq_count = 1; 4918 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4919 /* Initially populate or replenish the HBQs */ 4920 return 0; 4921 } 4922 4923 /** 4924 * lpfc_sli_config_port - Issue config port mailbox command 4925 * @phba: Pointer to HBA context object. 4926 * @sli_mode: sli mode - 2/3 4927 * 4928 * This function is called by the sli initialization code path 4929 * to issue config_port mailbox command. This function restarts the 4930 * HBA firmware and issues a config_port mailbox command to configure 4931 * the SLI interface in the sli mode specified by sli_mode 4932 * variable. The caller is not required to hold any locks. 4933 * The function returns 0 if successful, else returns negative error 4934 * code. 
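 * The HBA is restarted and the CONFIG_PORT sequence is retried at most
 * twice before the routine gives up.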
4935 **/ 4936 int 4937 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4938 { 4939 LPFC_MBOXQ_t *pmb; 4940 uint32_t resetcount = 0, rc = 0, done = 0; 4941 4942 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4943 if (!pmb) { 4944 phba->link_state = LPFC_HBA_ERROR; 4945 return -ENOMEM; 4946 } 4947 4948 phba->sli_rev = sli_mode; 4949 while (resetcount < 2 && !done) { 4950 spin_lock_irq(&phba->hbalock); 4951 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4952 spin_unlock_irq(&phba->hbalock); 4953 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4954 lpfc_sli_brdrestart(phba); 4955 rc = lpfc_sli_chipset_init(phba); 4956 if (rc) 4957 break; 4958 4959 spin_lock_irq(&phba->hbalock); 4960 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4961 spin_unlock_irq(&phba->hbalock); 4962 resetcount++; 4963 4964 /* Call pre CONFIG_PORT mailbox command initialization. A 4965 * value of 0 means the call was successful. Any other 4966 * nonzero value is a failure, but if ERESTART is returned, 4967 * the driver may reset the HBA and try again. 4968 */ 4969 rc = lpfc_config_port_prep(phba); 4970 if (rc == -ERESTART) { 4971 phba->link_state = LPFC_LINK_UNKNOWN; 4972 continue; 4973 } else if (rc) 4974 break; 4975 4976 phba->link_state = LPFC_INIT_MBX_CMDS; 4977 lpfc_config_port(phba, pmb); 4978 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4979 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4980 LPFC_SLI3_HBQ_ENABLED | 4981 LPFC_SLI3_CRP_ENABLED | 4982 LPFC_SLI3_DSS_ENABLED); 4983 if (rc != MBX_SUCCESS) { 4984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4985 "0442 Adapter failed to init, mbxCmd x%x " 4986 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4987 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4988 spin_lock_irq(&phba->hbalock); 4989 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4990 spin_unlock_irq(&phba->hbalock); 4991 rc = -ENXIO; 4992 } else { 4993 /* Allow asynchronous mailbox command to go through */ 4994 spin_lock_irq(&phba->hbalock); 4995 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4996 spin_unlock_irq(&phba->hbalock); 4997 done = 1; 4998 4999 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 5000 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 5001 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5002 "3110 Port did not grant ASABT\n"); 5003 } 5004 } 5005 if (!done) { 5006 rc = -EINVAL; 5007 goto do_prep_failed; 5008 } 5009 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 5010 if (!pmb->u.mb.un.varCfgPort.cMA) { 5011 rc = -ENXIO; 5012 goto do_prep_failed; 5013 } 5014 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5015 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5016 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5017 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5018 phba->max_vpi : phba->max_vports; 5019 5020 } else 5021 phba->max_vpi = 0; 5022 phba->fips_level = 0; 5023 phba->fips_spec_rev = 0; 5024 if (pmb->u.mb.un.varCfgPort.gdss) { 5025 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5026 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5027 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5028 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5029 "2850 Security Crypto Active. 
FIPS x%d " 5030 "(Spec Rev: x%d)", 5031 phba->fips_level, phba->fips_spec_rev); 5032 } 5033 if (pmb->u.mb.un.varCfgPort.sec_err) { 5034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5035 "2856 Config Port Security Crypto " 5036 "Error: x%x ", 5037 pmb->u.mb.un.varCfgPort.sec_err); 5038 } 5039 if (pmb->u.mb.un.varCfgPort.gerbm) 5040 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5041 if (pmb->u.mb.un.varCfgPort.gcrp) 5042 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5043 5044 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5045 phba->port_gp = phba->mbox->us.s3_pgp.port; 5046 5047 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5048 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5049 phba->cfg_enable_bg = 0; 5050 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5052 "0443 Adapter did not grant " 5053 "BlockGuard\n"); 5054 } 5055 } 5056 } else { 5057 phba->hbq_get = NULL; 5058 phba->port_gp = phba->mbox->us.s2.port; 5059 phba->max_vpi = 0; 5060 } 5061 do_prep_failed: 5062 mempool_free(pmb, phba->mbox_mem_pool); 5063 return rc; 5064 } 5065 5066 5067 /** 5068 * lpfc_sli_hba_setup - SLI initialization function 5069 * @phba: Pointer to HBA context object. 5070 * 5071 * This function is the main SLI initialization function. This function 5072 * is called by the HBA initialization code, HBA reset code and HBA 5073 * error attention handler code. Caller is not required to hold any 5074 * locks. This function issues config_port mailbox command to configure 5075 * the SLI, setup iocb rings and HBQ rings. In the end the function 5076 * calls the config_port_post function to issue init_link mailbox 5077 * command and to start the discovery. The function will return zero 5078 * if successful, else it will return negative error code. 5079 **/ 5080 int 5081 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5082 { 5083 uint32_t rc; 5084 int mode = 3, i; 5085 int longs; 5086 5087 switch (phba->cfg_sli_mode) { 5088 case 2: 5089 if (phba->cfg_enable_npiv) { 5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5091 "1824 NPIV enabled: Override sli_mode " 5092 "parameter (%d) to auto (0).\n", 5093 phba->cfg_sli_mode); 5094 break; 5095 } 5096 mode = 2; 5097 break; 5098 case 0: 5099 case 3: 5100 break; 5101 default: 5102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5103 "1819 Unrecognized sli_mode parameter: %d.\n", 5104 phba->cfg_sli_mode); 5105 5106 break; 5107 } 5108 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5109 5110 rc = lpfc_sli_config_port(phba, mode); 5111 5112 if (rc && phba->cfg_sli_mode == 3) 5113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5114 "1820 Unable to select SLI-3. 
" 5115 "Not supported by adapter.\n"); 5116 if (rc && mode != 2) 5117 rc = lpfc_sli_config_port(phba, 2); 5118 else if (rc && mode == 2) 5119 rc = lpfc_sli_config_port(phba, 3); 5120 if (rc) 5121 goto lpfc_sli_hba_setup_error; 5122 5123 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5124 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5125 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5126 if (!rc) { 5127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5128 "2709 This device supports " 5129 "Advanced Error Reporting (AER)\n"); 5130 spin_lock_irq(&phba->hbalock); 5131 phba->hba_flag |= HBA_AER_ENABLED; 5132 spin_unlock_irq(&phba->hbalock); 5133 } else { 5134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5135 "2708 This device does not support " 5136 "Advanced Error Reporting (AER): %d\n", 5137 rc); 5138 phba->cfg_aer_support = 0; 5139 } 5140 } 5141 5142 if (phba->sli_rev == 3) { 5143 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5144 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5145 } else { 5146 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5147 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5148 phba->sli3_options = 0; 5149 } 5150 5151 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5152 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5153 phba->sli_rev, phba->max_vpi); 5154 rc = lpfc_sli_ring_map(phba); 5155 5156 if (rc) 5157 goto lpfc_sli_hba_setup_error; 5158 5159 /* Initialize VPIs. */ 5160 if (phba->sli_rev == LPFC_SLI_REV3) { 5161 /* 5162 * The VPI bitmask and physical ID array are allocated 5163 * and initialized once only - at driver load. A port 5164 * reset doesn't need to reinitialize this memory. 5165 */ 5166 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5167 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5168 phba->vpi_bmask = kcalloc(longs, 5169 sizeof(unsigned long), 5170 GFP_KERNEL); 5171 if (!phba->vpi_bmask) { 5172 rc = -ENOMEM; 5173 goto lpfc_sli_hba_setup_error; 5174 } 5175 5176 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5177 sizeof(uint16_t), 5178 GFP_KERNEL); 5179 if (!phba->vpi_ids) { 5180 kfree(phba->vpi_bmask); 5181 rc = -ENOMEM; 5182 goto lpfc_sli_hba_setup_error; 5183 } 5184 for (i = 0; i < phba->max_vpi; i++) 5185 phba->vpi_ids[i] = i; 5186 } 5187 } 5188 5189 /* Init HBQs */ 5190 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5191 rc = lpfc_sli_hbq_setup(phba); 5192 if (rc) 5193 goto lpfc_sli_hba_setup_error; 5194 } 5195 spin_lock_irq(&phba->hbalock); 5196 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5197 spin_unlock_irq(&phba->hbalock); 5198 5199 rc = lpfc_config_port_post(phba); 5200 if (rc) 5201 goto lpfc_sli_hba_setup_error; 5202 5203 return rc; 5204 5205 lpfc_sli_hba_setup_error: 5206 phba->link_state = LPFC_HBA_ERROR; 5207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5208 "0445 Firmware initialization failed\n"); 5209 return rc; 5210 } 5211 5212 /** 5213 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5214 * @phba: Pointer to HBA context object. 5215 * @mboxq: mailbox pointer. 5216 * This function issue a dump mailbox command to read config region 5217 * 23 and parse the records in the region and populate driver 5218 * data structure. 
5219 **/ 5220 static int 5221 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5222 { 5223 LPFC_MBOXQ_t *mboxq; 5224 struct lpfc_dmabuf *mp; 5225 struct lpfc_mqe *mqe; 5226 uint32_t data_length; 5227 int rc; 5228 5229 /* Program the default value of vlan_id and fc_map */ 5230 phba->valid_vlan = 0; 5231 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5232 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5233 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5234 5235 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5236 if (!mboxq) 5237 return -ENOMEM; 5238 5239 mqe = &mboxq->u.mqe; 5240 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5241 rc = -ENOMEM; 5242 goto out_free_mboxq; 5243 } 5244 5245 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 5246 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5247 5248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5249 "(%d):2571 Mailbox cmd x%x Status x%x " 5250 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5251 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5252 "CQ: x%x x%x x%x x%x\n", 5253 mboxq->vport ? mboxq->vport->vpi : 0, 5254 bf_get(lpfc_mqe_command, mqe), 5255 bf_get(lpfc_mqe_status, mqe), 5256 mqe->un.mb_words[0], mqe->un.mb_words[1], 5257 mqe->un.mb_words[2], mqe->un.mb_words[3], 5258 mqe->un.mb_words[4], mqe->un.mb_words[5], 5259 mqe->un.mb_words[6], mqe->un.mb_words[7], 5260 mqe->un.mb_words[8], mqe->un.mb_words[9], 5261 mqe->un.mb_words[10], mqe->un.mb_words[11], 5262 mqe->un.mb_words[12], mqe->un.mb_words[13], 5263 mqe->un.mb_words[14], mqe->un.mb_words[15], 5264 mqe->un.mb_words[16], mqe->un.mb_words[50], 5265 mboxq->mcqe.word0, 5266 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5267 mboxq->mcqe.trailer); 5268 5269 if (rc) { 5270 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5271 kfree(mp); 5272 rc = -EIO; 5273 goto out_free_mboxq; 5274 } 5275 data_length = mqe->un.mb_words[5]; 5276 if (data_length > DMP_RGN23_SIZE) { 5277 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5278 kfree(mp); 5279 rc = -EIO; 5280 goto out_free_mboxq; 5281 } 5282 5283 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5284 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5285 kfree(mp); 5286 rc = 0; 5287 5288 out_free_mboxq: 5289 mempool_free(mboxq, phba->mbox_mem_pool); 5290 return rc; 5291 } 5292 5293 /** 5294 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5295 * @phba: pointer to lpfc hba data structure. 5296 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5297 * @vpd: pointer to the memory to hold resulting port vpd data. 5298 * @vpd_size: On input, the number of bytes allocated to @vpd. 5299 * On output, the number of data bytes in @vpd. 5300 * 5301 * This routine executes a READ_REV SLI4 mailbox command. In 5302 * addition, this routine gets the port vpd data. 5303 * 5304 * Return codes 5305 * 0 - successful 5306 * -ENOMEM - could not allocated memory. 5307 **/ 5308 static int 5309 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5310 uint8_t *vpd, uint32_t *vpd_size) 5311 { 5312 int rc = 0; 5313 uint32_t dma_size; 5314 struct lpfc_dmabuf *dmabuf; 5315 struct lpfc_mqe *mqe; 5316 5317 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5318 if (!dmabuf) 5319 return -ENOMEM; 5320 5321 /* 5322 * Get a DMA buffer for the vpd data resulting from the READ_REV 5323 * mailbox command. 
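 * The buffer is sized from the caller's *vpd_size and is freed again
 * before this routine returns, on both the success and error paths.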
5324 */ 5325 dma_size = *vpd_size; 5326 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5327 &dmabuf->phys, GFP_KERNEL); 5328 if (!dmabuf->virt) { 5329 kfree(dmabuf); 5330 return -ENOMEM; 5331 } 5332 5333 /* 5334 * The SLI4 implementation of READ_REV conflicts at word1, 5335 * bits 31:16 and SLI4 adds vpd functionality not present 5336 * in SLI3. This code corrects the conflicts. 5337 */ 5338 lpfc_read_rev(phba, mboxq); 5339 mqe = &mboxq->u.mqe; 5340 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5341 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5342 mqe->un.read_rev.word1 &= 0x0000FFFF; 5343 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5344 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5345 5346 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5347 if (rc) { 5348 dma_free_coherent(&phba->pcidev->dev, dma_size, 5349 dmabuf->virt, dmabuf->phys); 5350 kfree(dmabuf); 5351 return -EIO; 5352 } 5353 5354 /* 5355 * The available vpd length cannot be bigger than the 5356 * DMA buffer passed to the port. Catch the less than 5357 * case and update the caller's size. 5358 */ 5359 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5360 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5361 5362 memcpy(vpd, dmabuf->virt, *vpd_size); 5363 5364 dma_free_coherent(&phba->pcidev->dev, dma_size, 5365 dmabuf->virt, dmabuf->phys); 5366 kfree(dmabuf); 5367 return 0; 5368 } 5369 5370 /** 5371 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes 5372 * @phba: pointer to lpfc hba data structure. 5373 * 5374 * This routine retrieves the SLI4 device controller attributes (link type, 5375 * link number and BIOS version) of the adapter this PCI function is attached to. 5376 * 5377 * Return codes 5378 * 0 - successful 5379 * otherwise - failed to retrieve controller attributes 5380 **/ 5381 static int 5382 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) 5383 { 5384 LPFC_MBOXQ_t *mboxq; 5385 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5386 struct lpfc_controller_attribute *cntl_attr; 5387 void *virtaddr = NULL; 5388 uint32_t alloclen, reqlen; 5389 uint32_t shdr_status, shdr_add_status; 5390 union lpfc_sli4_cfg_shdr *shdr; 5391 int rc; 5392 5393 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5394 if (!mboxq) 5395 return -ENOMEM; 5396 5397 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ 5398 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5399 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5400 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5401 LPFC_SLI4_MBX_NEMBED); 5402 5403 if (alloclen < reqlen) { 5404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5405 "3084 Allocated DMA memory size (%d) is " 5406 "less than the requested DMA memory size " 5407 "(%d)\n", alloclen, reqlen); 5408 rc = -ENOMEM; 5409 goto out_free_mboxq; 5410 } 5411 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5412 virtaddr = mboxq->sge_array->addr[0]; 5413 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5414 shdr = &mbx_cntl_attr->cfg_shdr; 5415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5417 if (shdr_status || shdr_add_status || rc) { 5418 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5419 "3085 Mailbox x%x (x%x/x%x) failed, " 5420 "rc:x%x, status:x%x, add_status:x%x\n", 5421 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5422 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5423 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5424 rc, shdr_status, shdr_add_status); 5425 rc = -ENXIO;
5426 goto out_free_mboxq; 5427 } 5428 5429 cntl_attr = &mbx_cntl_attr->cntl_attr; 5430 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5431 phba->sli4_hba.lnk_info.lnk_tp = 5432 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5433 phba->sli4_hba.lnk_info.lnk_no = 5434 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5435 5436 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); 5437 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, 5438 sizeof(phba->BIOSVersion)); 5439 5440 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5441 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", 5442 phba->sli4_hba.lnk_info.lnk_tp, 5443 phba->sli4_hba.lnk_info.lnk_no, 5444 phba->BIOSVersion); 5445 out_free_mboxq: 5446 if (rc != MBX_TIMEOUT) { 5447 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5448 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5449 else 5450 mempool_free(mboxq, phba->mbox_mem_pool); 5451 } 5452 return rc; 5453 } 5454 5455 /** 5456 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5457 * @phba: pointer to lpfc hba data structure. 5458 * 5459 * This routine retrieves SLI4 device physical port name this PCI function 5460 * is attached to. 5461 * 5462 * Return codes 5463 * 0 - successful 5464 * otherwise - failed to retrieve physical port name 5465 **/ 5466 static int 5467 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5468 { 5469 LPFC_MBOXQ_t *mboxq; 5470 struct lpfc_mbx_get_port_name *get_port_name; 5471 uint32_t shdr_status, shdr_add_status; 5472 union lpfc_sli4_cfg_shdr *shdr; 5473 char cport_name = 0; 5474 int rc; 5475 5476 /* We assume nothing at this point */ 5477 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5478 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5479 5480 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5481 if (!mboxq) 5482 return -ENOMEM; 5483 /* obtain link type and link number via READ_CONFIG */ 5484 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5485 lpfc_sli4_read_config(phba); 5486 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5487 goto retrieve_ppname; 5488 5489 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5490 rc = lpfc_sli4_get_ctl_attr(phba); 5491 if (rc) 5492 goto out_free_mboxq; 5493 5494 retrieve_ppname: 5495 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5496 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5497 sizeof(struct lpfc_mbx_get_port_name) - 5498 sizeof(struct lpfc_sli4_cfg_mhdr), 5499 LPFC_SLI4_MBX_EMBED); 5500 get_port_name = &mboxq->u.mqe.un.get_port_name; 5501 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5502 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5503 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5504 phba->sli4_hba.lnk_info.lnk_tp); 5505 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5506 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5507 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5508 if (shdr_status || shdr_add_status || rc) { 5509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5510 "3087 Mailbox x%x (x%x/x%x) failed: " 5511 "rc:x%x, status:x%x, add_status:x%x\n", 5512 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5513 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5514 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5515 rc, shdr_status, shdr_add_status); 5516 rc = -ENXIO; 5517 goto out_free_mboxq; 5518 } 5519 switch (phba->sli4_hba.lnk_info.lnk_no) { 5520 case LPFC_LINK_NUMBER_0: 5521 cport_name = 
bf_get(lpfc_mbx_get_port_name_name0, 5522 &get_port_name->u.response); 5523 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5524 break; 5525 case LPFC_LINK_NUMBER_1: 5526 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5527 &get_port_name->u.response); 5528 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5529 break; 5530 case LPFC_LINK_NUMBER_2: 5531 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5532 &get_port_name->u.response); 5533 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5534 break; 5535 case LPFC_LINK_NUMBER_3: 5536 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5537 &get_port_name->u.response); 5538 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5539 break; 5540 default: 5541 break; 5542 } 5543 5544 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5545 phba->Port[0] = cport_name; 5546 phba->Port[1] = '\0'; 5547 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5548 "3091 SLI get port name: %s\n", phba->Port); 5549 } 5550 5551 out_free_mboxq: 5552 if (rc != MBX_TIMEOUT) { 5553 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5554 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5555 else 5556 mempool_free(mboxq, phba->mbox_mem_pool); 5557 } 5558 return rc; 5559 } 5560 5561 /** 5562 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 5563 * @phba: pointer to lpfc hba data structure. 5564 * 5565 * This routine is called to explicitly arm the SLI4 device's completion and 5566 * event queues 5567 **/ 5568 static void 5569 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5570 { 5571 int qidx; 5572 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5573 struct lpfc_sli4_hdw_queue *qp; 5574 struct lpfc_queue *eq; 5575 5576 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); 5577 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); 5578 if (sli4_hba->nvmels_cq) 5579 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, 5580 LPFC_QUEUE_REARM); 5581 5582 if (sli4_hba->hdwq) { 5583 /* Loop thru all Hardware Queues */ 5584 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 5585 qp = &sli4_hba->hdwq[qidx]; 5586 /* ARM the corresponding CQ */ 5587 sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0, 5588 LPFC_QUEUE_REARM); 5589 sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0, 5590 LPFC_QUEUE_REARM); 5591 } 5592 5593 /* Loop thru all IRQ vectors */ 5594 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 5595 eq = sli4_hba->hba_eq_hdl[qidx].eq; 5596 /* ARM the corresponding EQ */ 5597 sli4_hba->sli4_write_eq_db(phba, eq, 5598 0, LPFC_QUEUE_REARM); 5599 } 5600 } 5601 5602 if (phba->nvmet_support) { 5603 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5604 sli4_hba->sli4_write_cq_db(phba, 5605 sli4_hba->nvmet_cqset[qidx], 0, 5606 LPFC_QUEUE_REARM); 5607 } 5608 } 5609 } 5610 5611 /** 5612 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5613 * @phba: Pointer to HBA context object. 5614 * @type: The resource extent type. 5615 * @extnt_count: buffer to hold port available extent count. 5616 * @extnt_size: buffer to hold element count per extent. 5617 * 5618 * This function calls the port and retrievs the number of available 5619 * extents and their size for a particular extent type. 5620 * 5621 * Returns: 0 if successful. Nonzero otherwise. 
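 *
 * A minimal caller sketch (illustrative only; the variable names are
 * hypothetical and error handling is elided):
 *
 *   uint16_t ext_cnt, ext_size;
 *
 *   if (!lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *                                       &ext_cnt, &ext_size))
 *           the port reports ext_cnt extents of ext_size XRIs each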
5622 **/ 5623 int 5624 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5625 uint16_t *extnt_count, uint16_t *extnt_size) 5626 { 5627 int rc = 0; 5628 uint32_t length; 5629 uint32_t mbox_tmo; 5630 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5631 LPFC_MBOXQ_t *mbox; 5632 5633 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5634 if (!mbox) 5635 return -ENOMEM; 5636 5637 /* Find out how many extents are available for this resource type */ 5638 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5639 sizeof(struct lpfc_sli4_cfg_mhdr)); 5640 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5641 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5642 length, LPFC_SLI4_MBX_EMBED); 5643 5644 /* Send an extents count of 0 - the GET doesn't use it. */ 5645 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5646 LPFC_SLI4_MBX_EMBED); 5647 if (unlikely(rc)) { 5648 rc = -EIO; 5649 goto err_exit; 5650 } 5651 5652 if (!phba->sli4_hba.intr_enable) 5653 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5654 else { 5655 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5656 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5657 } 5658 if (unlikely(rc)) { 5659 rc = -EIO; 5660 goto err_exit; 5661 } 5662 5663 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5664 if (bf_get(lpfc_mbox_hdr_status, 5665 &rsrc_info->header.cfg_shdr.response)) { 5666 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5667 "2930 Failed to get resource extents " 5668 "Status 0x%x Add'l Status 0x%x\n", 5669 bf_get(lpfc_mbox_hdr_status, 5670 &rsrc_info->header.cfg_shdr.response), 5671 bf_get(lpfc_mbox_hdr_add_status, 5672 &rsrc_info->header.cfg_shdr.response)); 5673 rc = -EIO; 5674 goto err_exit; 5675 } 5676 5677 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5678 &rsrc_info->u.rsp); 5679 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5680 &rsrc_info->u.rsp); 5681 5682 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5683 "3162 Retrieved extents type-%d from port: count:%d, " 5684 "size:%d\n", type, *extnt_count, *extnt_size); 5685 5686 err_exit: 5687 mempool_free(mbox, phba->mbox_mem_pool); 5688 return rc; 5689 } 5690 5691 /** 5692 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5693 * @phba: Pointer to HBA context object. 5694 * @type: The extent type to check. 5695 * 5696 * This function reads the current available extents from the port and checks 5697 * if the extent count or extent size has changed since the last access. 5698 * Callers use this routine after a port reset to determine whether an 5699 * extent reprovisioning is required. 5700 * 5701 * Returns: 5702 * -Error: error indicates a problem. 5703 * 1: Extent count or size has changed. 5704 * 0: No changes.
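 *
 * Hypothetical post-reset caller sketch (illustrative only; the real
 * caller is lpfc_sli4_alloc_resource_identifiers() below):
 *
 *   rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *   if (rc < 0)
 *           return rc;              could not query the port
 *   if (rc == 1)
 *           reallocate the extents for this resource type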
5705 **/ 5706 static int 5707 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5708 { 5709 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5710 uint16_t size_diff, rsrc_ext_size; 5711 int rc = 0; 5712 struct lpfc_rsrc_blks *rsrc_entry; 5713 struct list_head *rsrc_blk_list = NULL; 5714 5715 size_diff = 0; 5716 curr_ext_cnt = 0; 5717 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5718 &rsrc_ext_cnt, 5719 &rsrc_ext_size); 5720 if (unlikely(rc)) 5721 return -EIO; 5722 5723 switch (type) { 5724 case LPFC_RSC_TYPE_FCOE_RPI: 5725 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5726 break; 5727 case LPFC_RSC_TYPE_FCOE_VPI: 5728 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5729 break; 5730 case LPFC_RSC_TYPE_FCOE_XRI: 5731 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5732 break; 5733 case LPFC_RSC_TYPE_FCOE_VFI: 5734 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5735 break; 5736 default: 5737 break; 5738 } 5739 5740 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5741 curr_ext_cnt++; 5742 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5743 size_diff++; 5744 } 5745 5746 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5747 rc = 1; 5748 5749 return rc; 5750 } 5751 5752 /** 5753 * lpfc_sli4_cfg_post_extnts - 5754 * @phba: Pointer to HBA context object. 5755 * @extnt_cnt - number of available extents. 5756 * @type - the extent type (rpi, xri, vfi, vpi). 5757 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5758 * @mbox - pointer to the caller's allocated mailbox structure. 5759 * 5760 * This function executes the extents allocation request. It also 5761 * takes care of the amount of memory needed to allocate or get the 5762 * allocated extents. It is the caller's responsibility to evaluate 5763 * the response. 5764 * 5765 * Returns: 5766 * -Error: Error value describes the condition found. 5767 * 0: if successful 5768 **/ 5769 static int 5770 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5771 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5772 { 5773 int rc = 0; 5774 uint32_t req_len; 5775 uint32_t emb_len; 5776 uint32_t alloc_len, mbox_tmo; 5777 5778 /* Calculate the total requested length of the dma memory */ 5779 req_len = extnt_cnt * sizeof(uint16_t); 5780 5781 /* 5782 * Calculate the size of an embedded mailbox. The uint32_t 5783 * accounts for extents-specific word. 5784 */ 5785 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5786 sizeof(uint32_t); 5787 5788 /* 5789 * Presume the allocation and response will fit into an embedded 5790 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
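 *
 * Worked example of the check below (illustrative only): the request
 * payload is extnt_cnt * sizeof(uint16_t) bytes, so the embedded form
 * is kept only while extnt_cnt does not exceed emb_len / sizeof(uint16_t);
 * anything larger is rebuilt as a non-embedded request.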
5791 */ 5792 *emb = LPFC_SLI4_MBX_EMBED; 5793 if (req_len > emb_len) { 5794 req_len = extnt_cnt * sizeof(uint16_t) + 5795 sizeof(union lpfc_sli4_cfg_shdr) + 5796 sizeof(uint32_t); 5797 *emb = LPFC_SLI4_MBX_NEMBED; 5798 } 5799 5800 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5801 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5802 req_len, *emb); 5803 if (alloc_len < req_len) { 5804 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5805 "2982 Allocated DMA memory size (x%x) is " 5806 "less than the requested DMA memory " 5807 "size (x%x)\n", alloc_len, req_len); 5808 return -ENOMEM; 5809 } 5810 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5811 if (unlikely(rc)) 5812 return -EIO; 5813 5814 if (!phba->sli4_hba.intr_enable) 5815 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5816 else { 5817 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5818 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5819 } 5820 5821 if (unlikely(rc)) 5822 rc = -EIO; 5823 return rc; 5824 } 5825 5826 /** 5827 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5828 * @phba: Pointer to HBA context object. 5829 * @type: The resource extent type to allocate. 5830 * 5831 * This function allocates the number of elements for the specified 5832 * resource type. 5833 **/ 5834 static int 5835 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5836 { 5837 bool emb = false; 5838 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5839 uint16_t rsrc_id, rsrc_start, j, k; 5840 uint16_t *ids; 5841 int i, rc; 5842 unsigned long longs; 5843 unsigned long *bmask; 5844 struct lpfc_rsrc_blks *rsrc_blks; 5845 LPFC_MBOXQ_t *mbox; 5846 uint32_t length; 5847 struct lpfc_id_range *id_array = NULL; 5848 void *virtaddr = NULL; 5849 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5850 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5851 struct list_head *ext_blk_list; 5852 5853 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5854 &rsrc_cnt, 5855 &rsrc_size); 5856 if (unlikely(rc)) 5857 return -EIO; 5858 5859 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5860 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5861 "3009 No available Resource Extents " 5862 "for resource type 0x%x: Count: 0x%x, " 5863 "Size 0x%x\n", type, rsrc_cnt, 5864 rsrc_size); 5865 return -ENOMEM; 5866 } 5867 5868 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5869 "2903 Post resource extents type-0x%x: " 5870 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5871 5872 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5873 if (!mbox) 5874 return -ENOMEM; 5875 5876 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5877 if (unlikely(rc)) { 5878 rc = -EIO; 5879 goto err_exit; 5880 } 5881 5882 /* 5883 * Figure out where the response is located. Then get local pointers 5884 * to the response data. The port does not guarantee to respond to 5885 * all extents counts request so update the local variable with the 5886 * allocated count from the port. 
5887 */ 5888 if (emb == LPFC_SLI4_MBX_EMBED) { 5889 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5890 id_array = &rsrc_ext->u.rsp.id[0]; 5891 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5892 } else { 5893 virtaddr = mbox->sge_array->addr[0]; 5894 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5895 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5896 id_array = &n_rsrc->id; 5897 } 5898 5899 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5900 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5901 5902 /* 5903 * Based on the resource size and count, correct the base and max 5904 * resource values. 5905 */ 5906 length = sizeof(struct lpfc_rsrc_blks); 5907 switch (type) { 5908 case LPFC_RSC_TYPE_FCOE_RPI: 5909 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5910 sizeof(unsigned long), 5911 GFP_KERNEL); 5912 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5913 rc = -ENOMEM; 5914 goto err_exit; 5915 } 5916 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5917 sizeof(uint16_t), 5918 GFP_KERNEL); 5919 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5920 kfree(phba->sli4_hba.rpi_bmask); 5921 rc = -ENOMEM; 5922 goto err_exit; 5923 } 5924 5925 /* 5926 * The next_rpi was initialized with the maximum available 5927 * count but the port may allocate a smaller number. Catch 5928 * that case and update the next_rpi. 5929 */ 5930 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5931 5932 /* Initialize local ptrs for common extent processing later. */ 5933 bmask = phba->sli4_hba.rpi_bmask; 5934 ids = phba->sli4_hba.rpi_ids; 5935 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5936 break; 5937 case LPFC_RSC_TYPE_FCOE_VPI: 5938 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5939 GFP_KERNEL); 5940 if (unlikely(!phba->vpi_bmask)) { 5941 rc = -ENOMEM; 5942 goto err_exit; 5943 } 5944 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5945 GFP_KERNEL); 5946 if (unlikely(!phba->vpi_ids)) { 5947 kfree(phba->vpi_bmask); 5948 rc = -ENOMEM; 5949 goto err_exit; 5950 } 5951 5952 /* Initialize local ptrs for common extent processing later. */ 5953 bmask = phba->vpi_bmask; 5954 ids = phba->vpi_ids; 5955 ext_blk_list = &phba->lpfc_vpi_blk_list; 5956 break; 5957 case LPFC_RSC_TYPE_FCOE_XRI: 5958 phba->sli4_hba.xri_bmask = kcalloc(longs, 5959 sizeof(unsigned long), 5960 GFP_KERNEL); 5961 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5962 rc = -ENOMEM; 5963 goto err_exit; 5964 } 5965 phba->sli4_hba.max_cfg_param.xri_used = 0; 5966 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5967 sizeof(uint16_t), 5968 GFP_KERNEL); 5969 if (unlikely(!phba->sli4_hba.xri_ids)) { 5970 kfree(phba->sli4_hba.xri_bmask); 5971 rc = -ENOMEM; 5972 goto err_exit; 5973 } 5974 5975 /* Initialize local ptrs for common extent processing later. */ 5976 bmask = phba->sli4_hba.xri_bmask; 5977 ids = phba->sli4_hba.xri_ids; 5978 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5979 break; 5980 case LPFC_RSC_TYPE_FCOE_VFI: 5981 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5982 sizeof(unsigned long), 5983 GFP_KERNEL); 5984 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5985 rc = -ENOMEM; 5986 goto err_exit; 5987 } 5988 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5989 sizeof(uint16_t), 5990 GFP_KERNEL); 5991 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5992 kfree(phba->sli4_hba.vfi_bmask); 5993 rc = -ENOMEM; 5994 goto err_exit; 5995 } 5996 5997 /* Initialize local ptrs for common extent processing later. 
*/ 5998 bmask = phba->sli4_hba.vfi_bmask; 5999 ids = phba->sli4_hba.vfi_ids; 6000 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 6001 break; 6002 default: 6003 /* Unsupported Opcode. Fail call. */ 6004 id_array = NULL; 6005 bmask = NULL; 6006 ids = NULL; 6007 ext_blk_list = NULL; 6008 goto err_exit; 6009 } 6010 6011 /* 6012 * Complete initializing the extent configuration with the 6013 * allocated ids assigned to this function. The bitmask serves 6014 * as an index into the array and manages the available ids. The 6015 * array just stores the ids communicated to the port via the wqes. 6016 */ 6017 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 6018 if ((i % 2) == 0) 6019 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 6020 &id_array[k]); 6021 else 6022 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 6023 &id_array[k]); 6024 6025 rsrc_blks = kzalloc(length, GFP_KERNEL); 6026 if (unlikely(!rsrc_blks)) { 6027 rc = -ENOMEM; 6028 kfree(bmask); 6029 kfree(ids); 6030 goto err_exit; 6031 } 6032 rsrc_blks->rsrc_start = rsrc_id; 6033 rsrc_blks->rsrc_size = rsrc_size; 6034 list_add_tail(&rsrc_blks->list, ext_blk_list); 6035 rsrc_start = rsrc_id; 6036 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 6037 phba->sli4_hba.io_xri_start = rsrc_start + 6038 lpfc_sli4_get_iocb_cnt(phba); 6039 } 6040 6041 while (rsrc_id < (rsrc_start + rsrc_size)) { 6042 ids[j] = rsrc_id; 6043 rsrc_id++; 6044 j++; 6045 } 6046 /* Entire word processed. Get next word.*/ 6047 if ((i % 2) == 1) 6048 k++; 6049 } 6050 err_exit: 6051 lpfc_sli4_mbox_cmd_free(phba, mbox); 6052 return rc; 6053 } 6054 6055 6056 6057 /** 6058 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6059 * @phba: Pointer to HBA context object. 6060 * @type: the extent's type. 6061 * 6062 * This function deallocates all extents of a particular resource type. 6063 * SLI4 does not allow for deallocating a particular extent range. It 6064 * is the caller's responsibility to release all kernel memory resources. 6065 **/ 6066 static int 6067 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6068 { 6069 int rc; 6070 uint32_t length, mbox_tmo = 0; 6071 LPFC_MBOXQ_t *mbox; 6072 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6073 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6074 6075 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6076 if (!mbox) 6077 return -ENOMEM; 6078 6079 /* 6080 * This function sends an embedded mailbox because it only sends the 6081 * the resource type. All extents of this type are released by the 6082 * port. 6083 */ 6084 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6085 sizeof(struct lpfc_sli4_cfg_mhdr)); 6086 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6087 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6088 length, LPFC_SLI4_MBX_EMBED); 6089 6090 /* Send an extents count of 0 - the dealloc doesn't use it. 
*/ 6091 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6092 LPFC_SLI4_MBX_EMBED); 6093 if (unlikely(rc)) { 6094 rc = -EIO; 6095 goto out_free_mbox; 6096 } 6097 if (!phba->sli4_hba.intr_enable) 6098 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6099 else { 6100 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6101 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6102 } 6103 if (unlikely(rc)) { 6104 rc = -EIO; 6105 goto out_free_mbox; 6106 } 6107 6108 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6109 if (bf_get(lpfc_mbox_hdr_status, 6110 &dealloc_rsrc->header.cfg_shdr.response)) { 6111 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6112 "2919 Failed to release resource extents " 6113 "for type %d - Status 0x%x Add'l Status 0x%x. " 6114 "Resource memory not released.\n", 6115 type, 6116 bf_get(lpfc_mbox_hdr_status, 6117 &dealloc_rsrc->header.cfg_shdr.response), 6118 bf_get(lpfc_mbox_hdr_add_status, 6119 &dealloc_rsrc->header.cfg_shdr.response)); 6120 rc = -EIO; 6121 goto out_free_mbox; 6122 } 6123 6124 /* Release kernel memory resources for the specific type. */ 6125 switch (type) { 6126 case LPFC_RSC_TYPE_FCOE_VPI: 6127 kfree(phba->vpi_bmask); 6128 kfree(phba->vpi_ids); 6129 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6130 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6131 &phba->lpfc_vpi_blk_list, list) { 6132 list_del_init(&rsrc_blk->list); 6133 kfree(rsrc_blk); 6134 } 6135 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6136 break; 6137 case LPFC_RSC_TYPE_FCOE_XRI: 6138 kfree(phba->sli4_hba.xri_bmask); 6139 kfree(phba->sli4_hba.xri_ids); 6140 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6141 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6142 list_del_init(&rsrc_blk->list); 6143 kfree(rsrc_blk); 6144 } 6145 break; 6146 case LPFC_RSC_TYPE_FCOE_VFI: 6147 kfree(phba->sli4_hba.vfi_bmask); 6148 kfree(phba->sli4_hba.vfi_ids); 6149 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6150 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6151 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6152 list_del_init(&rsrc_blk->list); 6153 kfree(rsrc_blk); 6154 } 6155 break; 6156 case LPFC_RSC_TYPE_FCOE_RPI: 6157 /* RPI bitmask and physical id array are cleaned up earlier. 
*/ 6158 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6159 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6160 list_del_init(&rsrc_blk->list); 6161 kfree(rsrc_blk); 6162 } 6163 break; 6164 default: 6165 break; 6166 } 6167 6168 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6169 6170 out_free_mbox: 6171 mempool_free(mbox, phba->mbox_mem_pool); 6172 return rc; 6173 } 6174 6175 static void 6176 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6177 uint32_t feature) 6178 { 6179 uint32_t len; 6180 6181 len = sizeof(struct lpfc_mbx_set_feature) - 6182 sizeof(struct lpfc_sli4_cfg_mhdr); 6183 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6184 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6185 LPFC_SLI4_MBX_EMBED); 6186 6187 switch (feature) { 6188 case LPFC_SET_UE_RECOVERY: 6189 bf_set(lpfc_mbx_set_feature_UER, 6190 &mbox->u.mqe.un.set_feature, 1); 6191 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6192 mbox->u.mqe.un.set_feature.param_len = 8; 6193 break; 6194 case LPFC_SET_MDS_DIAGS: 6195 bf_set(lpfc_mbx_set_feature_mds, 6196 &mbox->u.mqe.un.set_feature, 1); 6197 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6198 &mbox->u.mqe.un.set_feature, 1); 6199 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6200 mbox->u.mqe.un.set_feature.param_len = 8; 6201 break; 6202 } 6203 6204 return; 6205 } 6206 6207 /** 6208 * lpfc_ras_stop_fwlog - Disable FW logging by the adapter 6209 * @phba: Pointer to HBA context object. 6210 * 6211 * Disable FW logging into host memory on the adapter. To 6212 * be done before reading logs from the host memory. 6213 **/ 6214 void 6215 lpfc_ras_stop_fwlog(struct lpfc_hba *phba) 6216 { 6217 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6218 6219 ras_fwlog->ras_active = false; 6220 6221 /* Disable FW logging to host memory */ 6222 writel(LPFC_CTL_PDEV_CTL_DDL_RAS, 6223 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 6224 } 6225 6226 /** 6227 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6228 * @phba: Pointer to HBA context object. 6229 * 6230 * This function is called to free memory allocated for RAS FW logging 6231 * support in the driver. 6232 **/ 6233 void 6234 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) 6235 { 6236 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6237 struct lpfc_dmabuf *dmabuf, *next; 6238 6239 if (!list_empty(&ras_fwlog->fwlog_buff_list)) { 6240 list_for_each_entry_safe(dmabuf, next, 6241 &ras_fwlog->fwlog_buff_list, 6242 list) { 6243 list_del(&dmabuf->list); 6244 dma_free_coherent(&phba->pcidev->dev, 6245 LPFC_RAS_MAX_ENTRY_SIZE, 6246 dmabuf->virt, dmabuf->phys); 6247 kfree(dmabuf); 6248 } 6249 } 6250 6251 if (ras_fwlog->lwpd.virt) { 6252 dma_free_coherent(&phba->pcidev->dev, 6253 sizeof(uint32_t) * 2, 6254 ras_fwlog->lwpd.virt, 6255 ras_fwlog->lwpd.phys); 6256 ras_fwlog->lwpd.virt = NULL; 6257 } 6258 6259 ras_fwlog->ras_active = false; 6260 } 6261 6262 /** 6263 * lpfc_sli4_ras_dma_alloc - Allocate DMA memory for FW logging support 6264 * @phba: Pointer to HBA context object. 6265 * @fwlog_buff_count: Count of buffers to be created. 6266 * 6267 * This routine allocates DMA memory for the Log Write Position Data (LWPD) 6268 * and for the buffers posted to the adapter to receive FW log entries. 6269 * The buffer count is derived from the ras_fwlog_buffsize module parameter; 6270 * each buffer posted to the FW is 64K in size.
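 *
 * Worked sizing sketch (illustrative only; the count is computed by the
 * caller, lpfc_sli4_ras_fwlog_init(), from the module parameter):
 *
 *   fwlog_buff_count = (LPFC_RAS_MIN_BUFF_POST_SIZE *
 *                       phba->cfg_ras_fwlog_buffsize) /
 *                       LPFC_RAS_MAX_ENTRY_SIZE
 *
 * i.e. one LWPD allocation plus fwlog_buff_count 64K DMA buffers.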
6271 **/ 6272 6273 static int 6274 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, 6275 uint32_t fwlog_buff_count) 6276 { 6277 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6278 struct lpfc_dmabuf *dmabuf; 6279 int rc = 0, i = 0; 6280 6281 /* Initialize List */ 6282 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); 6283 6284 /* Allocate memory for the LWPD */ 6285 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, 6286 sizeof(uint32_t) * 2, 6287 &ras_fwlog->lwpd.phys, 6288 GFP_KERNEL); 6289 if (!ras_fwlog->lwpd.virt) { 6290 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6291 "6185 LWPD Memory Alloc Failed\n"); 6292 6293 return -ENOMEM; 6294 } 6295 6296 ras_fwlog->fw_buffcount = fwlog_buff_count; 6297 for (i = 0; i < ras_fwlog->fw_buffcount; i++) { 6298 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 6299 GFP_KERNEL); 6300 if (!dmabuf) { 6301 rc = -ENOMEM; 6302 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6303 "6186 Memory Alloc failed FW logging"); 6304 goto free_mem; 6305 } 6306 6307 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6308 LPFC_RAS_MAX_ENTRY_SIZE, 6309 &dmabuf->phys, GFP_KERNEL); 6310 if (!dmabuf->virt) { 6311 kfree(dmabuf); 6312 rc = -ENOMEM; 6313 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6314 "6187 DMA Alloc Failed FW logging"); 6315 goto free_mem; 6316 } 6317 dmabuf->buffer_tag = i; 6318 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); 6319 } 6320 6321 free_mem: 6322 if (rc) 6323 lpfc_sli4_ras_dma_free(phba); 6324 6325 return rc; 6326 } 6327 6328 /** 6329 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command 6330 * @phba: pointer to lpfc hba data structure. 6331 * @pmboxq: pointer to the driver internal queue element for mailbox command. 6332 * 6333 * Completion handler for driver's RAS MBX command to the device. 6334 **/ 6335 static void 6336 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6337 { 6338 MAILBOX_t *mb; 6339 union lpfc_sli4_cfg_shdr *shdr; 6340 uint32_t shdr_status, shdr_add_status; 6341 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6342 6343 mb = &pmb->u.mb; 6344 6345 shdr = (union lpfc_sli4_cfg_shdr *) 6346 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6347 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6348 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6349 6350 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6351 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6352 "6188 FW LOG mailbox " 6353 "completed with status x%x add_status x%x," 6354 " mbx status x%x\n", 6355 shdr_status, shdr_add_status, mb->mbxStatus); 6356 6357 ras_fwlog->ras_hwsupport = false; 6358 goto disable_ras; 6359 } 6360 6361 ras_fwlog->ras_active = true; 6362 mempool_free(pmb, phba->mbox_mem_pool); 6363 6364 return; 6365 6366 disable_ras: 6367 /* Free RAS DMA memory */ 6368 lpfc_sli4_ras_dma_free(phba); 6369 mempool_free(pmb, phba->mbox_mem_pool); 6370 } 6371 6372 /** 6373 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6374 * @phba: pointer to lpfc hba data structure. 6375 * @fwlog_level: Logging verbosity level. 6376 * @fwlog_enable: Enable/Disable logging. 6377 * 6378 * Initialize memory and post mailbox command to enable FW logging in host 6379 * memory. 
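 *
 * Typical call sketch (this mirrors lpfc_sli4_ras_setup() below and is
 * shown here only as an illustration):
 *
 *   lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
 *                            LPFC_RAS_ENABLE_LOGGING);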
6380 **/ 6381 int 6382 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6383 uint32_t fwlog_level, 6384 uint32_t fwlog_enable) 6385 { 6386 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6387 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6388 struct lpfc_dmabuf *dmabuf; 6389 LPFC_MBOXQ_t *mbox; 6390 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6391 int rc = 0; 6392 6393 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6394 phba->cfg_ras_fwlog_buffsize); 6395 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6396 6397 /* 6398 * If re-enabling FW logging support use earlier allocated 6399 * DMA buffers while posting MBX command. 6400 **/ 6401 if (!ras_fwlog->lwpd.virt) { 6402 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6403 if (rc) { 6404 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6405 "6189 FW Log Memory Allocation Failed"); 6406 return rc; 6407 } 6408 } 6409 6410 /* Setup Mailbox command */ 6411 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6412 if (!mbox) { 6413 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6414 "6190 RAS MBX Alloc Failed"); 6415 rc = -ENOMEM; 6416 goto mem_free; 6417 } 6418 6419 ras_fwlog->fw_loglevel = fwlog_level; 6420 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6421 sizeof(struct lpfc_sli4_cfg_mhdr)); 6422 6423 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6424 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6425 len, LPFC_SLI4_MBX_EMBED); 6426 6427 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6428 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6429 fwlog_enable); 6430 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6431 ras_fwlog->fw_loglevel); 6432 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6433 ras_fwlog->fw_buffcount); 6434 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6435 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6436 6437 /* Update DMA buffer address */ 6438 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6439 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6440 6441 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6442 putPaddrLow(dmabuf->phys); 6443 6444 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6445 putPaddrHigh(dmabuf->phys); 6446 } 6447 6448 /* Update LPWD address */ 6449 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6450 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6451 6452 mbox->vport = phba->pport; 6453 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6454 6455 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6456 6457 if (rc == MBX_NOT_FINISHED) { 6458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6459 "6191 FW-Log Mailbox failed. " 6460 "status %d mbxStatus : x%x", rc, 6461 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6462 mempool_free(mbox, phba->mbox_mem_pool); 6463 rc = -EIO; 6464 goto mem_free; 6465 } else 6466 rc = 0; 6467 mem_free: 6468 if (rc) 6469 lpfc_sli4_ras_dma_free(phba); 6470 6471 return rc; 6472 } 6473 6474 /** 6475 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6476 * @phba: Pointer to HBA context object. 6477 * 6478 * Check if RAS is supported on the adapter and initialize it. 
6479 **/ 6480 void 6481 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6482 { 6483 /* Check RAS FW Log needs to be enabled or not */ 6484 if (lpfc_check_fwlog_support(phba)) 6485 return; 6486 6487 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6488 LPFC_RAS_ENABLE_LOGGING); 6489 } 6490 6491 /** 6492 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6493 * @phba: Pointer to HBA context object. 6494 * 6495 * This function allocates all SLI4 resource identifiers. 6496 **/ 6497 int 6498 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6499 { 6500 int i, rc, error = 0; 6501 uint16_t count, base; 6502 unsigned long longs; 6503 6504 if (!phba->sli4_hba.rpi_hdrs_in_use) 6505 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6506 if (phba->sli4_hba.extents_in_use) { 6507 /* 6508 * The port supports resource extents. The XRI, VPI, VFI, RPI 6509 * resource extent count must be read and allocated before 6510 * provisioning the resource id arrays. 6511 */ 6512 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6513 LPFC_IDX_RSRC_RDY) { 6514 /* 6515 * Extent-based resources are set - the driver could 6516 * be in a port reset. Figure out if any corrective 6517 * actions need to be taken. 6518 */ 6519 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6520 LPFC_RSC_TYPE_FCOE_VFI); 6521 if (rc != 0) 6522 error++; 6523 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6524 LPFC_RSC_TYPE_FCOE_VPI); 6525 if (rc != 0) 6526 error++; 6527 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6528 LPFC_RSC_TYPE_FCOE_XRI); 6529 if (rc != 0) 6530 error++; 6531 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6532 LPFC_RSC_TYPE_FCOE_RPI); 6533 if (rc != 0) 6534 error++; 6535 6536 /* 6537 * It's possible that the number of resources 6538 * provided to this port instance changed between 6539 * resets. Detect this condition and reallocate 6540 * resources. Otherwise, there is no action. 6541 */ 6542 if (error) { 6543 lpfc_printf_log(phba, KERN_INFO, 6544 LOG_MBOX | LOG_INIT, 6545 "2931 Detected extent resource " 6546 "change. Reallocating all " 6547 "extents.\n"); 6548 rc = lpfc_sli4_dealloc_extent(phba, 6549 LPFC_RSC_TYPE_FCOE_VFI); 6550 rc = lpfc_sli4_dealloc_extent(phba, 6551 LPFC_RSC_TYPE_FCOE_VPI); 6552 rc = lpfc_sli4_dealloc_extent(phba, 6553 LPFC_RSC_TYPE_FCOE_XRI); 6554 rc = lpfc_sli4_dealloc_extent(phba, 6555 LPFC_RSC_TYPE_FCOE_RPI); 6556 } else 6557 return 0; 6558 } 6559 6560 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6561 if (unlikely(rc)) 6562 goto err_exit; 6563 6564 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6565 if (unlikely(rc)) 6566 goto err_exit; 6567 6568 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6569 if (unlikely(rc)) 6570 goto err_exit; 6571 6572 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6573 if (unlikely(rc)) 6574 goto err_exit; 6575 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6576 LPFC_IDX_RSRC_RDY); 6577 return rc; 6578 } else { 6579 /* 6580 * The port does not support resource extents. The XRI, VPI, 6581 * VFI, RPI resource ids were determined from READ_CONFIG. 6582 * Just allocate the bitmasks and provision the resource id 6583 * arrays. If a port reset is active, the resources don't 6584 * need any action - just exit. 6585 */ 6586 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6587 LPFC_IDX_RSRC_RDY) { 6588 lpfc_sli4_dealloc_resource_identifiers(phba); 6589 lpfc_sli4_remove_rpis(phba); 6590 } 6591 /* RPIs. 
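 * The non-extent path provisions each id range as a simple base + index
 * mapping; e.g. (hypothetical numbers) rpi_base = 4096 and max_rpi = 64
 * would yield rpi_ids[] = { 4096 ... 4159 }, with rpi_bmask tracking
 * which of those slots are currently in use.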
*/ 6592 count = phba->sli4_hba.max_cfg_param.max_rpi; 6593 if (count <= 0) { 6594 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6595 "3279 Invalid provisioning of " 6596 "rpi:%d\n", count); 6597 rc = -EINVAL; 6598 goto err_exit; 6599 } 6600 base = phba->sli4_hba.max_cfg_param.rpi_base; 6601 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6602 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6603 sizeof(unsigned long), 6604 GFP_KERNEL); 6605 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6606 rc = -ENOMEM; 6607 goto err_exit; 6608 } 6609 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6610 GFP_KERNEL); 6611 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6612 rc = -ENOMEM; 6613 goto free_rpi_bmask; 6614 } 6615 6616 for (i = 0; i < count; i++) 6617 phba->sli4_hba.rpi_ids[i] = base + i; 6618 6619 /* VPIs. */ 6620 count = phba->sli4_hba.max_cfg_param.max_vpi; 6621 if (count <= 0) { 6622 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6623 "3280 Invalid provisioning of " 6624 "vpi:%d\n", count); 6625 rc = -EINVAL; 6626 goto free_rpi_ids; 6627 } 6628 base = phba->sli4_hba.max_cfg_param.vpi_base; 6629 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6630 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6631 GFP_KERNEL); 6632 if (unlikely(!phba->vpi_bmask)) { 6633 rc = -ENOMEM; 6634 goto free_rpi_ids; 6635 } 6636 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6637 GFP_KERNEL); 6638 if (unlikely(!phba->vpi_ids)) { 6639 rc = -ENOMEM; 6640 goto free_vpi_bmask; 6641 } 6642 6643 for (i = 0; i < count; i++) 6644 phba->vpi_ids[i] = base + i; 6645 6646 /* XRIs. */ 6647 count = phba->sli4_hba.max_cfg_param.max_xri; 6648 if (count <= 0) { 6649 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6650 "3281 Invalid provisioning of " 6651 "xri:%d\n", count); 6652 rc = -EINVAL; 6653 goto free_vpi_ids; 6654 } 6655 base = phba->sli4_hba.max_cfg_param.xri_base; 6656 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6657 phba->sli4_hba.xri_bmask = kcalloc(longs, 6658 sizeof(unsigned long), 6659 GFP_KERNEL); 6660 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6661 rc = -ENOMEM; 6662 goto free_vpi_ids; 6663 } 6664 phba->sli4_hba.max_cfg_param.xri_used = 0; 6665 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6666 GFP_KERNEL); 6667 if (unlikely(!phba->sli4_hba.xri_ids)) { 6668 rc = -ENOMEM; 6669 goto free_xri_bmask; 6670 } 6671 6672 for (i = 0; i < count; i++) 6673 phba->sli4_hba.xri_ids[i] = base + i; 6674 6675 /* VFIs. */ 6676 count = phba->sli4_hba.max_cfg_param.max_vfi; 6677 if (count <= 0) { 6678 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6679 "3282 Invalid provisioning of " 6680 "vfi:%d\n", count); 6681 rc = -EINVAL; 6682 goto free_xri_ids; 6683 } 6684 base = phba->sli4_hba.max_cfg_param.vfi_base; 6685 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6686 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6687 sizeof(unsigned long), 6688 GFP_KERNEL); 6689 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6690 rc = -ENOMEM; 6691 goto free_xri_ids; 6692 } 6693 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6694 GFP_KERNEL); 6695 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6696 rc = -ENOMEM; 6697 goto free_vfi_bmask; 6698 } 6699 6700 for (i = 0; i < count; i++) 6701 phba->sli4_hba.vfi_ids[i] = base + i; 6702 6703 /* 6704 * Mark all resources ready. An HBA reset doesn't need 6705 * to reset the initialization. 
6706 */ 6707 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6708 LPFC_IDX_RSRC_RDY); 6709 return 0; 6710 } 6711 6712 free_vfi_bmask: 6713 kfree(phba->sli4_hba.vfi_bmask); 6714 phba->sli4_hba.vfi_bmask = NULL; 6715 free_xri_ids: 6716 kfree(phba->sli4_hba.xri_ids); 6717 phba->sli4_hba.xri_ids = NULL; 6718 free_xri_bmask: 6719 kfree(phba->sli4_hba.xri_bmask); 6720 phba->sli4_hba.xri_bmask = NULL; 6721 free_vpi_ids: 6722 kfree(phba->vpi_ids); 6723 phba->vpi_ids = NULL; 6724 free_vpi_bmask: 6725 kfree(phba->vpi_bmask); 6726 phba->vpi_bmask = NULL; 6727 free_rpi_ids: 6728 kfree(phba->sli4_hba.rpi_ids); 6729 phba->sli4_hba.rpi_ids = NULL; 6730 free_rpi_bmask: 6731 kfree(phba->sli4_hba.rpi_bmask); 6732 phba->sli4_hba.rpi_bmask = NULL; 6733 err_exit: 6734 return rc; 6735 } 6736 6737 /** 6738 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6739 * @phba: Pointer to HBA context object. 6740 * 6741 * This function releases all SLI4 resource identifiers: extent-based resources 6742 * are deallocated through the port, otherwise the id bitmasks and arrays are freed. 6743 **/ 6744 int 6745 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 6746 { 6747 if (phba->sli4_hba.extents_in_use) { 6748 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6749 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6750 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6751 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6752 } else { 6753 kfree(phba->vpi_bmask); 6754 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6755 kfree(phba->vpi_ids); 6756 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6757 kfree(phba->sli4_hba.xri_bmask); 6758 kfree(phba->sli4_hba.xri_ids); 6759 kfree(phba->sli4_hba.vfi_bmask); 6760 kfree(phba->sli4_hba.vfi_ids); 6761 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6762 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6763 } 6764 6765 return 0; 6766 } 6767 6768 /** 6769 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 6770 * @phba: Pointer to HBA context object. 6771 * @type: The resource extent type. 6772 * @extnt_cnt: buffer to hold port extent count response 6773 * @extnt_size: buffer to hold port extent size response. 6774 * 6775 * This function calls the port to read the host allocated extents 6776 * for a particular type. 6777 **/ 6778 int 6779 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 6780 uint16_t *extnt_cnt, uint16_t *extnt_size) 6781 { 6782 bool emb; 6783 int rc = 0; 6784 uint16_t curr_blks = 0; 6785 uint32_t req_len, emb_len; 6786 uint32_t alloc_len, mbox_tmo; 6787 struct list_head *blk_list_head; 6788 struct lpfc_rsrc_blks *rsrc_blk; 6789 LPFC_MBOXQ_t *mbox; 6790 void *virtaddr = NULL; 6791 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6792 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6793 union lpfc_sli4_cfg_shdr *shdr; 6794 6795 switch (type) { 6796 case LPFC_RSC_TYPE_FCOE_VPI: 6797 blk_list_head = &phba->lpfc_vpi_blk_list; 6798 break; 6799 case LPFC_RSC_TYPE_FCOE_XRI: 6800 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 6801 break; 6802 case LPFC_RSC_TYPE_FCOE_VFI: 6803 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 6804 break; 6805 case LPFC_RSC_TYPE_FCOE_RPI: 6806 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 6807 break; 6808 default: 6809 return -EIO; 6810 } 6811 6812 /* Count the number of extents currently allocated for this type.
*/ 6813 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6814 if (curr_blks == 0) { 6815 /* 6816 * The GET_ALLOCATED mailbox does not return the size, 6817 * just the count. The size should be just the size 6818 * stored in the current allocated block and all sizes 6819 * for an extent type are the same so set the return 6820 * value now. 6821 */ 6822 *extnt_size = rsrc_blk->rsrc_size; 6823 } 6824 curr_blks++; 6825 } 6826 6827 /* 6828 * Calculate the size of an embedded mailbox. The uint32_t 6829 * accounts for extents-specific word. 6830 */ 6831 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6832 sizeof(uint32_t); 6833 6834 /* 6835 * Presume the allocation and response will fit into an embedded 6836 * mailbox. If not true, reconfigure to a non-embedded mailbox. 6837 */ 6838 emb = LPFC_SLI4_MBX_EMBED; 6839 req_len = emb_len; 6840 if (req_len > emb_len) { 6841 req_len = curr_blks * sizeof(uint16_t) + 6842 sizeof(union lpfc_sli4_cfg_shdr) + 6843 sizeof(uint32_t); 6844 emb = LPFC_SLI4_MBX_NEMBED; 6845 } 6846 6847 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6848 if (!mbox) 6849 return -ENOMEM; 6850 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6851 6852 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6853 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6854 req_len, emb); 6855 if (alloc_len < req_len) { 6856 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6857 "2983 Allocated DMA memory size (x%x) is " 6858 "less than the requested DMA memory " 6859 "size (x%x)\n", alloc_len, req_len); 6860 rc = -ENOMEM; 6861 goto err_exit; 6862 } 6863 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6864 if (unlikely(rc)) { 6865 rc = -EIO; 6866 goto err_exit; 6867 } 6868 6869 if (!phba->sli4_hba.intr_enable) 6870 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6871 else { 6872 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6873 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6874 } 6875 6876 if (unlikely(rc)) { 6877 rc = -EIO; 6878 goto err_exit; 6879 } 6880 6881 /* 6882 * Figure out where the response is located. Then get local pointers 6883 * to the response data. The port does not guarantee to respond to 6884 * all extents counts request so update the local variable with the 6885 * allocated count from the port. 6886 */ 6887 if (emb == LPFC_SLI4_MBX_EMBED) { 6888 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6889 shdr = &rsrc_ext->header.cfg_shdr; 6890 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6891 } else { 6892 virtaddr = mbox->sge_array->addr[0]; 6893 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6894 shdr = &n_rsrc->cfg_shdr; 6895 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6896 } 6897 6898 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6899 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6900 "2984 Failed to read allocated resources " 6901 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6902 type, 6903 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6904 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6905 rc = -EIO; 6906 goto err_exit; 6907 } 6908 err_exit: 6909 lpfc_sli4_mbox_cmd_free(phba, mbox); 6910 return rc; 6911 } 6912 6913 /** 6914 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6915 * @phba: pointer to lpfc hba data structure. 6916 * @pring: Pointer to driver SLI ring object. 
6917 * @sgl_list: linked link of sgl buffers to post 6918 * @cnt: number of linked list buffers 6919 * 6920 * This routine walks the list of buffers that have been allocated and 6921 * repost them to the port by using SGL block post. This is needed after a 6922 * pci_function_reset/warm_start or start. It attempts to construct blocks 6923 * of buffer sgls which contains contiguous xris and uses the non-embedded 6924 * SGL block post mailbox commands to post them to the port. For single 6925 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6926 * mailbox command for posting. 6927 * 6928 * Returns: 0 = success, non-zero failure. 6929 **/ 6930 static int 6931 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6932 struct list_head *sgl_list, int cnt) 6933 { 6934 struct lpfc_sglq *sglq_entry = NULL; 6935 struct lpfc_sglq *sglq_entry_next = NULL; 6936 struct lpfc_sglq *sglq_entry_first = NULL; 6937 int status, total_cnt; 6938 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6939 int last_xritag = NO_XRI; 6940 LIST_HEAD(prep_sgl_list); 6941 LIST_HEAD(blck_sgl_list); 6942 LIST_HEAD(allc_sgl_list); 6943 LIST_HEAD(post_sgl_list); 6944 LIST_HEAD(free_sgl_list); 6945 6946 spin_lock_irq(&phba->hbalock); 6947 spin_lock(&phba->sli4_hba.sgl_list_lock); 6948 list_splice_init(sgl_list, &allc_sgl_list); 6949 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6950 spin_unlock_irq(&phba->hbalock); 6951 6952 total_cnt = cnt; 6953 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6954 &allc_sgl_list, list) { 6955 list_del_init(&sglq_entry->list); 6956 block_cnt++; 6957 if ((last_xritag != NO_XRI) && 6958 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6959 /* a hole in xri block, form a sgl posting block */ 6960 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6961 post_cnt = block_cnt - 1; 6962 /* prepare list for next posting block */ 6963 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6964 block_cnt = 1; 6965 } else { 6966 /* prepare list for next posting block */ 6967 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6968 /* enough sgls for non-embed sgl mbox command */ 6969 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6970 list_splice_init(&prep_sgl_list, 6971 &blck_sgl_list); 6972 post_cnt = block_cnt; 6973 block_cnt = 0; 6974 } 6975 } 6976 num_posted++; 6977 6978 /* keep track of last sgl's xritag */ 6979 last_xritag = sglq_entry->sli4_xritag; 6980 6981 /* end of repost sgl list condition for buffers */ 6982 if (num_posted == total_cnt) { 6983 if (post_cnt == 0) { 6984 list_splice_init(&prep_sgl_list, 6985 &blck_sgl_list); 6986 post_cnt = block_cnt; 6987 } else if (block_cnt == 1) { 6988 status = lpfc_sli4_post_sgl(phba, 6989 sglq_entry->phys, 0, 6990 sglq_entry->sli4_xritag); 6991 if (!status) { 6992 /* successful, put sgl to posted list */ 6993 list_add_tail(&sglq_entry->list, 6994 &post_sgl_list); 6995 } else { 6996 /* Failure, put sgl to free list */ 6997 lpfc_printf_log(phba, KERN_WARNING, 6998 LOG_SLI, 6999 "3159 Failed to post " 7000 "sgl, xritag:x%x\n", 7001 sglq_entry->sli4_xritag); 7002 list_add_tail(&sglq_entry->list, 7003 &free_sgl_list); 7004 total_cnt--; 7005 } 7006 } 7007 } 7008 7009 /* continue until a nembed page worth of sgls */ 7010 if (post_cnt == 0) 7011 continue; 7012 7013 /* post the buffer list sgls as a block */ 7014 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 7015 post_cnt); 7016 7017 if (!status) { 7018 /* success, put sgl list to posted sgl list */ 7019 list_splice_init(&blck_sgl_list, &post_sgl_list); 7020 } else { 7021 /* Failure, put 
sgl list to free sgl list */ 7022 sglq_entry_first = list_first_entry(&blck_sgl_list, 7023 struct lpfc_sglq, 7024 list); 7025 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7026 "3160 Failed to post sgl-list, " 7027 "xritag:x%x-x%x\n", 7028 sglq_entry_first->sli4_xritag, 7029 (sglq_entry_first->sli4_xritag + 7030 post_cnt - 1)); 7031 list_splice_init(&blck_sgl_list, &free_sgl_list); 7032 total_cnt -= post_cnt; 7033 } 7034 7035 /* don't reset xirtag due to hole in xri block */ 7036 if (block_cnt == 0) 7037 last_xritag = NO_XRI; 7038 7039 /* reset sgl post count for next round of posting */ 7040 post_cnt = 0; 7041 } 7042 7043 /* free the sgls failed to post */ 7044 lpfc_free_sgl_list(phba, &free_sgl_list); 7045 7046 /* push sgls posted to the available list */ 7047 if (!list_empty(&post_sgl_list)) { 7048 spin_lock_irq(&phba->hbalock); 7049 spin_lock(&phba->sli4_hba.sgl_list_lock); 7050 list_splice_init(&post_sgl_list, sgl_list); 7051 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7052 spin_unlock_irq(&phba->hbalock); 7053 } else { 7054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7055 "3161 Failure to post sgl to port.\n"); 7056 return -EIO; 7057 } 7058 7059 /* return the number of XRIs actually posted */ 7060 return total_cnt; 7061 } 7062 7063 /** 7064 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7065 * @phba: pointer to lpfc hba data structure. 7066 * 7067 * This routine walks the list of nvme buffers that have been allocated and 7068 * repost them to the port by using SGL block post. This is needed after a 7069 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7070 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7071 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7072 * 7073 * Returns: 0 = success, non-zero failure. 7074 **/ 7075 static int 7076 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7077 { 7078 LIST_HEAD(post_nblist); 7079 int num_posted, rc = 0; 7080 7081 /* get all NVME buffers need to repost to a local list */ 7082 lpfc_io_buf_flush(phba, &post_nblist); 7083 7084 /* post the list of nvme buffer sgls to port if available */ 7085 if (!list_empty(&post_nblist)) { 7086 num_posted = lpfc_sli4_post_io_sgl_list( 7087 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7088 /* failed to post any nvme buffer, return error */ 7089 if (num_posted == 0) 7090 rc = -EIO; 7091 } 7092 return rc; 7093 } 7094 7095 static void 7096 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7097 { 7098 uint32_t len; 7099 7100 len = sizeof(struct lpfc_mbx_set_host_data) - 7101 sizeof(struct lpfc_sli4_cfg_mhdr); 7102 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7103 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7104 LPFC_SLI4_MBX_EMBED); 7105 7106 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7107 mbox->u.mqe.un.set_host_data.param_len = 7108 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7109 snprintf(mbox->u.mqe.un.set_host_data.data, 7110 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7111 "Linux %s v"LPFC_DRIVER_VERSION, 7112 (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); 7113 } 7114 7115 int 7116 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7117 struct lpfc_queue *drq, int count, int idx) 7118 { 7119 int rc, i; 7120 struct lpfc_rqe hrqe; 7121 struct lpfc_rqe drqe; 7122 struct lpfc_rqb *rqbp; 7123 unsigned long flags; 7124 struct rqb_dmabuf *rqb_buffer; 7125 LIST_HEAD(rqb_buf_list); 7126 7127 spin_lock_irqsave(&phba->hbalock, flags); 7128 rqbp = hrq->rqbp; 7129 for (i = 0; i < count; i++) { 7130 /* IF RQ is already full, don't bother */ 7131 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7132 break; 7133 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7134 if (!rqb_buffer) 7135 break; 7136 rqb_buffer->hrq = hrq; 7137 rqb_buffer->drq = drq; 7138 rqb_buffer->idx = idx; 7139 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7140 } 7141 while (!list_empty(&rqb_buf_list)) { 7142 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7143 hbuf.list); 7144 7145 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7146 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7147 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7148 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7149 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7150 if (rc < 0) { 7151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7152 "6421 Cannot post to HRQ %d: %x %x %x " 7153 "DRQ %x %x\n", 7154 hrq->queue_id, 7155 hrq->host_index, 7156 hrq->hba_index, 7157 hrq->entry_count, 7158 drq->host_index, 7159 drq->hba_index); 7160 rqbp->rqb_free_buffer(phba, rqb_buffer); 7161 } else { 7162 list_add_tail(&rqb_buffer->hbuf.list, 7163 &rqbp->rqb_buffer_list); 7164 rqbp->buffer_count++; 7165 } 7166 } 7167 spin_unlock_irqrestore(&phba->hbalock, flags); 7168 return 1; 7169 } 7170 7171 /** 7172 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7173 * @phba: Pointer to HBA context object. 7174 * 7175 * This function is the main SLI4 device initialization PCI function. This 7176 * function is called by the HBA initialization code, HBA reset code and 7177 * HBA error attention handler code. Caller is not required to hold any 7178 * locks. 7179 **/ 7180 int 7181 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7182 { 7183 int rc, i, cnt, len; 7184 LPFC_MBOXQ_t *mboxq; 7185 struct lpfc_mqe *mqe; 7186 uint8_t *vpd; 7187 uint32_t vpd_size; 7188 uint32_t ftr_rsp = 0; 7189 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7190 struct lpfc_vport *vport = phba->pport; 7191 struct lpfc_dmabuf *mp; 7192 struct lpfc_rqb *rqbp; 7193 7194 /* Perform a PCI function reset to start from clean */ 7195 rc = lpfc_pci_function_reset(phba); 7196 if (unlikely(rc)) 7197 return -ENODEV; 7198 7199 /* Check the HBA Host Status Register for readyness */ 7200 rc = lpfc_sli4_post_status_check(phba); 7201 if (unlikely(rc)) 7202 return -ENODEV; 7203 else { 7204 spin_lock_irq(&phba->hbalock); 7205 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7206 spin_unlock_irq(&phba->hbalock); 7207 } 7208 7209 /* 7210 * Allocate a single mailbox container for initializing the 7211 * port. 7212 */ 7213 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7214 if (!mboxq) 7215 return -ENOMEM; 7216 7217 /* Issue READ_REV to collect vpd and FW information. 
*/ 7218 vpd_size = SLI4_PAGE_SIZE; 7219 vpd = kzalloc(vpd_size, GFP_KERNEL); 7220 if (!vpd) { 7221 rc = -ENOMEM; 7222 goto out_free_mbox; 7223 } 7224 7225 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7226 if (unlikely(rc)) { 7227 kfree(vpd); 7228 goto out_free_mbox; 7229 } 7230 7231 mqe = &mboxq->u.mqe; 7232 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7233 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7234 phba->hba_flag |= HBA_FCOE_MODE; 7235 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7236 } else { 7237 phba->hba_flag &= ~HBA_FCOE_MODE; 7238 } 7239 7240 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7241 LPFC_DCBX_CEE_MODE) 7242 phba->hba_flag |= HBA_FIP_SUPPORT; 7243 else 7244 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7245 7246 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 7247 7248 if (phba->sli_rev != LPFC_SLI_REV4) { 7249 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7250 "0376 READ_REV Error. SLI Level %d " 7251 "FCoE enabled %d\n", 7252 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7253 rc = -EIO; 7254 kfree(vpd); 7255 goto out_free_mbox; 7256 } 7257 7258 /* 7259 * Continue initialization with default values even if driver failed 7260 * to read FCoE param config regions, only read parameters if the 7261 * board is FCoE 7262 */ 7263 if (phba->hba_flag & HBA_FCOE_MODE && 7264 lpfc_sli4_read_fcoe_params(phba)) 7265 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7266 "2570 Failed to read FCoE parameters\n"); 7267 7268 /* 7269 * Retrieve sli4 device physical port name, failure of doing it 7270 * is considered as non-fatal. 7271 */ 7272 rc = lpfc_sli4_retrieve_pport_name(phba); 7273 if (!rc) 7274 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7275 "3080 Successful retrieving SLI4 device " 7276 "physical port name: %s.\n", phba->Port); 7277 7278 rc = lpfc_sli4_get_ctl_attr(phba); 7279 if (!rc) 7280 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7281 "8351 Successful retrieving SLI4 device " 7282 "CTL ATTR\n"); 7283 7284 /* 7285 * Evaluate the read rev and vpd data. Populate the driver 7286 * state with the results. If this routine fails, the failure 7287 * is not fatal as the driver will use generic values. 7288 */ 7289 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7290 if (unlikely(!rc)) { 7291 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7292 "0377 Error %d parsing vpd. 
" 7293 "Using defaults.\n", rc); 7294 rc = 0; 7295 } 7296 kfree(vpd); 7297 7298 /* Save information as VPD data */ 7299 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7300 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7301 7302 /* 7303 * This is because first G7 ASIC doesn't support the standard 7304 * 0x5a NVME cmd descriptor type/subtype 7305 */ 7306 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7307 LPFC_SLI_INTF_IF_TYPE_6) && 7308 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7309 (phba->vpd.rev.smRev == 0) && 7310 (phba->cfg_nvme_embed_cmd == 1)) 7311 phba->cfg_nvme_embed_cmd = 0; 7312 7313 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7314 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7315 &mqe->un.read_rev); 7316 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7317 &mqe->un.read_rev); 7318 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7319 &mqe->un.read_rev); 7320 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7321 &mqe->un.read_rev); 7322 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7323 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7324 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7325 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7326 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7327 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7328 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7329 "(%d):0380 READ_REV Status x%x " 7330 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7331 mboxq->vport ? mboxq->vport->vpi : 0, 7332 bf_get(lpfc_mqe_status, mqe), 7333 phba->vpd.rev.opFwName, 7334 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7335 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7336 7337 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7338 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7339 if (phba->pport->cfg_lun_queue_depth > rc) { 7340 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7341 "3362 LUN queue depth changed from %d to %d\n", 7342 phba->pport->cfg_lun_queue_depth, rc); 7343 phba->pport->cfg_lun_queue_depth = rc; 7344 } 7345 7346 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7347 LPFC_SLI_INTF_IF_TYPE_0) { 7348 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7349 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7350 if (rc == MBX_SUCCESS) { 7351 phba->hba_flag |= HBA_RECOVERABLE_UE; 7352 /* Set 1Sec interval to detect UE */ 7353 phba->eratt_poll_interval = 1; 7354 phba->sli4_hba.ue_to_sr = bf_get( 7355 lpfc_mbx_set_feature_UESR, 7356 &mboxq->u.mqe.un.set_feature); 7357 phba->sli4_hba.ue_to_rp = bf_get( 7358 lpfc_mbx_set_feature_UERP, 7359 &mboxq->u.mqe.un.set_feature); 7360 } 7361 } 7362 7363 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7364 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7365 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7366 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7367 if (rc != MBX_SUCCESS) 7368 phba->mds_diags_support = 0; 7369 } 7370 7371 /* 7372 * Discover the port's supported feature set and match it against the 7373 * hosts requests. 7374 */ 7375 lpfc_request_features(phba, mboxq); 7376 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7377 if (unlikely(rc)) { 7378 rc = -EIO; 7379 goto out_free_mbox; 7380 } 7381 7382 /* 7383 * The port must support FCP initiator mode as this is the 7384 * only mode running in the host. 
7385 */ 7386 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7387 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7388 "0378 No support for fcpi mode.\n"); 7389 ftr_rsp++; 7390 } 7391 7392 /* Performance Hints are ONLY for FCoE */ 7393 if (phba->hba_flag & HBA_FCOE_MODE) { 7394 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7395 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7396 else 7397 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7398 } 7399 7400 /* 7401 * If the port cannot support the host's requested features 7402 * then turn off the global config parameters to disable the 7403 * feature in the driver. This is not a fatal error. 7404 */ 7405 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7406 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7407 phba->cfg_enable_bg = 0; 7408 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7409 ftr_rsp++; 7410 } 7411 } 7412 7413 if (phba->max_vpi && phba->cfg_enable_npiv && 7414 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7415 ftr_rsp++; 7416 7417 if (ftr_rsp) { 7418 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7419 "0379 Feature Mismatch Data: x%08x %08x " 7420 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7421 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7422 phba->cfg_enable_npiv, phba->max_vpi); 7423 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7424 phba->cfg_enable_bg = 0; 7425 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7426 phba->cfg_enable_npiv = 0; 7427 } 7428 7429 /* These SLI3 features are assumed in SLI4 */ 7430 spin_lock_irq(&phba->hbalock); 7431 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7432 spin_unlock_irq(&phba->hbalock); 7433 7434 /* 7435 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7436 * calls depends on these resources to complete port setup. 7437 */ 7438 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7439 if (rc) { 7440 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7441 "2920 Failed to alloc Resource IDs " 7442 "rc = x%x\n", rc); 7443 goto out_free_mbox; 7444 } 7445 7446 lpfc_set_host_data(phba, mboxq); 7447 7448 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7449 if (rc) { 7450 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7451 "2134 Failed to set host os driver version %x", 7452 rc); 7453 } 7454 7455 /* Read the port's service parameters. */ 7456 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7457 if (rc) { 7458 phba->link_state = LPFC_HBA_ERROR; 7459 rc = -ENOMEM; 7460 goto out_free_mbox; 7461 } 7462 7463 mboxq->vport = vport; 7464 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7465 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 7466 if (rc == MBX_SUCCESS) { 7467 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7468 rc = 0; 7469 } 7470 7471 /* 7472 * This memory was allocated by the lpfc_read_sparam routine. Release 7473 * it to the mbuf pool. 7474 */ 7475 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7476 kfree(mp); 7477 mboxq->ctx_buf = NULL; 7478 if (unlikely(rc)) { 7479 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7480 "0382 READ_SPARAM command failed " 7481 "status %d, mbxStatus x%x\n", 7482 rc, bf_get(lpfc_mqe_status, mqe)); 7483 phba->link_state = LPFC_HBA_ERROR; 7484 rc = -EIO; 7485 goto out_free_mbox; 7486 } 7487 7488 lpfc_update_vport_wwn(vport); 7489 7490 /* Update the fc_host data structures with new wwn. 
*/ 7491 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7492 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7493 7494 /* Create all the SLI4 queues */ 7495 rc = lpfc_sli4_queue_create(phba); 7496 if (rc) { 7497 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7498 "3089 Failed to allocate queues\n"); 7499 rc = -ENODEV; 7500 goto out_free_mbox; 7501 } 7502 /* Set up all the queues to the device */ 7503 rc = lpfc_sli4_queue_setup(phba); 7504 if (unlikely(rc)) { 7505 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7506 "0381 Error %d during queue setup.\n ", rc); 7507 goto out_stop_timers; 7508 } 7509 /* Initialize the driver internal SLI layer lists. */ 7510 lpfc_sli4_setup(phba); 7511 lpfc_sli4_queue_init(phba); 7512 7513 /* update host els xri-sgl sizes and mappings */ 7514 rc = lpfc_sli4_els_sgl_update(phba); 7515 if (unlikely(rc)) { 7516 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7517 "1400 Failed to update xri-sgl size and " 7518 "mapping: %d\n", rc); 7519 goto out_destroy_queue; 7520 } 7521 7522 /* register the els sgl pool to the port */ 7523 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7524 phba->sli4_hba.els_xri_cnt); 7525 if (unlikely(rc < 0)) { 7526 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7527 "0582 Error %d during els sgl post " 7528 "operation\n", rc); 7529 rc = -ENODEV; 7530 goto out_destroy_queue; 7531 } 7532 phba->sli4_hba.els_xri_cnt = rc; 7533 7534 if (phba->nvmet_support) { 7535 /* update host nvmet xri-sgl sizes and mappings */ 7536 rc = lpfc_sli4_nvmet_sgl_update(phba); 7537 if (unlikely(rc)) { 7538 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7539 "6308 Failed to update nvmet-sgl size " 7540 "and mapping: %d\n", rc); 7541 goto out_destroy_queue; 7542 } 7543 7544 /* register the nvmet sgl pool to the port */ 7545 rc = lpfc_sli4_repost_sgl_list( 7546 phba, 7547 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7548 phba->sli4_hba.nvmet_xri_cnt); 7549 if (unlikely(rc < 0)) { 7550 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7551 "3117 Error %d during nvmet " 7552 "sgl post\n", rc); 7553 rc = -ENODEV; 7554 goto out_destroy_queue; 7555 } 7556 phba->sli4_hba.nvmet_xri_cnt = rc; 7557 7558 cnt = phba->cfg_iocb_cnt * 1024; 7559 /* We need 1 iocbq for every SGL, for IO processing */ 7560 cnt += phba->sli4_hba.nvmet_xri_cnt; 7561 } else { 7562 /* update host common xri-sgl sizes and mappings */ 7563 rc = lpfc_sli4_io_sgl_update(phba); 7564 if (unlikely(rc)) { 7565 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7566 "6082 Failed to update nvme-sgl size " 7567 "and mapping: %d\n", rc); 7568 goto out_destroy_queue; 7569 } 7570 7571 /* register the allocated common sgl pool to the port */ 7572 rc = lpfc_sli4_repost_io_sgl_list(phba); 7573 if (unlikely(rc)) { 7574 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7575 "6116 Error %d during nvme sgl post " 7576 "operation\n", rc); 7577 /* Some NVME buffers were moved to abort nvme list */ 7578 /* A pci function reset will repost them */ 7579 rc = -ENODEV; 7580 goto out_destroy_queue; 7581 } 7582 cnt = phba->cfg_iocb_cnt * 1024; 7583 } 7584 7585 if (!phba->sli.iocbq_lookup) { 7586 /* Initialize and populate the iocb list per host */ 7587 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7588 "2821 initialize iocb list %d total %d\n", 7589 phba->cfg_iocb_cnt, cnt); 7590 rc = lpfc_init_iocb_list(phba, cnt); 7591 if (rc) { 7592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7593 "1413 Failed to init iocb list.\n"); 7594 goto out_destroy_queue; 7595 } 7596 } 7597 
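	/*
	 * Illustrative sizing example (hypothetical values): with
	 * cfg_iocb_cnt == 2 and 512 NVMET XRIs, the nvmet branch above
	 * computes cnt = 2 * 1024 + 512 = 2560 iocbqs, while the
	 * non-nvmet branch computes cnt = 2 * 1024 = 2048.
	 */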
7598 if (phba->nvmet_support) 7599 lpfc_nvmet_create_targetport(phba); 7600 7601 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 7602 /* Post initial buffers to all RQs created */ 7603 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7604 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7605 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7606 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7607 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7608 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7609 rqbp->buffer_count = 0; 7610 7611 lpfc_post_rq_buffer( 7612 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7613 phba->sli4_hba.nvmet_mrq_data[i], 7614 phba->cfg_nvmet_mrq_post, i); 7615 } 7616 } 7617 7618 /* Post the rpi header region to the device. */ 7619 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7620 if (unlikely(rc)) { 7621 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7622 "0393 Error %d during rpi post operation\n", 7623 rc); 7624 rc = -ENODEV; 7625 goto out_destroy_queue; 7626 } 7627 lpfc_sli4_node_prep(phba); 7628 7629 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7630 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7631 /* 7632 * The FC Port needs to register FCFI (index 0) 7633 */ 7634 lpfc_reg_fcfi(phba, mboxq); 7635 mboxq->vport = phba->pport; 7636 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7637 if (rc != MBX_SUCCESS) 7638 goto out_unset_queue; 7639 rc = 0; 7640 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7641 &mboxq->u.mqe.un.reg_fcfi); 7642 } else { 7643 /* We are a NVME Target mode with MRQ > 1 */ 7644 7645 /* First register the FCFI */ 7646 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7647 mboxq->vport = phba->pport; 7648 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7649 if (rc != MBX_SUCCESS) 7650 goto out_unset_queue; 7651 rc = 0; 7652 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7653 &mboxq->u.mqe.un.reg_fcfi_mrq); 7654 7655 /* Next register the MRQs */ 7656 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7657 mboxq->vport = phba->pport; 7658 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7659 if (rc != MBX_SUCCESS) 7660 goto out_unset_queue; 7661 rc = 0; 7662 } 7663 /* Check if the port is configured to be disabled */ 7664 lpfc_sli_read_link_ste(phba); 7665 } 7666 7667 /* Don't post more new bufs if repost already recovered 7668 * the nvme sgls. 
7669 */ 7670 if (phba->nvmet_support == 0) { 7671 if (phba->sli4_hba.io_xri_cnt == 0) { 7672 len = lpfc_new_io_buf( 7673 phba, phba->sli4_hba.io_xri_max); 7674 if (len == 0) { 7675 rc = -ENOMEM; 7676 goto out_unset_queue; 7677 } 7678 7679 if (phba->cfg_xri_rebalancing) 7680 lpfc_create_multixri_pools(phba); 7681 } 7682 } else { 7683 phba->cfg_xri_rebalancing = 0; 7684 } 7685 7686 /* Allow asynchronous mailbox command to go through */ 7687 spin_lock_irq(&phba->hbalock); 7688 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7689 spin_unlock_irq(&phba->hbalock); 7690 7691 /* Post receive buffers to the device */ 7692 lpfc_sli4_rb_setup(phba); 7693 7694 /* Reset HBA FCF states after HBA reset */ 7695 phba->fcf.fcf_flag = 0; 7696 phba->fcf.current_rec.flag = 0; 7697 7698 /* Start the ELS watchdog timer */ 7699 mod_timer(&vport->els_tmofunc, 7700 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7701 7702 /* Start heart beat timer */ 7703 mod_timer(&phba->hb_tmofunc, 7704 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7705 phba->hb_outstanding = 0; 7706 phba->last_completion_time = jiffies; 7707 7708 /* start eq_delay heartbeat */ 7709 if (phba->cfg_auto_imax) 7710 queue_delayed_work(phba->wq, &phba->eq_delay_work, 7711 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 7712 7713 /* Start error attention (ERATT) polling timer */ 7714 mod_timer(&phba->eratt_poll, 7715 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7716 7717 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7718 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7719 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7720 if (!rc) { 7721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7722 "2829 This device supports " 7723 "Advanced Error Reporting (AER)\n"); 7724 spin_lock_irq(&phba->hbalock); 7725 phba->hba_flag |= HBA_AER_ENABLED; 7726 spin_unlock_irq(&phba->hbalock); 7727 } else { 7728 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7729 "2830 This device does not support " 7730 "Advanced Error Reporting (AER)\n"); 7731 phba->cfg_aer_support = 0; 7732 } 7733 rc = 0; 7734 } 7735 7736 /* 7737 * The port is ready, set the host's link state to LINK_DOWN 7738 * in preparation for link interrupts. 
7739 */
7740 spin_lock_irq(&phba->hbalock);
7741 phba->link_state = LPFC_LINK_DOWN;
7742
7743 /* Check if physical ports are trunked */
7744 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
7745 phba->trunk_link.link0.state = LPFC_LINK_DOWN;
7746 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
7747 phba->trunk_link.link1.state = LPFC_LINK_DOWN;
7748 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
7749 phba->trunk_link.link2.state = LPFC_LINK_DOWN;
7750 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
7751 phba->trunk_link.link3.state = LPFC_LINK_DOWN;
7752 spin_unlock_irq(&phba->hbalock);
7753
7754 /* Arm the CQs and then EQs on device */
7755 lpfc_sli4_arm_cqeq_intr(phba);
7756
7757 /* Indicate device interrupt mode */
7758 phba->sli4_hba.intr_enable = 1;
7759
7760 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7761 (phba->hba_flag & LINK_DISABLED)) {
7762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7763 "3103 Adapter Link is disabled.\n");
7764 lpfc_down_link(phba, mboxq);
7765 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7766 if (rc != MBX_SUCCESS) {
7767 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7768 "3104 Adapter failed to issue "
7769 "DOWN_LINK mbox cmd, rc:x%x\n", rc);
7770 goto out_io_buff_free;
7771 }
7772 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7773 /* don't perform init_link on SLI4 FC port loopback test */
7774 if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7775 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7776 if (rc)
7777 goto out_io_buff_free;
7778 }
7779 }
7780 mempool_free(mboxq, phba->mbox_mem_pool);
7781 return rc;
7782 out_io_buff_free:
7783 /* Free allocated IO Buffers */
7784 lpfc_io_free(phba);
7785 out_unset_queue:
7786 /* Unset all the queues set up in this routine on error */
7787 lpfc_sli4_queue_unset(phba);
7788 out_destroy_queue:
7789 lpfc_free_iocb_list(phba);
7790 lpfc_sli4_queue_destroy(phba);
7791 out_stop_timers:
7792 lpfc_stop_hba_timers(phba);
7793 out_free_mbox:
7794 mempool_free(mboxq, phba->mbox_mem_pool);
7795 return rc;
7796 }
7797
7798 /**
7799 * lpfc_mbox_timeout - Timeout call back function for mbox timer
7800 * @t: pointer to the timer_list embedded in the hba structure.
7801 *
7802 * This is the callback function for the mailbox timer. The mailbox
7803 * timer is armed when a new mailbox command is issued and the timer
7804 * is deleted when the mailbox completes. The function is called by
7805 * the kernel timer code when a mailbox does not complete within the
7806 * expected time. This function wakes up the worker thread to
7807 * process the mailbox timeout and returns. All the processing is
7808 * done by the worker thread function lpfc_mbox_timeout_handler.
7809 **/
7810 void
7811 lpfc_mbox_timeout(struct timer_list *t)
7812 {
7813 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
7814 unsigned long iflag;
7815 uint32_t tmo_posted;
7816
7817 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7818 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7819 if (!tmo_posted)
7820 phba->pport->work_port_events |= WORKER_MBOX_TMO;
7821 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7822
7823 if (!tmo_posted)
7824 lpfc_worker_wake_up(phba);
7825 return;
7826 }
7827
7828 /**
7829 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7830 * are pending
7831 * @phba: Pointer to HBA context object.
7832 *
7833 * This function checks if any mailbox completions are present on the mailbox
7834 * completion queue.
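 *
 * A CQE is treated as posted when its valid bit matches the queue's
 * expected qe_valid phase; on ports with the cqav capability the
 * expected phase toggles each time the consumer index wraps to zero,
 * which is why the scan below flips qe_valid when idx returns to 0.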
7835 **/ 7836 static bool 7837 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7838 { 7839 7840 uint32_t idx; 7841 struct lpfc_queue *mcq; 7842 struct lpfc_mcqe *mcqe; 7843 bool pending_completions = false; 7844 uint8_t qe_valid; 7845 7846 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7847 return false; 7848 7849 /* Check for completions on mailbox completion queue */ 7850 7851 mcq = phba->sli4_hba.mbx_cq; 7852 idx = mcq->hba_index; 7853 qe_valid = mcq->qe_valid; 7854 while (bf_get_le32(lpfc_cqe_valid, 7855 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { 7856 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); 7857 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7858 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7859 pending_completions = true; 7860 break; 7861 } 7862 idx = (idx + 1) % mcq->entry_count; 7863 if (mcq->hba_index == idx) 7864 break; 7865 7866 /* if the index wrapped around, toggle the valid bit */ 7867 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7868 qe_valid = (qe_valid) ? 0 : 1; 7869 } 7870 return pending_completions; 7871 7872 } 7873 7874 /** 7875 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7876 * that were missed. 7877 * @phba: Pointer to HBA context object. 7878 * 7879 * For sli4, it is possible to miss an interrupt. As such mbox completions 7880 * maybe missed causing erroneous mailbox timeouts to occur. This function 7881 * checks to see if mbox completions are on the mailbox completion queue 7882 * and will process all the completions associated with the eq for the 7883 * mailbox completion queue. 7884 **/ 7885 static bool 7886 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7887 { 7888 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7889 uint32_t eqidx; 7890 struct lpfc_queue *fpeq = NULL; 7891 struct lpfc_queue *eq; 7892 bool mbox_pending; 7893 7894 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7895 return false; 7896 7897 /* Find the EQ associated with the mbox CQ */ 7898 if (sli4_hba->hdwq) { 7899 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { 7900 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; 7901 if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { 7902 fpeq = eq; 7903 break; 7904 } 7905 } 7906 } 7907 if (!fpeq) 7908 return false; 7909 7910 /* Turn off interrupts from this EQ */ 7911 7912 sli4_hba->sli4_eq_clr_intr(fpeq); 7913 7914 /* Check to see if a mbox completion is pending */ 7915 7916 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7917 7918 /* 7919 * If a mbox completion is pending, process all the events on EQ 7920 * associated with the mbox completion queue (this could include 7921 * mailbox commands, async events, els commands, receive queue data 7922 * and fcp commands) 7923 */ 7924 7925 if (mbox_pending) 7926 /* process and rearm the EQ */ 7927 lpfc_sli4_process_eq(phba, fpeq); 7928 else 7929 /* Always clear and re-arm the EQ */ 7930 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 7931 7932 return mbox_pending; 7933 7934 } 7935 7936 /** 7937 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7938 * @phba: Pointer to HBA context object. 7939 * 7940 * This function is called from worker thread when a mailbox command times out. 7941 * The caller is not required to hold any locks. This function will reset the 7942 * HBA and recover all the pending commands. 
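 * On SLI4 the handler first calls
 * lpfc_sli4_process_missed_mbox_completions() so that a completion that
 * was only missed because of a lost interrupt is reaped instead of
 * triggering a reset; the board is reset only when the mailbox is
 * genuinely stuck.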
7943 **/ 7944 void 7945 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7946 { 7947 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7948 MAILBOX_t *mb = NULL; 7949 7950 struct lpfc_sli *psli = &phba->sli; 7951 7952 /* If the mailbox completed, process the completion and return */ 7953 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7954 return; 7955 7956 if (pmbox != NULL) 7957 mb = &pmbox->u.mb; 7958 /* Check the pmbox pointer first. There is a race condition 7959 * between the mbox timeout handler getting executed in the 7960 * worklist and the mailbox actually completing. When this 7961 * race condition occurs, the mbox_active will be NULL. 7962 */ 7963 spin_lock_irq(&phba->hbalock); 7964 if (pmbox == NULL) { 7965 lpfc_printf_log(phba, KERN_WARNING, 7966 LOG_MBOX | LOG_SLI, 7967 "0353 Active Mailbox cleared - mailbox timeout " 7968 "exiting\n"); 7969 spin_unlock_irq(&phba->hbalock); 7970 return; 7971 } 7972 7973 /* Mbox cmd <mbxCommand> timeout */ 7974 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7975 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7976 mb->mbxCommand, 7977 phba->pport->port_state, 7978 phba->sli.sli_flag, 7979 phba->sli.mbox_active); 7980 spin_unlock_irq(&phba->hbalock); 7981 7982 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7983 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7984 * it to fail all outstanding SCSI IO. 7985 */ 7986 spin_lock_irq(&phba->pport->work_port_lock); 7987 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7988 spin_unlock_irq(&phba->pport->work_port_lock); 7989 spin_lock_irq(&phba->hbalock); 7990 phba->link_state = LPFC_LINK_UNKNOWN; 7991 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7992 spin_unlock_irq(&phba->hbalock); 7993 7994 lpfc_sli_abort_fcp_rings(phba); 7995 7996 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7997 "0345 Resetting board due to mailbox timeout\n"); 7998 7999 /* Reset the HBA device */ 8000 lpfc_reset_hba(phba); 8001 } 8002 8003 /** 8004 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 8005 * @phba: Pointer to HBA context object. 8006 * @pmbox: Pointer to mailbox object. 8007 * @flag: Flag indicating how the mailbox need to be processed. 8008 * 8009 * This function is called by discovery code and HBA management code 8010 * to submit a mailbox command to firmware with SLI-3 interface spec. This 8011 * function gets the hbalock to protect the data structures. 8012 * The mailbox command can be submitted in polling mode, in which case 8013 * this function will wait in a polling loop for the completion of the 8014 * mailbox. 8015 * If the mailbox is submitted in no_wait mode (not polling) the 8016 * function will submit the command and returns immediately without waiting 8017 * for the mailbox completion. The no_wait is supported only when HBA 8018 * is in SLI2/SLI3 mode - interrupts are enabled. 8019 * The SLI interface allows only one mailbox pending at a time. If the 8020 * mailbox is issued in polling mode and there is already a mailbox 8021 * pending, then the function will return an error. If the mailbox is issued 8022 * in NO_WAIT mode and there is a mailbox pending already, the function 8023 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 8024 * The sli layer owns the mailbox object until the completion of mailbox 8025 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 8026 * return codes the caller owns the mailbox command after the return of 8027 * the function. 
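 *
 * Typical polled usage, shown as a minimal sketch (command-specific
 * mailbox preparation and error handling are elided; see the ownership
 * rules above for when the caller may free the mailbox):
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_sparam(phba, mboxq, vport->vpi);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);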
8028 **/ 8029 static int 8030 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 8031 uint32_t flag) 8032 { 8033 MAILBOX_t *mbx; 8034 struct lpfc_sli *psli = &phba->sli; 8035 uint32_t status, evtctr; 8036 uint32_t ha_copy, hc_copy; 8037 int i; 8038 unsigned long timeout; 8039 unsigned long drvr_flag = 0; 8040 uint32_t word0, ldata; 8041 void __iomem *to_slim; 8042 int processing_queue = 0; 8043 8044 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8045 if (!pmbox) { 8046 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8047 /* processing mbox queue from intr_handler */ 8048 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8049 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8050 return MBX_SUCCESS; 8051 } 8052 processing_queue = 1; 8053 pmbox = lpfc_mbox_get(phba); 8054 if (!pmbox) { 8055 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8056 return MBX_SUCCESS; 8057 } 8058 } 8059 8060 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 8061 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 8062 if(!pmbox->vport) { 8063 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8064 lpfc_printf_log(phba, KERN_ERR, 8065 LOG_MBOX | LOG_VPORT, 8066 "1806 Mbox x%x failed. No vport\n", 8067 pmbox->u.mb.mbxCommand); 8068 dump_stack(); 8069 goto out_not_finished; 8070 } 8071 } 8072 8073 /* If the PCI channel is in offline state, do not post mbox. */ 8074 if (unlikely(pci_channel_offline(phba->pcidev))) { 8075 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8076 goto out_not_finished; 8077 } 8078 8079 /* If HBA has a deferred error attention, fail the iocb. */ 8080 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8081 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8082 goto out_not_finished; 8083 } 8084 8085 psli = &phba->sli; 8086 8087 mbx = &pmbox->u.mb; 8088 status = MBX_SUCCESS; 8089 8090 if (phba->link_state == LPFC_HBA_ERROR) { 8091 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8092 8093 /* Mbox command <mbxCommand> cannot issue */ 8094 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8095 "(%d):0311 Mailbox command x%x cannot " 8096 "issue Data: x%x x%x\n", 8097 pmbox->vport ? pmbox->vport->vpi : 0, 8098 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8099 goto out_not_finished; 8100 } 8101 8102 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 8103 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8104 !(hc_copy & HC_MBINT_ENA)) { 8105 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8106 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8107 "(%d):2528 Mailbox command x%x cannot " 8108 "issue Data: x%x x%x\n", 8109 pmbox->vport ? pmbox->vport->vpi : 0, 8110 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8111 goto out_not_finished; 8112 } 8113 } 8114 8115 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8116 /* Polling for a mbox command when another one is already active 8117 * is not allowed in SLI. Also, the driver must have established 8118 * SLI2 mode to queue and process multiple mbox commands. 8119 */ 8120 8121 if (flag & MBX_POLL) { 8122 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8123 8124 /* Mbox command <mbxCommand> cannot issue */ 8125 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8126 "(%d):2529 Mailbox command x%x " 8127 "cannot issue Data: x%x x%x\n", 8128 pmbox->vport ? 
pmbox->vport->vpi : 0, 8129 pmbox->u.mb.mbxCommand, 8130 psli->sli_flag, flag); 8131 goto out_not_finished; 8132 } 8133 8134 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8135 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8136 /* Mbox command <mbxCommand> cannot issue */ 8137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8138 "(%d):2530 Mailbox command x%x " 8139 "cannot issue Data: x%x x%x\n", 8140 pmbox->vport ? pmbox->vport->vpi : 0, 8141 pmbox->u.mb.mbxCommand, 8142 psli->sli_flag, flag); 8143 goto out_not_finished; 8144 } 8145 8146 /* Another mailbox command is still being processed, queue this 8147 * command to be processed later. 8148 */ 8149 lpfc_mbox_put(phba, pmbox); 8150 8151 /* Mbox cmd issue - BUSY */ 8152 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8153 "(%d):0308 Mbox cmd issue - BUSY Data: " 8154 "x%x x%x x%x x%x\n", 8155 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8156 mbx->mbxCommand, 8157 phba->pport ? phba->pport->port_state : 0xff, 8158 psli->sli_flag, flag); 8159 8160 psli->slistat.mbox_busy++; 8161 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8162 8163 if (pmbox->vport) { 8164 lpfc_debugfs_disc_trc(pmbox->vport, 8165 LPFC_DISC_TRC_MBOX_VPORT, 8166 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8167 (uint32_t)mbx->mbxCommand, 8168 mbx->un.varWords[0], mbx->un.varWords[1]); 8169 } 8170 else { 8171 lpfc_debugfs_disc_trc(phba->pport, 8172 LPFC_DISC_TRC_MBOX, 8173 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8174 (uint32_t)mbx->mbxCommand, 8175 mbx->un.varWords[0], mbx->un.varWords[1]); 8176 } 8177 8178 return MBX_BUSY; 8179 } 8180 8181 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8182 8183 /* If we are not polling, we MUST be in SLI2 mode */ 8184 if (flag != MBX_POLL) { 8185 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8186 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8187 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8188 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8189 /* Mbox command <mbxCommand> cannot issue */ 8190 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8191 "(%d):2531 Mailbox command x%x " 8192 "cannot issue Data: x%x x%x\n", 8193 pmbox->vport ? pmbox->vport->vpi : 0, 8194 pmbox->u.mb.mbxCommand, 8195 psli->sli_flag, flag); 8196 goto out_not_finished; 8197 } 8198 /* timeout active mbox command */ 8199 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8200 1000); 8201 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8202 } 8203 8204 /* Mailbox cmd <cmd> issue */ 8205 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8206 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8207 "x%x\n", 8208 pmbox->vport ? pmbox->vport->vpi : 0, 8209 mbx->mbxCommand, 8210 phba->pport ? phba->pport->port_state : 0xff, 8211 psli->sli_flag, flag); 8212 8213 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8214 if (pmbox->vport) { 8215 lpfc_debugfs_disc_trc(pmbox->vport, 8216 LPFC_DISC_TRC_MBOX_VPORT, 8217 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8218 (uint32_t)mbx->mbxCommand, 8219 mbx->un.varWords[0], mbx->un.varWords[1]); 8220 } 8221 else { 8222 lpfc_debugfs_disc_trc(phba->pport, 8223 LPFC_DISC_TRC_MBOX, 8224 "MBOX Send: cmd:x%x mb:x%x x%x", 8225 (uint32_t)mbx->mbxCommand, 8226 mbx->un.varWords[0], mbx->un.varWords[1]); 8227 } 8228 } 8229 8230 psli->slistat.mbox_cmd++; 8231 evtctr = psli->slistat.mbox_event; 8232 8233 /* next set own bit for the adapter and copy over command word */ 8234 mbx->mbxOwner = OWN_CHIP; 8235 8236 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8237 /* Populate mbox extension offset word. 
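		 * (here the offset word receives the byte distance from the
		 * start of the host-resident mailbox to its extension area;
		 * the SLIM path below uses the fixed MAILBOX_HBA_EXT_OFFSET
		 * instead)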
*/ 8238 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8239 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8240 = (uint8_t *)phba->mbox_ext 8241 - (uint8_t *)phba->mbox; 8242 } 8243 8244 /* Copy the mailbox extension data */ 8245 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { 8246 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, 8247 (uint8_t *)phba->mbox_ext, 8248 pmbox->in_ext_byte_len); 8249 } 8250 /* Copy command data to host SLIM area */ 8251 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8252 } else { 8253 /* Populate mbox extension offset word. */ 8254 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8255 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8256 = MAILBOX_HBA_EXT_OFFSET; 8257 8258 /* Copy the mailbox extension data */ 8259 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) 8260 lpfc_memcpy_to_slim(phba->MBslimaddr + 8261 MAILBOX_HBA_EXT_OFFSET, 8262 pmbox->ctx_buf, pmbox->in_ext_byte_len); 8263 8264 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8265 /* copy command data into host mbox for cmpl */ 8266 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8267 MAILBOX_CMD_SIZE); 8268 8269 /* First copy mbox command data to HBA SLIM, skip past first 8270 word */ 8271 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8272 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8273 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8274 8275 /* Next copy over first word, with mbxOwner set */ 8276 ldata = *((uint32_t *)mbx); 8277 to_slim = phba->MBslimaddr; 8278 writel(ldata, to_slim); 8279 readl(to_slim); /* flush */ 8280 8281 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8282 /* switch over to host mailbox */ 8283 psli->sli_flag |= LPFC_SLI_ACTIVE; 8284 } 8285 8286 wmb(); 8287 8288 switch (flag) { 8289 case MBX_NOWAIT: 8290 /* Set up reference to mailbox command */ 8291 psli->mbox_active = pmbox; 8292 /* Interrupt board to do it */ 8293 writel(CA_MBATT, phba->CAregaddr); 8294 readl(phba->CAregaddr); /* flush */ 8295 /* Don't wait for it to finish, just return */ 8296 break; 8297 8298 case MBX_POLL: 8299 /* Set up null reference to mailbox command */ 8300 psli->mbox_active = NULL; 8301 /* Interrupt board to do it */ 8302 writel(CA_MBATT, phba->CAregaddr); 8303 readl(phba->CAregaddr); /* flush */ 8304 8305 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8306 /* First read mbox status word */ 8307 word0 = *((uint32_t *)phba->mbox); 8308 word0 = le32_to_cpu(word0); 8309 } else { 8310 /* First read mbox status word */ 8311 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8312 spin_unlock_irqrestore(&phba->hbalock, 8313 drvr_flag); 8314 goto out_not_finished; 8315 } 8316 } 8317 8318 /* Read the HBA Host Attention Register */ 8319 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8320 spin_unlock_irqrestore(&phba->hbalock, 8321 drvr_flag); 8322 goto out_not_finished; 8323 } 8324 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8325 1000) + jiffies; 8326 i = 0; 8327 /* Wait for command to complete */ 8328 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8329 (!(ha_copy & HA_MBATT) && 8330 (phba->link_state > LPFC_WARM_START))) { 8331 if (time_after(jiffies, timeout)) { 8332 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8333 spin_unlock_irqrestore(&phba->hbalock, 8334 drvr_flag); 8335 goto out_not_finished; 8336 } 8337 8338 /* Check if we took a mbox interrupt while we were 8339 polling */ 8340 if (((word0 & OWN_CHIP) != OWN_CHIP) 8341 && (evtctr != psli->slistat.mbox_event)) 8342 break; 8343 8344 if (i++ > 10) { 8345 spin_unlock_irqrestore(&phba->hbalock, 8346 drvr_flag); 8347 msleep(1); 8348 spin_lock_irqsave(&phba->hbalock, 
drvr_flag); 8349 } 8350 8351 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8352 /* First copy command data */ 8353 word0 = *((uint32_t *)phba->mbox); 8354 word0 = le32_to_cpu(word0); 8355 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8356 MAILBOX_t *slimmb; 8357 uint32_t slimword0; 8358 /* Check real SLIM for any errors */ 8359 slimword0 = readl(phba->MBslimaddr); 8360 slimmb = (MAILBOX_t *) & slimword0; 8361 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8362 && slimmb->mbxStatus) { 8363 psli->sli_flag &= 8364 ~LPFC_SLI_ACTIVE; 8365 word0 = slimword0; 8366 } 8367 } 8368 } else { 8369 /* First copy command data */ 8370 word0 = readl(phba->MBslimaddr); 8371 } 8372 /* Read the HBA Host Attention Register */ 8373 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8374 spin_unlock_irqrestore(&phba->hbalock, 8375 drvr_flag); 8376 goto out_not_finished; 8377 } 8378 } 8379 8380 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8381 /* copy results back to user */ 8382 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8383 MAILBOX_CMD_SIZE); 8384 /* Copy the mailbox extension data */ 8385 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8386 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8387 pmbox->ctx_buf, 8388 pmbox->out_ext_byte_len); 8389 } 8390 } else { 8391 /* First copy command data */ 8392 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8393 MAILBOX_CMD_SIZE); 8394 /* Copy the mailbox extension data */ 8395 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8396 lpfc_memcpy_from_slim( 8397 pmbox->ctx_buf, 8398 phba->MBslimaddr + 8399 MAILBOX_HBA_EXT_OFFSET, 8400 pmbox->out_ext_byte_len); 8401 } 8402 } 8403 8404 writel(HA_MBATT, phba->HAregaddr); 8405 readl(phba->HAregaddr); /* flush */ 8406 8407 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8408 status = mbx->mbxStatus; 8409 } 8410 8411 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8412 return status; 8413 8414 out_not_finished: 8415 if (processing_queue) { 8416 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8417 lpfc_mbox_cmpl_put(phba, pmbox); 8418 } 8419 return MBX_NOT_FINISHED; 8420 } 8421 8422 /** 8423 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8424 * @phba: Pointer to HBA context object. 8425 * 8426 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8427 * the driver internal pending mailbox queue. It will then try to wait out the 8428 * possible outstanding mailbox command before return. 8429 * 8430 * Returns: 8431 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8432 * the outstanding mailbox command timed out. 8433 **/ 8434 static int 8435 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8436 { 8437 struct lpfc_sli *psli = &phba->sli; 8438 int rc = 0; 8439 unsigned long timeout = 0; 8440 8441 /* Mark the asynchronous mailbox command posting as blocked */ 8442 spin_lock_irq(&phba->hbalock); 8443 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8444 /* Determine how long we might wait for the active mailbox 8445 * command to be gracefully completed by firmware. 
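	 * (the wait below is bounded by lpfc_mbox_tmo_val() for the command
	 * that is currently active, converted from seconds to jiffies; if no
	 * command is active, no waiting is done)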
8446 */
8447 if (phba->sli.mbox_active)
8448 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8449 phba->sli.mbox_active) *
8450 1000) + jiffies;
8451 spin_unlock_irq(&phba->hbalock);
8452
8453 /* Make sure the mailbox is really active */
8454 if (timeout)
8455 lpfc_sli4_process_missed_mbox_completions(phba);
8456
8457 /* Wait for the outstanding mailbox command to complete */
8458 while (phba->sli.mbox_active) {
8459 /* Check active mailbox complete status every 2ms */
8460 msleep(2);
8461 if (time_after(jiffies, timeout)) {
8462 /* Timeout, mark the outstanding cmd as not complete */
8463 rc = 1;
8464 break;
8465 }
8466 }
8467
8468 /* Cannot cleanly block the async mailbox command, fail it */
8469 if (rc) {
8470 spin_lock_irq(&phba->hbalock);
8471 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8472 spin_unlock_irq(&phba->hbalock);
8473 }
8474 return rc;
8475 }
8476
8477 /**
8478 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8479 * @phba: Pointer to HBA context object.
8480 *
8481 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8482 * commands from the driver internal pending mailbox queue. It makes sure
8483 * that there is no outstanding mailbox command before resuming posting
8484 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8485 * mailbox command, it will try to wait it out before resuming asynchronous
8486 * mailbox command posting.
8487 **/
8488 static void
8489 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8490 {
8491 struct lpfc_sli *psli = &phba->sli;
8492
8493 spin_lock_irq(&phba->hbalock);
8494 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8495 /* Asynchronous mailbox posting is not blocked, do nothing */
8496 spin_unlock_irq(&phba->hbalock);
8497 return;
8498 }
8499
8500 /* The outstanding synchronous mailbox command is guaranteed to be
8501 * done, either successfully or by timeout; after timing out, the
8502 * outstanding mailbox command is always removed, so just unblock
8503 * posting of async mailbox commands and resume
8504 */
8505 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8506 spin_unlock_irq(&phba->hbalock);
8507
8508 /* wake up worker thread to post asynchronous mailbox command */
8509 lpfc_worker_wake_up(phba);
8510 }
8511
8512 /**
8513 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8514 * @phba: Pointer to HBA context object.
8515 * @mboxq: Pointer to mailbox object.
8516 *
8517 * The function waits for the bootstrap mailbox register ready bit from
8518 * the port for twice the regular mailbox command timeout value.
8519 *
8520 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8521 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8522 **/
8523 static int
8524 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8525 {
8526 uint32_t db_ready;
8527 unsigned long timeout;
8528 struct lpfc_register bmbx_reg;
8529
8530 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8531 * 1000) + jiffies;
8532
8533 do {
8534 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8535 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8536 if (!db_ready)
8537 mdelay(2);
8538
8539 if (time_after(jiffies, timeout))
8540 return MBXERR_ERROR;
8541 } while (!db_ready);
8542
8543 return 0;
8544 }
8545
8546 /**
8547 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8548 * @phba: Pointer to HBA context object.
8549 * @mboxq: Pointer to mailbox object.
8550 *
8551 * The function posts a mailbox to the port.
The mailbox is expected
8552 * to be completely filled in and ready for the port to operate on it.
8553 * This routine executes a synchronous completion operation on the
8554 * mailbox by polling for its completion.
8555 *
8556 * The caller must not be holding any locks when calling this routine.
8557 *
8558 * Returns:
8559 * MBX_SUCCESS - mailbox posted successfully
8560 * Any of the MBX error values.
8561 **/
8562 static int
8563 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8564 {
8565 int rc = MBX_SUCCESS;
8566 unsigned long iflag;
8567 uint32_t mcqe_status;
8568 uint32_t mbx_cmnd;
8569 struct lpfc_sli *psli = &phba->sli;
8570 struct lpfc_mqe *mb = &mboxq->u.mqe;
8571 struct lpfc_bmbx_create *mbox_rgn;
8572 struct dma_address *dma_address;
8573
8574 /*
8575 * Only one mailbox can be active to the bootstrap mailbox region
8576 * at a time and there is no queueing provided.
8577 */
8578 spin_lock_irqsave(&phba->hbalock, iflag);
8579 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8580 spin_unlock_irqrestore(&phba->hbalock, iflag);
8581 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8582 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8583 "cannot issue Data: x%x x%x\n",
8584 mboxq->vport ? mboxq->vport->vpi : 0,
8585 mboxq->u.mb.mbxCommand,
8586 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8587 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8588 psli->sli_flag, MBX_POLL);
8589 return MBXERR_ERROR;
8590 }
8591 /* The server grabs the token and owns it until release */
8592 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8593 phba->sli.mbox_active = mboxq;
8594 spin_unlock_irqrestore(&phba->hbalock, iflag);
8595
8596 /* wait for bootstrap mbox register for readiness */
8597 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8598 if (rc)
8599 goto exit;
8600 /*
8601 * Initialize the bootstrap memory region to avoid stale data areas
8602 * in the mailbox post. Then copy the caller's mailbox contents to
8603 * the bmbx mailbox region.
8604 */
8605 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8606 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8607 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8608 sizeof(struct lpfc_mqe));
8609
8610 /* Post the high mailbox dma address to the port and wait for ready. */
8611 dma_address = &phba->sli4_hba.bmbx.dma_address;
8612 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8613
8614 /* wait for bootstrap mbox register for hi-address write done */
8615 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8616 if (rc)
8617 goto exit;
8618
8619 /* Post the low mailbox dma address to the port. */
8620 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8621
8622 /* wait for bootstrap mbox register for low address write done */
8623 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8624 if (rc)
8625 goto exit;
8626
8627 /*
8628 * Read the CQ to ensure the mailbox has completed.
8629 * If so, update the mailbox status so that the upper layers
8630 * can complete the request normally.
8631 */
8632 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8633 sizeof(struct lpfc_mqe));
8634 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8635 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8636 sizeof(struct lpfc_mcqe));
8637 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8638 /*
8639 * When the CQE status indicates a failure and the mailbox status
8640 * indicates success then copy the CQE status into the mailbox status
8641 * (and prefix it with x4000).
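	 * For example, an MCQE status of 0x2 would be surfaced to the upper
	 * layers as mailbox status 0x4002, i.e. LPFC_MBX_ERROR_RANGE ORed
	 * with the CQE status (illustrative value; the actual status depends
	 * on the failure the port reports).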
8642 */
8643 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
8644 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
8645 bf_set(lpfc_mqe_status, mb,
8646 (LPFC_MBX_ERROR_RANGE | mcqe_status));
8647 rc = MBXERR_ERROR;
8648 } else
8649 lpfc_sli4_swap_str(phba, mboxq);
8650
8651 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8652 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8653 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8654 " x%x x%x CQ: x%x x%x x%x x%x\n",
8655 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8656 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8657 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8658 bf_get(lpfc_mqe_status, mb),
8659 mb->un.mb_words[0], mb->un.mb_words[1],
8660 mb->un.mb_words[2], mb->un.mb_words[3],
8661 mb->un.mb_words[4], mb->un.mb_words[5],
8662 mb->un.mb_words[6], mb->un.mb_words[7],
8663 mb->un.mb_words[8], mb->un.mb_words[9],
8664 mb->un.mb_words[10], mb->un.mb_words[11],
8665 mb->un.mb_words[12], mboxq->mcqe.word0,
8666 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
8667 mboxq->mcqe.trailer);
8668 exit:
8669 /* We are holding the token, no need for the lock when releasing it */
8670 spin_lock_irqsave(&phba->hbalock, iflag);
8671 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8672 phba->sli.mbox_active = NULL;
8673 spin_unlock_irqrestore(&phba->hbalock, iflag);
8674 return rc;
8675 }
8676
8677 /**
8678 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8679 * @phba: Pointer to HBA context object.
8680 * @mboxq: Pointer to mailbox object.
8681 * @flag: Flag indicating how the mailbox needs to be processed.
8682 *
8683 * This function is called by discovery code and HBA management code to submit
8684 * a mailbox command to firmware with SLI-4 interface spec.
8685 *
8686 * Return codes: the caller owns the mailbox command after the return of the
8687 * function.
8688 **/
8689 static int
8690 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8691 uint32_t flag)
8692 {
8693 struct lpfc_sli *psli = &phba->sli;
8694 unsigned long iflags;
8695 int rc;
8696
8697 /* dump from issue mailbox command if setup */
8698 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8699
8700 rc = lpfc_mbox_dev_check(phba);
8701 if (unlikely(rc)) {
8702 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8703 "(%d):2544 Mailbox command x%x (x%x/x%x) "
8704 "cannot issue Data: x%x x%x\n",
8705 mboxq->vport ? mboxq->vport->vpi : 0,
8706 mboxq->u.mb.mbxCommand,
8707 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8708 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8709 psli->sli_flag, flag);
8710 goto out_not_finished;
8711 }
8712
8713 /* Detect polling mode and jump to a handler */
8714 if (!phba->sli4_hba.intr_enable) {
8715 if (flag == MBX_POLL)
8716 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8717 else
8718 rc = -EIO;
8719 if (rc != MBX_SUCCESS)
8720 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8721 "(%d):2541 Mailbox command x%x "
8722 "(x%x/x%x) failure: "
8723 "mqe_sta: x%x mcqe_sta: x%x/x%x "
8724 "Data: x%x x%x\n,",
8725 mboxq->vport ?
mboxq->vport->vpi : 0, 8726 mboxq->u.mb.mbxCommand, 8727 lpfc_sli_config_mbox_subsys_get(phba, 8728 mboxq), 8729 lpfc_sli_config_mbox_opcode_get(phba, 8730 mboxq), 8731 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8732 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8733 bf_get(lpfc_mcqe_ext_status, 8734 &mboxq->mcqe), 8735 psli->sli_flag, flag); 8736 return rc; 8737 } else if (flag == MBX_POLL) { 8738 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8739 "(%d):2542 Try to issue mailbox command " 8740 "x%x (x%x/x%x) synchronously ahead of async " 8741 "mailbox command queue: x%x x%x\n", 8742 mboxq->vport ? mboxq->vport->vpi : 0, 8743 mboxq->u.mb.mbxCommand, 8744 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8745 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8746 psli->sli_flag, flag); 8747 /* Try to block the asynchronous mailbox posting */ 8748 rc = lpfc_sli4_async_mbox_block(phba); 8749 if (!rc) { 8750 /* Successfully blocked, now issue sync mbox cmd */ 8751 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8752 if (rc != MBX_SUCCESS) 8753 lpfc_printf_log(phba, KERN_WARNING, 8754 LOG_MBOX | LOG_SLI, 8755 "(%d):2597 Sync Mailbox command " 8756 "x%x (x%x/x%x) failure: " 8757 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8758 "Data: x%x x%x\n,", 8759 mboxq->vport ? mboxq->vport->vpi : 0, 8760 mboxq->u.mb.mbxCommand, 8761 lpfc_sli_config_mbox_subsys_get(phba, 8762 mboxq), 8763 lpfc_sli_config_mbox_opcode_get(phba, 8764 mboxq), 8765 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8766 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8767 bf_get(lpfc_mcqe_ext_status, 8768 &mboxq->mcqe), 8769 psli->sli_flag, flag); 8770 /* Unblock the async mailbox posting afterward */ 8771 lpfc_sli4_async_mbox_unblock(phba); 8772 } 8773 return rc; 8774 } 8775 8776 /* Now, interrupt mode asynchrous mailbox command */ 8777 rc = lpfc_mbox_cmd_check(phba, mboxq); 8778 if (rc) { 8779 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8780 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8781 "cannot issue Data: x%x x%x\n", 8782 mboxq->vport ? mboxq->vport->vpi : 0, 8783 mboxq->u.mb.mbxCommand, 8784 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8785 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8786 psli->sli_flag, flag); 8787 goto out_not_finished; 8788 } 8789 8790 /* Put the mailbox command to the driver internal FIFO */ 8791 psli->slistat.mbox_busy++; 8792 spin_lock_irqsave(&phba->hbalock, iflags); 8793 lpfc_mbox_put(phba, mboxq); 8794 spin_unlock_irqrestore(&phba->hbalock, iflags); 8795 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8796 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8797 "x%x (x%x/x%x) x%x x%x x%x\n", 8798 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8799 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8800 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8801 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8802 phba->pport->port_state, 8803 psli->sli_flag, MBX_NOWAIT); 8804 /* Wake up worker thread to transport mailbox command from head */ 8805 lpfc_worker_wake_up(phba); 8806 8807 return MBX_BUSY; 8808 8809 out_not_finished: 8810 return MBX_NOT_FINISHED; 8811 } 8812 8813 /** 8814 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8815 * @phba: Pointer to HBA context object. 8816 * 8817 * This function is called by worker thread to send a mailbox command to 8818 * SLI4 HBA firmware. 
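 * In the normal interrupt-mode flow a caller issues the command with
 * lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); lpfc_sli_issue_mbox_s4()
 * enqueues it on the internal FIFO, returns MBX_BUSY and wakes the
 * worker thread, which then invokes this routine to move the command
 * onto the mailbox work queue (a sketch of the flow described above).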
8819 * 8820 **/ 8821 int 8822 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8823 { 8824 struct lpfc_sli *psli = &phba->sli; 8825 LPFC_MBOXQ_t *mboxq; 8826 int rc = MBX_SUCCESS; 8827 unsigned long iflags; 8828 struct lpfc_mqe *mqe; 8829 uint32_t mbx_cmnd; 8830 8831 /* Check interrupt mode before post async mailbox command */ 8832 if (unlikely(!phba->sli4_hba.intr_enable)) 8833 return MBX_NOT_FINISHED; 8834 8835 /* Check for mailbox command service token */ 8836 spin_lock_irqsave(&phba->hbalock, iflags); 8837 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8838 spin_unlock_irqrestore(&phba->hbalock, iflags); 8839 return MBX_NOT_FINISHED; 8840 } 8841 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8842 spin_unlock_irqrestore(&phba->hbalock, iflags); 8843 return MBX_NOT_FINISHED; 8844 } 8845 if (unlikely(phba->sli.mbox_active)) { 8846 spin_unlock_irqrestore(&phba->hbalock, iflags); 8847 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8848 "0384 There is pending active mailbox cmd\n"); 8849 return MBX_NOT_FINISHED; 8850 } 8851 /* Take the mailbox command service token */ 8852 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8853 8854 /* Get the next mailbox command from head of queue */ 8855 mboxq = lpfc_mbox_get(phba); 8856 8857 /* If no more mailbox command waiting for post, we're done */ 8858 if (!mboxq) { 8859 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8860 spin_unlock_irqrestore(&phba->hbalock, iflags); 8861 return MBX_SUCCESS; 8862 } 8863 phba->sli.mbox_active = mboxq; 8864 spin_unlock_irqrestore(&phba->hbalock, iflags); 8865 8866 /* Check device readiness for posting mailbox command */ 8867 rc = lpfc_mbox_dev_check(phba); 8868 if (unlikely(rc)) 8869 /* Driver clean routine will clean up pending mailbox */ 8870 goto out_not_finished; 8871 8872 /* Prepare the mbox command to be posted */ 8873 mqe = &mboxq->u.mqe; 8874 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8875 8876 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8877 mod_timer(&psli->mbox_tmo, (jiffies + 8878 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8879 8880 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8881 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8882 "x%x x%x\n", 8883 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8884 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8885 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8886 phba->pport->port_state, psli->sli_flag); 8887 8888 if (mbx_cmnd != MBX_HEARTBEAT) { 8889 if (mboxq->vport) { 8890 lpfc_debugfs_disc_trc(mboxq->vport, 8891 LPFC_DISC_TRC_MBOX_VPORT, 8892 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8893 mbx_cmnd, mqe->un.mb_words[0], 8894 mqe->un.mb_words[1]); 8895 } else { 8896 lpfc_debugfs_disc_trc(phba->pport, 8897 LPFC_DISC_TRC_MBOX, 8898 "MBOX Send: cmd:x%x mb:x%x x%x", 8899 mbx_cmnd, mqe->un.mb_words[0], 8900 mqe->un.mb_words[1]); 8901 } 8902 } 8903 psli->slistat.mbox_cmd++; 8904 8905 /* Post the mailbox command to the port */ 8906 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8907 if (rc != MBX_SUCCESS) { 8908 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8909 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8910 "cannot issue Data: x%x x%x\n", 8911 mboxq->vport ? 
mboxq->vport->vpi : 0,
8912 mboxq->u.mb.mbxCommand,
8913 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8914 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8915 psli->sli_flag, MBX_NOWAIT);
8916 goto out_not_finished;
8917 }
8918
8919 return rc;
8920
8921 out_not_finished:
8922 spin_lock_irqsave(&phba->hbalock, iflags);
8923 if (phba->sli.mbox_active) {
8924 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8925 __lpfc_mbox_cmpl_put(phba, mboxq);
8926 /* Release the token */
8927 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8928 phba->sli.mbox_active = NULL;
8929 }
8930 spin_unlock_irqrestore(&phba->hbalock, iflags);
8931
8932 return MBX_NOT_FINISHED;
8933 }
8934
8935 /**
8936 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8937 * @phba: Pointer to HBA context object.
8938 * @pmbox: Pointer to mailbox object.
8939 * @flag: Flag indicating how the mailbox needs to be processed.
8940 *
8941 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine through
8942 * the API jump table function pointer in the lpfc_hba struct.
8943 *
8944 * Return codes: the caller owns the mailbox command after the return of the
8945 * function.
8946 **/
8947 int
8948 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8949 {
8950 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8951 }
8952
8953 /**
8954 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8955 * @phba: The hba struct for which this call is being executed.
8956 * @dev_grp: The HBA PCI-Device group number.
8957 *
8958 * This routine sets up the mbox interface API function jump table in @phba
8959 * struct.
8960 * Returns: 0 - success, -ENODEV - failure.
8961 **/
8962 int
8963 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8964 {
8965
8966 switch (dev_grp) {
8967 case LPFC_PCI_DEV_LP:
8968 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8969 phba->lpfc_sli_handle_slow_ring_event =
8970 lpfc_sli_handle_slow_ring_event_s3;
8971 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8972 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8973 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8974 break;
8975 case LPFC_PCI_DEV_OC:
8976 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8977 phba->lpfc_sli_handle_slow_ring_event =
8978 lpfc_sli_handle_slow_ring_event_s4;
8979 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8980 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8981 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8982 break;
8983 default:
8984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8985 "1420 Invalid HBA PCI-device group: 0x%x\n",
8986 dev_grp);
8987 return -ENODEV;
8988 break;
8989 }
8990 return 0;
8991 }
8992
8993 /**
8994 * __lpfc_sli_ringtx_put - Add an iocb to the txq
8995 * @phba: Pointer to HBA context object.
8996 * @pring: Pointer to driver SLI ring object.
8997 * @piocb: Pointer to the command iocb to add to the txq.
8998 *
8999 * This function is called with hbalock held to add a command
9000 * iocb to the txq when SLI layer cannot submit the command iocb
9001 * to the ring.
9002 **/
9003 void
9004 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9005 struct lpfc_iocbq *piocb)
9006 {
9007 lockdep_assert_held(&phba->hbalock);
9008 /* Insert the caller's iocb in the txq tail for later processing. */
9009 list_add_tail(&piocb->list, &pring->txq);
9010 }
9011
9012 /**
9013 * lpfc_sli_next_iocb - Get the next iocb in the txq
9014 * @phba: Pointer to HBA context object.
9015 * @pring: Pointer to driver SLI ring object. 9016 * @piocb: Pointer to address of newly added command iocb. 9017 * 9018 * This function is called with hbalock held before a new 9019 * iocb is submitted to the firmware. This function checks 9020 * txq to flush the iocbs in txq to Firmware before 9021 * submitting new iocbs to the Firmware. 9022 * If there are iocbs in the txq which need to be submitted 9023 * to firmware, lpfc_sli_next_iocb returns the first element 9024 * of the txq after dequeuing it from txq. 9025 * If there is no iocb in the txq then the function will return 9026 * *piocb and *piocb is set to NULL. Caller needs to check 9027 * *piocb to find if there are more commands in the txq. 9028 **/ 9029 static struct lpfc_iocbq * 9030 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9031 struct lpfc_iocbq **piocb) 9032 { 9033 struct lpfc_iocbq * nextiocb; 9034 9035 lockdep_assert_held(&phba->hbalock); 9036 9037 nextiocb = lpfc_sli_ringtx_get(phba, pring); 9038 if (!nextiocb) { 9039 nextiocb = *piocb; 9040 *piocb = NULL; 9041 } 9042 9043 return nextiocb; 9044 } 9045 9046 /** 9047 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 9048 * @phba: Pointer to HBA context object. 9049 * @ring_number: SLI ring number to issue iocb on. 9050 * @piocb: Pointer to command iocb. 9051 * @flag: Flag indicating if this command can be put into txq. 9052 * 9053 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 9054 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 9055 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 9056 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 9057 * this function allows only iocbs for posting buffers. This function finds 9058 * next available slot in the command ring and posts the command to the 9059 * available slot and writes the port attention register to request HBA start 9060 * processing new iocb. If there is no slot available in the ring and 9061 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 9062 * the function returns IOCB_BUSY. 9063 * 9064 * This function is called with hbalock held. The function will return success 9065 * after it successfully submit the iocb to firmware or after adding to the 9066 * txq. 9067 **/ 9068 static int 9069 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 9070 struct lpfc_iocbq *piocb, uint32_t flag) 9071 { 9072 struct lpfc_iocbq *nextiocb; 9073 IOCB_t *iocb; 9074 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 9075 9076 lockdep_assert_held(&phba->hbalock); 9077 9078 if (piocb->iocb_cmpl && (!piocb->vport) && 9079 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 9080 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 9081 lpfc_printf_log(phba, KERN_ERR, 9082 LOG_SLI | LOG_VPORT, 9083 "1807 IOCB x%x failed. No vport\n", 9084 piocb->iocb.ulpCommand); 9085 dump_stack(); 9086 return IOCB_ERROR; 9087 } 9088 9089 9090 /* If the PCI channel is in offline state, do not post iocbs. */ 9091 if (unlikely(pci_channel_offline(phba->pcidev))) 9092 return IOCB_ERROR; 9093 9094 /* If HBA has a deferred error attention, fail the iocb. 
*/
9095 if (unlikely(phba->hba_flag & DEFER_ERATT))
9096 return IOCB_ERROR;
9097
9098 /*
9099 * We should never get an IOCB if we are in a < LINK_DOWN state
9100 */
9101 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9102 return IOCB_ERROR;
9103
9104 /*
9105 * Check to see if we are blocking IOCB processing because of an
9106 * outstanding event.
9107 */
9108 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9109 goto iocb_busy;
9110
9111 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9112 /*
9113 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9114 * can be issued if the link is not up.
9115 */
9116 switch (piocb->iocb.ulpCommand) {
9117 case CMD_GEN_REQUEST64_CR:
9118 case CMD_GEN_REQUEST64_CX:
9119 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9120 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9121 FC_RCTL_DD_UNSOL_CMD) ||
9122 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9123 MENLO_TRANSPORT_TYPE))
9124
9125 goto iocb_busy;
9126 break;
9127 case CMD_QUE_RING_BUF_CN:
9128 case CMD_QUE_RING_BUF64_CN:
9129 /*
9130 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9131 * completion, iocb_cmpl MUST be 0.
9132 */
9133 if (piocb->iocb_cmpl)
9134 piocb->iocb_cmpl = NULL;
9135 /*FALLTHROUGH*/
9136 case CMD_CREATE_XRI_CR:
9137 case CMD_CLOSE_XRI_CN:
9138 case CMD_CLOSE_XRI_CX:
9139 break;
9140 default:
9141 goto iocb_busy;
9142 }
9143
9144 /*
9145 * For FCP commands, we must be in a state where we can process link
9146 * attention events.
9147 */
9148 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9149 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9150 goto iocb_busy;
9151 }
9152
9153 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9154 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9155 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9156
9157 if (iocb)
9158 lpfc_sli_update_ring(phba, pring);
9159 else
9160 lpfc_sli_update_full_ring(phba, pring);
9161
9162 if (!piocb)
9163 return IOCB_SUCCESS;
9164
9165 goto out_busy;
9166
9167 iocb_busy:
9168 pring->stats.iocb_cmd_delay++;
9169
9170 out_busy:
9171
9172 if (!(flag & SLI_IOCB_RET_IOCB)) {
9173 __lpfc_sli_ringtx_put(phba, pring, piocb);
9174 return IOCB_SUCCESS;
9175 }
9176
9177 return IOCB_BUSY;
9178 }
9179
9180 /**
9181 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9182 * @phba: Pointer to HBA context object.
9183 * @piocbq: Pointer to command iocb.
9184 * @sglq: Pointer to the scatter gather queue object.
9185 *
9186 * This routine converts the bpl or bde that is in the IOCB
9187 * to a sgl list for the sli4 hardware. The physical address
9188 * of the bpl/bde is converted back to a virtual address.
9189 * If the IOCB contains a BPL then the list of BDEs is
9190 * converted to sli4_sge entries. If the IOCB contains a single
9191 * BDE then it is converted to a single sli4_sge.
9192 * The IOCB is still in cpu endianness so the contents of
9193 * the bpl can be used without byte swapping.
9194 *
9195 * Returns valid XRI = Success, NO_XRI = Failure.
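*
* For example, a CMD_GEN_REQUEST64_CR carrying a BPL of three BDEs is
* converted into three sli4_sge entries; the last-SGE bit is set only on
* the final entry, and the SGE offsets are accumulated separately for the
* request and reply portions of the list.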
9196 **/ 9197 static uint16_t 9198 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 9199 struct lpfc_sglq *sglq) 9200 { 9201 uint16_t xritag = NO_XRI; 9202 struct ulp_bde64 *bpl = NULL; 9203 struct ulp_bde64 bde; 9204 struct sli4_sge *sgl = NULL; 9205 struct lpfc_dmabuf *dmabuf; 9206 IOCB_t *icmd; 9207 int numBdes = 0; 9208 int i = 0; 9209 uint32_t offset = 0; /* accumulated offset in the sg request list */ 9210 int inbound = 0; /* number of sg reply entries inbound from firmware */ 9211 9212 if (!piocbq || !sglq) 9213 return xritag; 9214 9215 sgl = (struct sli4_sge *)sglq->sgl; 9216 icmd = &piocbq->iocb; 9217 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 9218 return sglq->sli4_xritag; 9219 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9220 numBdes = icmd->un.genreq64.bdl.bdeSize / 9221 sizeof(struct ulp_bde64); 9222 /* The addrHigh and addrLow fields within the IOCB 9223 * have not been byteswapped yet so there is no 9224 * need to swap them back. 9225 */ 9226 if (piocbq->context3) 9227 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 9228 else 9229 return xritag; 9230 9231 bpl = (struct ulp_bde64 *)dmabuf->virt; 9232 if (!bpl) 9233 return xritag; 9234 9235 for (i = 0; i < numBdes; i++) { 9236 /* Should already be byte swapped. */ 9237 sgl->addr_hi = bpl->addrHigh; 9238 sgl->addr_lo = bpl->addrLow; 9239 9240 sgl->word2 = le32_to_cpu(sgl->word2); 9241 if ((i+1) == numBdes) 9242 bf_set(lpfc_sli4_sge_last, sgl, 1); 9243 else 9244 bf_set(lpfc_sli4_sge_last, sgl, 0); 9245 /* swap the size field back to the cpu so we 9246 * can assign it to the sgl. 9247 */ 9248 bde.tus.w = le32_to_cpu(bpl->tus.w); 9249 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 9250 /* The offsets in the sgl need to be accumulated 9251 * separately for the request and reply lists. 9252 * The request is always first, the reply follows. 9253 */ 9254 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 9255 /* add up the reply sg entries */ 9256 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 9257 inbound++; 9258 /* first inbound? reset the offset */ 9259 if (inbound == 1) 9260 offset = 0; 9261 bf_set(lpfc_sli4_sge_offset, sgl, offset); 9262 bf_set(lpfc_sli4_sge_type, sgl, 9263 LPFC_SGE_TYPE_DATA); 9264 offset += bde.tus.f.bdeSize; 9265 } 9266 sgl->word2 = cpu_to_le32(sgl->word2); 9267 bpl++; 9268 sgl++; 9269 } 9270 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 9271 /* The addrHigh and addrLow fields of the BDE have not 9272 * been byteswapped yet so they need to be swapped 9273 * before putting them in the sgl. 9274 */ 9275 sgl->addr_hi = 9276 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 9277 sgl->addr_lo = 9278 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 9279 sgl->word2 = le32_to_cpu(sgl->word2); 9280 bf_set(lpfc_sli4_sge_last, sgl, 1); 9281 sgl->word2 = cpu_to_le32(sgl->word2); 9282 sgl->sge_len = 9283 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 9284 } 9285 return sglq->sli4_xritag; 9286 } 9287 9288 /** 9289 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 9290 * @phba: Pointer to HBA context object. 9291 * @piocb: Pointer to command iocb. 9292 * @wqe: Pointer to the work queue entry. 9293 * 9294 * This routine converts the iocb command to its Work Queue Entry 9295 * equivalent. The wqe pointer should not have any fields set when 9296 * this routine is called because it will memcpy over them. 9297 * This routine does not set the CQ_ID or the WQEC bits in the 9298 * wqe. 9299 * 9300 * Returns: 0 = Success, IOCB_ERROR = Failure. 
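*
* A minimal usage sketch (mirroring __lpfc_sli_issue_iocb_s4 further below,
* where wq is the work queue already selected for this iocb):
*
*	union lpfc_wqe128 wqe;
*
*	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
*		return IOCB_ERROR;
*	if (lpfc_sli4_wq_put(wq, &wqe))
*		return IOCB_ERROR;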
9301 **/ 9302 static int 9303 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9304 union lpfc_wqe128 *wqe) 9305 { 9306 uint32_t xmit_len = 0, total_len = 0; 9307 uint8_t ct = 0; 9308 uint32_t fip; 9309 uint32_t abort_tag; 9310 uint8_t command_type = ELS_COMMAND_NON_FIP; 9311 uint8_t cmnd; 9312 uint16_t xritag; 9313 uint16_t abrt_iotag; 9314 struct lpfc_iocbq *abrtiocbq; 9315 struct ulp_bde64 *bpl = NULL; 9316 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9317 int numBdes, i; 9318 struct ulp_bde64 bde; 9319 struct lpfc_nodelist *ndlp; 9320 uint32_t *pcmd; 9321 uint32_t if_type; 9322 9323 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9324 /* The fcp commands will set command type */ 9325 if (iocbq->iocb_flag & LPFC_IO_FCP) 9326 command_type = FCP_COMMAND; 9327 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9328 command_type = ELS_COMMAND_FIP; 9329 else 9330 command_type = ELS_COMMAND_NON_FIP; 9331 9332 if (phba->fcp_embed_io) 9333 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9334 /* Some of the fields are in the right position already */ 9335 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9336 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 9337 /* The ct field has moved so reset */ 9338 wqe->generic.wqe_com.word7 = 0; 9339 wqe->generic.wqe_com.word10 = 0; 9340 } 9341 9342 abort_tag = (uint32_t) iocbq->iotag; 9343 xritag = iocbq->sli4_xritag; 9344 /* words0-2 bpl convert bde */ 9345 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9346 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9347 sizeof(struct ulp_bde64); 9348 bpl = (struct ulp_bde64 *) 9349 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9350 if (!bpl) 9351 return IOCB_ERROR; 9352 9353 /* Should already be byte swapped. */ 9354 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9355 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9356 /* swap the size field back to the cpu so we 9357 * can assign it to the sgl. 
9358 */ 9359 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9360 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9361 total_len = 0; 9362 for (i = 0; i < numBdes; i++) { 9363 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9364 total_len += bde.tus.f.bdeSize; 9365 } 9366 } else 9367 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9368 9369 iocbq->iocb.ulpIoTag = iocbq->iotag; 9370 cmnd = iocbq->iocb.ulpCommand; 9371 9372 switch (iocbq->iocb.ulpCommand) { 9373 case CMD_ELS_REQUEST64_CR: 9374 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9375 ndlp = iocbq->context_un.ndlp; 9376 else 9377 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9378 if (!iocbq->iocb.ulpLe) { 9379 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9380 "2007 Only Limited Edition cmd Format" 9381 " supported 0x%x\n", 9382 iocbq->iocb.ulpCommand); 9383 return IOCB_ERROR; 9384 } 9385 9386 wqe->els_req.payload_len = xmit_len; 9387 /* Els_reguest64 has a TMO */ 9388 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9389 iocbq->iocb.ulpTimeout); 9390 /* Need a VF for word 4 set the vf bit*/ 9391 bf_set(els_req64_vf, &wqe->els_req, 0); 9392 /* And a VFID for word 12 */ 9393 bf_set(els_req64_vfid, &wqe->els_req, 0); 9394 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9395 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9396 iocbq->iocb.ulpContext); 9397 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9398 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9399 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9400 if (command_type == ELS_COMMAND_FIP) 9401 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9402 >> LPFC_FIP_ELS_ID_SHIFT); 9403 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9404 iocbq->context2)->virt); 9405 if_type = bf_get(lpfc_sli_intf_if_type, 9406 &phba->sli4_hba.sli_intf); 9407 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9408 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9409 *pcmd == ELS_CMD_SCR || 9410 *pcmd == ELS_CMD_RSCN_XMT || 9411 *pcmd == ELS_CMD_FDISC || 9412 *pcmd == ELS_CMD_LOGO || 9413 *pcmd == ELS_CMD_PLOGI)) { 9414 bf_set(els_req64_sp, &wqe->els_req, 1); 9415 bf_set(els_req64_sid, &wqe->els_req, 9416 iocbq->vport->fc_myDID); 9417 if ((*pcmd == ELS_CMD_FLOGI) && 9418 !(phba->fc_topology == 9419 LPFC_TOPOLOGY_LOOP)) 9420 bf_set(els_req64_sid, &wqe->els_req, 0); 9421 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9422 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9423 phba->vpi_ids[iocbq->vport->vpi]); 9424 } else if (pcmd && iocbq->context1) { 9425 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9426 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9427 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9428 } 9429 } 9430 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9431 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9432 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9433 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9434 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9435 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9436 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9437 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9438 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9439 break; 9440 case CMD_XMIT_SEQUENCE64_CX: 9441 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9442 iocbq->iocb.un.ulpWord[3]); 9443 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9444 iocbq->iocb.unsli3.rcvsli3.ox_id); 9445 /* The entire sequence is transmitted for this IOCB */ 9446 xmit_len = total_len; 9447 cmnd = CMD_XMIT_SEQUENCE64_CR; 9448 if (phba->link_flag & LS_LOOPBACK_MODE) 9449 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9450 /* fall through */ 
9451 case CMD_XMIT_SEQUENCE64_CR: 9452 /* word3 iocb=io_tag32 wqe=reserved */ 9453 wqe->xmit_sequence.rsvd3 = 0; 9454 /* word4 relative_offset memcpy */ 9455 /* word5 r_ctl/df_ctl memcpy */ 9456 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9457 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9458 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9459 LPFC_WQE_IOD_WRITE); 9460 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9461 LPFC_WQE_LENLOC_WORD12); 9462 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9463 wqe->xmit_sequence.xmit_len = xmit_len; 9464 command_type = OTHER_COMMAND; 9465 break; 9466 case CMD_XMIT_BCAST64_CN: 9467 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9468 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9469 /* word4 iocb=rsvd wqe=rsvd */ 9470 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9471 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9472 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9473 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9474 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9475 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9476 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9477 LPFC_WQE_LENLOC_WORD3); 9478 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9479 break; 9480 case CMD_FCP_IWRITE64_CR: 9481 command_type = FCP_COMMAND_DATA_OUT; 9482 /* word3 iocb=iotag wqe=payload_offset_len */ 9483 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9484 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9485 xmit_len + sizeof(struct fcp_rsp)); 9486 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9487 0); 9488 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9489 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9490 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9491 iocbq->iocb.ulpFCP2Rcvy); 9492 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9493 /* Always open the exchange */ 9494 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9495 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9496 LPFC_WQE_LENLOC_WORD4); 9497 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9498 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9499 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9500 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9501 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9502 if (iocbq->priority) { 9503 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9504 (iocbq->priority << 1)); 9505 } else { 9506 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9507 (phba->cfg_XLanePriority << 1)); 9508 } 9509 } 9510 /* Note, word 10 is already initialized to 0 */ 9511 9512 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9513 if (phba->cfg_enable_pbde) 9514 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9515 else 9516 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9517 9518 if (phba->fcp_embed_io) { 9519 struct lpfc_io_buf *lpfc_cmd; 9520 struct sli4_sge *sgl; 9521 struct fcp_cmnd *fcp_cmnd; 9522 uint32_t *ptr; 9523 9524 /* 128 byte wqe support here */ 9525 9526 lpfc_cmd = iocbq->context1; 9527 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9528 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9529 9530 /* Word 0-2 - FCP_CMND */ 9531 wqe->generic.bde.tus.f.bdeFlags = 9532 BUFF_TYPE_BDE_IMMED; 9533 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9534 wqe->generic.bde.addrHigh = 0; 9535 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9536 9537 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9538 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9539 9540 /* Word 22-29 FCP CMND Payload */ 9541 ptr = &wqe->words[22]; 
9542 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9543 } 9544 break; 9545 case CMD_FCP_IREAD64_CR: 9546 /* word3 iocb=iotag wqe=payload_offset_len */ 9547 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9548 bf_set(payload_offset_len, &wqe->fcp_iread, 9549 xmit_len + sizeof(struct fcp_rsp)); 9550 bf_set(cmd_buff_len, &wqe->fcp_iread, 9551 0); 9552 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9553 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9554 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9555 iocbq->iocb.ulpFCP2Rcvy); 9556 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9557 /* Always open the exchange */ 9558 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9559 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9560 LPFC_WQE_LENLOC_WORD4); 9561 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9562 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9563 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9564 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9565 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9566 if (iocbq->priority) { 9567 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9568 (iocbq->priority << 1)); 9569 } else { 9570 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9571 (phba->cfg_XLanePriority << 1)); 9572 } 9573 } 9574 /* Note, word 10 is already initialized to 0 */ 9575 9576 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9577 if (phba->cfg_enable_pbde) 9578 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9579 else 9580 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9581 9582 if (phba->fcp_embed_io) { 9583 struct lpfc_io_buf *lpfc_cmd; 9584 struct sli4_sge *sgl; 9585 struct fcp_cmnd *fcp_cmnd; 9586 uint32_t *ptr; 9587 9588 /* 128 byte wqe support here */ 9589 9590 lpfc_cmd = iocbq->context1; 9591 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9592 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9593 9594 /* Word 0-2 - FCP_CMND */ 9595 wqe->generic.bde.tus.f.bdeFlags = 9596 BUFF_TYPE_BDE_IMMED; 9597 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9598 wqe->generic.bde.addrHigh = 0; 9599 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9600 9601 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9602 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9603 9604 /* Word 22-29 FCP CMND Payload */ 9605 ptr = &wqe->words[22]; 9606 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9607 } 9608 break; 9609 case CMD_FCP_ICMND64_CR: 9610 /* word3 iocb=iotag wqe=payload_offset_len */ 9611 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9612 bf_set(payload_offset_len, &wqe->fcp_icmd, 9613 xmit_len + sizeof(struct fcp_rsp)); 9614 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9615 0); 9616 /* word3 iocb=IO_TAG wqe=reserved */ 9617 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9618 /* Always open the exchange */ 9619 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9620 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9621 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9622 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9623 LPFC_WQE_LENLOC_NONE); 9624 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9625 iocbq->iocb.ulpFCP2Rcvy); 9626 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9627 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9628 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9629 if (iocbq->priority) { 9630 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9631 (iocbq->priority << 1)); 9632 } else { 9633 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9634 (phba->cfg_XLanePriority << 1)); 9635 } 9636 } 9637 /* Note, word 10 is already initialized to 0 */ 9638 9639 if (phba->fcp_embed_io) { 9640 struct lpfc_io_buf 
*lpfc_cmd; 9641 struct sli4_sge *sgl; 9642 struct fcp_cmnd *fcp_cmnd; 9643 uint32_t *ptr; 9644 9645 /* 128 byte wqe support here */ 9646 9647 lpfc_cmd = iocbq->context1; 9648 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9649 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9650 9651 /* Word 0-2 - FCP_CMND */ 9652 wqe->generic.bde.tus.f.bdeFlags = 9653 BUFF_TYPE_BDE_IMMED; 9654 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9655 wqe->generic.bde.addrHigh = 0; 9656 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9657 9658 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9659 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9660 9661 /* Word 22-29 FCP CMND Payload */ 9662 ptr = &wqe->words[22]; 9663 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9664 } 9665 break; 9666 case CMD_GEN_REQUEST64_CR: 9667 /* For this command calculate the xmit length of the 9668 * request bde. 9669 */ 9670 xmit_len = 0; 9671 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9672 sizeof(struct ulp_bde64); 9673 for (i = 0; i < numBdes; i++) { 9674 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9675 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9676 break; 9677 xmit_len += bde.tus.f.bdeSize; 9678 } 9679 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9680 wqe->gen_req.request_payload_len = xmit_len; 9681 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9682 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9683 /* word6 context tag copied in memcpy */ 9684 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9685 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9687 "2015 Invalid CT %x command 0x%x\n", 9688 ct, iocbq->iocb.ulpCommand); 9689 return IOCB_ERROR; 9690 } 9691 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9692 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9693 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9694 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9695 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9696 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9697 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9698 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9699 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9700 command_type = OTHER_COMMAND; 9701 break; 9702 case CMD_XMIT_ELS_RSP64_CX: 9703 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9704 /* words0-2 BDE memcpy */ 9705 /* word3 iocb=iotag32 wqe=response_payload_len */ 9706 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9707 /* word4 */ 9708 wqe->xmit_els_rsp.word4 = 0; 9709 /* word5 iocb=rsvd wge=did */ 9710 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9711 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9712 9713 if_type = bf_get(lpfc_sli_intf_if_type, 9714 &phba->sli4_hba.sli_intf); 9715 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9716 if (iocbq->vport->fc_flag & FC_PT2PT) { 9717 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9718 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9719 iocbq->vport->fc_myDID); 9720 if (iocbq->vport->fc_myDID == Fabric_DID) { 9721 bf_set(wqe_els_did, 9722 &wqe->xmit_els_rsp.wqe_dest, 0); 9723 } 9724 } 9725 } 9726 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9727 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9728 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9729 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9730 iocbq->iocb.unsli3.rcvsli3.ox_id); 9731 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9732 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9733 phba->vpi_ids[iocbq->vport->vpi]); 9734 bf_set(wqe_dbde, 
&wqe->xmit_els_rsp.wqe_com, 1); 9735 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9736 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9737 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9738 LPFC_WQE_LENLOC_WORD3); 9739 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9740 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9741 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9742 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9743 iocbq->context2)->virt); 9744 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9745 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9746 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9747 iocbq->vport->fc_myDID); 9748 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9749 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9750 phba->vpi_ids[phba->pport->vpi]); 9751 } 9752 command_type = OTHER_COMMAND; 9753 break; 9754 case CMD_CLOSE_XRI_CN: 9755 case CMD_ABORT_XRI_CN: 9756 case CMD_ABORT_XRI_CX: 9757 /* words 0-2 memcpy should be 0 rserved */ 9758 /* port will send abts */ 9759 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9760 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9761 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9762 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9763 } else 9764 fip = 0; 9765 9766 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9767 /* 9768 * The link is down, or the command was ELS_FIP 9769 * so the fw does not need to send abts 9770 * on the wire. 9771 */ 9772 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9773 else 9774 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9775 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9776 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9777 wqe->abort_cmd.rsrvd5 = 0; 9778 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9779 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9780 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9781 /* 9782 * The abort handler will send us CMD_ABORT_XRI_CN or 9783 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9784 */ 9785 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9786 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9787 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9788 LPFC_WQE_LENLOC_NONE); 9789 cmnd = CMD_ABORT_XRI_CX; 9790 command_type = OTHER_COMMAND; 9791 xritag = 0; 9792 break; 9793 case CMD_XMIT_BLS_RSP64_CX: 9794 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9795 /* As BLS ABTS RSP WQE is very different from other WQEs, 9796 * we re-construct this WQE here based on information in 9797 * iocbq from scratch. 9798 */ 9799 memset(wqe, 0, sizeof(union lpfc_wqe)); 9800 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9801 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9802 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9803 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9804 LPFC_ABTS_UNSOL_INT) { 9805 /* ABTS sent by initiator to CT exchange, the 9806 * RX_ID field will be filled with the newly 9807 * allocated responder XRI. 9808 */ 9809 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9810 iocbq->sli4_xritag); 9811 } else { 9812 /* ABTS sent by responder to CT exchange, the 9813 * RX_ID field will be filled with the responder 9814 * RX_ID from ABTS. 
9815 */
9816 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9817 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9818 }
9819 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9820 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9821
9822 /* Use CT=VPI */
9823 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9824 ndlp->nlp_DID);
9825 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9826 iocbq->iocb.ulpContext);
9827 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9828 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9829 phba->vpi_ids[phba->pport->vpi]);
9830 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9831 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9832 LPFC_WQE_LENLOC_NONE);
9833 /* Overwrite the pre-set command type with OTHER_COMMAND */
9834 command_type = OTHER_COMMAND;
9835 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9836 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9837 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9838 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9839 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9840 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9841 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9842 }
9843
9844 break;
9845 case CMD_SEND_FRAME:
9846 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9848 return 0;
9849 case CMD_XRI_ABORTED_CX:
9850 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9851 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9852 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9853 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9854 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9855 default:
9856 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9857 "2014 Invalid command 0x%x\n",
9858 iocbq->iocb.ulpCommand);
9859 return IOCB_ERROR;
9860 break;
9861 }
9862
9863 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9864 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9865 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9866 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9867 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9868 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9869 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9870 LPFC_IO_DIF_INSERT);
9871 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9872 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9873 wqe->generic.wqe_com.abort_tag = abort_tag;
9874 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9875 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9876 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9877 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9878 return 0;
9879 }
9880
9881 /**
9882 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9883 * @phba: Pointer to HBA context object.
9884 * @ring_number: SLI ring number to issue iocb on.
9885 * @piocb: Pointer to command iocb.
9886 * @flag: Flag indicating if this command can be put into txq.
9887 *
9888 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9889 * an iocb command to an HBA with SLI-4 interface spec.
9890 *
9891 * This function is called with the ring lock held. The function returns
9892 * success after it successfully submits the iocb to firmware or after
9893 * adding it to the txq.
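*
* Return codes (from the body below): IOCB_SUCCESS when the iocb is queued
* to the txq, 0 when the WQE is successfully posted to the work queue,
* IOCB_BUSY when no resources are available and SLI_IOCB_RET_IOCB is set,
* and IOCB_ERROR when sglq lookup, BPL-to-SGL conversion, WQE conversion,
* or the WQ post fails.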
9894 **/
9895 static int
9896 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9897 struct lpfc_iocbq *piocb, uint32_t flag)
9898 {
9899 struct lpfc_sglq *sglq;
9900 union lpfc_wqe128 wqe;
9901 struct lpfc_queue *wq;
9902 struct lpfc_sli_ring *pring;
9903
9904 /* Get the WQ */
9905 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9906 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9907 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
9908 } else {
9909 wq = phba->sli4_hba.els_wq;
9910 }
9911
9912 /* Get corresponding ring */
9913 pring = wq->pring;
9914
9915 /*
9916 * The WQE can be either 64 or 128 bytes.
9917 */
9918
9919 lockdep_assert_held(&pring->ring_lock);
9920
9921 if (piocb->sli4_xritag == NO_XRI) {
9922 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9923 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9924 sglq = NULL;
9925 else {
9926 if (!list_empty(&pring->txq)) {
9927 if (!(flag & SLI_IOCB_RET_IOCB)) {
9928 __lpfc_sli_ringtx_put(phba,
9929 pring, piocb);
9930 return IOCB_SUCCESS;
9931 } else {
9932 return IOCB_BUSY;
9933 }
9934 } else {
9935 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9936 if (!sglq) {
9937 if (!(flag & SLI_IOCB_RET_IOCB)) {
9938 __lpfc_sli_ringtx_put(phba,
9939 pring,
9940 piocb);
9941 return IOCB_SUCCESS;
9942 } else
9943 return IOCB_BUSY;
9944 }
9945 }
9946 }
9947 } else if (piocb->iocb_flag & LPFC_IO_FCP)
9948 /* These IOs already have an XRI and a mapped sgl. */
9949 sglq = NULL;
9950 else {
9951 /*
9952 * This is a continuation of a command (CX), so this
9953 * sglq is on the active list
9954 */
9955 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9956 if (!sglq)
9957 return IOCB_ERROR;
9958 }
9959
9960 if (sglq) {
9961 piocb->sli4_lxritag = sglq->sli4_lxritag;
9962 piocb->sli4_xritag = sglq->sli4_xritag;
9963 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9964 return IOCB_ERROR;
9965 }
9966
9967 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
9968 return IOCB_ERROR;
9969
9970 if (lpfc_sli4_wq_put(wq, &wqe))
9971 return IOCB_ERROR;
9972 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9973
9974 return 0;
9975 }
9976
9977 /**
9978 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9979 *
9980 * This routine wraps the actual lockless version for issuing an IOCB through
9981 * the function pointer in the lpfc_hba struct.
9982 *
9983 * Return codes:
9984 * IOCB_ERROR - Error
9985 * IOCB_SUCCESS - Success
9986 * IOCB_BUSY - Busy
9987 **/
9988 int
9989 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9990 struct lpfc_iocbq *piocb, uint32_t flag)
9991 {
9992 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9993 }
9994
9995 /**
9996 * lpfc_sli_api_table_setup - Set up sli api function jump table
9997 * @phba: The hba struct for which this call is being executed.
9998 * @dev_grp: The HBA PCI-Device group number.
9999 *
10000 * This routine sets up the SLI interface API function jump table in @phba
10001 * struct.
10002 * Returns: 0 - success, -ENODEV - failure.
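*
* Once installed, the per-interface routine is reached through the wrapper
* above; e.g. __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag) simply
* dereferences phba->__lpfc_sli_issue_iocb.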
10003 **/
10004 int
10005 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10006 {
10007
10008 switch (dev_grp) {
10009 case LPFC_PCI_DEV_LP:
10010 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10011 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10012 break;
10013 case LPFC_PCI_DEV_OC:
10014 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10015 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10016 break;
10017 default:
10018 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10019 "1419 Invalid HBA PCI-device group: 0x%x\n",
10020 dev_grp);
10021 return -ENODEV;
10022 break;
10023 }
10024 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10025 return 0;
10026 }
10027
10028 /**
10029 * lpfc_sli4_calc_ring - Calculates which ring to use
10030 * @phba: Pointer to HBA context object.
10031 * @piocb: Pointer to command iocb.
10032 *
10033 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10034 * hba_wqidx, so we need to calculate the corresponding ring.
10035 * Since ABORTS must go on the same WQ as the command they are
10036 * aborting, we use the command's hba_wqidx.
10037 */
10038 struct lpfc_sli_ring *
10039 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10040 {
10041 struct lpfc_io_buf *lpfc_cmd;
10042
10043 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10044 if (unlikely(!phba->sli4_hba.hdwq))
10045 return NULL;
10046 /*
10047 * for abort iocb hba_wqidx should already
10048 * be setup based on what work queue we used.
10049 */
10050 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10051 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10052 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10053 }
10054 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
10055 } else {
10056 if (unlikely(!phba->sli4_hba.els_wq))
10057 return NULL;
10058 piocb->hba_wqidx = 0;
10059 return phba->sli4_hba.els_wq->pring;
10060 }
10061 }
10062
10063 /**
10064 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10065 * @phba: Pointer to HBA context object.
10066 * @ring_number: SLI ring number to issue the iocb on.
10067 * @piocb: Pointer to command iocb.
10068 * @flag: Flag indicating if this command can be put into txq.
10069 *
10070 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. It takes
10071 * the appropriate lock (the ring_lock for SLI4, the hbalock for SLI2/3),
10072 * calls __lpfc_sli_issue_iocb, and returns whatever error code
10073 * __lpfc_sli_issue_iocb returns. This wrapper is used by
10074 * functions which do not already hold the lock.
10075 **/
10076 int
10077 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10078 struct lpfc_iocbq *piocb, uint32_t flag)
10079 {
10080 struct lpfc_sli_ring *pring;
10081 unsigned long iflags;
10082 int rc;
10083
10084 if (phba->sli_rev == LPFC_SLI_REV4) {
10085 pring = lpfc_sli4_calc_ring(phba, piocb);
10086 if (unlikely(pring == NULL))
10087 return IOCB_ERROR;
10088
10089 spin_lock_irqsave(&pring->ring_lock, iflags);
10090 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10091 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10092 } else {
10093 /* For now, SLI2/3 will still use hbalock */
10094 spin_lock_irqsave(&phba->hbalock, iflags);
10095 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10096 spin_unlock_irqrestore(&phba->hbalock, iflags);
10097 }
10098 return rc;
10099 }
10100
10101 /**
10102 * lpfc_extra_ring_setup - Extra ring setup function
10103 * @phba: Pointer to HBA context object.
10104 * 10105 * This function is called while driver attaches with the 10106 * HBA to setup the extra ring. The extra ring is used 10107 * only when driver needs to support target mode functionality 10108 * or IP over FC functionalities. 10109 * 10110 * This function is called with no lock held. SLI3 only. 10111 **/ 10112 static int 10113 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10114 { 10115 struct lpfc_sli *psli; 10116 struct lpfc_sli_ring *pring; 10117 10118 psli = &phba->sli; 10119 10120 /* Adjust cmd/rsp ring iocb entries more evenly */ 10121 10122 /* Take some away from the FCP ring */ 10123 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10124 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10125 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10126 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10127 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10128 10129 /* and give them to the extra ring */ 10130 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10131 10132 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10133 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10134 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10135 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10136 10137 /* Setup default profile for this ring */ 10138 pring->iotag_max = 4096; 10139 pring->num_mask = 1; 10140 pring->prt[0].profile = 0; /* Mask 0 */ 10141 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10142 pring->prt[0].type = phba->cfg_multi_ring_type; 10143 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10144 return 0; 10145 } 10146 10147 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 10148 * @phba: Pointer to HBA context object. 10149 * @iocbq: Pointer to iocb object. 10150 * 10151 * The async_event handler calls this routine when it receives 10152 * an ASYNC_STATUS_CN event from the port. The port generates 10153 * this event when an Abort Sequence request to an rport fails 10154 * twice in succession. The abort could be originated by the 10155 * driver or by the port. The ABTS could have been for an ELS 10156 * or FCP IO. The port only generates this event when an ABTS 10157 * fails to complete after one retry. 10158 */ 10159 static void 10160 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 10161 struct lpfc_iocbq *iocbq) 10162 { 10163 struct lpfc_nodelist *ndlp = NULL; 10164 uint16_t rpi = 0, vpi = 0; 10165 struct lpfc_vport *vport = NULL; 10166 10167 /* The rpi in the ulpContext is vport-sensitive. */ 10168 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 10169 rpi = iocbq->iocb.ulpContext; 10170 10171 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10172 "3092 Port generated ABTS async event " 10173 "on vpi %d rpi %d status 0x%x\n", 10174 vpi, rpi, iocbq->iocb.ulpStatus); 10175 10176 vport = lpfc_find_vport_by_vpid(phba, vpi); 10177 if (!vport) 10178 goto err_exit; 10179 ndlp = lpfc_findnode_rpi(vport, rpi); 10180 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 10181 goto err_exit; 10182 10183 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 10184 lpfc_sli_abts_recover_port(vport, ndlp); 10185 return; 10186 10187 err_exit: 10188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10189 "3095 Event Context not found, no " 10190 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 10191 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 10192 vpi, rpi); 10193 } 10194 10195 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 10196 * @phba: pointer to HBA context object. 
10197 * @ndlp: nodelist pointer for the impacted rport. 10198 * @axri: pointer to the wcqe containing the failed exchange. 10199 * 10200 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 10201 * port. The port generates this event when an abort exchange request to an 10202 * rport fails twice in succession with no reply. The abort could be originated 10203 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 10204 */ 10205 void 10206 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 10207 struct lpfc_nodelist *ndlp, 10208 struct sli4_wcqe_xri_aborted *axri) 10209 { 10210 struct lpfc_vport *vport; 10211 uint32_t ext_status = 0; 10212 10213 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 10214 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10215 "3115 Node Context not found, driver " 10216 "ignoring abts err event\n"); 10217 return; 10218 } 10219 10220 vport = ndlp->vport; 10221 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10222 "3116 Port generated FCP XRI ABORT event on " 10223 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 10224 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 10225 bf_get(lpfc_wcqe_xa_xri, axri), 10226 bf_get(lpfc_wcqe_xa_status, axri), 10227 axri->parameter); 10228 10229 /* 10230 * Catch the ABTS protocol failure case. Older OCe FW releases returned 10231 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 10232 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 10233 */ 10234 ext_status = axri->parameter & IOERR_PARAM_MASK; 10235 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 10236 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 10237 lpfc_sli_abts_recover_port(vport, ndlp); 10238 } 10239 10240 /** 10241 * lpfc_sli_async_event_handler - ASYNC iocb handler function 10242 * @phba: Pointer to HBA context object. 10243 * @pring: Pointer to driver SLI ring object. 10244 * @iocbq: Pointer to iocb object. 10245 * 10246 * This function is called by the slow ring event handler 10247 * function when there is an ASYNC event iocb in the ring. 10248 * This function is called with no lock held. 10249 * Currently this function handles only temperature related 10250 * ASYNC events. The function decodes the temperature sensor 10251 * event message and posts events for the management applications. 10252 **/ 10253 static void 10254 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 10255 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 10256 { 10257 IOCB_t *icmd; 10258 uint16_t evt_code; 10259 struct temp_event temp_event_data; 10260 struct Scsi_Host *shost; 10261 uint32_t *iocb_w; 10262 10263 icmd = &iocbq->iocb; 10264 evt_code = icmd->un.asyncstat.evt_code; 10265 10266 switch (evt_code) { 10267 case ASYNC_TEMP_WARN: 10268 case ASYNC_TEMP_SAFE: 10269 temp_event_data.data = (uint32_t) icmd->ulpContext; 10270 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10271 if (evt_code == ASYNC_TEMP_WARN) { 10272 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10273 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10274 "0347 Adapter is very hot, please take " 10275 "corrective action. temperature : %d Celsius\n", 10276 (uint32_t) icmd->ulpContext); 10277 } else { 10278 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10279 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10280 "0340 Adapter temperature is OK now. 
" 10281 "temperature : %d Celsius\n", 10282 (uint32_t) icmd->ulpContext); 10283 } 10284 10285 /* Send temperature change event to applications */ 10286 shost = lpfc_shost_from_vport(phba->pport); 10287 fc_host_post_vendor_event(shost, fc_get_event_number(), 10288 sizeof(temp_event_data), (char *) &temp_event_data, 10289 LPFC_NL_VENDOR_ID); 10290 break; 10291 case ASYNC_STATUS_CN: 10292 lpfc_sli_abts_err_handler(phba, iocbq); 10293 break; 10294 default: 10295 iocb_w = (uint32_t *) icmd; 10296 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10297 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10298 " evt_code 0x%x\n" 10299 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 10300 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 10301 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 10302 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 10303 pring->ringno, icmd->un.asyncstat.evt_code, 10304 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 10305 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 10306 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 10307 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 10308 10309 break; 10310 } 10311 } 10312 10313 10314 /** 10315 * lpfc_sli4_setup - SLI ring setup function 10316 * @phba: Pointer to HBA context object. 10317 * 10318 * lpfc_sli_setup sets up rings of the SLI interface with 10319 * number of iocbs per ring and iotags. This function is 10320 * called while driver attach to the HBA and before the 10321 * interrupts are enabled. So there is no need for locking. 10322 * 10323 * This function always returns 0. 10324 **/ 10325 int 10326 lpfc_sli4_setup(struct lpfc_hba *phba) 10327 { 10328 struct lpfc_sli_ring *pring; 10329 10330 pring = phba->sli4_hba.els_wq->pring; 10331 pring->num_mask = LPFC_MAX_RING_MASK; 10332 pring->prt[0].profile = 0; /* Mask 0 */ 10333 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10334 pring->prt[0].type = FC_TYPE_ELS; 10335 pring->prt[0].lpfc_sli_rcv_unsol_event = 10336 lpfc_els_unsol_event; 10337 pring->prt[1].profile = 0; /* Mask 1 */ 10338 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10339 pring->prt[1].type = FC_TYPE_ELS; 10340 pring->prt[1].lpfc_sli_rcv_unsol_event = 10341 lpfc_els_unsol_event; 10342 pring->prt[2].profile = 0; /* Mask 2 */ 10343 /* NameServer Inquiry */ 10344 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10345 /* NameServer */ 10346 pring->prt[2].type = FC_TYPE_CT; 10347 pring->prt[2].lpfc_sli_rcv_unsol_event = 10348 lpfc_ct_unsol_event; 10349 pring->prt[3].profile = 0; /* Mask 3 */ 10350 /* NameServer response */ 10351 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10352 /* NameServer */ 10353 pring->prt[3].type = FC_TYPE_CT; 10354 pring->prt[3].lpfc_sli_rcv_unsol_event = 10355 lpfc_ct_unsol_event; 10356 return 0; 10357 } 10358 10359 /** 10360 * lpfc_sli_setup - SLI ring setup function 10361 * @phba: Pointer to HBA context object. 10362 * 10363 * lpfc_sli_setup sets up rings of the SLI interface with 10364 * number of iocbs per ring and iotags. This function is 10365 * called while driver attach to the HBA and before the 10366 * interrupts are enabled. So there is no need for locking. 10367 * 10368 * This function always returns 0. SLI3 only. 
10369 **/ 10370 int 10371 lpfc_sli_setup(struct lpfc_hba *phba) 10372 { 10373 int i, totiocbsize = 0; 10374 struct lpfc_sli *psli = &phba->sli; 10375 struct lpfc_sli_ring *pring; 10376 10377 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10378 psli->sli_flag = 0; 10379 10380 psli->iocbq_lookup = NULL; 10381 psli->iocbq_lookup_len = 0; 10382 psli->last_iotag = 0; 10383 10384 for (i = 0; i < psli->num_rings; i++) { 10385 pring = &psli->sli3_ring[i]; 10386 switch (i) { 10387 case LPFC_FCP_RING: /* ring 0 - FCP */ 10388 /* numCiocb and numRiocb are used in config_port */ 10389 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10390 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10391 pring->sli.sli3.numCiocb += 10392 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10393 pring->sli.sli3.numRiocb += 10394 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10395 pring->sli.sli3.numCiocb += 10396 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10397 pring->sli.sli3.numRiocb += 10398 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10399 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10400 SLI3_IOCB_CMD_SIZE : 10401 SLI2_IOCB_CMD_SIZE; 10402 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10403 SLI3_IOCB_RSP_SIZE : 10404 SLI2_IOCB_RSP_SIZE; 10405 pring->iotag_ctr = 0; 10406 pring->iotag_max = 10407 (phba->cfg_hba_queue_depth * 2); 10408 pring->fast_iotag = pring->iotag_max; 10409 pring->num_mask = 0; 10410 break; 10411 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10412 /* numCiocb and numRiocb are used in config_port */ 10413 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10414 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10415 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10416 SLI3_IOCB_CMD_SIZE : 10417 SLI2_IOCB_CMD_SIZE; 10418 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10419 SLI3_IOCB_RSP_SIZE : 10420 SLI2_IOCB_RSP_SIZE; 10421 pring->iotag_max = phba->cfg_hba_queue_depth; 10422 pring->num_mask = 0; 10423 break; 10424 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10425 /* numCiocb and numRiocb are used in config_port */ 10426 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10427 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10428 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10429 SLI3_IOCB_CMD_SIZE : 10430 SLI2_IOCB_CMD_SIZE; 10431 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
				SLI3_IOCB_RSP_SIZE :
				SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and returns no
 * value.
10493 **/ 10494 void 10495 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10496 { 10497 struct lpfc_sli *psli; 10498 struct lpfc_sli_ring *pring; 10499 int i; 10500 10501 psli = &phba->sli; 10502 spin_lock_irq(&phba->hbalock); 10503 INIT_LIST_HEAD(&psli->mboxq); 10504 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10505 /* Initialize list headers for txq and txcmplq as double linked lists */ 10506 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10507 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 10508 pring->flag = 0; 10509 pring->ringno = LPFC_FCP_RING; 10510 pring->txcmplq_cnt = 0; 10511 INIT_LIST_HEAD(&pring->txq); 10512 INIT_LIST_HEAD(&pring->txcmplq); 10513 INIT_LIST_HEAD(&pring->iocb_continueq); 10514 spin_lock_init(&pring->ring_lock); 10515 } 10516 pring = phba->sli4_hba.els_wq->pring; 10517 pring->flag = 0; 10518 pring->ringno = LPFC_ELS_RING; 10519 pring->txcmplq_cnt = 0; 10520 INIT_LIST_HEAD(&pring->txq); 10521 INIT_LIST_HEAD(&pring->txcmplq); 10522 INIT_LIST_HEAD(&pring->iocb_continueq); 10523 spin_lock_init(&pring->ring_lock); 10524 10525 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10526 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10527 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; 10528 pring->flag = 0; 10529 pring->ringno = LPFC_FCP_RING; 10530 pring->txcmplq_cnt = 0; 10531 INIT_LIST_HEAD(&pring->txq); 10532 INIT_LIST_HEAD(&pring->txcmplq); 10533 INIT_LIST_HEAD(&pring->iocb_continueq); 10534 spin_lock_init(&pring->ring_lock); 10535 } 10536 pring = phba->sli4_hba.nvmels_wq->pring; 10537 pring->flag = 0; 10538 pring->ringno = LPFC_ELS_RING; 10539 pring->txcmplq_cnt = 0; 10540 INIT_LIST_HEAD(&pring->txq); 10541 INIT_LIST_HEAD(&pring->txcmplq); 10542 INIT_LIST_HEAD(&pring->iocb_continueq); 10543 spin_lock_init(&pring->ring_lock); 10544 } 10545 10546 spin_unlock_irq(&phba->hbalock); 10547 } 10548 10549 /** 10550 * lpfc_sli_queue_init - Queue initialization function 10551 * @phba: Pointer to HBA context object. 10552 * 10553 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10554 * ring. This function also initializes ring indices of each ring. 10555 * This function is called during the initialization of the SLI 10556 * interface of an HBA. 10557 * This function is called with no lock held and always returns 10558 * 1. 10559 **/ 10560 void 10561 lpfc_sli_queue_init(struct lpfc_hba *phba) 10562 { 10563 struct lpfc_sli *psli; 10564 struct lpfc_sli_ring *pring; 10565 int i; 10566 10567 psli = &phba->sli; 10568 spin_lock_irq(&phba->hbalock); 10569 INIT_LIST_HEAD(&psli->mboxq); 10570 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10571 /* Initialize list headers for txq and txcmplq as double linked lists */ 10572 for (i = 0; i < psli->num_rings; i++) { 10573 pring = &psli->sli3_ring[i]; 10574 pring->ringno = i; 10575 pring->sli.sli3.next_cmdidx = 0; 10576 pring->sli.sli3.local_getidx = 0; 10577 pring->sli.sli3.cmdidx = 0; 10578 INIT_LIST_HEAD(&pring->iocb_continueq); 10579 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10580 INIT_LIST_HEAD(&pring->postbufq); 10581 pring->flag = 0; 10582 INIT_LIST_HEAD(&pring->txq); 10583 INIT_LIST_HEAD(&pring->txcmplq); 10584 spin_lock_init(&pring->ring_lock); 10585 } 10586 spin_unlock_irq(&phba->hbalock); 10587 } 10588 10589 /** 10590 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10591 * @phba: Pointer to HBA context object. 10592 * 10593 * This routine flushes the mailbox command subsystem. 
It will unconditionally 10594 * flush all the mailbox commands in the three possible stages in the mailbox 10595 * command sub-system: pending mailbox command queue; the outstanding mailbox 10596 * command; and completed mailbox command queue. It is caller's responsibility 10597 * to make sure that the driver is in the proper state to flush the mailbox 10598 * command sub-system. Namely, the posting of mailbox commands into the 10599 * pending mailbox command queue from the various clients must be stopped; 10600 * either the HBA is in a state that it will never works on the outstanding 10601 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10602 * mailbox command has been completed. 10603 **/ 10604 static void 10605 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10606 { 10607 LIST_HEAD(completions); 10608 struct lpfc_sli *psli = &phba->sli; 10609 LPFC_MBOXQ_t *pmb; 10610 unsigned long iflag; 10611 10612 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10613 local_bh_disable(); 10614 10615 /* Flush all the mailbox commands in the mbox system */ 10616 spin_lock_irqsave(&phba->hbalock, iflag); 10617 10618 /* The pending mailbox command queue */ 10619 list_splice_init(&phba->sli.mboxq, &completions); 10620 /* The outstanding active mailbox command */ 10621 if (psli->mbox_active) { 10622 list_add_tail(&psli->mbox_active->list, &completions); 10623 psli->mbox_active = NULL; 10624 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10625 } 10626 /* The completed mailbox command queue */ 10627 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10628 spin_unlock_irqrestore(&phba->hbalock, iflag); 10629 10630 /* Enable softirqs again, done with phba->hbalock */ 10631 local_bh_enable(); 10632 10633 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10634 while (!list_empty(&completions)) { 10635 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10636 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10637 if (pmb->mbox_cmpl) 10638 pmb->mbox_cmpl(phba, pmb); 10639 } 10640 } 10641 10642 /** 10643 * lpfc_sli_host_down - Vport cleanup function 10644 * @vport: Pointer to virtual port object. 10645 * 10646 * lpfc_sli_host_down is called to clean up the resources 10647 * associated with a vport before destroying virtual 10648 * port data structures. 10649 * This function does following operations: 10650 * - Free discovery resources associated with this virtual 10651 * port. 10652 * - Free iocbs associated with this virtual port in 10653 * the txq. 10654 * - Send abort for all iocb commands associated with this 10655 * vport in txcmplq. 10656 * 10657 * This function is called with no lock held and always returns 1. 10658 **/ 10659 int 10660 lpfc_sli_host_down(struct lpfc_vport *vport) 10661 { 10662 LIST_HEAD(completions); 10663 struct lpfc_hba *phba = vport->phba; 10664 struct lpfc_sli *psli = &phba->sli; 10665 struct lpfc_queue *qp = NULL; 10666 struct lpfc_sli_ring *pring; 10667 struct lpfc_iocbq *iocb, *next_iocb; 10668 int i; 10669 unsigned long flags = 0; 10670 uint16_t prev_pring_flag; 10671 10672 lpfc_cleanup_discovery_resources(vport); 10673 10674 spin_lock_irqsave(&phba->hbalock, flags); 10675 10676 /* 10677 * Error everything on the txq since these iocbs 10678 * have not been given to the FW yet. 
10679 * Also issue ABTS for everything on the txcmplq 10680 */ 10681 if (phba->sli_rev != LPFC_SLI_REV4) { 10682 for (i = 0; i < psli->num_rings; i++) { 10683 pring = &psli->sli3_ring[i]; 10684 prev_pring_flag = pring->flag; 10685 /* Only slow rings */ 10686 if (pring->ringno == LPFC_ELS_RING) { 10687 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10688 /* Set the lpfc data pending flag */ 10689 set_bit(LPFC_DATA_READY, &phba->data_flags); 10690 } 10691 list_for_each_entry_safe(iocb, next_iocb, 10692 &pring->txq, list) { 10693 if (iocb->vport != vport) 10694 continue; 10695 list_move_tail(&iocb->list, &completions); 10696 } 10697 list_for_each_entry_safe(iocb, next_iocb, 10698 &pring->txcmplq, list) { 10699 if (iocb->vport != vport) 10700 continue; 10701 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10702 } 10703 pring->flag = prev_pring_flag; 10704 } 10705 } else { 10706 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10707 pring = qp->pring; 10708 if (!pring) 10709 continue; 10710 if (pring == phba->sli4_hba.els_wq->pring) { 10711 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10712 /* Set the lpfc data pending flag */ 10713 set_bit(LPFC_DATA_READY, &phba->data_flags); 10714 } 10715 prev_pring_flag = pring->flag; 10716 spin_lock_irq(&pring->ring_lock); 10717 list_for_each_entry_safe(iocb, next_iocb, 10718 &pring->txq, list) { 10719 if (iocb->vport != vport) 10720 continue; 10721 list_move_tail(&iocb->list, &completions); 10722 } 10723 spin_unlock_irq(&pring->ring_lock); 10724 list_for_each_entry_safe(iocb, next_iocb, 10725 &pring->txcmplq, list) { 10726 if (iocb->vport != vport) 10727 continue; 10728 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10729 } 10730 pring->flag = prev_pring_flag; 10731 } 10732 } 10733 spin_unlock_irqrestore(&phba->hbalock, flags); 10734 10735 /* Cancel all the IOCBs from the completions list */ 10736 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10737 IOERR_SLI_DOWN); 10738 return 1; 10739 } 10740 10741 /** 10742 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10743 * @phba: Pointer to HBA context object. 10744 * 10745 * This function cleans up all iocb, buffers, mailbox commands 10746 * while shutting down the HBA. This function is called with no 10747 * lock held and always returns 1. 10748 * This function does the following to cleanup driver resources: 10749 * - Free discovery resources for each virtual port 10750 * - Cleanup any pending fabric iocbs 10751 * - Iterate through the iocb txq and free each entry 10752 * in the list. 10753 * - Free up any buffer posted to the HBA 10754 * - Free mailbox commands in the mailbox queue. 10755 **/ 10756 int 10757 lpfc_sli_hba_down(struct lpfc_hba *phba) 10758 { 10759 LIST_HEAD(completions); 10760 struct lpfc_sli *psli = &phba->sli; 10761 struct lpfc_queue *qp = NULL; 10762 struct lpfc_sli_ring *pring; 10763 struct lpfc_dmabuf *buf_ptr; 10764 unsigned long flags = 0; 10765 int i; 10766 10767 /* Shutdown the mailbox command sub-system */ 10768 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10769 10770 lpfc_hba_down_prep(phba); 10771 10772 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10773 local_bh_disable(); 10774 10775 lpfc_fabric_abort_hba(phba); 10776 10777 spin_lock_irqsave(&phba->hbalock, flags); 10778 10779 /* 10780 * Error everything on the txq since these iocbs 10781 * have not been given to the FW yet. 
10782 */ 10783 if (phba->sli_rev != LPFC_SLI_REV4) { 10784 for (i = 0; i < psli->num_rings; i++) { 10785 pring = &psli->sli3_ring[i]; 10786 /* Only slow rings */ 10787 if (pring->ringno == LPFC_ELS_RING) { 10788 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10789 /* Set the lpfc data pending flag */ 10790 set_bit(LPFC_DATA_READY, &phba->data_flags); 10791 } 10792 list_splice_init(&pring->txq, &completions); 10793 } 10794 } else { 10795 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10796 pring = qp->pring; 10797 if (!pring) 10798 continue; 10799 spin_lock_irq(&pring->ring_lock); 10800 list_splice_init(&pring->txq, &completions); 10801 spin_unlock_irq(&pring->ring_lock); 10802 if (pring == phba->sli4_hba.els_wq->pring) { 10803 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10804 /* Set the lpfc data pending flag */ 10805 set_bit(LPFC_DATA_READY, &phba->data_flags); 10806 } 10807 } 10808 } 10809 spin_unlock_irqrestore(&phba->hbalock, flags); 10810 10811 /* Cancel all the IOCBs from the completions list */ 10812 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10813 IOERR_SLI_DOWN); 10814 10815 spin_lock_irqsave(&phba->hbalock, flags); 10816 list_splice_init(&phba->elsbuf, &completions); 10817 phba->elsbuf_cnt = 0; 10818 phba->elsbuf_prev_cnt = 0; 10819 spin_unlock_irqrestore(&phba->hbalock, flags); 10820 10821 while (!list_empty(&completions)) { 10822 list_remove_head(&completions, buf_ptr, 10823 struct lpfc_dmabuf, list); 10824 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10825 kfree(buf_ptr); 10826 } 10827 10828 /* Enable softirqs again, done with phba->hbalock */ 10829 local_bh_enable(); 10830 10831 /* Return any active mbox cmds */ 10832 del_timer_sync(&psli->mbox_tmo); 10833 10834 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10835 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10836 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10837 10838 return 1; 10839 } 10840 10841 /** 10842 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10843 * @srcp: Source memory pointer. 10844 * @destp: Destination memory pointer. 10845 * @cnt: Number of words required to be copied. 10846 * 10847 * This function is used for copying data between driver memory 10848 * and the SLI memory. This function also changes the endianness 10849 * of each word if native endianness is different from SLI 10850 * endianness. This function can be called with or without 10851 * lock. 10852 **/ 10853 void 10854 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10855 { 10856 uint32_t *src = srcp; 10857 uint32_t *dest = destp; 10858 uint32_t ldata; 10859 int i; 10860 10861 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10862 ldata = *src; 10863 ldata = le32_to_cpu(ldata); 10864 *dest = ldata; 10865 src++; 10866 dest++; 10867 } 10868 } 10869 10870 10871 /** 10872 * lpfc_sli_bemem_bcopy - SLI memory copy function 10873 * @srcp: Source memory pointer. 10874 * @destp: Destination memory pointer. 10875 * @cnt: Number of words required to be copied. 10876 * 10877 * This function is used for copying data between a data structure 10878 * with big endian representation to local endianness. 10879 * This function can be called with or without lock. 
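 *
 * A minimal usage sketch (illustrative only, with a hypothetical source
 * buffer @be_words holding big endian data):
 *
 *	uint32_t cpu_words[4];
 *
 *	lpfc_sli_bemem_bcopy(be_words, cpu_words, sizeof(cpu_words));
 *
 * Note that @cnt is walked as a byte count in sizeof(uint32_t) sized steps,
 * so the full byte length of the object is passed, not the word count.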
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	 * up later
	 */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are kept on the
 * pring->postbufq list. After the HBA DMAs data to such a buffer, a
 * CMD_IOCB_RET_XRI64_CX iocb is posted to the response ring with the tag
 * of the buffer. This function searches the pring->postbufq list using the
 * tag to find the buffer associated with the CMD_IOCB_RET_XRI64_CX iocb.
 * If the buffer is found, the lpfc_dmabuf object of the buffer is returned
 * to the caller, otherwise NULL is returned.
 * This function is called with no lock held.
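 *
 * A sketch of the expected pairing with the two routines above
 * (hypothetical caller, for illustration only):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... later, when the CMD_IOCB_RET_XRI64_CX completion arrives ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, returned_tag);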
10960 **/ 10961 struct lpfc_dmabuf * 10962 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10963 uint32_t tag) 10964 { 10965 struct lpfc_dmabuf *mp, *next_mp; 10966 struct list_head *slp = &pring->postbufq; 10967 10968 /* Search postbufq, from the beginning, looking for a match on tag */ 10969 spin_lock_irq(&phba->hbalock); 10970 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10971 if (mp->buffer_tag == tag) { 10972 list_del_init(&mp->list); 10973 pring->postbufq_cnt--; 10974 spin_unlock_irq(&phba->hbalock); 10975 return mp; 10976 } 10977 } 10978 10979 spin_unlock_irq(&phba->hbalock); 10980 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10981 "0402 Cannot find virtual addr for buffer tag on " 10982 "ring %d Data x%lx x%p x%p x%x\n", 10983 pring->ringno, (unsigned long) tag, 10984 slp->next, slp->prev, pring->postbufq_cnt); 10985 10986 return NULL; 10987 } 10988 10989 /** 10990 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10991 * @phba: Pointer to HBA context object. 10992 * @pring: Pointer to driver SLI ring object. 10993 * @phys: DMA address of the buffer. 10994 * 10995 * This function searches the buffer list using the dma_address 10996 * of unsolicited event to find the driver's lpfc_dmabuf object 10997 * corresponding to the dma_address. The function returns the 10998 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10999 * This function is called by the ct and els unsolicited event 11000 * handlers to get the buffer associated with the unsolicited 11001 * event. 11002 * 11003 * This function is called with no lock held. 11004 **/ 11005 struct lpfc_dmabuf * 11006 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11007 dma_addr_t phys) 11008 { 11009 struct lpfc_dmabuf *mp, *next_mp; 11010 struct list_head *slp = &pring->postbufq; 11011 11012 /* Search postbufq, from the beginning, looking for a match on phys */ 11013 spin_lock_irq(&phba->hbalock); 11014 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 11015 if (mp->phys == phys) { 11016 list_del_init(&mp->list); 11017 pring->postbufq_cnt--; 11018 spin_unlock_irq(&phba->hbalock); 11019 return mp; 11020 } 11021 } 11022 11023 spin_unlock_irq(&phba->hbalock); 11024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11025 "0410 Cannot find virtual addr for mapped buf on " 11026 "ring %d Data x%llx x%p x%p x%x\n", 11027 pring->ringno, (unsigned long long)phys, 11028 slp->next, slp->prev, pring->postbufq_cnt); 11029 return NULL; 11030 } 11031 11032 /** 11033 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 11034 * @phba: Pointer to HBA context object. 11035 * @cmdiocb: Pointer to driver command iocb object. 11036 * @rspiocb: Pointer to driver response iocb object. 11037 * 11038 * This function is the completion handler for the abort iocbs for 11039 * ELS commands. This function is called from the ELS ring event 11040 * handler with no lock held. This function frees memory resources 11041 * associated with the abort iocb. 11042 **/ 11043 static void 11044 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11045 struct lpfc_iocbq *rspiocb) 11046 { 11047 IOCB_t *irsp = &rspiocb->iocb; 11048 uint16_t abort_iotag, abort_context; 11049 struct lpfc_iocbq *abort_iocb = NULL; 11050 11051 if (irsp->ulpStatus) { 11052 11053 /* 11054 * Assume that the port already completed and returned, or 11055 * will return the iocb. Just Log the message. 
11056 */ 11057 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 11058 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 11059 11060 spin_lock_irq(&phba->hbalock); 11061 if (phba->sli_rev < LPFC_SLI_REV4) { 11062 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 11063 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11064 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 11065 spin_unlock_irq(&phba->hbalock); 11066 goto release_iocb; 11067 } 11068 if (abort_iotag != 0 && 11069 abort_iotag <= phba->sli.last_iotag) 11070 abort_iocb = 11071 phba->sli.iocbq_lookup[abort_iotag]; 11072 } else 11073 /* For sli4 the abort_tag is the XRI, 11074 * so the abort routine puts the iotag of the iocb 11075 * being aborted in the context field of the abort 11076 * IOCB. 11077 */ 11078 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11079 11080 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11081 "0327 Cannot abort els iocb %p " 11082 "with tag %x context %x, abort status %x, " 11083 "abort code %x\n", 11084 abort_iocb, abort_iotag, abort_context, 11085 irsp->ulpStatus, irsp->un.ulpWord[4]); 11086 11087 spin_unlock_irq(&phba->hbalock); 11088 } 11089 release_iocb: 11090 lpfc_sli_release_iocbq(phba, cmdiocb); 11091 return; 11092 } 11093 11094 /** 11095 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11096 * @phba: Pointer to HBA context object. 11097 * @cmdiocb: Pointer to driver command iocb object. 11098 * @rspiocb: Pointer to driver response iocb object. 11099 * 11100 * The function is called from SLI ring event handler with no 11101 * lock held. This function is the completion handler for ELS commands 11102 * which are aborted. The function frees memory resources used for 11103 * the aborted ELS commands. 11104 **/ 11105 static void 11106 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11107 struct lpfc_iocbq *rspiocb) 11108 { 11109 IOCB_t *irsp = &rspiocb->iocb; 11110 11111 /* ELS cmd tag <ulpIoTag> completes */ 11112 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11113 "0139 Ignoring ELS cmd tag x%x completion Data: " 11114 "x%x x%x x%x\n", 11115 irsp->ulpIoTag, irsp->ulpStatus, 11116 irsp->un.ulpWord[4], irsp->ulpTimeout); 11117 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11118 lpfc_ct_free_iocb(phba, cmdiocb); 11119 else 11120 lpfc_els_free_iocb(phba, cmdiocb); 11121 return; 11122 } 11123 11124 /** 11125 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11126 * @phba: Pointer to HBA context object. 11127 * @pring: Pointer to driver SLI ring object. 11128 * @cmdiocb: Pointer to driver command iocb object. 11129 * 11130 * This function issues an abort iocb for the provided command iocb down to 11131 * the port. Other than the case the outstanding command iocb is an abort 11132 * request, this function issues abort out unconditionally. This function is 11133 * called with hbalock held. The function returns 0 when it fails due to 11134 * memory allocation failure or when the command iocb is an abort request. 11135 **/ 11136 static int 11137 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11138 struct lpfc_iocbq *cmdiocb) 11139 { 11140 struct lpfc_vport *vport = cmdiocb->vport; 11141 struct lpfc_iocbq *abtsiocbp; 11142 IOCB_t *icmd = NULL; 11143 IOCB_t *iabt = NULL; 11144 int retval; 11145 unsigned long iflags; 11146 struct lpfc_nodelist *ndlp; 11147 11148 lockdep_assert_held(&phba->hbalock); 11149 11150 /* 11151 * There are certain command types we don't want to abort. 
And we 11152 * don't want to abort commands that are already in the process of 11153 * being aborted. 11154 */ 11155 icmd = &cmdiocb->iocb; 11156 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11157 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11158 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11159 return 0; 11160 11161 /* issue ABTS for this IOCB based on iotag */ 11162 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11163 if (abtsiocbp == NULL) 11164 return 0; 11165 11166 /* This signals the response to set the correct status 11167 * before calling the completion handler 11168 */ 11169 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11170 11171 iabt = &abtsiocbp->iocb; 11172 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 11173 iabt->un.acxri.abortContextTag = icmd->ulpContext; 11174 if (phba->sli_rev == LPFC_SLI_REV4) { 11175 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 11176 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 11177 } else { 11178 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 11179 if (pring->ringno == LPFC_ELS_RING) { 11180 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); 11181 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; 11182 } 11183 } 11184 iabt->ulpLe = 1; 11185 iabt->ulpClass = icmd->ulpClass; 11186 11187 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11188 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 11189 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 11190 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 11191 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 11192 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 11193 11194 if (phba->link_state >= LPFC_LINK_UP) 11195 iabt->ulpCommand = CMD_ABORT_XRI_CN; 11196 else 11197 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 11198 11199 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 11200 abtsiocbp->vport = vport; 11201 11202 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 11203 "0339 Abort xri x%x, original iotag x%x, " 11204 "abort cmd iotag x%x\n", 11205 iabt->un.acxri.abortIoTag, 11206 iabt->un.acxri.abortContextTag, 11207 abtsiocbp->iotag); 11208 11209 if (phba->sli_rev == LPFC_SLI_REV4) { 11210 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 11211 if (unlikely(pring == NULL)) 11212 return 0; 11213 /* Note: both hbalock and ring_lock need to be set here */ 11214 spin_lock_irqsave(&pring->ring_lock, iflags); 11215 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11216 abtsiocbp, 0); 11217 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11218 } else { 11219 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11220 abtsiocbp, 0); 11221 } 11222 11223 if (retval) 11224 __lpfc_sli_release_iocbq(phba, abtsiocbp); 11225 11226 /* 11227 * Caller to this routine should check for IOCB_ERROR 11228 * and handle it properly. This routine no longer removes 11229 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11230 */ 11231 return retval; 11232 } 11233 11234 /** 11235 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 11236 * @phba: Pointer to HBA context object. 11237 * @pring: Pointer to driver SLI ring object. 11238 * @cmdiocb: Pointer to driver command iocb object. 11239 * 11240 * This function issues an abort iocb for the provided command iocb. In case 11241 * of unloading, the abort iocb will not be issued to commands on the ELS 11242 * ring. Instead, the callback function shall be changed to those commands 11243 * so that nothing happens when them finishes. This function is called with 11244 * hbalock held. The function returns 0 when the command iocb is an abort 11245 * request. 
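 *
 * Typical use, mirroring the vport and HBA teardown paths earlier in this
 * file (illustrative only):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);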
11246 **/ 11247 int 11248 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11249 struct lpfc_iocbq *cmdiocb) 11250 { 11251 struct lpfc_vport *vport = cmdiocb->vport; 11252 int retval = IOCB_ERROR; 11253 IOCB_t *icmd = NULL; 11254 11255 lockdep_assert_held(&phba->hbalock); 11256 11257 /* 11258 * There are certain command types we don't want to abort. And we 11259 * don't want to abort commands that are already in the process of 11260 * being aborted. 11261 */ 11262 icmd = &cmdiocb->iocb; 11263 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11264 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11265 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11266 return 0; 11267 11268 if (!pring) { 11269 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11270 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11271 else 11272 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11273 goto abort_iotag_exit; 11274 } 11275 11276 /* 11277 * If we're unloading, don't abort iocb on the ELS ring, but change 11278 * the callback so that nothing happens when it finishes. 11279 */ 11280 if ((vport->load_flag & FC_UNLOADING) && 11281 (pring->ringno == LPFC_ELS_RING)) { 11282 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11283 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11284 else 11285 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11286 goto abort_iotag_exit; 11287 } 11288 11289 /* Now, we try to issue the abort to the cmdiocb out */ 11290 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11291 11292 abort_iotag_exit: 11293 /* 11294 * Caller to this routine should check for IOCB_ERROR 11295 * and handle it properly. This routine no longer removes 11296 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11297 */ 11298 return retval; 11299 } 11300 11301 /** 11302 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11303 * @phba: pointer to lpfc HBA data structure. 11304 * 11305 * This routine will abort all pending and outstanding iocbs to an HBA. 11306 **/ 11307 void 11308 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11309 { 11310 struct lpfc_sli *psli = &phba->sli; 11311 struct lpfc_sli_ring *pring; 11312 struct lpfc_queue *qp = NULL; 11313 int i; 11314 11315 if (phba->sli_rev != LPFC_SLI_REV4) { 11316 for (i = 0; i < psli->num_rings; i++) { 11317 pring = &psli->sli3_ring[i]; 11318 lpfc_sli_abort_iocb_ring(phba, pring); 11319 } 11320 return; 11321 } 11322 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11323 pring = qp->pring; 11324 if (!pring) 11325 continue; 11326 lpfc_sli_abort_iocb_ring(phba, pring); 11327 } 11328 } 11329 11330 /** 11331 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11332 * @iocbq: Pointer to driver iocb object. 11333 * @vport: Pointer to driver virtual port object. 11334 * @tgt_id: SCSI ID of the target. 11335 * @lun_id: LUN ID of the scsi device. 11336 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11337 * 11338 * This function acts as an iocb filter for functions which abort or count 11339 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11340 * 0 if the filtering criteria is met for the given iocb and will return 11341 * 1 if the filtering criteria is not met. 11342 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11343 * given iocb is for the SCSI device specified by vport, tgt_id and 11344 * lun_id parameter. 11345 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11346 * given iocb is for the SCSI target specified by vport and tgt_id 11347 * parameters. 
11348 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11349 * given iocb is for the SCSI host associated with the given vport. 11350 * This function is called with no locks held. 11351 **/ 11352 static int 11353 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11354 uint16_t tgt_id, uint64_t lun_id, 11355 lpfc_ctx_cmd ctx_cmd) 11356 { 11357 struct lpfc_io_buf *lpfc_cmd; 11358 int rc = 1; 11359 11360 if (iocbq->vport != vport) 11361 return rc; 11362 11363 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11364 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11365 return rc; 11366 11367 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11368 11369 if (lpfc_cmd->pCmd == NULL) 11370 return rc; 11371 11372 switch (ctx_cmd) { 11373 case LPFC_CTX_LUN: 11374 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11375 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11376 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11377 rc = 0; 11378 break; 11379 case LPFC_CTX_TGT: 11380 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11381 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11382 rc = 0; 11383 break; 11384 case LPFC_CTX_HOST: 11385 rc = 0; 11386 break; 11387 default: 11388 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11389 __func__, ctx_cmd); 11390 break; 11391 } 11392 11393 return rc; 11394 } 11395 11396 /** 11397 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11398 * @vport: Pointer to virtual port. 11399 * @tgt_id: SCSI ID of the target. 11400 * @lun_id: LUN ID of the scsi device. 11401 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11402 * 11403 * This function returns number of FCP commands pending for the vport. 11404 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11405 * commands pending on the vport associated with SCSI device specified 11406 * by tgt_id and lun_id parameters. 11407 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11408 * commands pending on the vport associated with SCSI target specified 11409 * by tgt_id parameter. 11410 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11411 * commands pending on the vport. 11412 * This function returns the number of iocbs which satisfy the filter. 11413 * This function is called without any lock held. 11414 **/ 11415 int 11416 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11417 lpfc_ctx_cmd ctx_cmd) 11418 { 11419 struct lpfc_hba *phba = vport->phba; 11420 struct lpfc_iocbq *iocbq; 11421 int sum, i; 11422 11423 spin_lock_irq(&phba->hbalock); 11424 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11425 iocbq = phba->sli.iocbq_lookup[i]; 11426 11427 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11428 ctx_cmd) == 0) 11429 sum++; 11430 } 11431 spin_unlock_irq(&phba->hbalock); 11432 11433 return sum; 11434 } 11435 11436 /** 11437 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11438 * @phba: Pointer to HBA context object 11439 * @cmdiocb: Pointer to command iocb object. 11440 * @rspiocb: Pointer to response iocb object. 11441 * 11442 * This function is called when an aborted FCP iocb completes. This 11443 * function is called by the ring event handler with no lock held. 11444 * This function frees the iocb. 
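 * It is installed as the iocb_cmpl handler of the ABTS iocbs built by
 * lpfc_sli_abort_iocb() and lpfc_sli_abort_taskmgmt() below.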
11445 **/ 11446 void 11447 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11448 struct lpfc_iocbq *rspiocb) 11449 { 11450 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11451 "3096 ABORT_XRI_CN completing on rpi x%x " 11452 "original iotag x%x, abort cmd iotag x%x " 11453 "status 0x%x, reason 0x%x\n", 11454 cmdiocb->iocb.un.acxri.abortContextTag, 11455 cmdiocb->iocb.un.acxri.abortIoTag, 11456 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11457 rspiocb->iocb.un.ulpWord[4]); 11458 lpfc_sli_release_iocbq(phba, cmdiocb); 11459 return; 11460 } 11461 11462 /** 11463 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11464 * @vport: Pointer to virtual port. 11465 * @pring: Pointer to driver SLI ring object. 11466 * @tgt_id: SCSI ID of the target. 11467 * @lun_id: LUN ID of the scsi device. 11468 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11469 * 11470 * This function sends an abort command for every SCSI command 11471 * associated with the given virtual port pending on the ring 11472 * filtered by lpfc_sli_validate_fcp_iocb function. 11473 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11474 * FCP iocbs associated with lun specified by tgt_id and lun_id 11475 * parameters 11476 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11477 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11478 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11479 * FCP iocbs associated with virtual port. 11480 * This function returns number of iocbs it failed to abort. 11481 * This function is called with no locks held. 11482 **/ 11483 int 11484 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11485 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11486 { 11487 struct lpfc_hba *phba = vport->phba; 11488 struct lpfc_iocbq *iocbq; 11489 struct lpfc_iocbq *abtsiocb; 11490 struct lpfc_sli_ring *pring_s4; 11491 IOCB_t *cmd = NULL; 11492 int errcnt = 0, ret_val = 0; 11493 int i; 11494 11495 /* all I/Os are in process of being flushed */ 11496 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) 11497 return errcnt; 11498 11499 for (i = 1; i <= phba->sli.last_iotag; i++) { 11500 iocbq = phba->sli.iocbq_lookup[i]; 11501 11502 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11503 abort_cmd) != 0) 11504 continue; 11505 11506 /* 11507 * If the iocbq is already being aborted, don't take a second 11508 * action, but do count it. 11509 */ 11510 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11511 continue; 11512 11513 /* issue ABTS for this IOCB based on iotag */ 11514 abtsiocb = lpfc_sli_get_iocbq(phba); 11515 if (abtsiocb == NULL) { 11516 errcnt++; 11517 continue; 11518 } 11519 11520 /* indicate the IO is being aborted by the driver. 
*/ 11521 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11522 11523 cmd = &iocbq->iocb; 11524 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11525 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11526 if (phba->sli_rev == LPFC_SLI_REV4) 11527 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11528 else 11529 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11530 abtsiocb->iocb.ulpLe = 1; 11531 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11532 abtsiocb->vport = vport; 11533 11534 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11535 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11536 if (iocbq->iocb_flag & LPFC_IO_FCP) 11537 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11538 if (iocbq->iocb_flag & LPFC_IO_FOF) 11539 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11540 11541 if (lpfc_is_link_up(phba)) 11542 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11543 else 11544 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11545 11546 /* Setup callback routine and issue the command. */ 11547 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11548 if (phba->sli_rev == LPFC_SLI_REV4) { 11549 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11550 if (!pring_s4) 11551 continue; 11552 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11553 abtsiocb, 0); 11554 } else 11555 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11556 abtsiocb, 0); 11557 if (ret_val == IOCB_ERROR) { 11558 lpfc_sli_release_iocbq(phba, abtsiocb); 11559 errcnt++; 11560 continue; 11561 } 11562 } 11563 11564 return errcnt; 11565 } 11566 11567 /** 11568 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11569 * @vport: Pointer to virtual port. 11570 * @pring: Pointer to driver SLI ring object. 11571 * @tgt_id: SCSI ID of the target. 11572 * @lun_id: LUN ID of the scsi device. 11573 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11574 * 11575 * This function sends an abort command for every SCSI command 11576 * associated with the given virtual port pending on the ring 11577 * filtered by lpfc_sli_validate_fcp_iocb function. 11578 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11579 * FCP iocbs associated with lun specified by tgt_id and lun_id 11580 * parameters 11581 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11582 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11583 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11584 * FCP iocbs associated with virtual port. 11585 * This function returns number of iocbs it aborted . 11586 * This function is called with no locks held right after a taskmgmt 11587 * command is sent. 
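 *
 * For example (illustrative only), a SCSI LUN reset handler might follow
 * the task management command with:
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *				      LPFC_CTX_LUN);
 *
 * to explicitly abort any FCP commands still outstanding on that LUN.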
11588 **/ 11589 int 11590 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11591 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11592 { 11593 struct lpfc_hba *phba = vport->phba; 11594 struct lpfc_io_buf *lpfc_cmd; 11595 struct lpfc_iocbq *abtsiocbq; 11596 struct lpfc_nodelist *ndlp; 11597 struct lpfc_iocbq *iocbq; 11598 IOCB_t *icmd; 11599 int sum, i, ret_val; 11600 unsigned long iflags; 11601 struct lpfc_sli_ring *pring_s4 = NULL; 11602 11603 spin_lock_irqsave(&phba->hbalock, iflags); 11604 11605 /* all I/Os are in process of being flushed */ 11606 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11607 spin_unlock_irqrestore(&phba->hbalock, iflags); 11608 return 0; 11609 } 11610 sum = 0; 11611 11612 for (i = 1; i <= phba->sli.last_iotag; i++) { 11613 iocbq = phba->sli.iocbq_lookup[i]; 11614 11615 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11616 cmd) != 0) 11617 continue; 11618 11619 /* Guard against IO completion being called at same time */ 11620 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11621 spin_lock(&lpfc_cmd->buf_lock); 11622 11623 if (!lpfc_cmd->pCmd) { 11624 spin_unlock(&lpfc_cmd->buf_lock); 11625 continue; 11626 } 11627 11628 if (phba->sli_rev == LPFC_SLI_REV4) { 11629 pring_s4 = 11630 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; 11631 if (!pring_s4) { 11632 spin_unlock(&lpfc_cmd->buf_lock); 11633 continue; 11634 } 11635 /* Note: both hbalock and ring_lock must be set here */ 11636 spin_lock(&pring_s4->ring_lock); 11637 } 11638 11639 /* 11640 * If the iocbq is already being aborted, don't take a second 11641 * action, but do count it. 11642 */ 11643 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 11644 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 11645 if (phba->sli_rev == LPFC_SLI_REV4) 11646 spin_unlock(&pring_s4->ring_lock); 11647 spin_unlock(&lpfc_cmd->buf_lock); 11648 continue; 11649 } 11650 11651 /* issue ABTS for this IOCB based on iotag */ 11652 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11653 if (!abtsiocbq) { 11654 if (phba->sli_rev == LPFC_SLI_REV4) 11655 spin_unlock(&pring_s4->ring_lock); 11656 spin_unlock(&lpfc_cmd->buf_lock); 11657 continue; 11658 } 11659 11660 icmd = &iocbq->iocb; 11661 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11662 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11663 if (phba->sli_rev == LPFC_SLI_REV4) 11664 abtsiocbq->iocb.un.acxri.abortIoTag = 11665 iocbq->sli4_xritag; 11666 else 11667 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11668 abtsiocbq->iocb.ulpLe = 1; 11669 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11670 abtsiocbq->vport = vport; 11671 11672 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11673 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11674 if (iocbq->iocb_flag & LPFC_IO_FCP) 11675 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11676 if (iocbq->iocb_flag & LPFC_IO_FOF) 11677 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11678 11679 ndlp = lpfc_cmd->rdata->pnode; 11680 11681 if (lpfc_is_link_up(phba) && 11682 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11683 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11684 else 11685 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11686 11687 /* Setup callback routine and issue the command. */ 11688 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11689 11690 /* 11691 * Indicate the IO is being aborted by the driver and set 11692 * the caller's flag into the aborted IO. 
11693 */ 11694 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11695 11696 if (phba->sli_rev == LPFC_SLI_REV4) { 11697 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11698 abtsiocbq, 0); 11699 spin_unlock(&pring_s4->ring_lock); 11700 } else { 11701 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11702 abtsiocbq, 0); 11703 } 11704 11705 spin_unlock(&lpfc_cmd->buf_lock); 11706 11707 if (ret_val == IOCB_ERROR) 11708 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11709 else 11710 sum++; 11711 } 11712 spin_unlock_irqrestore(&phba->hbalock, iflags); 11713 return sum; 11714 } 11715 11716 /** 11717 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11718 * @phba: Pointer to HBA context object. 11719 * @cmdiocbq: Pointer to command iocb. 11720 * @rspiocbq: Pointer to response iocb. 11721 * 11722 * This function is the completion handler for iocbs issued using 11723 * lpfc_sli_issue_iocb_wait function. This function is called by the 11724 * ring event handler function without any lock held. This function 11725 * can be called from both worker thread context and interrupt 11726 * context. This function also can be called from other thread which 11727 * cleans up the SLI layer objects. 11728 * This function copy the contents of the response iocb to the 11729 * response iocb memory object provided by the caller of 11730 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11731 * sleeps for the iocb completion. 11732 **/ 11733 static void 11734 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11735 struct lpfc_iocbq *cmdiocbq, 11736 struct lpfc_iocbq *rspiocbq) 11737 { 11738 wait_queue_head_t *pdone_q; 11739 unsigned long iflags; 11740 struct lpfc_io_buf *lpfc_cmd; 11741 11742 spin_lock_irqsave(&phba->hbalock, iflags); 11743 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11744 11745 /* 11746 * A time out has occurred for the iocb. If a time out 11747 * completion handler has been supplied, call it. Otherwise, 11748 * just free the iocbq. 11749 */ 11750 11751 spin_unlock_irqrestore(&phba->hbalock, iflags); 11752 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11753 cmdiocbq->wait_iocb_cmpl = NULL; 11754 if (cmdiocbq->iocb_cmpl) 11755 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11756 else 11757 lpfc_sli_release_iocbq(phba, cmdiocbq); 11758 return; 11759 } 11760 11761 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11762 if (cmdiocbq->context2 && rspiocbq) 11763 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11764 &rspiocbq->iocb, sizeof(IOCB_t)); 11765 11766 /* Set the exchange busy flag for task management commands */ 11767 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11768 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11769 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 11770 cur_iocbq); 11771 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11772 } 11773 11774 pdone_q = cmdiocbq->context_un.wait_queue; 11775 if (pdone_q) 11776 wake_up(pdone_q); 11777 spin_unlock_irqrestore(&phba->hbalock, iflags); 11778 return; 11779 } 11780 11781 /** 11782 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11783 * @phba: Pointer to HBA context object.. 11784 * @piocbq: Pointer to command iocb. 11785 * @flag: Flag to test. 11786 * 11787 * This routine grabs the hbalock and then test the iocb_flag to 11788 * see if the passed in flag is set. 11789 * Returns: 11790 * 1 if flag is set. 11791 * 0 if flag is not set. 
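 *
 * Its main user is lpfc_sli_issue_iocb_wait(), which evaluates it as the
 * wait condition, for example:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);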
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number on which to issue the iocb.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb, and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it is an error.
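	 * A hypothetical caller that wants the response copied back would
	 * pass a dedicated response iocbq, for example:
	 *
	 *	rspiocbq = lpfc_sli_get_iocbq(phba);
	 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
	 *				      rspiocbq, timeout);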
11868 */ 11869 if (prspiocbq) { 11870 if (piocb->context2) 11871 return IOCB_ERROR; 11872 piocb->context2 = prspiocbq; 11873 } 11874 11875 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11876 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11877 piocb->context_un.wait_queue = &done_q; 11878 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11879 11880 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11881 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11882 return IOCB_ERROR; 11883 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11884 writel(creg_val, phba->HCregaddr); 11885 readl(phba->HCregaddr); /* flush */ 11886 } 11887 11888 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11889 SLI_IOCB_RET_IOCB); 11890 if (retval == IOCB_SUCCESS) { 11891 timeout_req = msecs_to_jiffies(timeout * 1000); 11892 timeleft = wait_event_timeout(done_q, 11893 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11894 timeout_req); 11895 spin_lock_irqsave(&phba->hbalock, iflags); 11896 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11897 11898 /* 11899 * IOCB timed out. Inform the wake iocb wait 11900 * completion function and set local status 11901 */ 11902 11903 iocb_completed = false; 11904 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11905 } 11906 spin_unlock_irqrestore(&phba->hbalock, iflags); 11907 if (iocb_completed) { 11908 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11909 "0331 IOCB wake signaled\n"); 11910 /* Note: we are not indicating if the IOCB has a success 11911 * status or not - that's for the caller to check. 11912 * IOCB_SUCCESS means just that the command was sent and 11913 * completed. Not that it completed successfully. 11914 * */ 11915 } else if (timeleft == 0) { 11916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11917 "0338 IOCB wait timeout error - no " 11918 "wake response Data x%x\n", timeout); 11919 retval = IOCB_TIMEDOUT; 11920 } else { 11921 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11922 "0330 IOCB wake NOT set, " 11923 "Data x%x x%lx\n", 11924 timeout, (timeleft / jiffies)); 11925 retval = IOCB_TIMEDOUT; 11926 } 11927 } else if (retval == IOCB_BUSY) { 11928 if (phba->cfg_log_verbose & LOG_SLI) { 11929 list_for_each_entry(iocb, &pring->txq, list) { 11930 txq_cnt++; 11931 } 11932 list_for_each_entry(iocb, &pring->txcmplq, list) { 11933 txcmplq_cnt++; 11934 } 11935 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11936 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11937 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11938 } 11939 return retval; 11940 } else { 11941 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11942 "0332 IOCB wait issue failed, Data x%x\n", 11943 retval); 11944 retval = IOCB_ERROR; 11945 } 11946 11947 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11948 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11949 return IOCB_ERROR; 11950 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11951 writel(creg_val, phba->HCregaddr); 11952 readl(phba->HCregaddr); /* flush */ 11953 } 11954 11955 if (prspiocbq) 11956 piocb->context2 = NULL; 11957 11958 piocb->context_un.wait_queue = NULL; 11959 piocb->iocb_cmpl = NULL; 11960 return retval; 11961 } 11962 11963 /** 11964 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11965 * @phba: Pointer to HBA context object. 11966 * @pmboxq: Pointer to driver mailbox object. 11967 * @timeout: Timeout in number of seconds. 11968 * 11969 * This function issues the mailbox to firmware and waits for the 11970 * mailbox command to complete. If the mailbox command is not 11971 * completed within timeout seconds, it returns MBX_TIMEOUT. 
11972 * The function waits for the mailbox completion using an 11973 * interruptible wait. If the thread is woken up due to a 11974 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11975 * should not free the mailbox resources, if this function returns 11976 * MBX_TIMEOUT. 11977 * This function will sleep while waiting for mailbox completion. 11978 * So, this function should not be called from any context which 11979 * does not allow sleeping. Due to the same reason, this function 11980 * cannot be called with interrupt disabled. 11981 * This function assumes that the mailbox completion occurs while 11982 * this function sleep. So, this function cannot be called from 11983 * the worker thread which processes mailbox completion. 11984 * This function is called in the context of HBA management 11985 * applications. 11986 * This function returns MBX_SUCCESS when successful. 11987 * This function is called with no lock held. 11988 **/ 11989 int 11990 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 11991 uint32_t timeout) 11992 { 11993 struct completion mbox_done; 11994 int retval; 11995 unsigned long flag; 11996 11997 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 11998 /* setup wake call as IOCB callback */ 11999 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 12000 12001 /* setup context3 field to pass wait_queue pointer to wake function */ 12002 init_completion(&mbox_done); 12003 pmboxq->context3 = &mbox_done; 12004 /* now issue the command */ 12005 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 12006 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 12007 wait_for_completion_timeout(&mbox_done, 12008 msecs_to_jiffies(timeout * 1000)); 12009 12010 spin_lock_irqsave(&phba->hbalock, flag); 12011 pmboxq->context3 = NULL; 12012 /* 12013 * if LPFC_MBX_WAKE flag is set the mailbox is completed 12014 * else do not free the resources. 12015 */ 12016 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 12017 retval = MBX_SUCCESS; 12018 } else { 12019 retval = MBX_TIMEOUT; 12020 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12021 } 12022 spin_unlock_irqrestore(&phba->hbalock, flag); 12023 } 12024 return retval; 12025 } 12026 12027 /** 12028 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 12029 * @phba: Pointer to HBA context. 12030 * 12031 * This function is called to shutdown the driver's mailbox sub-system. 12032 * It first marks the mailbox sub-system is in a block state to prevent 12033 * the asynchronous mailbox command from issued off the pending mailbox 12034 * command queue. If the mailbox command sub-system shutdown is due to 12035 * HBA error conditions such as EEH or ERATT, this routine shall invoke 12036 * the mailbox sub-system flush routine to forcefully bring down the 12037 * mailbox sub-system. Otherwise, if it is due to normal condition (such 12038 * as with offline or HBA function reset), this routine will wait for the 12039 * outstanding mailbox command to complete before invoking the mailbox 12040 * sub-system flush routine to gracefully bring down mailbox sub-system. 
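 *
 * For example, the normal HBA teardown path above calls
 * lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT) to wait for the active
 * mailbox command, whereas callers that cannot afford to wait pass
 * LPFC_MBX_NO_WAIT to force an immediate flush after a short delay.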
12041 **/ 12042 void 12043 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 12044 { 12045 struct lpfc_sli *psli = &phba->sli; 12046 unsigned long timeout; 12047 12048 if (mbx_action == LPFC_MBX_NO_WAIT) { 12049 /* delay 100ms for port state */ 12050 msleep(100); 12051 lpfc_sli_mbox_sys_flush(phba); 12052 return; 12053 } 12054 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 12055 12056 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12057 local_bh_disable(); 12058 12059 spin_lock_irq(&phba->hbalock); 12060 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12061 12062 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 12063 /* Determine how long we might wait for the active mailbox 12064 * command to be gracefully completed by firmware. 12065 */ 12066 if (phba->sli.mbox_active) 12067 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 12068 phba->sli.mbox_active) * 12069 1000) + jiffies; 12070 spin_unlock_irq(&phba->hbalock); 12071 12072 /* Enable softirqs again, done with phba->hbalock */ 12073 local_bh_enable(); 12074 12075 while (phba->sli.mbox_active) { 12076 /* Check active mailbox complete status every 2ms */ 12077 msleep(2); 12078 if (time_after(jiffies, timeout)) 12079 /* Timeout, let the mailbox flush routine to 12080 * forcefully release active mailbox command 12081 */ 12082 break; 12083 } 12084 } else { 12085 spin_unlock_irq(&phba->hbalock); 12086 12087 /* Enable softirqs again, done with phba->hbalock */ 12088 local_bh_enable(); 12089 } 12090 12091 lpfc_sli_mbox_sys_flush(phba); 12092 } 12093 12094 /** 12095 * lpfc_sli_eratt_read - read sli-3 error attention events 12096 * @phba: Pointer to HBA context. 12097 * 12098 * This function is called to read the SLI3 device error attention registers 12099 * for possible error attention events. The caller must hold the hostlock 12100 * with spin_lock_irq(). 12101 * 12102 * This function returns 1 when there is Error Attention in the Host Attention 12103 * Register and returns 0 otherwise. 12104 **/ 12105 static int 12106 lpfc_sli_eratt_read(struct lpfc_hba *phba) 12107 { 12108 uint32_t ha_copy; 12109 12110 /* Read chip Host Attention (HA) register */ 12111 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12112 goto unplug_err; 12113 12114 if (ha_copy & HA_ERATT) { 12115 /* Read host status register to retrieve error event */ 12116 if (lpfc_sli_read_hs(phba)) 12117 goto unplug_err; 12118 12119 /* Check if there is a deferred error condition is active */ 12120 if ((HS_FFER1 & phba->work_hs) && 12121 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12122 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 12123 phba->hba_flag |= DEFER_ERATT; 12124 /* Clear all interrupt enable conditions */ 12125 writel(0, phba->HCregaddr); 12126 readl(phba->HCregaddr); 12127 } 12128 12129 /* Set the driver HA work bitmap */ 12130 phba->work_ha |= HA_ERATT; 12131 /* Indicate polling handles this ERATT */ 12132 phba->hba_flag |= HBA_ERATT_HANDLED; 12133 return 1; 12134 } 12135 return 0; 12136 12137 unplug_err: 12138 /* Set the driver HS work bitmap */ 12139 phba->work_hs |= UNPLUG_ERR; 12140 /* Set the driver HA work bitmap */ 12141 phba->work_ha |= HA_ERATT; 12142 /* Indicate polling handles this ERATT */ 12143 phba->hba_flag |= HBA_ERATT_HANDLED; 12144 return 1; 12145 } 12146 12147 /** 12148 * lpfc_sli4_eratt_read - read sli-4 error attention events 12149 * @phba: Pointer to HBA context. 12150 * 12151 * This function is called to read the SLI4 device error attention registers 12152 * for possible error attention events. 
The caller must hold the hostlock 12153 * with spin_lock_irq(). 12154 * 12155 * This function returns 1 when there is Error Attention in the Host Attention 12156 * Register and returns 0 otherwise. 12157 **/ 12158 static int 12159 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12160 { 12161 uint32_t uerr_sta_hi, uerr_sta_lo; 12162 uint32_t if_type, portsmphr; 12163 struct lpfc_register portstat_reg; 12164 12165 /* 12166 * For now, use the SLI4 device internal unrecoverable error 12167 * registers for error attention. This can be changed later. 12168 */ 12169 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12170 switch (if_type) { 12171 case LPFC_SLI_INTF_IF_TYPE_0: 12172 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12173 &uerr_sta_lo) || 12174 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12175 &uerr_sta_hi)) { 12176 phba->work_hs |= UNPLUG_ERR; 12177 phba->work_ha |= HA_ERATT; 12178 phba->hba_flag |= HBA_ERATT_HANDLED; 12179 return 1; 12180 } 12181 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12182 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12183 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12184 "1423 HBA Unrecoverable error: " 12185 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12186 "ue_mask_lo_reg=0x%x, " 12187 "ue_mask_hi_reg=0x%x\n", 12188 uerr_sta_lo, uerr_sta_hi, 12189 phba->sli4_hba.ue_mask_lo, 12190 phba->sli4_hba.ue_mask_hi); 12191 phba->work_status[0] = uerr_sta_lo; 12192 phba->work_status[1] = uerr_sta_hi; 12193 phba->work_ha |= HA_ERATT; 12194 phba->hba_flag |= HBA_ERATT_HANDLED; 12195 return 1; 12196 } 12197 break; 12198 case LPFC_SLI_INTF_IF_TYPE_2: 12199 case LPFC_SLI_INTF_IF_TYPE_6: 12200 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12201 &portstat_reg.word0) || 12202 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12203 &portsmphr)){ 12204 phba->work_hs |= UNPLUG_ERR; 12205 phba->work_ha |= HA_ERATT; 12206 phba->hba_flag |= HBA_ERATT_HANDLED; 12207 return 1; 12208 } 12209 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12210 phba->work_status[0] = 12211 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12212 phba->work_status[1] = 12213 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12214 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12215 "2885 Port Status Event: " 12216 "port status reg 0x%x, " 12217 "port smphr reg 0x%x, " 12218 "error 1=0x%x, error 2=0x%x\n", 12219 portstat_reg.word0, 12220 portsmphr, 12221 phba->work_status[0], 12222 phba->work_status[1]); 12223 phba->work_ha |= HA_ERATT; 12224 phba->hba_flag |= HBA_ERATT_HANDLED; 12225 return 1; 12226 } 12227 break; 12228 case LPFC_SLI_INTF_IF_TYPE_1: 12229 default: 12230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12231 "2886 HBA Error Attention on unsupported " 12232 "if type %d.", if_type); 12233 return 1; 12234 } 12235 12236 return 0; 12237 } 12238 12239 /** 12240 * lpfc_sli_check_eratt - check error attention events 12241 * @phba: Pointer to HBA context. 12242 * 12243 * This function is called from timer soft interrupt context to check HBA's 12244 * error attention register bit for error attention events. 12245 * 12246 * This function returns 1 when there is Error Attention in the Host Attention 12247 * Register and returns 0 otherwise. 12248 **/ 12249 int 12250 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12251 { 12252 uint32_t ha_copy; 12253 12254 /* If somebody is waiting to handle an eratt, don't process it 12255 * here. The brdkill function will do this. 
12256 */ 12257 if (phba->link_flag & LS_IGNORE_ERATT) 12258 return 0; 12259 12260 /* Check if interrupt handler handles this ERATT */ 12261 spin_lock_irq(&phba->hbalock); 12262 if (phba->hba_flag & HBA_ERATT_HANDLED) { 12263 /* Interrupt handler has handled ERATT */ 12264 spin_unlock_irq(&phba->hbalock); 12265 return 0; 12266 } 12267 12268 /* 12269 * If there is deferred error attention, do not check for error 12270 * attention 12271 */ 12272 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12273 spin_unlock_irq(&phba->hbalock); 12274 return 0; 12275 } 12276 12277 /* If PCI channel is offline, don't process it */ 12278 if (unlikely(pci_channel_offline(phba->pcidev))) { 12279 spin_unlock_irq(&phba->hbalock); 12280 return 0; 12281 } 12282 12283 switch (phba->sli_rev) { 12284 case LPFC_SLI_REV2: 12285 case LPFC_SLI_REV3: 12286 /* Read chip Host Attention (HA) register */ 12287 ha_copy = lpfc_sli_eratt_read(phba); 12288 break; 12289 case LPFC_SLI_REV4: 12290 /* Read device Uncoverable Error (UERR) registers */ 12291 ha_copy = lpfc_sli4_eratt_read(phba); 12292 break; 12293 default: 12294 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12295 "0299 Invalid SLI revision (%d)\n", 12296 phba->sli_rev); 12297 ha_copy = 0; 12298 break; 12299 } 12300 spin_unlock_irq(&phba->hbalock); 12301 12302 return ha_copy; 12303 } 12304 12305 /** 12306 * lpfc_intr_state_check - Check device state for interrupt handling 12307 * @phba: Pointer to HBA context. 12308 * 12309 * This inline routine checks whether a device or its PCI slot is in a state 12310 * that the interrupt should be handled. 12311 * 12312 * This function returns 0 if the device or the PCI slot is in a state that 12313 * interrupt should be handled, otherwise -EIO. 12314 */ 12315 static inline int 12316 lpfc_intr_state_check(struct lpfc_hba *phba) 12317 { 12318 /* If the pci channel is offline, ignore all the interrupts */ 12319 if (unlikely(pci_channel_offline(phba->pcidev))) 12320 return -EIO; 12321 12322 /* Update device level interrupt statistics */ 12323 phba->sli.slistat.sli_intr++; 12324 12325 /* Ignore all interrupts during initialization. */ 12326 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 12327 return -EIO; 12328 12329 return 0; 12330 } 12331 12332 /** 12333 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 12334 * @irq: Interrupt number. 12335 * @dev_id: The device context pointer. 12336 * 12337 * This function is directly called from the PCI layer as an interrupt 12338 * service routine when device with SLI-3 interface spec is enabled with 12339 * MSI-X multi-message interrupt mode and there are slow-path events in 12340 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 12341 * interrupt mode, this function is called as part of the device-level 12342 * interrupt handler. When the PCI slot is in error recovery or the HBA 12343 * is undergoing initialization, the interrupt handler will not process 12344 * the interrupt. The link attention and ELS ring attention events are 12345 * handled by the worker thread. The interrupt handler signals the worker 12346 * thread and returns for these events. This function is called without 12347 * any lock held. It gets the hbalock to access and update SLI data 12348 * structures. 12349 * 12350 * This function returns IRQ_HANDLED when interrupt is handled else it 12351 * returns IRQ_NONE. 
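 *
 * Illustrative sketch (editorial addition, not in the original source): in
 * MSI-X mode the initialization path is expected to bind this routine to
 * the slow-path vector, roughly along these lines (the vector index and
 * name string are placeholders, not taken from this file):
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 lpfc_sli_sp_intr_handler, 0,
 *			 "lpfc:sp", phba);
 *
 * The phba pointer passed as the last argument is what this handler
 * later recovers from @dev_id.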
12352 **/ 12353 irqreturn_t 12354 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12355 { 12356 struct lpfc_hba *phba; 12357 uint32_t ha_copy, hc_copy; 12358 uint32_t work_ha_copy; 12359 unsigned long status; 12360 unsigned long iflag; 12361 uint32_t control; 12362 12363 MAILBOX_t *mbox, *pmbox; 12364 struct lpfc_vport *vport; 12365 struct lpfc_nodelist *ndlp; 12366 struct lpfc_dmabuf *mp; 12367 LPFC_MBOXQ_t *pmb; 12368 int rc; 12369 12370 /* 12371 * Get the driver's phba structure from the dev_id and 12372 * assume the HBA is not interrupting. 12373 */ 12374 phba = (struct lpfc_hba *)dev_id; 12375 12376 if (unlikely(!phba)) 12377 return IRQ_NONE; 12378 12379 /* 12380 * Stuff needs to be attented to when this function is invoked as an 12381 * individual interrupt handler in MSI-X multi-message interrupt mode 12382 */ 12383 if (phba->intr_type == MSIX) { 12384 /* Check device state for handling interrupt */ 12385 if (lpfc_intr_state_check(phba)) 12386 return IRQ_NONE; 12387 /* Need to read HA REG for slow-path events */ 12388 spin_lock_irqsave(&phba->hbalock, iflag); 12389 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12390 goto unplug_error; 12391 /* If somebody is waiting to handle an eratt don't process it 12392 * here. The brdkill function will do this. 12393 */ 12394 if (phba->link_flag & LS_IGNORE_ERATT) 12395 ha_copy &= ~HA_ERATT; 12396 /* Check the need for handling ERATT in interrupt handler */ 12397 if (ha_copy & HA_ERATT) { 12398 if (phba->hba_flag & HBA_ERATT_HANDLED) 12399 /* ERATT polling has handled ERATT */ 12400 ha_copy &= ~HA_ERATT; 12401 else 12402 /* Indicate interrupt handler handles ERATT */ 12403 phba->hba_flag |= HBA_ERATT_HANDLED; 12404 } 12405 12406 /* 12407 * If there is deferred error attention, do not check for any 12408 * interrupt. 12409 */ 12410 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12411 spin_unlock_irqrestore(&phba->hbalock, iflag); 12412 return IRQ_NONE; 12413 } 12414 12415 /* Clear up only attention source related to slow-path */ 12416 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12417 goto unplug_error; 12418 12419 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12420 HC_LAINT_ENA | HC_ERINT_ENA), 12421 phba->HCregaddr); 12422 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12423 phba->HAregaddr); 12424 writel(hc_copy, phba->HCregaddr); 12425 readl(phba->HAregaddr); /* flush */ 12426 spin_unlock_irqrestore(&phba->hbalock, iflag); 12427 } else 12428 ha_copy = phba->ha_copy; 12429 12430 work_ha_copy = ha_copy & phba->work_ha_mask; 12431 12432 if (work_ha_copy) { 12433 if (work_ha_copy & HA_LATT) { 12434 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12435 /* 12436 * Turn off Link Attention interrupts 12437 * until CLEAR_LA done 12438 */ 12439 spin_lock_irqsave(&phba->hbalock, iflag); 12440 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12441 if (lpfc_readl(phba->HCregaddr, &control)) 12442 goto unplug_error; 12443 control &= ~HC_LAINT_ENA; 12444 writel(control, phba->HCregaddr); 12445 readl(phba->HCregaddr); /* flush */ 12446 spin_unlock_irqrestore(&phba->hbalock, iflag); 12447 } 12448 else 12449 work_ha_copy &= ~HA_LATT; 12450 } 12451 12452 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12453 /* 12454 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12455 * the only slow ring. 
12456 */ 12457 status = (work_ha_copy & 12458 (HA_RXMASK << (4*LPFC_ELS_RING))); 12459 status >>= (4*LPFC_ELS_RING); 12460 if (status & HA_RXMASK) { 12461 spin_lock_irqsave(&phba->hbalock, iflag); 12462 if (lpfc_readl(phba->HCregaddr, &control)) 12463 goto unplug_error; 12464 12465 lpfc_debugfs_slow_ring_trc(phba, 12466 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12467 control, status, 12468 (uint32_t)phba->sli.slistat.sli_intr); 12469 12470 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12471 lpfc_debugfs_slow_ring_trc(phba, 12472 "ISR Disable ring:" 12473 "pwork:x%x hawork:x%x wait:x%x", 12474 phba->work_ha, work_ha_copy, 12475 (uint32_t)((unsigned long) 12476 &phba->work_waitq)); 12477 12478 control &= 12479 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12480 writel(control, phba->HCregaddr); 12481 readl(phba->HCregaddr); /* flush */ 12482 } 12483 else { 12484 lpfc_debugfs_slow_ring_trc(phba, 12485 "ISR slow ring: pwork:" 12486 "x%x hawork:x%x wait:x%x", 12487 phba->work_ha, work_ha_copy, 12488 (uint32_t)((unsigned long) 12489 &phba->work_waitq)); 12490 } 12491 spin_unlock_irqrestore(&phba->hbalock, iflag); 12492 } 12493 } 12494 spin_lock_irqsave(&phba->hbalock, iflag); 12495 if (work_ha_copy & HA_ERATT) { 12496 if (lpfc_sli_read_hs(phba)) 12497 goto unplug_error; 12498 /* 12499 * Check if there is a deferred error condition 12500 * is active 12501 */ 12502 if ((HS_FFER1 & phba->work_hs) && 12503 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12504 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12505 phba->work_hs)) { 12506 phba->hba_flag |= DEFER_ERATT; 12507 /* Clear all interrupt enable conditions */ 12508 writel(0, phba->HCregaddr); 12509 readl(phba->HCregaddr); 12510 } 12511 } 12512 12513 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12514 pmb = phba->sli.mbox_active; 12515 pmbox = &pmb->u.mb; 12516 mbox = phba->mbox; 12517 vport = pmb->vport; 12518 12519 /* First check out the status word */ 12520 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12521 if (pmbox->mbxOwner != OWN_HOST) { 12522 spin_unlock_irqrestore(&phba->hbalock, iflag); 12523 /* 12524 * Stray Mailbox Interrupt, mbxCommand <cmd> 12525 * mbxStatus <status> 12526 */ 12527 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12528 LOG_SLI, 12529 "(%d):0304 Stray Mailbox " 12530 "Interrupt mbxCommand x%x " 12531 "mbxStatus x%x\n", 12532 (vport ? vport->vpi : 0), 12533 pmbox->mbxCommand, 12534 pmbox->mbxStatus); 12535 /* clear mailbox attention bit */ 12536 work_ha_copy &= ~HA_MBATT; 12537 } else { 12538 phba->sli.mbox_active = NULL; 12539 spin_unlock_irqrestore(&phba->hbalock, iflag); 12540 phba->last_completion_time = jiffies; 12541 del_timer(&phba->sli.mbox_tmo); 12542 if (pmb->mbox_cmpl) { 12543 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12544 MAILBOX_CMD_SIZE); 12545 if (pmb->out_ext_byte_len && 12546 pmb->ctx_buf) 12547 lpfc_sli_pcimem_bcopy( 12548 phba->mbox_ext, 12549 pmb->ctx_buf, 12550 pmb->out_ext_byte_len); 12551 } 12552 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12553 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12554 12555 lpfc_debugfs_disc_trc(vport, 12556 LPFC_DISC_TRC_MBOX_VPORT, 12557 "MBOX dflt rpi: : " 12558 "status:x%x rpi:x%x", 12559 (uint32_t)pmbox->mbxStatus, 12560 pmbox->un.varWords[0], 0); 12561 12562 if (!pmbox->mbxStatus) { 12563 mp = (struct lpfc_dmabuf *) 12564 (pmb->ctx_buf); 12565 ndlp = (struct lpfc_nodelist *) 12566 pmb->ctx_ndlp; 12567 12568 /* Reg_LOGIN of dflt RPI was 12569 * successful. new lets get 12570 * rid of the RPI using the 12571 * same mbox buffer. 
12572 */ 12573 lpfc_unreg_login(phba, 12574 vport->vpi, 12575 pmbox->un.varWords[0], 12576 pmb); 12577 pmb->mbox_cmpl = 12578 lpfc_mbx_cmpl_dflt_rpi; 12579 pmb->ctx_buf = mp; 12580 pmb->ctx_ndlp = ndlp; 12581 pmb->vport = vport; 12582 rc = lpfc_sli_issue_mbox(phba, 12583 pmb, 12584 MBX_NOWAIT); 12585 if (rc != MBX_BUSY) 12586 lpfc_printf_log(phba, 12587 KERN_ERR, 12588 LOG_MBOX | LOG_SLI, 12589 "0350 rc should have" 12590 "been MBX_BUSY\n"); 12591 if (rc != MBX_NOT_FINISHED) 12592 goto send_current_mbox; 12593 } 12594 } 12595 spin_lock_irqsave( 12596 &phba->pport->work_port_lock, 12597 iflag); 12598 phba->pport->work_port_events &= 12599 ~WORKER_MBOX_TMO; 12600 spin_unlock_irqrestore( 12601 &phba->pport->work_port_lock, 12602 iflag); 12603 lpfc_mbox_cmpl_put(phba, pmb); 12604 } 12605 } else 12606 spin_unlock_irqrestore(&phba->hbalock, iflag); 12607 12608 if ((work_ha_copy & HA_MBATT) && 12609 (phba->sli.mbox_active == NULL)) { 12610 send_current_mbox: 12611 /* Process next mailbox command if there is one */ 12612 do { 12613 rc = lpfc_sli_issue_mbox(phba, NULL, 12614 MBX_NOWAIT); 12615 } while (rc == MBX_NOT_FINISHED); 12616 if (rc != MBX_SUCCESS) 12617 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12618 LOG_SLI, "0349 rc should be " 12619 "MBX_SUCCESS\n"); 12620 } 12621 12622 spin_lock_irqsave(&phba->hbalock, iflag); 12623 phba->work_ha |= work_ha_copy; 12624 spin_unlock_irqrestore(&phba->hbalock, iflag); 12625 lpfc_worker_wake_up(phba); 12626 } 12627 return IRQ_HANDLED; 12628 unplug_error: 12629 spin_unlock_irqrestore(&phba->hbalock, iflag); 12630 return IRQ_HANDLED; 12631 12632 } /* lpfc_sli_sp_intr_handler */ 12633 12634 /** 12635 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12636 * @irq: Interrupt number. 12637 * @dev_id: The device context pointer. 12638 * 12639 * This function is directly called from the PCI layer as an interrupt 12640 * service routine when device with SLI-3 interface spec is enabled with 12641 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12642 * ring event in the HBA. However, when the device is enabled with either 12643 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12644 * device-level interrupt handler. When the PCI slot is in error recovery 12645 * or the HBA is undergoing initialization, the interrupt handler will not 12646 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12647 * the intrrupt context. This function is called without any lock held. 12648 * It gets the hbalock to access and update SLI data structures. 12649 * 12650 * This function returns IRQ_HANDLED when interrupt is handled else it 12651 * returns IRQ_NONE. 12652 **/ 12653 irqreturn_t 12654 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12655 { 12656 struct lpfc_hba *phba; 12657 uint32_t ha_copy; 12658 unsigned long status; 12659 unsigned long iflag; 12660 struct lpfc_sli_ring *pring; 12661 12662 /* Get the driver's phba structure from the dev_id and 12663 * assume the HBA is not interrupting. 
12664 */ 12665 phba = (struct lpfc_hba *) dev_id; 12666 12667 if (unlikely(!phba)) 12668 return IRQ_NONE; 12669 12670 /* 12671 * Stuff needs to be attented to when this function is invoked as an 12672 * individual interrupt handler in MSI-X multi-message interrupt mode 12673 */ 12674 if (phba->intr_type == MSIX) { 12675 /* Check device state for handling interrupt */ 12676 if (lpfc_intr_state_check(phba)) 12677 return IRQ_NONE; 12678 /* Need to read HA REG for FCP ring and other ring events */ 12679 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12680 return IRQ_HANDLED; 12681 /* Clear up only attention source related to fast-path */ 12682 spin_lock_irqsave(&phba->hbalock, iflag); 12683 /* 12684 * If there is deferred error attention, do not check for 12685 * any interrupt. 12686 */ 12687 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12688 spin_unlock_irqrestore(&phba->hbalock, iflag); 12689 return IRQ_NONE; 12690 } 12691 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12692 phba->HAregaddr); 12693 readl(phba->HAregaddr); /* flush */ 12694 spin_unlock_irqrestore(&phba->hbalock, iflag); 12695 } else 12696 ha_copy = phba->ha_copy; 12697 12698 /* 12699 * Process all events on FCP ring. Take the optimized path for FCP IO. 12700 */ 12701 ha_copy &= ~(phba->work_ha_mask); 12702 12703 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12704 status >>= (4*LPFC_FCP_RING); 12705 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12706 if (status & HA_RXMASK) 12707 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12708 12709 if (phba->cfg_multi_ring_support == 2) { 12710 /* 12711 * Process all events on extra ring. Take the optimized path 12712 * for extra ring IO. 12713 */ 12714 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12715 status >>= (4*LPFC_EXTRA_RING); 12716 if (status & HA_RXMASK) { 12717 lpfc_sli_handle_fast_ring_event(phba, 12718 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12719 status); 12720 } 12721 } 12722 return IRQ_HANDLED; 12723 } /* lpfc_sli_fp_intr_handler */ 12724 12725 /** 12726 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12727 * @irq: Interrupt number. 12728 * @dev_id: The device context pointer. 12729 * 12730 * This function is the HBA device-level interrupt handler to device with 12731 * SLI-3 interface spec, called from the PCI layer when either MSI or 12732 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12733 * requires driver attention. This function invokes the slow-path interrupt 12734 * attention handling function and fast-path interrupt attention handling 12735 * function in turn to process the relevant HBA attention events. This 12736 * function is called without any lock held. It gets the hbalock to access 12737 * and update SLI data structures. 12738 * 12739 * This function returns IRQ_HANDLED when interrupt is handled, else it 12740 * returns IRQ_NONE. 12741 **/ 12742 irqreturn_t 12743 lpfc_sli_intr_handler(int irq, void *dev_id) 12744 { 12745 struct lpfc_hba *phba; 12746 irqreturn_t sp_irq_rc, fp_irq_rc; 12747 unsigned long status1, status2; 12748 uint32_t hc_copy; 12749 12750 /* 12751 * Get the driver's phba structure from the dev_id and 12752 * assume the HBA is not interrupting. 
12753 */ 12754 phba = (struct lpfc_hba *) dev_id; 12755 12756 if (unlikely(!phba)) 12757 return IRQ_NONE; 12758 12759 /* Check device state for handling interrupt */ 12760 if (lpfc_intr_state_check(phba)) 12761 return IRQ_NONE; 12762 12763 spin_lock(&phba->hbalock); 12764 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12765 spin_unlock(&phba->hbalock); 12766 return IRQ_HANDLED; 12767 } 12768 12769 if (unlikely(!phba->ha_copy)) { 12770 spin_unlock(&phba->hbalock); 12771 return IRQ_NONE; 12772 } else if (phba->ha_copy & HA_ERATT) { 12773 if (phba->hba_flag & HBA_ERATT_HANDLED) 12774 /* ERATT polling has handled ERATT */ 12775 phba->ha_copy &= ~HA_ERATT; 12776 else 12777 /* Indicate interrupt handler handles ERATT */ 12778 phba->hba_flag |= HBA_ERATT_HANDLED; 12779 } 12780 12781 /* 12782 * If there is deferred error attention, do not check for any interrupt. 12783 */ 12784 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12785 spin_unlock(&phba->hbalock); 12786 return IRQ_NONE; 12787 } 12788 12789 /* Clear attention sources except link and error attentions */ 12790 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12791 spin_unlock(&phba->hbalock); 12792 return IRQ_HANDLED; 12793 } 12794 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12795 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12796 phba->HCregaddr); 12797 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12798 writel(hc_copy, phba->HCregaddr); 12799 readl(phba->HAregaddr); /* flush */ 12800 spin_unlock(&phba->hbalock); 12801 12802 /* 12803 * Invokes slow-path host attention interrupt handling as appropriate. 12804 */ 12805 12806 /* status of events with mailbox and link attention */ 12807 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12808 12809 /* status of events with ELS ring */ 12810 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12811 status2 >>= (4*LPFC_ELS_RING); 12812 12813 if (status1 || (status2 & HA_RXMASK)) 12814 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12815 else 12816 sp_irq_rc = IRQ_NONE; 12817 12818 /* 12819 * Invoke fast-path host attention interrupt handling as appropriate. 12820 */ 12821 12822 /* status of events with FCP ring */ 12823 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12824 status1 >>= (4*LPFC_FCP_RING); 12825 12826 /* status of events with extra ring */ 12827 if (phba->cfg_multi_ring_support == 2) { 12828 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12829 status2 >>= (4*LPFC_EXTRA_RING); 12830 } else 12831 status2 = 0; 12832 12833 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12834 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12835 else 12836 fp_irq_rc = IRQ_NONE; 12837 12838 /* Return device-level interrupt handling status */ 12839 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12840 } /* lpfc_sli_intr_handler */ 12841 12842 /** 12843 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12844 * @phba: pointer to lpfc hba data structure. 12845 * 12846 * This routine is invoked by the worker thread to process all the pending 12847 * SLI4 els abort xri events. 
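 *
 * Illustrative sketch (editorial addition, not in the original source): the
 * CQ handler queues the abort event and sets ELS_XRI_ABORT_EVENT, so the
 * worker thread is expected to dispatch to this routine roughly as:
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);
 *
 * The flag is cleared below before the queued events are drained, so an
 * abort that arrives during processing simply re-posts the worker event.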
12848 **/ 12849 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12850 { 12851 struct lpfc_cq_event *cq_event; 12852 12853 /* First, declare the els xri abort event has been handled */ 12854 spin_lock_irq(&phba->hbalock); 12855 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12856 spin_unlock_irq(&phba->hbalock); 12857 /* Now, handle all the els xri abort events */ 12858 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12859 /* Get the first event from the head of the event queue */ 12860 spin_lock_irq(&phba->hbalock); 12861 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12862 cq_event, struct lpfc_cq_event, list); 12863 spin_unlock_irq(&phba->hbalock); 12864 /* Notify aborted XRI for ELS work queue */ 12865 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12866 /* Free the event processed back to the free pool */ 12867 lpfc_sli4_cq_event_release(phba, cq_event); 12868 } 12869 } 12870 12871 /** 12872 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12873 * @phba: pointer to lpfc hba data structure 12874 * @pIocbIn: pointer to the rspiocbq 12875 * @pIocbOut: pointer to the cmdiocbq 12876 * @wcqe: pointer to the complete wcqe 12877 * 12878 * This routine transfers the fields of a command iocbq to a response iocbq 12879 * by copying all the IOCB fields from command iocbq and transferring the 12880 * completion status information from the complete wcqe. 12881 **/ 12882 static void 12883 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12884 struct lpfc_iocbq *pIocbIn, 12885 struct lpfc_iocbq *pIocbOut, 12886 struct lpfc_wcqe_complete *wcqe) 12887 { 12888 int numBdes, i; 12889 unsigned long iflags; 12890 uint32_t status, max_response; 12891 struct lpfc_dmabuf *dmabuf; 12892 struct ulp_bde64 *bpl, bde; 12893 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12894 12895 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12896 sizeof(struct lpfc_iocbq) - offset); 12897 /* Map WCQE parameters into irspiocb parameters */ 12898 status = bf_get(lpfc_wcqe_c_status, wcqe); 12899 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12900 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12901 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12902 pIocbIn->iocb.un.fcpi.fcpi_parm = 12903 pIocbOut->iocb.un.fcpi.fcpi_parm - 12904 wcqe->total_data_placed; 12905 else 12906 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12907 else { 12908 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12909 switch (pIocbOut->iocb.ulpCommand) { 12910 case CMD_ELS_REQUEST64_CR: 12911 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12912 bpl = (struct ulp_bde64 *)dmabuf->virt; 12913 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12914 max_response = bde.tus.f.bdeSize; 12915 break; 12916 case CMD_GEN_REQUEST64_CR: 12917 max_response = 0; 12918 if (!pIocbOut->context3) 12919 break; 12920 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12921 sizeof(struct ulp_bde64); 12922 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12923 bpl = (struct ulp_bde64 *)dmabuf->virt; 12924 for (i = 0; i < numBdes; i++) { 12925 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12926 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12927 max_response += bde.tus.f.bdeSize; 12928 } 12929 break; 12930 default: 12931 max_response = wcqe->total_data_placed; 12932 break; 12933 } 12934 if (max_response < wcqe->total_data_placed) 12935 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12936 else 12937 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12938 wcqe->total_data_placed; 12939 } 
12940
12941 /* Convert BG errors for completion status */
12942 if (status == CQE_STATUS_DI_ERROR) {
12943 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12944
12945 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12946 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12947 else
12948 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12949
12950 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12951 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12952 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12953 BGS_GUARD_ERR_MASK;
12954 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12955 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12956 BGS_APPTAG_ERR_MASK;
12957 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12958 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12959 BGS_REFTAG_ERR_MASK;
12960
12961 /* Check to see if there was any good data before the error */
12962 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12963 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12964 BGS_HI_WATER_MARK_PRESENT_MASK;
12965 pIocbIn->iocb.unsli3.sli3_bg.bghm =
12966 wcqe->total_data_placed;
12967 }
12968
12969 /*
12970 * Set ALL the error bits to indicate we don't know what
12971 * type of error it is.
12972 */
12973 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12974 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12975 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12976 BGS_GUARD_ERR_MASK);
12977 }
12978
12979 /* Pick up HBA exchange busy condition */
12980 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12981 spin_lock_irqsave(&phba->hbalock, iflags);
12982 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12983 spin_unlock_irqrestore(&phba->hbalock, iflags);
12984 }
12985 }
12986
12987 /**
12988 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12989 * @phba: Pointer to HBA context object.
12990 * @irspiocbq: Pointer to the response IOCBQ carrying the WCQE.
12991 *
12992 * This routine handles an ELS work-queue completion event and constructs
12993 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12994 * discovery engine to handle.
12995 *
12996 * Return: Pointer to the response IOCBQ, NULL otherwise.
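 *
 * Illustrative sketch (editorial addition, not in the original source): the
 * worker that drains phba->sli4_hba.sp_queue_event is expected to recover
 * the queued iocbq from its embedded cq_event and convert it here before
 * handing it to the common ELS completion path, roughly:
 *
 *	irspiocbq = container_of(cq_event, struct lpfc_iocbq, cq_event);
 *	irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, irspiocbq);
 *	if (!irspiocbq)
 *		continue;
 *
 * A NULL return means the WCQE had no matching command iocb and the
 * response iocbq was already released.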
12997 **/ 12998 static struct lpfc_iocbq * 12999 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 13000 struct lpfc_iocbq *irspiocbq) 13001 { 13002 struct lpfc_sli_ring *pring; 13003 struct lpfc_iocbq *cmdiocbq; 13004 struct lpfc_wcqe_complete *wcqe; 13005 unsigned long iflags; 13006 13007 pring = lpfc_phba_elsring(phba); 13008 if (unlikely(!pring)) 13009 return NULL; 13010 13011 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 13012 pring->stats.iocb_event++; 13013 /* Look up the ELS command IOCB and create pseudo response IOCB */ 13014 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13015 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13016 if (unlikely(!cmdiocbq)) { 13017 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13018 "0386 ELS complete with no corresponding " 13019 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 13020 wcqe->word0, wcqe->total_data_placed, 13021 wcqe->parameter, wcqe->word3); 13022 lpfc_sli_release_iocbq(phba, irspiocbq); 13023 return NULL; 13024 } 13025 13026 spin_lock_irqsave(&pring->ring_lock, iflags); 13027 /* Put the iocb back on the txcmplq */ 13028 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 13029 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13030 13031 /* Fake the irspiocbq and copy necessary response information */ 13032 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 13033 13034 return irspiocbq; 13035 } 13036 13037 inline struct lpfc_cq_event * 13038 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 13039 { 13040 struct lpfc_cq_event *cq_event; 13041 13042 /* Allocate a new internal CQ_EVENT entry */ 13043 cq_event = lpfc_sli4_cq_event_alloc(phba); 13044 if (!cq_event) { 13045 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13046 "0602 Failed to alloc CQ_EVENT entry\n"); 13047 return NULL; 13048 } 13049 13050 /* Move the CQE into the event */ 13051 memcpy(&cq_event->cqe, entry, size); 13052 return cq_event; 13053 } 13054 13055 /** 13056 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 13057 * @phba: Pointer to HBA context object. 13058 * @cqe: Pointer to mailbox completion queue entry. 13059 * 13060 * This routine process a mailbox completion queue entry with asynchrous 13061 * event. 13062 * 13063 * Return: true if work posted to worker thread, otherwise false. 13064 **/ 13065 static bool 13066 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13067 { 13068 struct lpfc_cq_event *cq_event; 13069 unsigned long iflags; 13070 13071 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13072 "0392 Async Event: word0:x%x, word1:x%x, " 13073 "word2:x%x, word3:x%x\n", mcqe->word0, 13074 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 13075 13076 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 13077 if (!cq_event) 13078 return false; 13079 spin_lock_irqsave(&phba->hbalock, iflags); 13080 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 13081 /* Set the async event flag */ 13082 phba->hba_flag |= ASYNC_EVENT; 13083 spin_unlock_irqrestore(&phba->hbalock, iflags); 13084 13085 return true; 13086 } 13087 13088 /** 13089 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 13090 * @phba: Pointer to HBA context object. 13091 * @cqe: Pointer to mailbox completion queue entry. 13092 * 13093 * This routine process a mailbox completion queue entry with mailbox 13094 * completion event. 13095 * 13096 * Return: true if work posted to worker thread, otherwise false. 
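 *
 * Illustrative sketch (editorial addition, not in the original source):
 * because an MCQE failure is folded into the MQE status word below, a
 * mailbox completion handler only needs to inspect that single status:
 *
 *	status = bf_get(lpfc_mqe_status, &pmb->u.mqe);
 *	if (status != MBX_SUCCESS)
 *		handle_failure();
 *
 * where a remapped MCQE error shows up as (LPFC_MBX_ERROR_RANGE |
 * mcqe_status) and handle_failure() is only a placeholder for the
 * caller's own error handling.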
13097 **/
13098 static bool
13099 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13100 {
13101 uint32_t mcqe_status;
13102 MAILBOX_t *mbox, *pmbox;
13103 struct lpfc_mqe *mqe;
13104 struct lpfc_vport *vport;
13105 struct lpfc_nodelist *ndlp;
13106 struct lpfc_dmabuf *mp;
13107 unsigned long iflags;
13108 LPFC_MBOXQ_t *pmb;
13109 bool workposted = false;
13110 int rc;
13111
13112 /* If not a mailbox-complete MCQE, just check for a consumed mailbox and exit */
13113 if (!bf_get(lpfc_trailer_completed, mcqe))
13114 goto out_no_mqe_complete;
13115
13116 /* Get the reference to the active mbox command */
13117 spin_lock_irqsave(&phba->hbalock, iflags);
13118 pmb = phba->sli.mbox_active;
13119 if (unlikely(!pmb)) {
13120 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13121 "1832 No pending MBOX command to handle\n");
13122 spin_unlock_irqrestore(&phba->hbalock, iflags);
13123 goto out_no_mqe_complete;
13124 }
13125 spin_unlock_irqrestore(&phba->hbalock, iflags);
13126 mqe = &pmb->u.mqe;
13127 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13128 mbox = phba->mbox;
13129 vport = pmb->vport;
13130
13131 /* Reset heartbeat timer */
13132 phba->last_completion_time = jiffies;
13133 del_timer(&phba->sli.mbox_tmo);
13134
13135 /* Move mbox data to caller's mailbox region, do endian swapping */
13136 if (pmb->mbox_cmpl && mbox)
13137 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13138
13139 /*
13140 * For mcqe errors, conditionally move a modified error code to
13141 * the mbox so that the error will not be missed.
13142 */
13143 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13144 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13145 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13146 bf_set(lpfc_mqe_status, mqe,
13147 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13148 }
13149 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13150 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13151 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13152 "MBOX dflt rpi: status:x%x rpi:x%x",
13153 mcqe_status,
13154 pmbox->un.varWords[0], 0);
13155 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13156 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13157 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13158 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13159 * rid of the RPI using the same mbox buffer.
13160 */ 13161 lpfc_unreg_login(phba, vport->vpi, 13162 pmbox->un.varWords[0], pmb); 13163 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13164 pmb->ctx_buf = mp; 13165 pmb->ctx_ndlp = ndlp; 13166 pmb->vport = vport; 13167 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13168 if (rc != MBX_BUSY) 13169 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13170 LOG_SLI, "0385 rc should " 13171 "have been MBX_BUSY\n"); 13172 if (rc != MBX_NOT_FINISHED) 13173 goto send_current_mbox; 13174 } 13175 } 13176 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13177 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13178 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13179 13180 /* There is mailbox completion work to do */ 13181 spin_lock_irqsave(&phba->hbalock, iflags); 13182 __lpfc_mbox_cmpl_put(phba, pmb); 13183 phba->work_ha |= HA_MBATT; 13184 spin_unlock_irqrestore(&phba->hbalock, iflags); 13185 workposted = true; 13186 13187 send_current_mbox: 13188 spin_lock_irqsave(&phba->hbalock, iflags); 13189 /* Release the mailbox command posting token */ 13190 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13191 /* Setting active mailbox pointer need to be in sync to flag clear */ 13192 phba->sli.mbox_active = NULL; 13193 spin_unlock_irqrestore(&phba->hbalock, iflags); 13194 /* Wake up worker thread to post the next pending mailbox command */ 13195 lpfc_worker_wake_up(phba); 13196 out_no_mqe_complete: 13197 if (bf_get(lpfc_trailer_consumed, mcqe)) 13198 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13199 return workposted; 13200 } 13201 13202 /** 13203 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13204 * @phba: Pointer to HBA context object. 13205 * @cqe: Pointer to mailbox completion queue entry. 13206 * 13207 * This routine process a mailbox completion queue entry, it invokes the 13208 * proper mailbox complete handling or asynchrous event handling routine 13209 * according to the MCQE's async bit. 13210 * 13211 * Return: true if work posted to worker thread, otherwise false. 13212 **/ 13213 static bool 13214 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13215 struct lpfc_cqe *cqe) 13216 { 13217 struct lpfc_mcqe mcqe; 13218 bool workposted; 13219 13220 cq->CQ_mbox++; 13221 13222 /* Copy the mailbox MCQE and convert endian order as needed */ 13223 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13224 13225 /* Invoke the proper event handling routine */ 13226 if (!bf_get(lpfc_trailer_async, &mcqe)) 13227 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13228 else 13229 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13230 return workposted; 13231 } 13232 13233 /** 13234 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13235 * @phba: Pointer to HBA context object. 13236 * @cq: Pointer to associated CQ 13237 * @wcqe: Pointer to work-queue completion queue entry. 13238 * 13239 * This routine handles an ELS work-queue completion event. 13240 * 13241 * Return: true if work posted to worker thread, otherwise false. 
13242 **/ 13243 static bool 13244 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13245 struct lpfc_wcqe_complete *wcqe) 13246 { 13247 struct lpfc_iocbq *irspiocbq; 13248 unsigned long iflags; 13249 struct lpfc_sli_ring *pring = cq->pring; 13250 int txq_cnt = 0; 13251 int txcmplq_cnt = 0; 13252 int fcp_txcmplq_cnt = 0; 13253 13254 /* Check for response status */ 13255 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13256 /* Log the error status */ 13257 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13258 "0357 ELS CQE error: status=x%x: " 13259 "CQE: %08x %08x %08x %08x\n", 13260 bf_get(lpfc_wcqe_c_status, wcqe), 13261 wcqe->word0, wcqe->total_data_placed, 13262 wcqe->parameter, wcqe->word3); 13263 } 13264 13265 /* Get an irspiocbq for later ELS response processing use */ 13266 irspiocbq = lpfc_sli_get_iocbq(phba); 13267 if (!irspiocbq) { 13268 if (!list_empty(&pring->txq)) 13269 txq_cnt++; 13270 if (!list_empty(&pring->txcmplq)) 13271 txcmplq_cnt++; 13272 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13273 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13274 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 13275 txq_cnt, phba->iocb_cnt, 13276 fcp_txcmplq_cnt, 13277 txcmplq_cnt); 13278 return false; 13279 } 13280 13281 /* Save off the slow-path queue event for work thread to process */ 13282 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13283 spin_lock_irqsave(&phba->hbalock, iflags); 13284 list_add_tail(&irspiocbq->cq_event.list, 13285 &phba->sli4_hba.sp_queue_event); 13286 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13287 spin_unlock_irqrestore(&phba->hbalock, iflags); 13288 13289 return true; 13290 } 13291 13292 /** 13293 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13294 * @phba: Pointer to HBA context object. 13295 * @wcqe: Pointer to work-queue completion queue entry. 13296 * 13297 * This routine handles slow-path WQ entry consumed event by invoking the 13298 * proper WQ release routine to the slow-path WQ. 13299 **/ 13300 static void 13301 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13302 struct lpfc_wcqe_release *wcqe) 13303 { 13304 /* sanity check on queue memory */ 13305 if (unlikely(!phba->sli4_hba.els_wq)) 13306 return; 13307 /* Check for the slow-path ELS work queue */ 13308 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13309 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13310 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13311 else 13312 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13313 "2579 Slow-path wqe consume event carries " 13314 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13315 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13316 phba->sli4_hba.els_wq->queue_id); 13317 } 13318 13319 /** 13320 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13321 * @phba: Pointer to HBA context object. 13322 * @cq: Pointer to a WQ completion queue. 13323 * @wcqe: Pointer to work-queue completion queue entry. 13324 * 13325 * This routine handles an XRI abort event. 13326 * 13327 * Return: true if work posted to worker thread, otherwise false. 
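 *
 * Illustrative sketch (editorial addition, not in the original source): for
 * the ELS and NVME-LS subtypes the event queued here is later drained by
 * lpfc_sli4_els_xri_abort_event_proc(), which pulls each event off the
 * list under hbalock and then does roughly
 *
 *	list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
 *			 cq_event, struct lpfc_cq_event, list);
 *	lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
 *	lpfc_sli4_cq_event_release(phba, cq_event);
 *
 * FCP and NVME aborts, by contrast, are handled inline and post no work.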
13328 **/ 13329 static bool 13330 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13331 struct lpfc_queue *cq, 13332 struct sli4_wcqe_xri_aborted *wcqe) 13333 { 13334 bool workposted = false; 13335 struct lpfc_cq_event *cq_event; 13336 unsigned long iflags; 13337 13338 switch (cq->subtype) { 13339 case LPFC_FCP: 13340 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); 13341 workposted = false; 13342 break; 13343 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13344 case LPFC_ELS: 13345 cq_event = lpfc_cq_event_setup( 13346 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13347 if (!cq_event) 13348 return false; 13349 cq_event->hdwq = cq->hdwq; 13350 spin_lock_irqsave(&phba->hbalock, iflags); 13351 list_add_tail(&cq_event->list, 13352 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13353 /* Set the els xri abort event flag */ 13354 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13355 spin_unlock_irqrestore(&phba->hbalock, iflags); 13356 workposted = true; 13357 break; 13358 case LPFC_NVME: 13359 /* Notify aborted XRI for NVME work queue */ 13360 if (phba->nvmet_support) 13361 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13362 else 13363 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); 13364 13365 workposted = false; 13366 break; 13367 default: 13368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13369 "0603 Invalid CQ subtype %d: " 13370 "%08x %08x %08x %08x\n", 13371 cq->subtype, wcqe->word0, wcqe->parameter, 13372 wcqe->word2, wcqe->word3); 13373 workposted = false; 13374 break; 13375 } 13376 return workposted; 13377 } 13378 13379 #define FC_RCTL_MDS_DIAGS 0xF4 13380 13381 /** 13382 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13383 * @phba: Pointer to HBA context object. 13384 * @rcqe: Pointer to receive-queue completion queue entry. 13385 * 13386 * This routine process a receive-queue completion queue entry. 13387 * 13388 * Return: true if work posted to worker thread, otherwise false. 
13389 **/ 13390 static bool 13391 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13392 { 13393 bool workposted = false; 13394 struct fc_frame_header *fc_hdr; 13395 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13396 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13397 struct lpfc_nvmet_tgtport *tgtp; 13398 struct hbq_dmabuf *dma_buf; 13399 uint32_t status, rq_id; 13400 unsigned long iflags; 13401 13402 /* sanity check on queue memory */ 13403 if (unlikely(!hrq) || unlikely(!drq)) 13404 return workposted; 13405 13406 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13407 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13408 else 13409 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13410 if (rq_id != hrq->queue_id) 13411 goto out; 13412 13413 status = bf_get(lpfc_rcqe_status, rcqe); 13414 switch (status) { 13415 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13416 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13417 "2537 Receive Frame Truncated!!\n"); 13418 /* fall through */ 13419 case FC_STATUS_RQ_SUCCESS: 13420 spin_lock_irqsave(&phba->hbalock, iflags); 13421 lpfc_sli4_rq_release(hrq, drq); 13422 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13423 if (!dma_buf) { 13424 hrq->RQ_no_buf_found++; 13425 spin_unlock_irqrestore(&phba->hbalock, iflags); 13426 goto out; 13427 } 13428 hrq->RQ_rcv_buf++; 13429 hrq->RQ_buf_posted--; 13430 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13431 13432 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13433 13434 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 13435 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 13436 spin_unlock_irqrestore(&phba->hbalock, iflags); 13437 /* Handle MDS Loopback frames */ 13438 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); 13439 break; 13440 } 13441 13442 /* save off the frame for the work thread to process */ 13443 list_add_tail(&dma_buf->cq_event.list, 13444 &phba->sli4_hba.sp_queue_event); 13445 /* Frame received */ 13446 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13447 spin_unlock_irqrestore(&phba->hbalock, iflags); 13448 workposted = true; 13449 break; 13450 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13451 if (phba->nvmet_support) { 13452 tgtp = phba->targetport->private; 13453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13454 "6402 RQE Error x%x, posted %d err_cnt " 13455 "%d: %x %x %x\n", 13456 status, hrq->RQ_buf_posted, 13457 hrq->RQ_no_posted_buf, 13458 atomic_read(&tgtp->rcv_fcp_cmd_in), 13459 atomic_read(&tgtp->rcv_fcp_cmd_out), 13460 atomic_read(&tgtp->xmt_fcp_release)); 13461 } 13462 /* fallthrough */ 13463 13464 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13465 hrq->RQ_no_posted_buf++; 13466 /* Post more buffers if possible */ 13467 spin_lock_irqsave(&phba->hbalock, iflags); 13468 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13469 spin_unlock_irqrestore(&phba->hbalock, iflags); 13470 workposted = true; 13471 break; 13472 } 13473 out: 13474 return workposted; 13475 } 13476 13477 /** 13478 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13479 * @phba: Pointer to HBA context object. 13480 * @cq: Pointer to the completion queue. 13481 * @cqe: Pointer to a completion queue entry. 13482 * 13483 * This routine process a slow-path work-queue or receive queue completion queue 13484 * entry. 13485 * 13486 * Return: true if work posted to worker thread, otherwise false. 
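 *
 * Illustrative sketch (editorial addition, not in the original source): this
 * routine is the per-CQE handler that the slow-path CQ worker hands to the
 * generic processing loop, as in
 *
 *	workposted = __lpfc_sli4_process_cq(phba, cq,
 *					    lpfc_sli4_sp_handle_cqe,
 *					    &delay);
 *
 * Each CQE is first copied into a local, endian-corrected copy before being
 * dispatched on its CQE code.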
13487 **/ 13488 static bool 13489 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13490 struct lpfc_cqe *cqe) 13491 { 13492 struct lpfc_cqe cqevt; 13493 bool workposted = false; 13494 13495 /* Copy the work queue CQE and convert endian order if needed */ 13496 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13497 13498 /* Check and process for different type of WCQE and dispatch */ 13499 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13500 case CQE_CODE_COMPL_WQE: 13501 /* Process the WQ/RQ complete event */ 13502 phba->last_completion_time = jiffies; 13503 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13504 (struct lpfc_wcqe_complete *)&cqevt); 13505 break; 13506 case CQE_CODE_RELEASE_WQE: 13507 /* Process the WQ release event */ 13508 lpfc_sli4_sp_handle_rel_wcqe(phba, 13509 (struct lpfc_wcqe_release *)&cqevt); 13510 break; 13511 case CQE_CODE_XRI_ABORTED: 13512 /* Process the WQ XRI abort event */ 13513 phba->last_completion_time = jiffies; 13514 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13515 (struct sli4_wcqe_xri_aborted *)&cqevt); 13516 break; 13517 case CQE_CODE_RECEIVE: 13518 case CQE_CODE_RECEIVE_V1: 13519 /* Process the RQ event */ 13520 phba->last_completion_time = jiffies; 13521 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13522 (struct lpfc_rcqe *)&cqevt); 13523 break; 13524 default: 13525 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13526 "0388 Not a valid WCQE code: x%x\n", 13527 bf_get(lpfc_cqe_code, &cqevt)); 13528 break; 13529 } 13530 return workposted; 13531 } 13532 13533 /** 13534 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13535 * @phba: Pointer to HBA context object. 13536 * @eqe: Pointer to fast-path event queue entry. 13537 * 13538 * This routine process a event queue entry from the slow-path event queue. 13539 * It will check the MajorCode and MinorCode to determine this is for a 13540 * completion event on a completion queue, if not, an error shall be logged 13541 * and just return. Otherwise, it will get to the corresponding completion 13542 * queue and process all the entries on that completion queue, rearm the 13543 * completion queue, and then return. 13544 * 13545 **/ 13546 static void 13547 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13548 struct lpfc_queue *speq) 13549 { 13550 struct lpfc_queue *cq = NULL, *childq; 13551 uint16_t cqid; 13552 13553 /* Get the reference to the corresponding CQ */ 13554 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13555 13556 list_for_each_entry(childq, &speq->child_list, list) { 13557 if (childq->queue_id == cqid) { 13558 cq = childq; 13559 break; 13560 } 13561 } 13562 if (unlikely(!cq)) { 13563 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13564 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13565 "0365 Slow-path CQ identifier " 13566 "(%d) does not exist\n", cqid); 13567 return; 13568 } 13569 13570 /* Save EQ associated with this CQ */ 13571 cq->assoc_qp = speq; 13572 13573 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) 13574 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13575 "0390 Cannot schedule soft IRQ " 13576 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13577 cqid, cq->queue_id, raw_smp_processor_id()); 13578 } 13579 13580 /** 13581 * __lpfc_sli4_process_cq - Process elements of a CQ 13582 * @phba: Pointer to HBA context object. 
13583 * @cq: Pointer to CQ to be processed 13584 * @handler: Routine to process each cqe 13585 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 13586 * 13587 * This routine processes completion queue entries in a CQ. While a valid 13588 * queue element is found, the handler is called. During processing checks 13589 * are made for periodic doorbell writes to let the hardware know of 13590 * element consumption. 13591 * 13592 * If the max limit on cqes to process is hit, or there are no more valid 13593 * entries, the loop stops. If we processed a sufficient number of elements, 13594 * meaning there is sufficient load, rather than rearming and generating 13595 * another interrupt, a cq rescheduling delay will be set. A delay of 0 13596 * indicates no rescheduling. 13597 * 13598 * Returns True if work scheduled, False otherwise. 13599 **/ 13600 static bool 13601 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 13602 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 13603 struct lpfc_cqe *), unsigned long *delay) 13604 { 13605 struct lpfc_cqe *cqe; 13606 bool workposted = false; 13607 int count = 0, consumed = 0; 13608 bool arm = true; 13609 13610 /* default - no reschedule */ 13611 *delay = 0; 13612 13613 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 13614 goto rearm_and_exit; 13615 13616 /* Process all the entries to the CQ */ 13617 cq->q_flag = 0; 13618 cqe = lpfc_sli4_cq_get(cq); 13619 while (cqe) { 13620 workposted |= handler(phba, cq, cqe); 13621 __lpfc_sli4_consume_cqe(phba, cq, cqe); 13622 13623 consumed++; 13624 if (!(++count % cq->max_proc_limit)) 13625 break; 13626 13627 if (!(count % cq->notify_interval)) { 13628 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13629 LPFC_QUEUE_NOARM); 13630 consumed = 0; 13631 } 13632 13633 if (count == LPFC_NVMET_CQ_NOTIFY) 13634 cq->q_flag |= HBA_NVMET_CQ_NOTIFY; 13635 13636 cqe = lpfc_sli4_cq_get(cq); 13637 } 13638 if (count >= phba->cfg_cq_poll_threshold) { 13639 *delay = 1; 13640 arm = false; 13641 } 13642 13643 /* Track the max number of CQEs processed in 1 EQ */ 13644 if (count > cq->CQ_max_cqe) 13645 cq->CQ_max_cqe = count; 13646 13647 cq->assoc_qp->EQ_cqe_cnt += count; 13648 13649 /* Catch the no cq entry condition */ 13650 if (unlikely(count == 0)) 13651 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13652 "0369 No entry from completion queue " 13653 "qid=%d\n", cq->queue_id); 13654 13655 cq->queue_claimed = 0; 13656 13657 rearm_and_exit: 13658 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13659 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 13660 13661 return workposted; 13662 } 13663 13664 /** 13665 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13666 * @cq: pointer to CQ to process 13667 * 13668 * This routine calls the cq processing routine with a handler specific 13669 * to the type of queue bound to it. 13670 * 13671 * The CQ routine returns two values: the first is the calling status, 13672 * which indicates whether work was queued to the background discovery 13673 * thread. If true, the routine should wakeup the discovery thread; 13674 * the second is the delay parameter. If non-zero, rather than rearming 13675 * the CQ and yet another interrupt, the CQ handler should be queued so 13676 * that it is processed in a subsequent polling action. The value of 13677 * the delay indicates when to reschedule it. 
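 *
 * Illustrative sketch (editorial addition, not in the original source): the
 * caller acts on the two results roughly as
 *
 *	if (delay)
 *		queue_delayed_work_on(cq->chann, phba->wq,
 *				      &cq->sched_spwork, delay);
 *	if (workposted)
 *		lpfc_worker_wake_up(phba);
 *
 * so a heavily loaded CQ is polled again after the delay instead of being
 * rearmed for yet another interrupt.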
13678 **/ 13679 static void 13680 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 13681 { 13682 struct lpfc_hba *phba = cq->phba; 13683 unsigned long delay; 13684 bool workposted = false; 13685 13686 /* Process and rearm the CQ */ 13687 switch (cq->type) { 13688 case LPFC_MCQ: 13689 workposted |= __lpfc_sli4_process_cq(phba, cq, 13690 lpfc_sli4_sp_handle_mcqe, 13691 &delay); 13692 break; 13693 case LPFC_WCQ: 13694 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) 13695 workposted |= __lpfc_sli4_process_cq(phba, cq, 13696 lpfc_sli4_fp_handle_cqe, 13697 &delay); 13698 else 13699 workposted |= __lpfc_sli4_process_cq(phba, cq, 13700 lpfc_sli4_sp_handle_cqe, 13701 &delay); 13702 break; 13703 default: 13704 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13705 "0370 Invalid completion queue type (%d)\n", 13706 cq->type); 13707 return; 13708 } 13709 13710 if (delay) { 13711 if (!queue_delayed_work_on(cq->chann, phba->wq, 13712 &cq->sched_spwork, delay)) 13713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13714 "0394 Cannot schedule soft IRQ " 13715 "for cqid=%d on CPU %d\n", 13716 cq->queue_id, cq->chann); 13717 } 13718 13719 /* wake up worker thread if there are works to be done */ 13720 if (workposted) 13721 lpfc_worker_wake_up(phba); 13722 } 13723 13724 /** 13725 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 13726 * interrupt 13727 * @work: pointer to work element 13728 * 13729 * translates from the work handler and calls the slow-path handler. 13730 **/ 13731 static void 13732 lpfc_sli4_sp_process_cq(struct work_struct *work) 13733 { 13734 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 13735 13736 __lpfc_sli4_sp_process_cq(cq); 13737 } 13738 13739 /** 13740 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 13741 * @work: pointer to work element 13742 * 13743 * translates from the work handler and calls the slow-path handler. 13744 **/ 13745 static void 13746 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 13747 { 13748 struct lpfc_queue *cq = container_of(to_delayed_work(work), 13749 struct lpfc_queue, sched_spwork); 13750 13751 __lpfc_sli4_sp_process_cq(cq); 13752 } 13753 13754 /** 13755 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13756 * @phba: Pointer to HBA context object. 13757 * @cq: Pointer to associated CQ 13758 * @wcqe: Pointer to work-queue completion queue entry. 13759 * 13760 * This routine process a fast-path work queue completion entry from fast-path 13761 * event queue for FCP command response completion. 13762 **/ 13763 static void 13764 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13765 struct lpfc_wcqe_complete *wcqe) 13766 { 13767 struct lpfc_sli_ring *pring = cq->pring; 13768 struct lpfc_iocbq *cmdiocbq; 13769 struct lpfc_iocbq irspiocbq; 13770 unsigned long iflags; 13771 13772 /* Check for response status */ 13773 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13774 /* If resource errors reported from HBA, reduce queue 13775 * depth of the SCSI device. 
13776 */ 13777 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13778 IOSTAT_LOCAL_REJECT)) && 13779 ((wcqe->parameter & IOERR_PARAM_MASK) == 13780 IOERR_NO_RESOURCES)) 13781 phba->lpfc_rampdown_queue_depth(phba); 13782 13783 /* Log the error status */ 13784 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13785 "0373 FCP CQE error: status=x%x: " 13786 "CQE: %08x %08x %08x %08x\n", 13787 bf_get(lpfc_wcqe_c_status, wcqe), 13788 wcqe->word0, wcqe->total_data_placed, 13789 wcqe->parameter, wcqe->word3); 13790 } 13791 13792 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13793 spin_lock_irqsave(&pring->ring_lock, iflags); 13794 pring->stats.iocb_event++; 13795 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13796 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13797 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13798 if (unlikely(!cmdiocbq)) { 13799 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13800 "0374 FCP complete with no corresponding " 13801 "cmdiocb: iotag (%d)\n", 13802 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13803 return; 13804 } 13805 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13806 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13807 #endif 13808 if (cmdiocbq->iocb_cmpl == NULL) { 13809 if (cmdiocbq->wqe_cmpl) { 13810 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13811 spin_lock_irqsave(&phba->hbalock, iflags); 13812 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13813 spin_unlock_irqrestore(&phba->hbalock, iflags); 13814 } 13815 13816 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13817 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13818 return; 13819 } 13820 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13821 "0375 FCP cmdiocb not callback function " 13822 "iotag: (%d)\n", 13823 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13824 return; 13825 } 13826 13827 /* Fake the irspiocb and copy necessary response information */ 13828 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13829 13830 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13831 spin_lock_irqsave(&phba->hbalock, iflags); 13832 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13833 spin_unlock_irqrestore(&phba->hbalock, iflags); 13834 } 13835 13836 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13837 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13838 } 13839 13840 /** 13841 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13842 * @phba: Pointer to HBA context object. 13843 * @cq: Pointer to completion queue. 13844 * @wcqe: Pointer to work-queue completion queue entry. 13845 * 13846 * This routine handles an fast-path WQ entry consumed event by invoking the 13847 * proper WQ release routine to the slow-path WQ. 
13848 **/ 13849 static void 13850 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13851 struct lpfc_wcqe_release *wcqe) 13852 { 13853 struct lpfc_queue *childwq; 13854 bool wqid_matched = false; 13855 uint16_t hba_wqid; 13856 13857 /* Check for fast-path FCP work queue release */ 13858 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13859 list_for_each_entry(childwq, &cq->child_list, list) { 13860 if (childwq->queue_id == hba_wqid) { 13861 lpfc_sli4_wq_release(childwq, 13862 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13863 if (childwq->q_flag & HBA_NVMET_WQFULL) 13864 lpfc_nvmet_wqfull_process(phba, childwq); 13865 wqid_matched = true; 13866 break; 13867 } 13868 } 13869 /* Report warning log message if no match found */ 13870 if (wqid_matched != true) 13871 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13872 "2580 Fast-path wqe consume event carries " 13873 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13874 } 13875 13876 /** 13877 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13878 * @phba: Pointer to HBA context object. 13879 * @rcqe: Pointer to receive-queue completion queue entry. 13880 * 13881 * This routine process a receive-queue completion queue entry. 13882 * 13883 * Return: true if work posted to worker thread, otherwise false. 13884 **/ 13885 static bool 13886 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13887 struct lpfc_rcqe *rcqe) 13888 { 13889 bool workposted = false; 13890 struct lpfc_queue *hrq; 13891 struct lpfc_queue *drq; 13892 struct rqb_dmabuf *dma_buf; 13893 struct fc_frame_header *fc_hdr; 13894 struct lpfc_nvmet_tgtport *tgtp; 13895 uint32_t status, rq_id; 13896 unsigned long iflags; 13897 uint32_t fctl, idx; 13898 13899 if ((phba->nvmet_support == 0) || 13900 (phba->sli4_hba.nvmet_cqset == NULL)) 13901 return workposted; 13902 13903 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13904 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13905 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13906 13907 /* sanity check on queue memory */ 13908 if (unlikely(!hrq) || unlikely(!drq)) 13909 return workposted; 13910 13911 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13912 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13913 else 13914 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13915 13916 if ((phba->nvmet_support == 0) || 13917 (rq_id != hrq->queue_id)) 13918 return workposted; 13919 13920 status = bf_get(lpfc_rcqe_status, rcqe); 13921 switch (status) { 13922 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13924 "6126 Receive Frame Truncated!!\n"); 13925 /* fall through */ 13926 case FC_STATUS_RQ_SUCCESS: 13927 spin_lock_irqsave(&phba->hbalock, iflags); 13928 lpfc_sli4_rq_release(hrq, drq); 13929 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13930 if (!dma_buf) { 13931 hrq->RQ_no_buf_found++; 13932 spin_unlock_irqrestore(&phba->hbalock, iflags); 13933 goto out; 13934 } 13935 spin_unlock_irqrestore(&phba->hbalock, iflags); 13936 hrq->RQ_rcv_buf++; 13937 hrq->RQ_buf_posted--; 13938 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13939 13940 /* Just some basic sanity checks on FCP Command frame */ 13941 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13942 fc_hdr->fh_f_ctl[1] << 8 | 13943 fc_hdr->fh_f_ctl[2]); 13944 if (((fctl & 13945 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13946 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13947 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13948 goto drop; 13949 13950 if (fc_hdr->fh_type == 
FC_TYPE_FCP) {
13951 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
13952 lpfc_nvmet_unsol_fcp_event(
13953 phba, idx, dma_buf, cq->isr_timestamp,
13954 cq->q_flag & HBA_NVMET_CQ_NOTIFY);
13955 return false;
13956 }
13957 drop:
13958 lpfc_rq_buf_free(phba, &dma_buf->hbuf);
13959 break;
13960 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13961 if (phba->nvmet_support) {
13962 tgtp = phba->targetport->private;
13963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13964 "6401 RQE Error x%x, posted %d err_cnt "
13965 "%d: %x %x %x\n",
13966 status, hrq->RQ_buf_posted,
13967 hrq->RQ_no_posted_buf,
13968 atomic_read(&tgtp->rcv_fcp_cmd_in),
13969 atomic_read(&tgtp->rcv_fcp_cmd_out),
13970 atomic_read(&tgtp->xmt_fcp_release));
13971 }
13972 /* fallthrough */
13973 
13974 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13975 hrq->RQ_no_posted_buf++;
13976 /* Post more buffers if possible */
13977 break;
13978 }
13979 out:
13980 return workposted;
13981 }
13982 
13983 /**
13984 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13985 * @phba: adapter with cq
13986 * @cq: Pointer to the completion queue.
13987 * @cqe: Pointer to fast-path completion queue entry.
13988 *
13989 * This routine processes a fast-path work queue completion entry from a
13990 * fast-path event queue for FCP command response completion.
13991 *
13992 * Return: true if work posted to worker thread, otherwise false.
13993 **/
13994 static bool
13995 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13996 struct lpfc_cqe *cqe)
13997 {
13998 struct lpfc_wcqe_release wcqe;
13999 bool workposted = false;
14000 
14001 /* Copy the work queue CQE and convert endian order if needed */
14002 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
14003 
14004 /* Check and process for different type of WCQE and dispatch */
14005 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
14006 case CQE_CODE_COMPL_WQE:
14007 case CQE_CODE_NVME_ERSP:
14008 cq->CQ_wq++;
14009 /* Process the WQ complete event */
14010 phba->last_completion_time = jiffies;
14011 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
14012 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14013 (struct lpfc_wcqe_complete *)&wcqe);
14014 if (cq->subtype == LPFC_NVME_LS)
14015 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
14016 (struct lpfc_wcqe_complete *)&wcqe);
14017 break;
14018 case CQE_CODE_RELEASE_WQE:
14019 cq->CQ_release_wqe++;
14020 /* Process the WQ release event */
14021 lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
14022 (struct lpfc_wcqe_release *)&wcqe);
14023 break;
14024 case CQE_CODE_XRI_ABORTED:
14025 cq->CQ_xri_aborted++;
14026 /* Process the WQ XRI abort event */
14027 phba->last_completion_time = jiffies;
14028 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14029 (struct sli4_wcqe_xri_aborted *)&wcqe);
14030 break;
14031 case CQE_CODE_RECEIVE_V1:
14032 case CQE_CODE_RECEIVE:
14033 phba->last_completion_time = jiffies;
14034 if (cq->subtype == LPFC_NVMET) {
14035 workposted = lpfc_sli4_nvmet_handle_rcqe(
14036 phba, cq, (struct lpfc_rcqe *)&wcqe);
14037 }
14038 break;
14039 default:
14040 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14041 "0144 Not a valid CQE code: x%x\n",
14042 bf_get(lpfc_wcqe_c_code, &wcqe));
14043 break;
14044 }
14045 return workposted;
14046 }
14047 
14048 /**
14049 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
14050 * @phba: Pointer to HBA context object.
* @eq: Pointer to the event queue the entry was received on.
14051 * @eqe: Pointer to fast-path event queue entry.
14052 *
14053 * This routine processes an event queue entry from the fast-path event queue.
14054 * It will check the MajorCode and MinorCode to determine this is for a 14055 * completion event on a completion queue, if not, an error shall be logged 14056 * and just return. Otherwise, it will get to the corresponding completion 14057 * queue and process all the entries on the completion queue, rearm the 14058 * completion queue, and then return. 14059 **/ 14060 static void 14061 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 14062 struct lpfc_eqe *eqe) 14063 { 14064 struct lpfc_queue *cq = NULL; 14065 uint32_t qidx = eq->hdwq; 14066 uint16_t cqid, id; 14067 14068 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14070 "0366 Not a valid completion " 14071 "event: majorcode=x%x, minorcode=x%x\n", 14072 bf_get_le32(lpfc_eqe_major_code, eqe), 14073 bf_get_le32(lpfc_eqe_minor_code, eqe)); 14074 return; 14075 } 14076 14077 /* Get the reference to the corresponding CQ */ 14078 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14079 14080 /* Use the fast lookup method first */ 14081 if (cqid <= phba->sli4_hba.cq_max) { 14082 cq = phba->sli4_hba.cq_lookup[cqid]; 14083 if (cq) 14084 goto work_cq; 14085 } 14086 14087 /* Next check for NVMET completion */ 14088 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 14089 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 14090 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 14091 /* Process NVMET unsol rcv */ 14092 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 14093 goto process_cq; 14094 } 14095 } 14096 14097 if (phba->sli4_hba.nvmels_cq && 14098 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 14099 /* Process NVME unsol rcv */ 14100 cq = phba->sli4_hba.nvmels_cq; 14101 } 14102 14103 /* Otherwise this is a Slow path event */ 14104 if (cq == NULL) { 14105 lpfc_sli4_sp_handle_eqe(phba, eqe, 14106 phba->sli4_hba.hdwq[qidx].hba_eq); 14107 return; 14108 } 14109 14110 process_cq: 14111 if (unlikely(cqid != cq->queue_id)) { 14112 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14113 "0368 Miss-matched fast-path completion " 14114 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14115 cqid, cq->queue_id); 14116 return; 14117 } 14118 14119 work_cq: 14120 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) 14121 if (phba->ktime_on) 14122 cq->isr_timestamp = ktime_get_ns(); 14123 else 14124 cq->isr_timestamp = 0; 14125 #endif 14126 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) 14127 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14128 "0363 Cannot schedule soft IRQ " 14129 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14130 cqid, cq->queue_id, raw_smp_processor_id()); 14131 } 14132 14133 /** 14134 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 14135 * @cq: Pointer to CQ to be processed 14136 * 14137 * This routine calls the cq processing routine with the handler for 14138 * fast path CQEs. 14139 * 14140 * The CQ routine returns two values: the first is the calling status, 14141 * which indicates whether work was queued to the background discovery 14142 * thread. If true, the routine should wakeup the discovery thread; 14143 * the second is the delay parameter. If non-zero, rather than rearming 14144 * the CQ and yet another interrupt, the CQ handler should be queued so 14145 * that it is processed in a subsequent polling action. The value of 14146 * the delay indicates when to reschedule it. 
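 *
 * Note (for context, not new behaviour): this routine is reached through
 * the per-CQ work items that lpfc_sli4_hba_handle_eqe() queues, via the
 * lpfc_sli4_hba_process_cq()/lpfc_sli4_dly_hba_process_cq() wrappers
 * below. Those work items are initialized in lpfc_sli4_queue_alloc():
 *
 *	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
 *	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);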
14147 **/
14148 static void
14149 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14150 {
14151 struct lpfc_hba *phba = cq->phba;
14152 unsigned long delay;
14153 bool workposted = false;
14154 
14155 /* process and rearm the CQ */
14156 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14157 &delay);
14158 
14159 if (delay) {
14160 if (!queue_delayed_work_on(cq->chann, phba->wq,
14161 &cq->sched_irqwork, delay))
14162 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14163 "0367 Cannot schedule soft IRQ "
14164 "for cqid=%d on CPU %d\n",
14165 cq->queue_id, cq->chann);
14166 }
14167 
14168 /* wake up worker thread if there are works to be done */
14169 if (workposted)
14170 lpfc_worker_wake_up(phba);
14171 }
14172 
14173 /**
14174 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14175 * interrupt
14176 * @work: pointer to work element
14177 *
14178 * Translates the work element back to its lpfc_queue and calls the fast-path handler.
14179 **/
14180 static void
14181 lpfc_sli4_hba_process_cq(struct work_struct *work)
14182 {
14183 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14184 
14185 __lpfc_sli4_hba_process_cq(cq);
14186 }
14187 
14188 /**
14189 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14190 * @work: pointer to work element
14191 *
14192 * Translates the delayed work element back to its lpfc_queue and calls the fast-path handler.
14193 **/
14194 static void
14195 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14196 {
14197 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14198 struct lpfc_queue, sched_irqwork);
14199 
14200 __lpfc_sli4_hba_process_cq(cq);
14201 }
14202 
14203 /**
14204 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14205 * @irq: Interrupt number.
14206 * @dev_id: The device context pointer.
14207 *
14208 * This function is directly called from the PCI layer as an interrupt
14209 * service routine when a device with the SLI-4 interface spec is enabled with
14210 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14211 * ring event in the HBA. However, when the device is enabled with either
14212 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14213 * device-level interrupt handler. When the PCI slot is in error recovery
14214 * or the HBA is undergoing initialization, the interrupt handler will not
14215 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14216 * the interrupt context. This function is called without any lock held.
14217 * It gets the hbalock to access and update SLI data structures. Note that
14218 * the FCP EQs and FCP CQs are mapped one-to-one, so the FCP EQ index is
14219 * equal to the FCP CQ index.
14220 *
14221 * The link attention and ELS ring attention events are handled
14222 * by the worker thread. The interrupt handler signals the worker thread
14223 * and returns for these events. This function is called without any lock
14224 * held. It gets the hbalock to access and update SLI data structures.
14225 *
14226 * This function returns IRQ_HANDLED when the interrupt is handled, else it
14227 * returns IRQ_NONE.
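 *
 * For reference, an illustrative sketch (not the driver's exact setup
 * code) of how this handler is typically registered, one vector per EQ,
 * with the per-vector lpfc_hba_eq_hdl as the dev_id cookie; "idx" and
 * "name" are placeholders for the vector index and interrupt name:
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, idx),
 *			 lpfc_sli4_hba_intr_handler, 0, name,
 *			 &phba->sli4_hba.hba_eq_hdl[idx]);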
14228 **/ 14229 irqreturn_t 14230 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14231 { 14232 struct lpfc_hba *phba; 14233 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14234 struct lpfc_queue *fpeq; 14235 unsigned long iflag; 14236 int ecount = 0; 14237 int hba_eqidx; 14238 struct lpfc_eq_intr_info *eqi; 14239 uint32_t icnt; 14240 14241 /* Get the driver's phba structure from the dev_id */ 14242 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14243 phba = hba_eq_hdl->phba; 14244 hba_eqidx = hba_eq_hdl->idx; 14245 14246 if (unlikely(!phba)) 14247 return IRQ_NONE; 14248 if (unlikely(!phba->sli4_hba.hdwq)) 14249 return IRQ_NONE; 14250 14251 /* Get to the EQ struct associated with this vector */ 14252 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; 14253 if (unlikely(!fpeq)) 14254 return IRQ_NONE; 14255 14256 /* Check device state for handling interrupt */ 14257 if (unlikely(lpfc_intr_state_check(phba))) { 14258 /* Check again for link_state with lock held */ 14259 spin_lock_irqsave(&phba->hbalock, iflag); 14260 if (phba->link_state < LPFC_LINK_DOWN) 14261 /* Flush, clear interrupt, and rearm the EQ */ 14262 lpfc_sli4_eq_flush(phba, fpeq); 14263 spin_unlock_irqrestore(&phba->hbalock, iflag); 14264 return IRQ_NONE; 14265 } 14266 14267 eqi = phba->sli4_hba.eq_info; 14268 icnt = this_cpu_inc_return(eqi->icnt); 14269 fpeq->last_cpu = raw_smp_processor_id(); 14270 14271 if (icnt > LPFC_EQD_ISR_TRIGGER && 14272 phba->cfg_irq_chann == 1 && 14273 phba->cfg_auto_imax && 14274 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && 14275 phba->sli.sli_flag & LPFC_SLI_USE_EQDR) 14276 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); 14277 14278 /* process and rearm the EQ */ 14279 ecount = lpfc_sli4_process_eq(phba, fpeq); 14280 14281 if (unlikely(ecount == 0)) { 14282 fpeq->EQ_no_entry++; 14283 if (phba->intr_type == MSIX) 14284 /* MSI-X treated interrupt served as no EQ share INT */ 14285 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14286 "0358 MSI-X interrupt with no EQE\n"); 14287 else 14288 /* Non MSI-X treated on interrupt as EQ share INT */ 14289 return IRQ_NONE; 14290 } 14291 14292 return IRQ_HANDLED; 14293 } /* lpfc_sli4_fp_intr_handler */ 14294 14295 /** 14296 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14297 * @irq: Interrupt number. 14298 * @dev_id: The device context pointer. 14299 * 14300 * This function is the device-level interrupt handler to device with SLI-4 14301 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14302 * interrupt mode is enabled and there is an event in the HBA which requires 14303 * driver attention. This function invokes the slow-path interrupt attention 14304 * handling function and fast-path interrupt attention handling function in 14305 * turn to process the relevant HBA attention events. This function is called 14306 * without any lock held. It gets the hbalock to access and update SLI data 14307 * structures. 14308 * 14309 * This function returns IRQ_HANDLED when interrupt is handled, else it 14310 * returns IRQ_NONE. 14311 **/ 14312 irqreturn_t 14313 lpfc_sli4_intr_handler(int irq, void *dev_id) 14314 { 14315 struct lpfc_hba *phba; 14316 irqreturn_t hba_irq_rc; 14317 bool hba_handled = false; 14318 int qidx; 14319 14320 /* Get the driver's phba structure from the dev_id */ 14321 phba = (struct lpfc_hba *)dev_id; 14322 14323 if (unlikely(!phba)) 14324 return IRQ_NONE; 14325 14326 /* 14327 * Invoke fast-path host attention interrupt handling as appropriate. 
14328 */ 14329 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 14330 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 14331 &phba->sli4_hba.hba_eq_hdl[qidx]); 14332 if (hba_irq_rc == IRQ_HANDLED) 14333 hba_handled |= true; 14334 } 14335 14336 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 14337 } /* lpfc_sli4_intr_handler */ 14338 14339 /** 14340 * lpfc_sli4_queue_free - free a queue structure and associated memory 14341 * @queue: The queue structure to free. 14342 * 14343 * This function frees a queue structure and the DMAable memory used for 14344 * the host resident queue. This function must be called after destroying the 14345 * queue on the HBA. 14346 **/ 14347 void 14348 lpfc_sli4_queue_free(struct lpfc_queue *queue) 14349 { 14350 struct lpfc_dmabuf *dmabuf; 14351 14352 if (!queue) 14353 return; 14354 14355 if (!list_empty(&queue->wq_list)) 14356 list_del(&queue->wq_list); 14357 14358 while (!list_empty(&queue->page_list)) { 14359 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 14360 list); 14361 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 14362 dmabuf->virt, dmabuf->phys); 14363 kfree(dmabuf); 14364 } 14365 if (queue->rqbp) { 14366 lpfc_free_rq_buffer(queue->phba, queue); 14367 kfree(queue->rqbp); 14368 } 14369 14370 if (!list_empty(&queue->cpu_list)) 14371 list_del(&queue->cpu_list); 14372 14373 kfree(queue); 14374 return; 14375 } 14376 14377 /** 14378 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 14379 * @phba: The HBA that this queue is being created on. 14380 * @page_size: The size of a queue page 14381 * @entry_size: The size of each queue entry for this queue. 14382 * @entry count: The number of entries that this queue will handle. 14383 * @cpu: The cpu that will primarily utilize this queue. 14384 * 14385 * This function allocates a queue structure and the DMAable memory used for 14386 * the host resident queue. This function must be called before creating the 14387 * queue on the HBA. 14388 **/ 14389 struct lpfc_queue * 14390 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 14391 uint32_t entry_size, uint32_t entry_count, int cpu) 14392 { 14393 struct lpfc_queue *queue; 14394 struct lpfc_dmabuf *dmabuf; 14395 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14396 uint16_t x, pgcnt; 14397 14398 if (!phba->sli4_hba.pc_sli4_params.supported) 14399 hw_page_size = page_size; 14400 14401 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size; 14402 14403 /* If needed, Adjust page count to match the max the adapter supports */ 14404 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt) 14405 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt; 14406 14407 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt), 14408 GFP_KERNEL, cpu_to_node(cpu)); 14409 if (!queue) 14410 return NULL; 14411 14412 INIT_LIST_HEAD(&queue->list); 14413 INIT_LIST_HEAD(&queue->wq_list); 14414 INIT_LIST_HEAD(&queue->wqfull_list); 14415 INIT_LIST_HEAD(&queue->page_list); 14416 INIT_LIST_HEAD(&queue->child_list); 14417 INIT_LIST_HEAD(&queue->cpu_list); 14418 14419 /* Set queue parameters now. If the system cannot provide memory 14420 * resources, the free routine needs to know what was allocated. 
14421 */
14422 queue->page_count = pgcnt;
14423 queue->q_pgs = (void **)&queue[1];
14424 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14425 queue->entry_size = entry_size;
14426 queue->entry_count = entry_count;
14427 queue->page_size = hw_page_size;
14428 queue->phba = phba;
14429 
14430 for (x = 0; x < queue->page_count; x++) {
14431 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14432 dev_to_node(&phba->pcidev->dev));
14433 if (!dmabuf)
14434 goto out_fail;
14435 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14436 hw_page_size, &dmabuf->phys,
14437 GFP_KERNEL);
14438 if (!dmabuf->virt) {
14439 kfree(dmabuf);
14440 goto out_fail;
14441 }
14442 dmabuf->buffer_tag = x;
14443 list_add_tail(&dmabuf->list, &queue->page_list);
14444 /* use lpfc_sli4_qe to index a particular entry in this page */
14445 queue->q_pgs[x] = dmabuf->virt;
14446 }
14447 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14448 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14449 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14450 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14451 
14452 /* notify_interval will be set during q creation */
14453 
14454 return queue;
14455 out_fail:
14456 lpfc_sli4_queue_free(queue);
14457 return NULL;
14458 }
14459 
14460 /**
14461 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14462 * @phba: HBA structure that indicates port to create a queue on.
14463 * @pci_barset: PCI BAR set flag.
14464 *
14465 * This function shall perform iomap of the specified PCI BAR address to host
14466 * memory address if not already done so and return it. The returned host
14467 * memory address can be NULL.
14468 */
14469 static void __iomem *
14470 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14471 {
14472 if (!phba->pcidev)
14473 return NULL;
14474 
14475 switch (pci_barset) {
14476 case WQ_PCI_BAR_0_AND_1:
14477 return phba->pci_bar0_memmap_p;
14478 case WQ_PCI_BAR_2_AND_3:
14479 return phba->pci_bar2_memmap_p;
14480 case WQ_PCI_BAR_4_AND_5:
14481 return phba->pci_bar4_memmap_p;
14482 default:
14483 break;
14484 }
14485 return NULL;
14486 }
14487 
14488 /**
14489 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14490 * @phba: HBA structure that EQs are on.
14491 * @startq: The starting EQ index to modify
14492 * @numq: The number of EQs (consecutive indexes) to modify
14493 * @usdelay: amount of delay, in microseconds
14494 *
14495 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14496 * is set either by writing to a register (if supported by the SLI Port)
14497 * or by mailbox command. The mailbox command allows several EQs to be
14498 * updated at once.
14499 *
14500 * The @phba struct is used to send a mailbox command to HBA. The @startq
14501 * is used to get the starting EQ index to change. The @numq value is
14502 * used to specify how many consecutive EQ indexes, starting at EQ index,
14503 * are to be changed. When the mailbox path is used, the command is issued
14504 * in polled mode, so this function waits for it to complete before returning.
14505 *
14506 * This function does not return a value. If the mailbox allocation or the
14507 * MODIFY_EQ_DELAY command fails, the failure is logged; note that in that
14508 * case some EQs may already have had their delay
14509 * multiplier changed.
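 *
 * Example call (hypothetical values, shown only to illustrate the
 * parameters): request roughly 200us of interrupt coalescing on every EQ:
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 200);
 *
 * Depending on LPFC_SLI_USE_EQDR, this is applied either through the
 * EQ_DELAY register or through a single MODIFY_EQ_DELAY mailbox command
 * carrying one entry per EQ.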
14510 **/ 14511 void 14512 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14513 uint32_t numq, uint32_t usdelay) 14514 { 14515 struct lpfc_mbx_modify_eq_delay *eq_delay; 14516 LPFC_MBOXQ_t *mbox; 14517 struct lpfc_queue *eq; 14518 int cnt = 0, rc, length; 14519 uint32_t shdr_status, shdr_add_status; 14520 uint32_t dmult; 14521 int qidx; 14522 union lpfc_sli4_cfg_shdr *shdr; 14523 14524 if (startq >= phba->cfg_irq_chann) 14525 return; 14526 14527 if (usdelay > 0xFFFF) { 14528 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, 14529 "6429 usdelay %d too large. Scaled down to " 14530 "0xFFFF.\n", usdelay); 14531 usdelay = 0xFFFF; 14532 } 14533 14534 /* set values by EQ_DELAY register if supported */ 14535 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14536 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14537 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 14538 if (!eq) 14539 continue; 14540 14541 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); 14542 14543 if (++cnt >= numq) 14544 break; 14545 } 14546 return; 14547 } 14548 14549 /* Otherwise, set values by mailbox cmd */ 14550 14551 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14552 if (!mbox) { 14553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, 14554 "6428 Failed allocating mailbox cmd buffer." 14555 " EQ delay was not set.\n"); 14556 return; 14557 } 14558 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14559 sizeof(struct lpfc_sli4_cfg_mhdr)); 14560 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14561 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14562 length, LPFC_SLI4_MBX_EMBED); 14563 eq_delay = &mbox->u.mqe.un.eq_delay; 14564 14565 /* Calculate delay multiper from maximum interrupt per second */ 14566 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; 14567 if (dmult) 14568 dmult--; 14569 if (dmult > LPFC_DMULT_MAX) 14570 dmult = LPFC_DMULT_MAX; 14571 14572 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14573 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 14574 if (!eq) 14575 continue; 14576 eq->q_mode = usdelay; 14577 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14578 eq_delay->u.request.eq[cnt].phase = 0; 14579 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14580 14581 if (++cnt >= numq) 14582 break; 14583 } 14584 eq_delay->u.request.num_eq = cnt; 14585 14586 mbox->vport = phba->pport; 14587 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14588 mbox->ctx_buf = NULL; 14589 mbox->ctx_ndlp = NULL; 14590 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14591 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14592 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14593 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14594 if (shdr_status || shdr_add_status || rc) { 14595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14596 "2512 MODIFY_EQ_DELAY mailbox failed with " 14597 "status x%x add_status x%x, mbx status x%x\n", 14598 shdr_status, shdr_add_status, rc); 14599 } 14600 mempool_free(mbox, phba->mbox_mem_pool); 14601 return; 14602 } 14603 14604 /** 14605 * lpfc_eq_create - Create an Event Queue on the HBA 14606 * @phba: HBA structure that indicates port to create a queue on. 14607 * @eq: The queue structure to use to create the event queue. 14608 * @imax: The maximum interrupt per second limit. 14609 * 14610 * This function creates an event queue, as detailed in @eq, on a port, 14611 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 
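 *
 * Typical pairing with lpfc_sli4_queue_alloc() (illustrative sketch only;
 * eq_entry_size, eq_entry_count, cpu and imax stand in for values the
 * driver derives from the SLI4 parameters and its configuration):
 *
 *	eq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, eq_entry_size,
 *				   eq_entry_count, cpu);
 *	if (eq)
 *		rc = lpfc_eq_create(phba, eq, imax);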
14612 * 14613 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14614 * is used to get the entry count and entry size that are necessary to 14615 * determine the number of pages to allocate and use for this queue. This 14616 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14617 * event queue. This function is asynchronous and will wait for the mailbox 14618 * command to finish before continuing. 14619 * 14620 * On success this function will return a zero. If unable to allocate enough 14621 * memory this function will return -ENOMEM. If the queue create mailbox command 14622 * fails this function will return -ENXIO. 14623 **/ 14624 int 14625 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14626 { 14627 struct lpfc_mbx_eq_create *eq_create; 14628 LPFC_MBOXQ_t *mbox; 14629 int rc, length, status = 0; 14630 struct lpfc_dmabuf *dmabuf; 14631 uint32_t shdr_status, shdr_add_status; 14632 union lpfc_sli4_cfg_shdr *shdr; 14633 uint16_t dmult; 14634 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14635 14636 /* sanity check on queue memory */ 14637 if (!eq) 14638 return -ENODEV; 14639 if (!phba->sli4_hba.pc_sli4_params.supported) 14640 hw_page_size = SLI4_PAGE_SIZE; 14641 14642 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14643 if (!mbox) 14644 return -ENOMEM; 14645 length = (sizeof(struct lpfc_mbx_eq_create) - 14646 sizeof(struct lpfc_sli4_cfg_mhdr)); 14647 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14648 LPFC_MBOX_OPCODE_EQ_CREATE, 14649 length, LPFC_SLI4_MBX_EMBED); 14650 eq_create = &mbox->u.mqe.un.eq_create; 14651 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14652 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14653 eq->page_count); 14654 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14655 LPFC_EQE_SIZE); 14656 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14657 14658 /* Use version 2 of CREATE_EQ if eqav is set */ 14659 if (phba->sli4_hba.pc_sli4_params.eqav) { 14660 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14661 LPFC_Q_CREATE_VERSION_2); 14662 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14663 phba->sli4_hba.pc_sli4_params.eqav); 14664 } 14665 14666 /* don't setup delay multiplier using EQ_CREATE */ 14667 dmult = 0; 14668 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14669 dmult); 14670 switch (eq->entry_count) { 14671 default: 14672 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14673 "0360 Unsupported EQ count. 
(%d)\n", 14674 eq->entry_count); 14675 if (eq->entry_count < 256) { 14676 status = -EINVAL; 14677 goto out; 14678 } 14679 /* fall through - otherwise default to smallest count */ 14680 case 256: 14681 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14682 LPFC_EQ_CNT_256); 14683 break; 14684 case 512: 14685 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14686 LPFC_EQ_CNT_512); 14687 break; 14688 case 1024: 14689 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14690 LPFC_EQ_CNT_1024); 14691 break; 14692 case 2048: 14693 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14694 LPFC_EQ_CNT_2048); 14695 break; 14696 case 4096: 14697 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14698 LPFC_EQ_CNT_4096); 14699 break; 14700 } 14701 list_for_each_entry(dmabuf, &eq->page_list, list) { 14702 memset(dmabuf->virt, 0, hw_page_size); 14703 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14704 putPaddrLow(dmabuf->phys); 14705 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14706 putPaddrHigh(dmabuf->phys); 14707 } 14708 mbox->vport = phba->pport; 14709 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14710 mbox->ctx_buf = NULL; 14711 mbox->ctx_ndlp = NULL; 14712 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14713 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14714 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14715 if (shdr_status || shdr_add_status || rc) { 14716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14717 "2500 EQ_CREATE mailbox failed with " 14718 "status x%x add_status x%x, mbx status x%x\n", 14719 shdr_status, shdr_add_status, rc); 14720 status = -ENXIO; 14721 } 14722 eq->type = LPFC_EQ; 14723 eq->subtype = LPFC_NONE; 14724 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14725 if (eq->queue_id == 0xFFFF) 14726 status = -ENXIO; 14727 eq->host_index = 0; 14728 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 14729 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 14730 out: 14731 mempool_free(mbox, phba->mbox_mem_pool); 14732 return status; 14733 } 14734 14735 /** 14736 * lpfc_cq_create - Create a Completion Queue on the HBA 14737 * @phba: HBA structure that indicates port to create a queue on. 14738 * @cq: The queue structure to use to create the completion queue. 14739 * @eq: The event queue to bind this completion queue to. 14740 * 14741 * This function creates a completion queue, as detailed in @wq, on a port, 14742 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14743 * 14744 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14745 * is used to get the entry count and entry size that are necessary to 14746 * determine the number of pages to allocate and use for this queue. The @eq 14747 * is used to indicate which event queue to bind this completion queue to. This 14748 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14749 * completion queue. This function is asynchronous and will wait for the mailbox 14750 * command to finish before continuing. 14751 * 14752 * On success this function will return a zero. If unable to allocate enough 14753 * memory this function will return -ENOMEM. If the queue create mailbox command 14754 * fails this function will return -ENXIO. 
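 *
 * Illustrative use (sketch only; cq_entry_size, cq_entry_count and cpu are
 * placeholders, and LPFC_WCQ/LPFC_FCP are one possible type/subtype pair
 * used elsewhere in this file):
 *
 *	cq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, cq_entry_size,
 *				   cq_entry_count, cpu);
 *	if (cq)
 *		rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);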
14755 **/ 14756 int 14757 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14758 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14759 { 14760 struct lpfc_mbx_cq_create *cq_create; 14761 struct lpfc_dmabuf *dmabuf; 14762 LPFC_MBOXQ_t *mbox; 14763 int rc, length, status = 0; 14764 uint32_t shdr_status, shdr_add_status; 14765 union lpfc_sli4_cfg_shdr *shdr; 14766 14767 /* sanity check on queue memory */ 14768 if (!cq || !eq) 14769 return -ENODEV; 14770 14771 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14772 if (!mbox) 14773 return -ENOMEM; 14774 length = (sizeof(struct lpfc_mbx_cq_create) - 14775 sizeof(struct lpfc_sli4_cfg_mhdr)); 14776 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14777 LPFC_MBOX_OPCODE_CQ_CREATE, 14778 length, LPFC_SLI4_MBX_EMBED); 14779 cq_create = &mbox->u.mqe.un.cq_create; 14780 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14781 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14782 cq->page_count); 14783 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14784 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14785 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14786 phba->sli4_hba.pc_sli4_params.cqv); 14787 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14788 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14789 (cq->page_size / SLI4_PAGE_SIZE)); 14790 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14791 eq->queue_id); 14792 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14793 phba->sli4_hba.pc_sli4_params.cqav); 14794 } else { 14795 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14796 eq->queue_id); 14797 } 14798 switch (cq->entry_count) { 14799 case 2048: 14800 case 4096: 14801 if (phba->sli4_hba.pc_sli4_params.cqv == 14802 LPFC_Q_CREATE_VERSION_2) { 14803 cq_create->u.request.context.lpfc_cq_context_count = 14804 cq->entry_count; 14805 bf_set(lpfc_cq_context_count, 14806 &cq_create->u.request.context, 14807 LPFC_CQ_CNT_WORD7); 14808 break; 14809 } 14810 /* fall through */ 14811 default: 14812 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14813 "0361 Unsupported CQ count: " 14814 "entry cnt %d sz %d pg cnt %d\n", 14815 cq->entry_count, cq->entry_size, 14816 cq->page_count); 14817 if (cq->entry_count < 256) { 14818 status = -EINVAL; 14819 goto out; 14820 } 14821 /* fall through - otherwise default to smallest count */ 14822 case 256: 14823 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14824 LPFC_CQ_CNT_256); 14825 break; 14826 case 512: 14827 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14828 LPFC_CQ_CNT_512); 14829 break; 14830 case 1024: 14831 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14832 LPFC_CQ_CNT_1024); 14833 break; 14834 } 14835 list_for_each_entry(dmabuf, &cq->page_list, list) { 14836 memset(dmabuf->virt, 0, cq->page_size); 14837 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14838 putPaddrLow(dmabuf->phys); 14839 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14840 putPaddrHigh(dmabuf->phys); 14841 } 14842 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14843 14844 /* The IOCTL status is embedded in the mailbox subheader. 
*/
14845 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14846 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14847 if (shdr_status || shdr_add_status || rc) {
14848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14849 "2501 CQ_CREATE mailbox failed with "
14850 "status x%x add_status x%x, mbx status x%x\n",
14851 shdr_status, shdr_add_status, rc);
14852 status = -ENXIO;
14853 goto out;
14854 }
14855 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14856 if (cq->queue_id == 0xFFFF) {
14857 status = -ENXIO;
14858 goto out;
14859 }
14860 /* link the cq onto the parent eq child list */
14861 list_add_tail(&cq->list, &eq->child_list);
14862 /* Set up completion queue's type and subtype */
14863 cq->type = type;
14864 cq->subtype = subtype;
14865 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14866 cq->assoc_qid = eq->queue_id;
14867 cq->assoc_qp = eq;
14868 cq->host_index = 0;
14869 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14870 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14871 
14872 if (cq->queue_id > phba->sli4_hba.cq_max)
14873 phba->sli4_hba.cq_max = cq->queue_id;
14874 out:
14875 mempool_free(mbox, phba->mbox_mem_pool);
14876 return status;
14877 }
14878 
14879 /**
14880 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14881 * @phba: HBA structure that indicates port to create a queue on.
14882 * @cqp: The queue structure array to use to create the completion queues.
14883 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
14884 *
14885 * This function creates a set of completion queues to support MRQ,
14886 * as detailed in @cqp, on a port
14887 * described by @phba, by sending a CREATE_CQ_SET mailbox command to the HBA.
14888 *
14889 * The @phba struct is used to send the mailbox command to the HBA. The @cqp
14890 * array is used to get the entry count and entry size that are necessary to
14891 * determine the number of pages to allocate for each queue. The EQs in @hdwq
14892 * indicate which event queue to bind each completion queue to. This
14893 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14894 * completion queues. The mailbox command is issued in polled mode, so this
14895 * function waits for it to finish before continuing.
14896 *
14897 * On success this function will return a zero. If unable to allocate enough
14898 * memory this function will return -ENOMEM. If the queue create mailbox command
14899 * fails this function will return -ENXIO.
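 *
 * Illustrative use for the NVMET MRQ completion queues (sketch only; the
 * nvmet_cqset array and the hardware queues are assumed to have been
 * allocated beforehand with lpfc_sli4_queue_alloc()):
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);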
14900 **/ 14901 int 14902 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14903 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 14904 uint32_t subtype) 14905 { 14906 struct lpfc_queue *cq; 14907 struct lpfc_queue *eq; 14908 struct lpfc_mbx_cq_create_set *cq_set; 14909 struct lpfc_dmabuf *dmabuf; 14910 LPFC_MBOXQ_t *mbox; 14911 int rc, length, alloclen, status = 0; 14912 int cnt, idx, numcq, page_idx = 0; 14913 uint32_t shdr_status, shdr_add_status; 14914 union lpfc_sli4_cfg_shdr *shdr; 14915 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14916 14917 /* sanity check on queue memory */ 14918 numcq = phba->cfg_nvmet_mrq; 14919 if (!cqp || !hdwq || !numcq) 14920 return -ENODEV; 14921 14922 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14923 if (!mbox) 14924 return -ENOMEM; 14925 14926 length = sizeof(struct lpfc_mbx_cq_create_set); 14927 length += ((numcq * cqp[0]->page_count) * 14928 sizeof(struct dma_address)); 14929 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14930 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14931 LPFC_SLI4_MBX_NEMBED); 14932 if (alloclen < length) { 14933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14934 "3098 Allocated DMA memory size (%d) is " 14935 "less than the requested DMA memory size " 14936 "(%d)\n", alloclen, length); 14937 status = -ENOMEM; 14938 goto out; 14939 } 14940 cq_set = mbox->sge_array->addr[0]; 14941 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14942 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14943 14944 for (idx = 0; idx < numcq; idx++) { 14945 cq = cqp[idx]; 14946 eq = hdwq[idx].hba_eq; 14947 if (!cq || !eq) { 14948 status = -ENOMEM; 14949 goto out; 14950 } 14951 if (!phba->sli4_hba.pc_sli4_params.supported) 14952 hw_page_size = cq->page_size; 14953 14954 switch (idx) { 14955 case 0: 14956 bf_set(lpfc_mbx_cq_create_set_page_size, 14957 &cq_set->u.request, 14958 (hw_page_size / SLI4_PAGE_SIZE)); 14959 bf_set(lpfc_mbx_cq_create_set_num_pages, 14960 &cq_set->u.request, cq->page_count); 14961 bf_set(lpfc_mbx_cq_create_set_evt, 14962 &cq_set->u.request, 1); 14963 bf_set(lpfc_mbx_cq_create_set_valid, 14964 &cq_set->u.request, 1); 14965 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14966 &cq_set->u.request, 0); 14967 bf_set(lpfc_mbx_cq_create_set_num_cq, 14968 &cq_set->u.request, numcq); 14969 bf_set(lpfc_mbx_cq_create_set_autovalid, 14970 &cq_set->u.request, 14971 phba->sli4_hba.pc_sli4_params.cqav); 14972 switch (cq->entry_count) { 14973 case 2048: 14974 case 4096: 14975 if (phba->sli4_hba.pc_sli4_params.cqv == 14976 LPFC_Q_CREATE_VERSION_2) { 14977 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14978 &cq_set->u.request, 14979 cq->entry_count); 14980 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14981 &cq_set->u.request, 14982 LPFC_CQ_CNT_WORD7); 14983 break; 14984 } 14985 /* fall through */ 14986 default: 14987 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14988 "3118 Bad CQ count. 
(%d)\n", 14989 cq->entry_count); 14990 if (cq->entry_count < 256) { 14991 status = -EINVAL; 14992 goto out; 14993 } 14994 /* fall through - otherwise default to smallest */ 14995 case 256: 14996 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14997 &cq_set->u.request, LPFC_CQ_CNT_256); 14998 break; 14999 case 512: 15000 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15001 &cq_set->u.request, LPFC_CQ_CNT_512); 15002 break; 15003 case 1024: 15004 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15005 &cq_set->u.request, LPFC_CQ_CNT_1024); 15006 break; 15007 } 15008 bf_set(lpfc_mbx_cq_create_set_eq_id0, 15009 &cq_set->u.request, eq->queue_id); 15010 break; 15011 case 1: 15012 bf_set(lpfc_mbx_cq_create_set_eq_id1, 15013 &cq_set->u.request, eq->queue_id); 15014 break; 15015 case 2: 15016 bf_set(lpfc_mbx_cq_create_set_eq_id2, 15017 &cq_set->u.request, eq->queue_id); 15018 break; 15019 case 3: 15020 bf_set(lpfc_mbx_cq_create_set_eq_id3, 15021 &cq_set->u.request, eq->queue_id); 15022 break; 15023 case 4: 15024 bf_set(lpfc_mbx_cq_create_set_eq_id4, 15025 &cq_set->u.request, eq->queue_id); 15026 break; 15027 case 5: 15028 bf_set(lpfc_mbx_cq_create_set_eq_id5, 15029 &cq_set->u.request, eq->queue_id); 15030 break; 15031 case 6: 15032 bf_set(lpfc_mbx_cq_create_set_eq_id6, 15033 &cq_set->u.request, eq->queue_id); 15034 break; 15035 case 7: 15036 bf_set(lpfc_mbx_cq_create_set_eq_id7, 15037 &cq_set->u.request, eq->queue_id); 15038 break; 15039 case 8: 15040 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15041 &cq_set->u.request, eq->queue_id); 15042 break; 15043 case 9: 15044 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15045 &cq_set->u.request, eq->queue_id); 15046 break; 15047 case 10: 15048 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15049 &cq_set->u.request, eq->queue_id); 15050 break; 15051 case 11: 15052 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15053 &cq_set->u.request, eq->queue_id); 15054 break; 15055 case 12: 15056 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15057 &cq_set->u.request, eq->queue_id); 15058 break; 15059 case 13: 15060 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15061 &cq_set->u.request, eq->queue_id); 15062 break; 15063 case 14: 15064 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15065 &cq_set->u.request, eq->queue_id); 15066 break; 15067 case 15: 15068 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15069 &cq_set->u.request, eq->queue_id); 15070 break; 15071 } 15072 15073 /* link the cq onto the parent eq child list */ 15074 list_add_tail(&cq->list, &eq->child_list); 15075 /* Set up completion queue's type and subtype */ 15076 cq->type = type; 15077 cq->subtype = subtype; 15078 cq->assoc_qid = eq->queue_id; 15079 cq->assoc_qp = eq; 15080 cq->host_index = 0; 15081 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 15082 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 15083 cq->entry_count); 15084 cq->chann = idx; 15085 15086 rc = 0; 15087 list_for_each_entry(dmabuf, &cq->page_list, list) { 15088 memset(dmabuf->virt, 0, hw_page_size); 15089 cnt = page_idx + dmabuf->buffer_tag; 15090 cq_set->u.request.page[cnt].addr_lo = 15091 putPaddrLow(dmabuf->phys); 15092 cq_set->u.request.page[cnt].addr_hi = 15093 putPaddrHigh(dmabuf->phys); 15094 rc++; 15095 } 15096 page_idx += rc; 15097 } 15098 15099 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15100 15101 /* The IOCTL status is embedded in the mailbox subheader. 
*/
15102 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15103 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15104 if (shdr_status || shdr_add_status || rc) {
15105 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15106 "3119 CQ_CREATE_SET mailbox failed with "
15107 "status x%x add_status x%x, mbx status x%x\n",
15108 shdr_status, shdr_add_status, rc);
15109 status = -ENXIO;
15110 goto out;
15111 }
15112 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15113 if (rc == 0xFFFF) {
15114 status = -ENXIO;
15115 goto out;
15116 }
15117 
15118 for (idx = 0; idx < numcq; idx++) {
15119 cq = cqp[idx];
15120 cq->queue_id = rc + idx;
15121 if (cq->queue_id > phba->sli4_hba.cq_max)
15122 phba->sli4_hba.cq_max = cq->queue_id;
15123 }
15124 
15125 out:
15126 lpfc_sli4_mbox_cmd_free(phba, mbox);
15127 return status;
15128 }
15129 
15130 /**
15131 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
15132 * @phba: HBA structure that indicates port to create a queue on.
15133 * @mq: The queue structure to use to create the mailbox queue.
15134 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15135 * @cq: The completion queue to associate with this mq.
15136 *
15137 * This function provides failback (fb) functionality when the
15138 * mq_create_ext fails on older FW generations. Its purpose is identical
15139 * to mq_create_ext otherwise.
15140 *
15141 * This routine cannot fail as all attributes were previously accessed and
15142 * initialized in mq_create_ext.
15143 **/
15144 static void
15145 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15146 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15147 {
15148 struct lpfc_mbx_mq_create *mq_create;
15149 struct lpfc_dmabuf *dmabuf;
15150 int length;
15151 
15152 length = (sizeof(struct lpfc_mbx_mq_create) -
15153 sizeof(struct lpfc_sli4_cfg_mhdr));
15154 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15155 LPFC_MBOX_OPCODE_MQ_CREATE,
15156 length, LPFC_SLI4_MBX_EMBED);
15157 mq_create = &mbox->u.mqe.un.mq_create;
15158 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15159 mq->page_count);
15160 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15161 cq->queue_id);
15162 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15163 switch (mq->entry_count) {
15164 case 16:
15165 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15166 LPFC_MQ_RING_SIZE_16);
15167 break;
15168 case 32:
15169 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15170 LPFC_MQ_RING_SIZE_32);
15171 break;
15172 case 64:
15173 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15174 LPFC_MQ_RING_SIZE_64);
15175 break;
15176 case 128:
15177 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15178 LPFC_MQ_RING_SIZE_128);
15179 break;
15180 }
15181 list_for_each_entry(dmabuf, &mq->page_list, list) {
15182 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15183 putPaddrLow(dmabuf->phys);
15184 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15185 putPaddrHigh(dmabuf->phys);
15186 }
15187 }
15188 
15189 /**
15190 * lpfc_mq_create - Create a mailbox Queue on the HBA
15191 * @phba: HBA structure that indicates port to create a queue on.
15192 * @mq: The queue structure to use to create the mailbox queue.
15193 * @cq: The completion queue to associate with this mq.
15194 * @subtype: The queue's subtype.
15195 * 15196 * This function creates a mailbox queue, as detailed in @mq, on a port, 15197 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 15198 * 15199 * The @phba struct is used to send mailbox command to HBA. The @cq struct 15200 * is used to get the entry count and entry size that are necessary to 15201 * determine the number of pages to allocate and use for this queue. This 15202 * function will send the MQ_CREATE mailbox command to the HBA to setup the 15203 * mailbox queue. This function is asynchronous and will wait for the mailbox 15204 * command to finish before continuing. 15205 * 15206 * On success this function will return a zero. If unable to allocate enough 15207 * memory this function will return -ENOMEM. If the queue create mailbox command 15208 * fails this function will return -ENXIO. 15209 **/ 15210 int32_t 15211 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 15212 struct lpfc_queue *cq, uint32_t subtype) 15213 { 15214 struct lpfc_mbx_mq_create *mq_create; 15215 struct lpfc_mbx_mq_create_ext *mq_create_ext; 15216 struct lpfc_dmabuf *dmabuf; 15217 LPFC_MBOXQ_t *mbox; 15218 int rc, length, status = 0; 15219 uint32_t shdr_status, shdr_add_status; 15220 union lpfc_sli4_cfg_shdr *shdr; 15221 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15222 15223 /* sanity check on queue memory */ 15224 if (!mq || !cq) 15225 return -ENODEV; 15226 if (!phba->sli4_hba.pc_sli4_params.supported) 15227 hw_page_size = SLI4_PAGE_SIZE; 15228 15229 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15230 if (!mbox) 15231 return -ENOMEM; 15232 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 15233 sizeof(struct lpfc_sli4_cfg_mhdr)); 15234 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15235 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15236 length, LPFC_SLI4_MBX_EMBED); 15237 15238 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15239 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15240 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15241 &mq_create_ext->u.request, mq->page_count); 15242 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15243 &mq_create_ext->u.request, 1); 15244 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15245 &mq_create_ext->u.request, 1); 15246 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15247 &mq_create_ext->u.request, 1); 15248 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15249 &mq_create_ext->u.request, 1); 15250 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15251 &mq_create_ext->u.request, 1); 15252 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15253 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15254 phba->sli4_hba.pc_sli4_params.mqv); 15255 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15256 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15257 cq->queue_id); 15258 else 15259 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15260 cq->queue_id); 15261 switch (mq->entry_count) { 15262 default: 15263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15264 "0362 Unsupported MQ count. 
(%d)\n", 15265 mq->entry_count); 15266 if (mq->entry_count < 16) { 15267 status = -EINVAL; 15268 goto out; 15269 } 15270 /* fall through - otherwise default to smallest count */ 15271 case 16: 15272 bf_set(lpfc_mq_context_ring_size, 15273 &mq_create_ext->u.request.context, 15274 LPFC_MQ_RING_SIZE_16); 15275 break; 15276 case 32: 15277 bf_set(lpfc_mq_context_ring_size, 15278 &mq_create_ext->u.request.context, 15279 LPFC_MQ_RING_SIZE_32); 15280 break; 15281 case 64: 15282 bf_set(lpfc_mq_context_ring_size, 15283 &mq_create_ext->u.request.context, 15284 LPFC_MQ_RING_SIZE_64); 15285 break; 15286 case 128: 15287 bf_set(lpfc_mq_context_ring_size, 15288 &mq_create_ext->u.request.context, 15289 LPFC_MQ_RING_SIZE_128); 15290 break; 15291 } 15292 list_for_each_entry(dmabuf, &mq->page_list, list) { 15293 memset(dmabuf->virt, 0, hw_page_size); 15294 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15295 putPaddrLow(dmabuf->phys); 15296 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15297 putPaddrHigh(dmabuf->phys); 15298 } 15299 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15300 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15301 &mq_create_ext->u.response); 15302 if (rc != MBX_SUCCESS) { 15303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15304 "2795 MQ_CREATE_EXT failed with " 15305 "status x%x. Failback to MQ_CREATE.\n", 15306 rc); 15307 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15308 mq_create = &mbox->u.mqe.un.mq_create; 15309 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15310 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15311 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15312 &mq_create->u.response); 15313 } 15314 15315 /* The IOCTL status is embedded in the mailbox subheader. */ 15316 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15317 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15318 if (shdr_status || shdr_add_status || rc) { 15319 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15320 "2502 MQ_CREATE mailbox failed with " 15321 "status x%x add_status x%x, mbx status x%x\n", 15322 shdr_status, shdr_add_status, rc); 15323 status = -ENXIO; 15324 goto out; 15325 } 15326 if (mq->queue_id == 0xFFFF) { 15327 status = -ENXIO; 15328 goto out; 15329 } 15330 mq->type = LPFC_MQ; 15331 mq->assoc_qid = cq->queue_id; 15332 mq->subtype = subtype; 15333 mq->host_index = 0; 15334 mq->hba_index = 0; 15335 15336 /* link the mq onto the parent cq child list */ 15337 list_add_tail(&mq->list, &cq->child_list); 15338 out: 15339 mempool_free(mbox, phba->mbox_mem_pool); 15340 return status; 15341 } 15342 15343 /** 15344 * lpfc_wq_create - Create a Work Queue on the HBA 15345 * @phba: HBA structure that indicates port to create a queue on. 15346 * @wq: The queue structure to use to create the work queue. 15347 * @cq: The completion queue to bind this work queue to. 15348 * @subtype: The subtype of the work queue indicating its functionality. 15349 * 15350 * This function creates a work queue, as detailed in @wq, on a port, described 15351 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15352 * 15353 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15354 * is used to get the entry count and entry size that are necessary to 15355 * determine the number of pages to allocate and use for this queue. The @cq 15356 * is used to indicate which completion queue to bind this work queue to. This 15357 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15358 * work queue. 
This function is asynchronous and will wait for the mailbox 15359 * command to finish before continuing. 15360 * 15361 * On success this function will return a zero. If unable to allocate enough 15362 * memory this function will return -ENOMEM. If the queue create mailbox command 15363 * fails this function will return -ENXIO. 15364 **/ 15365 int 15366 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 15367 struct lpfc_queue *cq, uint32_t subtype) 15368 { 15369 struct lpfc_mbx_wq_create *wq_create; 15370 struct lpfc_dmabuf *dmabuf; 15371 LPFC_MBOXQ_t *mbox; 15372 int rc, length, status = 0; 15373 uint32_t shdr_status, shdr_add_status; 15374 union lpfc_sli4_cfg_shdr *shdr; 15375 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15376 struct dma_address *page; 15377 void __iomem *bar_memmap_p; 15378 uint32_t db_offset; 15379 uint16_t pci_barset; 15380 uint8_t dpp_barset; 15381 uint32_t dpp_offset; 15382 unsigned long pg_addr; 15383 uint8_t wq_create_version; 15384 15385 /* sanity check on queue memory */ 15386 if (!wq || !cq) 15387 return -ENODEV; 15388 if (!phba->sli4_hba.pc_sli4_params.supported) 15389 hw_page_size = wq->page_size; 15390 15391 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15392 if (!mbox) 15393 return -ENOMEM; 15394 length = (sizeof(struct lpfc_mbx_wq_create) - 15395 sizeof(struct lpfc_sli4_cfg_mhdr)); 15396 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15397 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 15398 length, LPFC_SLI4_MBX_EMBED); 15399 wq_create = &mbox->u.mqe.un.wq_create; 15400 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 15401 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 15402 wq->page_count); 15403 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 15404 cq->queue_id); 15405 15406 /* wqv is the earliest version supported, NOT the latest */ 15407 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15408 phba->sli4_hba.pc_sli4_params.wqv); 15409 15410 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 15411 (wq->page_size > SLI4_PAGE_SIZE)) 15412 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15413 else 15414 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15415 15416 15417 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 15418 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15419 else 15420 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15421 15422 switch (wq_create_version) { 15423 case LPFC_Q_CREATE_VERSION_1: 15424 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 15425 wq->entry_count); 15426 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15427 LPFC_Q_CREATE_VERSION_1); 15428 15429 switch (wq->entry_size) { 15430 default: 15431 case 64: 15432 bf_set(lpfc_mbx_wq_create_wqe_size, 15433 &wq_create->u.request_1, 15434 LPFC_WQ_WQE_SIZE_64); 15435 break; 15436 case 128: 15437 bf_set(lpfc_mbx_wq_create_wqe_size, 15438 &wq_create->u.request_1, 15439 LPFC_WQ_WQE_SIZE_128); 15440 break; 15441 } 15442 /* Request DPP by default */ 15443 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 15444 bf_set(lpfc_mbx_wq_create_page_size, 15445 &wq_create->u.request_1, 15446 (wq->page_size / SLI4_PAGE_SIZE)); 15447 page = wq_create->u.request_1.page; 15448 break; 15449 default: 15450 page = wq_create->u.request.page; 15451 break; 15452 } 15453 15454 list_for_each_entry(dmabuf, &wq->page_list, list) { 15455 memset(dmabuf->virt, 0, hw_page_size); 15456 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 15457 page[dmabuf->buffer_tag].addr_hi = 
putPaddrHigh(dmabuf->phys); 15458 } 15459 15460 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15461 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15462 15463 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15464 /* The IOCTL status is embedded in the mailbox subheader. */ 15465 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15466 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15467 if (shdr_status || shdr_add_status || rc) { 15468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15469 "2503 WQ_CREATE mailbox failed with " 15470 "status x%x add_status x%x, mbx status x%x\n", 15471 shdr_status, shdr_add_status, rc); 15472 status = -ENXIO; 15473 goto out; 15474 } 15475 15476 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15477 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15478 &wq_create->u.response); 15479 else 15480 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15481 &wq_create->u.response_1); 15482 15483 if (wq->queue_id == 0xFFFF) { 15484 status = -ENXIO; 15485 goto out; 15486 } 15487 15488 wq->db_format = LPFC_DB_LIST_FORMAT; 15489 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15490 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15491 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15492 &wq_create->u.response); 15493 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15494 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15496 "3265 WQ[%d] doorbell format " 15497 "not supported: x%x\n", 15498 wq->queue_id, wq->db_format); 15499 status = -EINVAL; 15500 goto out; 15501 } 15502 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15503 &wq_create->u.response); 15504 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15505 pci_barset); 15506 if (!bar_memmap_p) { 15507 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15508 "3263 WQ[%d] failed to memmap " 15509 "pci barset:x%x\n", 15510 wq->queue_id, pci_barset); 15511 status = -ENOMEM; 15512 goto out; 15513 } 15514 db_offset = wq_create->u.response.doorbell_offset; 15515 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15516 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15517 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15518 "3252 WQ[%d] doorbell offset " 15519 "not supported: x%x\n", 15520 wq->queue_id, db_offset); 15521 status = -EINVAL; 15522 goto out; 15523 } 15524 wq->db_regaddr = bar_memmap_p + db_offset; 15525 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15526 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15527 "format:x%x\n", wq->queue_id, 15528 pci_barset, db_offset, wq->db_format); 15529 } else 15530 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15531 } else { 15532 /* Check if DPP was honored by the firmware */ 15533 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15534 &wq_create->u.response_1); 15535 if (wq->dpp_enable) { 15536 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15537 &wq_create->u.response_1); 15538 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15539 pci_barset); 15540 if (!bar_memmap_p) { 15541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15542 "3267 WQ[%d] failed to memmap " 15543 "pci barset:x%x\n", 15544 wq->queue_id, pci_barset); 15545 status = -ENOMEM; 15546 goto out; 15547 } 15548 db_offset = wq_create->u.response_1.doorbell_offset; 15549 wq->db_regaddr = bar_memmap_p + db_offset; 15550 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15551 &wq_create->u.response_1); 15552 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15553 &wq_create->u.response_1); 15554 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15555 dpp_barset); 15556 if 
(!bar_memmap_p) { 15557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15558 "3268 WQ[%d] failed to memmap " 15559 "pci barset:x%x\n", 15560 wq->queue_id, dpp_barset); 15561 status = -ENOMEM; 15562 goto out; 15563 } 15564 dpp_offset = wq_create->u.response_1.dpp_offset; 15565 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15566 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15567 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15568 "dpp_id:x%x dpp_barset:x%x " 15569 "dpp_offset:x%x\n", 15570 wq->queue_id, pci_barset, db_offset, 15571 wq->dpp_id, dpp_barset, dpp_offset); 15572 15573 /* Enable combined writes for DPP aperture */ 15574 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15575 #ifdef CONFIG_X86 15576 rc = set_memory_wc(pg_addr, 1); 15577 if (rc) { 15578 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15579 "3272 Cannot setup Combined " 15580 "Write on WQ[%d] - disable DPP\n", 15581 wq->queue_id); 15582 phba->cfg_enable_dpp = 0; 15583 } 15584 #else 15585 phba->cfg_enable_dpp = 0; 15586 #endif 15587 } else 15588 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15589 } 15590 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15591 if (wq->pring == NULL) { 15592 status = -ENOMEM; 15593 goto out; 15594 } 15595 wq->type = LPFC_WQ; 15596 wq->assoc_qid = cq->queue_id; 15597 wq->subtype = subtype; 15598 wq->host_index = 0; 15599 wq->hba_index = 0; 15600 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; 15601 15602 /* link the wq onto the parent cq child list */ 15603 list_add_tail(&wq->list, &cq->child_list); 15604 out: 15605 mempool_free(mbox, phba->mbox_mem_pool); 15606 return status; 15607 } 15608 15609 /** 15610 * lpfc_rq_create - Create a Receive Queue on the HBA 15611 * @phba: HBA structure that indicates port to create a queue on. 15612 * @hrq: The queue structure to use to create the header receive queue. 15613 * @drq: The queue structure to use to create the data receive queue. 15614 * @cq: The completion queue to bind this work queue to. 15615 * 15616 * This function creates a receive buffer queue pair , as detailed in @hrq and 15617 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15618 * to the HBA. 15619 * 15620 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15621 * struct is used to get the entry count that is necessary to determine the 15622 * number of pages to use for this queue. The @cq is used to indicate which 15623 * completion queue to bind received buffers that are posted to these queues to. 15624 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15625 * receive queue pair. This function is asynchronous and will wait for the 15626 * mailbox command to finish before continuing. 15627 * 15628 * On success this function will return a zero. If unable to allocate enough 15629 * memory this function will return -ENOMEM. If the queue create mailbox command 15630 * fails this function will return -ENXIO. 
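 *
 * Illustrative call (a sketch only, mirroring how the driver's own queue
 * setup binds the ELS header/data RQ pair to the ELS completion queue;
 * the three queues are assumed to have been allocated beforehand with
 * lpfc_sli4_queue_alloc()):
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *	if (rc)
 *		return rc;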
15631 **/ 15632 int 15633 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15634 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15635 { 15636 struct lpfc_mbx_rq_create *rq_create; 15637 struct lpfc_dmabuf *dmabuf; 15638 LPFC_MBOXQ_t *mbox; 15639 int rc, length, status = 0; 15640 uint32_t shdr_status, shdr_add_status; 15641 union lpfc_sli4_cfg_shdr *shdr; 15642 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15643 void __iomem *bar_memmap_p; 15644 uint32_t db_offset; 15645 uint16_t pci_barset; 15646 15647 /* sanity check on queue memory */ 15648 if (!hrq || !drq || !cq) 15649 return -ENODEV; 15650 if (!phba->sli4_hba.pc_sli4_params.supported) 15651 hw_page_size = SLI4_PAGE_SIZE; 15652 15653 if (hrq->entry_count != drq->entry_count) 15654 return -EINVAL; 15655 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15656 if (!mbox) 15657 return -ENOMEM; 15658 length = (sizeof(struct lpfc_mbx_rq_create) - 15659 sizeof(struct lpfc_sli4_cfg_mhdr)); 15660 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15661 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15662 length, LPFC_SLI4_MBX_EMBED); 15663 rq_create = &mbox->u.mqe.un.rq_create; 15664 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15665 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15666 phba->sli4_hba.pc_sli4_params.rqv); 15667 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15668 bf_set(lpfc_rq_context_rqe_count_1, 15669 &rq_create->u.request.context, 15670 hrq->entry_count); 15671 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15672 bf_set(lpfc_rq_context_rqe_size, 15673 &rq_create->u.request.context, 15674 LPFC_RQE_SIZE_8); 15675 bf_set(lpfc_rq_context_page_size, 15676 &rq_create->u.request.context, 15677 LPFC_RQ_PAGE_SIZE_4096); 15678 } else { 15679 switch (hrq->entry_count) { 15680 default: 15681 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15682 "2535 Unsupported RQ count. (%d)\n", 15683 hrq->entry_count); 15684 if (hrq->entry_count < 512) { 15685 status = -EINVAL; 15686 goto out; 15687 } 15688 /* fall through - otherwise default to smallest count */ 15689 case 512: 15690 bf_set(lpfc_rq_context_rqe_count, 15691 &rq_create->u.request.context, 15692 LPFC_RQ_RING_SIZE_512); 15693 break; 15694 case 1024: 15695 bf_set(lpfc_rq_context_rqe_count, 15696 &rq_create->u.request.context, 15697 LPFC_RQ_RING_SIZE_1024); 15698 break; 15699 case 2048: 15700 bf_set(lpfc_rq_context_rqe_count, 15701 &rq_create->u.request.context, 15702 LPFC_RQ_RING_SIZE_2048); 15703 break; 15704 case 4096: 15705 bf_set(lpfc_rq_context_rqe_count, 15706 &rq_create->u.request.context, 15707 LPFC_RQ_RING_SIZE_4096); 15708 break; 15709 } 15710 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15711 LPFC_HDR_BUF_SIZE); 15712 } 15713 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15714 cq->queue_id); 15715 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15716 hrq->page_count); 15717 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15718 memset(dmabuf->virt, 0, hw_page_size); 15719 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15720 putPaddrLow(dmabuf->phys); 15721 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15722 putPaddrHigh(dmabuf->phys); 15723 } 15724 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15725 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15726 15727 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15728 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15729 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15730 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15731 if (shdr_status || shdr_add_status || rc) { 15732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15733 "2504 RQ_CREATE mailbox failed with " 15734 "status x%x add_status x%x, mbx status x%x\n", 15735 shdr_status, shdr_add_status, rc); 15736 status = -ENXIO; 15737 goto out; 15738 } 15739 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15740 if (hrq->queue_id == 0xFFFF) { 15741 status = -ENXIO; 15742 goto out; 15743 } 15744 15745 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15746 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15747 &rq_create->u.response); 15748 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15749 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15751 "3262 RQ [%d] doorbell format not " 15752 "supported: x%x\n", hrq->queue_id, 15753 hrq->db_format); 15754 status = -EINVAL; 15755 goto out; 15756 } 15757 15758 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15759 &rq_create->u.response); 15760 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15761 if (!bar_memmap_p) { 15762 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15763 "3269 RQ[%d] failed to memmap pci " 15764 "barset:x%x\n", hrq->queue_id, 15765 pci_barset); 15766 status = -ENOMEM; 15767 goto out; 15768 } 15769 15770 db_offset = rq_create->u.response.doorbell_offset; 15771 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15772 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15773 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15774 "3270 RQ[%d] doorbell offset not " 15775 "supported: x%x\n", hrq->queue_id, 15776 db_offset); 15777 status = -EINVAL; 15778 goto out; 15779 } 15780 hrq->db_regaddr = bar_memmap_p + db_offset; 15781 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15782 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15783 "format:x%x\n", hrq->queue_id, pci_barset, 15784 db_offset, hrq->db_format); 15785 } else { 15786 hrq->db_format = LPFC_DB_RING_FORMAT; 15787 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15788 } 15789 hrq->type = LPFC_HRQ; 15790 hrq->assoc_qid = cq->queue_id; 15791 hrq->subtype = subtype; 15792 hrq->host_index = 0; 15793 hrq->hba_index = 0; 15794 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15795 15796 /* now create the data queue */ 15797 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15798 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15799 length, LPFC_SLI4_MBX_EMBED); 15800 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15801 phba->sli4_hba.pc_sli4_params.rqv); 15802 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15803 bf_set(lpfc_rq_context_rqe_count_1, 15804 &rq_create->u.request.context, hrq->entry_count); 15805 if (subtype == LPFC_NVMET) 15806 rq_create->u.request.context.buffer_size = 15807 LPFC_NVMET_DATA_BUF_SIZE; 15808 else 15809 rq_create->u.request.context.buffer_size = 15810 LPFC_DATA_BUF_SIZE; 15811 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15812 LPFC_RQE_SIZE_8); 15813 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15814 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15815 } else { 15816 switch (drq->entry_count) { 15817 default: 15818 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15819 "2536 Unsupported RQ count. 
(%d)\n", 15820 drq->entry_count); 15821 if (drq->entry_count < 512) { 15822 status = -EINVAL; 15823 goto out; 15824 } 15825 /* fall through - otherwise default to smallest count */ 15826 case 512: 15827 bf_set(lpfc_rq_context_rqe_count, 15828 &rq_create->u.request.context, 15829 LPFC_RQ_RING_SIZE_512); 15830 break; 15831 case 1024: 15832 bf_set(lpfc_rq_context_rqe_count, 15833 &rq_create->u.request.context, 15834 LPFC_RQ_RING_SIZE_1024); 15835 break; 15836 case 2048: 15837 bf_set(lpfc_rq_context_rqe_count, 15838 &rq_create->u.request.context, 15839 LPFC_RQ_RING_SIZE_2048); 15840 break; 15841 case 4096: 15842 bf_set(lpfc_rq_context_rqe_count, 15843 &rq_create->u.request.context, 15844 LPFC_RQ_RING_SIZE_4096); 15845 break; 15846 } 15847 if (subtype == LPFC_NVMET) 15848 bf_set(lpfc_rq_context_buf_size, 15849 &rq_create->u.request.context, 15850 LPFC_NVMET_DATA_BUF_SIZE); 15851 else 15852 bf_set(lpfc_rq_context_buf_size, 15853 &rq_create->u.request.context, 15854 LPFC_DATA_BUF_SIZE); 15855 } 15856 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15857 cq->queue_id); 15858 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15859 drq->page_count); 15860 list_for_each_entry(dmabuf, &drq->page_list, list) { 15861 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15862 putPaddrLow(dmabuf->phys); 15863 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15864 putPaddrHigh(dmabuf->phys); 15865 } 15866 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15867 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15869 /* The IOCTL status is embedded in the mailbox subheader. */ 15870 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15871 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15872 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15873 if (shdr_status || shdr_add_status || rc) { 15874 status = -ENXIO; 15875 goto out; 15876 } 15877 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15878 if (drq->queue_id == 0xFFFF) { 15879 status = -ENXIO; 15880 goto out; 15881 } 15882 drq->type = LPFC_DRQ; 15883 drq->assoc_qid = cq->queue_id; 15884 drq->subtype = subtype; 15885 drq->host_index = 0; 15886 drq->hba_index = 0; 15887 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15888 15889 /* link the header and data RQs onto the parent cq child list */ 15890 list_add_tail(&hrq->list, &cq->child_list); 15891 list_add_tail(&drq->list, &cq->child_list); 15892 15893 out: 15894 mempool_free(mbox, phba->mbox_mem_pool); 15895 return status; 15896 } 15897 15898 /** 15899 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15900 * @phba: HBA structure that indicates port to create a queue on. 15901 * @hrqp: The queue structure array to use to create the header receive queues. 15902 * @drqp: The queue structure array to use to create the data receive queues. 15903 * @cqp: The completion queue array to bind these receive queues to. 15904 * 15905 * This function creates a receive buffer queue pair , as detailed in @hrq and 15906 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15907 * to the HBA. 15908 * 15909 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15910 * struct is used to get the entry count that is necessary to determine the 15911 * number of pages to use for this queue. The @cq is used to indicate which 15912 * completion queue to bind received buffers that are posted to these queues to. 
15913 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15914 * receive queue pair. This function is asynchronous and will wait for the 15915 * mailbox command to finish before continuing. 15916 * 15917 * On success this function will return a zero. If unable to allocate enough 15918 * memory this function will return -ENOMEM. If the queue create mailbox command 15919 * fails this function will return -ENXIO. 15920 **/ 15921 int 15922 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15923 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15924 uint32_t subtype) 15925 { 15926 struct lpfc_queue *hrq, *drq, *cq; 15927 struct lpfc_mbx_rq_create_v2 *rq_create; 15928 struct lpfc_dmabuf *dmabuf; 15929 LPFC_MBOXQ_t *mbox; 15930 int rc, length, alloclen, status = 0; 15931 int cnt, idx, numrq, page_idx = 0; 15932 uint32_t shdr_status, shdr_add_status; 15933 union lpfc_sli4_cfg_shdr *shdr; 15934 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15935 15936 numrq = phba->cfg_nvmet_mrq; 15937 /* sanity check on array memory */ 15938 if (!hrqp || !drqp || !cqp || !numrq) 15939 return -ENODEV; 15940 if (!phba->sli4_hba.pc_sli4_params.supported) 15941 hw_page_size = SLI4_PAGE_SIZE; 15942 15943 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15944 if (!mbox) 15945 return -ENOMEM; 15946 15947 length = sizeof(struct lpfc_mbx_rq_create_v2); 15948 length += ((2 * numrq * hrqp[0]->page_count) * 15949 sizeof(struct dma_address)); 15950 15951 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15952 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15953 LPFC_SLI4_MBX_NEMBED); 15954 if (alloclen < length) { 15955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15956 "3099 Allocated DMA memory size (%d) is " 15957 "less than the requested DMA memory size " 15958 "(%d)\n", alloclen, length); 15959 status = -ENOMEM; 15960 goto out; 15961 } 15962 15963 15964 15965 rq_create = mbox->sge_array->addr[0]; 15966 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15967 15968 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15969 cnt = 0; 15970 15971 for (idx = 0; idx < numrq; idx++) { 15972 hrq = hrqp[idx]; 15973 drq = drqp[idx]; 15974 cq = cqp[idx]; 15975 15976 /* sanity check on queue memory */ 15977 if (!hrq || !drq || !cq) { 15978 status = -ENODEV; 15979 goto out; 15980 } 15981 15982 if (hrq->entry_count != drq->entry_count) { 15983 status = -EINVAL; 15984 goto out; 15985 } 15986 15987 if (idx == 0) { 15988 bf_set(lpfc_mbx_rq_create_num_pages, 15989 &rq_create->u.request, 15990 hrq->page_count); 15991 bf_set(lpfc_mbx_rq_create_rq_cnt, 15992 &rq_create->u.request, (numrq * 2)); 15993 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15994 1); 15995 bf_set(lpfc_rq_context_base_cq, 15996 &rq_create->u.request.context, 15997 cq->queue_id); 15998 bf_set(lpfc_rq_context_data_size, 15999 &rq_create->u.request.context, 16000 LPFC_NVMET_DATA_BUF_SIZE); 16001 bf_set(lpfc_rq_context_hdr_size, 16002 &rq_create->u.request.context, 16003 LPFC_HDR_BUF_SIZE); 16004 bf_set(lpfc_rq_context_rqe_count_1, 16005 &rq_create->u.request.context, 16006 hrq->entry_count); 16007 bf_set(lpfc_rq_context_rqe_size, 16008 &rq_create->u.request.context, 16009 LPFC_RQE_SIZE_8); 16010 bf_set(lpfc_rq_context_page_size, 16011 &rq_create->u.request.context, 16012 (PAGE_SIZE/SLI4_PAGE_SIZE)); 16013 } 16014 rc = 0; 16015 list_for_each_entry(dmabuf, &hrq->page_list, list) { 16016 memset(dmabuf->virt, 0, hw_page_size); 16017 cnt = page_idx + dmabuf->buffer_tag; 
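			/*
			 * buffer_tag indexes the page within this RQ's own
			 * page_list, while page_idx carries the running
			 * offset into the single page[] array that this v2
			 * RQ_CREATE command shares across every header/data
			 * RQ in the batch.
			 */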
16018 rq_create->u.request.page[cnt].addr_lo = 16019 putPaddrLow(dmabuf->phys); 16020 rq_create->u.request.page[cnt].addr_hi = 16021 putPaddrHigh(dmabuf->phys); 16022 rc++; 16023 } 16024 page_idx += rc; 16025 16026 rc = 0; 16027 list_for_each_entry(dmabuf, &drq->page_list, list) { 16028 memset(dmabuf->virt, 0, hw_page_size); 16029 cnt = page_idx + dmabuf->buffer_tag; 16030 rq_create->u.request.page[cnt].addr_lo = 16031 putPaddrLow(dmabuf->phys); 16032 rq_create->u.request.page[cnt].addr_hi = 16033 putPaddrHigh(dmabuf->phys); 16034 rc++; 16035 } 16036 page_idx += rc; 16037 16038 hrq->db_format = LPFC_DB_RING_FORMAT; 16039 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16040 hrq->type = LPFC_HRQ; 16041 hrq->assoc_qid = cq->queue_id; 16042 hrq->subtype = subtype; 16043 hrq->host_index = 0; 16044 hrq->hba_index = 0; 16045 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16046 16047 drq->db_format = LPFC_DB_RING_FORMAT; 16048 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16049 drq->type = LPFC_DRQ; 16050 drq->assoc_qid = cq->queue_id; 16051 drq->subtype = subtype; 16052 drq->host_index = 0; 16053 drq->hba_index = 0; 16054 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16055 16056 list_add_tail(&hrq->list, &cq->child_list); 16057 list_add_tail(&drq->list, &cq->child_list); 16058 } 16059 16060 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16061 /* The IOCTL status is embedded in the mailbox subheader. */ 16062 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16063 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16064 if (shdr_status || shdr_add_status || rc) { 16065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16066 "3120 RQ_CREATE mailbox failed with " 16067 "status x%x add_status x%x, mbx status x%x\n", 16068 shdr_status, shdr_add_status, rc); 16069 status = -ENXIO; 16070 goto out; 16071 } 16072 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16073 if (rc == 0xFFFF) { 16074 status = -ENXIO; 16075 goto out; 16076 } 16077 16078 /* Initialize all RQs with associated queue id */ 16079 for (idx = 0; idx < numrq; idx++) { 16080 hrq = hrqp[idx]; 16081 hrq->queue_id = rc + (2 * idx); 16082 drq = drqp[idx]; 16083 drq->queue_id = rc + (2 * idx) + 1; 16084 } 16085 16086 out: 16087 lpfc_sli4_mbox_cmd_free(phba, mbox); 16088 return status; 16089 } 16090 16091 /** 16092 * lpfc_eq_destroy - Destroy an event Queue on the HBA 16093 * @eq: The queue structure associated with the queue to destroy. 16094 * 16095 * This function destroys a queue, as detailed in @eq by sending an mailbox 16096 * command, specific to the type of queue, to the HBA. 16097 * 16098 * The @eq struct is used to get the queue ID of the queue to destroy. 16099 * 16100 * On success this function will return a zero. If the queue destroy mailbox 16101 * command fails this function will return -ENXIO. 
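 *
 * Note that this routine only tears the queue down on the port and unlinks
 * @eq from any parent list; the host memory backing the queue is released
 * separately. An illustrative teardown pair (a sketch, not a required API
 * order) is:
 *
 *	lpfc_eq_destroy(phba, eq);
 *	lpfc_sli4_queue_free(eq);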
16102 **/ 16103 int 16104 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 16105 { 16106 LPFC_MBOXQ_t *mbox; 16107 int rc, length, status = 0; 16108 uint32_t shdr_status, shdr_add_status; 16109 union lpfc_sli4_cfg_shdr *shdr; 16110 16111 /* sanity check on queue memory */ 16112 if (!eq) 16113 return -ENODEV; 16114 16115 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 16116 if (!mbox) 16117 return -ENOMEM; 16118 length = (sizeof(struct lpfc_mbx_eq_destroy) - 16119 sizeof(struct lpfc_sli4_cfg_mhdr)); 16120 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16121 LPFC_MBOX_OPCODE_EQ_DESTROY, 16122 length, LPFC_SLI4_MBX_EMBED); 16123 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 16124 eq->queue_id); 16125 mbox->vport = eq->phba->pport; 16126 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16127 16128 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 16129 /* The IOCTL status is embedded in the mailbox subheader. */ 16130 shdr = (union lpfc_sli4_cfg_shdr *) 16131 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 16132 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16133 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16134 if (shdr_status || shdr_add_status || rc) { 16135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16136 "2505 EQ_DESTROY mailbox failed with " 16137 "status x%x add_status x%x, mbx status x%x\n", 16138 shdr_status, shdr_add_status, rc); 16139 status = -ENXIO; 16140 } 16141 16142 /* Remove eq from any list */ 16143 list_del_init(&eq->list); 16144 mempool_free(mbox, eq->phba->mbox_mem_pool); 16145 return status; 16146 } 16147 16148 /** 16149 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 16150 * @cq: The queue structure associated with the queue to destroy. 16151 * 16152 * This function destroys a queue, as detailed in @cq by sending an mailbox 16153 * command, specific to the type of queue, to the HBA. 16154 * 16155 * The @cq struct is used to get the queue ID of the queue to destroy. 16156 * 16157 * On success this function will return a zero. If the queue destroy mailbox 16158 * command fails this function will return -ENXIO. 16159 **/ 16160 int 16161 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 16162 { 16163 LPFC_MBOXQ_t *mbox; 16164 int rc, length, status = 0; 16165 uint32_t shdr_status, shdr_add_status; 16166 union lpfc_sli4_cfg_shdr *shdr; 16167 16168 /* sanity check on queue memory */ 16169 if (!cq) 16170 return -ENODEV; 16171 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 16172 if (!mbox) 16173 return -ENOMEM; 16174 length = (sizeof(struct lpfc_mbx_cq_destroy) - 16175 sizeof(struct lpfc_sli4_cfg_mhdr)); 16176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16177 LPFC_MBOX_OPCODE_CQ_DESTROY, 16178 length, LPFC_SLI4_MBX_EMBED); 16179 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 16180 cq->queue_id); 16181 mbox->vport = cq->phba->pport; 16182 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16183 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 16184 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16185 shdr = (union lpfc_sli4_cfg_shdr *) 16186 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 16187 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16188 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16189 if (shdr_status || shdr_add_status || rc) { 16190 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16191 "2506 CQ_DESTROY mailbox failed with " 16192 "status x%x add_status x%x, mbx status x%x\n", 16193 shdr_status, shdr_add_status, rc); 16194 status = -ENXIO; 16195 } 16196 /* Remove cq from any list */ 16197 list_del_init(&cq->list); 16198 mempool_free(mbox, cq->phba->mbox_mem_pool); 16199 return status; 16200 } 16201 16202 /** 16203 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 16204 * @qm: The queue structure associated with the queue to destroy. 16205 * 16206 * This function destroys a queue, as detailed in @mq by sending an mailbox 16207 * command, specific to the type of queue, to the HBA. 16208 * 16209 * The @mq struct is used to get the queue ID of the queue to destroy. 16210 * 16211 * On success this function will return a zero. If the queue destroy mailbox 16212 * command fails this function will return -ENXIO. 16213 **/ 16214 int 16215 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 16216 { 16217 LPFC_MBOXQ_t *mbox; 16218 int rc, length, status = 0; 16219 uint32_t shdr_status, shdr_add_status; 16220 union lpfc_sli4_cfg_shdr *shdr; 16221 16222 /* sanity check on queue memory */ 16223 if (!mq) 16224 return -ENODEV; 16225 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 16226 if (!mbox) 16227 return -ENOMEM; 16228 length = (sizeof(struct lpfc_mbx_mq_destroy) - 16229 sizeof(struct lpfc_sli4_cfg_mhdr)); 16230 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16231 LPFC_MBOX_OPCODE_MQ_DESTROY, 16232 length, LPFC_SLI4_MBX_EMBED); 16233 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16234 mq->queue_id); 16235 mbox->vport = mq->phba->pport; 16236 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16237 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16238 /* The IOCTL status is embedded in the mailbox subheader. */ 16239 shdr = (union lpfc_sli4_cfg_shdr *) 16240 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16241 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16242 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16243 if (shdr_status || shdr_add_status || rc) { 16244 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16245 "2507 MQ_DESTROY mailbox failed with " 16246 "status x%x add_status x%x, mbx status x%x\n", 16247 shdr_status, shdr_add_status, rc); 16248 status = -ENXIO; 16249 } 16250 /* Remove mq from any list */ 16251 list_del_init(&mq->list); 16252 mempool_free(mbox, mq->phba->mbox_mem_pool); 16253 return status; 16254 } 16255 16256 /** 16257 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16258 * @wq: The queue structure associated with the queue to destroy. 16259 * 16260 * This function destroys a queue, as detailed in @wq by sending an mailbox 16261 * command, specific to the type of queue, to the HBA. 16262 * 16263 * The @wq struct is used to get the queue ID of the queue to destroy. 16264 * 16265 * On success this function will return a zero. If the queue destroy mailbox 16266 * command fails this function will return -ENXIO. 
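 *
 * Besides issuing WQ_DESTROY, this routine also frees the wq->pring ring
 * structure that lpfc_wq_create() allocated. An illustrative teardown pair
 * (a sketch only) is:
 *
 *	lpfc_wq_destroy(phba, wq);	- unregisters the WQ, frees wq->pring
 *	lpfc_sli4_queue_free(wq);	- releases the queue pages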
16267 **/ 16268 int 16269 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16270 { 16271 LPFC_MBOXQ_t *mbox; 16272 int rc, length, status = 0; 16273 uint32_t shdr_status, shdr_add_status; 16274 union lpfc_sli4_cfg_shdr *shdr; 16275 16276 /* sanity check on queue memory */ 16277 if (!wq) 16278 return -ENODEV; 16279 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16280 if (!mbox) 16281 return -ENOMEM; 16282 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16283 sizeof(struct lpfc_sli4_cfg_mhdr)); 16284 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16285 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16286 length, LPFC_SLI4_MBX_EMBED); 16287 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16288 wq->queue_id); 16289 mbox->vport = wq->phba->pport; 16290 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16291 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16292 shdr = (union lpfc_sli4_cfg_shdr *) 16293 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16294 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16295 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16296 if (shdr_status || shdr_add_status || rc) { 16297 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16298 "2508 WQ_DESTROY mailbox failed with " 16299 "status x%x add_status x%x, mbx status x%x\n", 16300 shdr_status, shdr_add_status, rc); 16301 status = -ENXIO; 16302 } 16303 /* Remove wq from any list */ 16304 list_del_init(&wq->list); 16305 kfree(wq->pring); 16306 wq->pring = NULL; 16307 mempool_free(mbox, wq->phba->mbox_mem_pool); 16308 return status; 16309 } 16310 16311 /** 16312 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16313 * @rq: The queue structure associated with the queue to destroy. 16314 * 16315 * This function destroys a queue, as detailed in @rq by sending an mailbox 16316 * command, specific to the type of queue, to the HBA. 16317 * 16318 * The @rq struct is used to get the queue ID of the queue to destroy. 16319 * 16320 * On success this function will return a zero. If the queue destroy mailbox 16321 * command fails this function will return -ENXIO. 16322 **/ 16323 int 16324 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16325 struct lpfc_queue *drq) 16326 { 16327 LPFC_MBOXQ_t *mbox; 16328 int rc, length, status = 0; 16329 uint32_t shdr_status, shdr_add_status; 16330 union lpfc_sli4_cfg_shdr *shdr; 16331 16332 /* sanity check on queue memory */ 16333 if (!hrq || !drq) 16334 return -ENODEV; 16335 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16336 if (!mbox) 16337 return -ENOMEM; 16338 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16339 sizeof(struct lpfc_sli4_cfg_mhdr)); 16340 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16341 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16342 length, LPFC_SLI4_MBX_EMBED); 16343 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16344 hrq->queue_id); 16345 mbox->vport = hrq->phba->pport; 16346 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16347 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16348 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16349 shdr = (union lpfc_sli4_cfg_shdr *) 16350 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16351 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16352 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16353 if (shdr_status || shdr_add_status || rc) { 16354 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16355 "2509 RQ_DESTROY mailbox failed with " 16356 "status x%x add_status x%x, mbx status x%x\n", 16357 shdr_status, shdr_add_status, rc); 16358 if (rc != MBX_TIMEOUT) 16359 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16360 return -ENXIO; 16361 } 16362 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16363 drq->queue_id); 16364 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 16365 shdr = (union lpfc_sli4_cfg_shdr *) 16366 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16367 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16368 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16369 if (shdr_status || shdr_add_status || rc) { 16370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16371 "2510 RQ_DESTROY mailbox failed with " 16372 "status x%x add_status x%x, mbx status x%x\n", 16373 shdr_status, shdr_add_status, rc); 16374 status = -ENXIO; 16375 } 16376 list_del_init(&hrq->list); 16377 list_del_init(&drq->list); 16378 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16379 return status; 16380 } 16381 16382 /** 16383 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 16384 * @phba: The virtual port for which this call being executed. 16385 * @pdma_phys_addr0: Physical address of the 1st SGL page. 16386 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 16387 * @xritag: the xritag that ties this io to the SGL pages. 16388 * 16389 * This routine will post the sgl pages for the IO that has the xritag 16390 * that is in the iocbq structure. The xritag is assigned during iocbq 16391 * creation and persists for as long as the driver is loaded. 16392 * if the caller has fewer than 256 scatter gather segments to map then 16393 * pdma_phys_addr1 should be 0. 16394 * If the caller needs to map more than 256 scatter gather segment then 16395 * pdma_phys_addr1 should be a valid physical address. 16396 * physical address for SGLs must be 64 byte aligned. 16397 * If you are going to map 2 SGL's then the first one must have 256 entries 16398 * the second sgl can have between 1 and 256 entries. 
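 *
 * Illustrative usage (a sketch; sglq stands for any driver object, such as
 * a struct lpfc_sglq, that carries an XRI and a 64 byte aligned SGL DMA
 * address):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 *	if (rc)
 *		return rc;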
16399 * 16400 * Return codes: 16401 * 0 - Success 16402 * -ENXIO, -ENOMEM - Failure 16403 **/ 16404 int 16405 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 16406 dma_addr_t pdma_phys_addr0, 16407 dma_addr_t pdma_phys_addr1, 16408 uint16_t xritag) 16409 { 16410 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 16411 LPFC_MBOXQ_t *mbox; 16412 int rc; 16413 uint32_t shdr_status, shdr_add_status; 16414 uint32_t mbox_tmo; 16415 union lpfc_sli4_cfg_shdr *shdr; 16416 16417 if (xritag == NO_XRI) { 16418 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16419 "0364 Invalid param:\n"); 16420 return -EINVAL; 16421 } 16422 16423 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16424 if (!mbox) 16425 return -ENOMEM; 16426 16427 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16428 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16429 sizeof(struct lpfc_mbx_post_sgl_pages) - 16430 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16431 16432 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 16433 &mbox->u.mqe.un.post_sgl_pages; 16434 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 16435 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 16436 16437 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 16438 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 16439 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 16440 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 16441 16442 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 16443 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 16444 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 16445 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 16446 if (!phba->sli4_hba.intr_enable) 16447 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16448 else { 16449 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16450 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16451 } 16452 /* The IOCTL status is embedded in the mailbox subheader. */ 16453 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 16454 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16455 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16456 if (rc != MBX_TIMEOUT) 16457 mempool_free(mbox, phba->mbox_mem_pool); 16458 if (shdr_status || shdr_add_status || rc) { 16459 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16460 "2511 POST_SGL mailbox failed with " 16461 "status x%x add_status x%x, mbx status x%x\n", 16462 shdr_status, shdr_add_status, rc); 16463 } 16464 return 0; 16465 } 16466 16467 /** 16468 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 16469 * @phba: pointer to lpfc hba data structure. 16470 * 16471 * This routine is invoked to post rpi header templates to the 16472 * HBA consistent with the SLI-4 interface spec. This routine 16473 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 16474 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 16475 * 16476 * Returns 16477 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 16478 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 16479 **/ 16480 static uint16_t 16481 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 16482 { 16483 unsigned long xri; 16484 16485 /* 16486 * Fetch the next logical xri. Because this index is logical, 16487 * the driver starts at 0 each time. 
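	 * The bitmap search and the set_bit() update below are both done
	 * under hbalock so that two allocators cannot be handed the same
	 * xri.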
16488 */ 16489 spin_lock_irq(&phba->hbalock); 16490 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 16491 phba->sli4_hba.max_cfg_param.max_xri, 0); 16492 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 16493 spin_unlock_irq(&phba->hbalock); 16494 return NO_XRI; 16495 } else { 16496 set_bit(xri, phba->sli4_hba.xri_bmask); 16497 phba->sli4_hba.max_cfg_param.xri_used++; 16498 } 16499 spin_unlock_irq(&phba->hbalock); 16500 return xri; 16501 } 16502 16503 /** 16504 * lpfc_sli4_free_xri - Release an xri for reuse. 16505 * @phba: pointer to lpfc hba data structure. 16506 * 16507 * This routine is invoked to release an xri to the pool of 16508 * available rpis maintained by the driver. 16509 **/ 16510 static void 16511 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16512 { 16513 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 16514 phba->sli4_hba.max_cfg_param.xri_used--; 16515 } 16516 } 16517 16518 /** 16519 * lpfc_sli4_free_xri - Release an xri for reuse. 16520 * @phba: pointer to lpfc hba data structure. 16521 * 16522 * This routine is invoked to release an xri to the pool of 16523 * available rpis maintained by the driver. 16524 **/ 16525 void 16526 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16527 { 16528 spin_lock_irq(&phba->hbalock); 16529 __lpfc_sli4_free_xri(phba, xri); 16530 spin_unlock_irq(&phba->hbalock); 16531 } 16532 16533 /** 16534 * lpfc_sli4_next_xritag - Get an xritag for the io 16535 * @phba: Pointer to HBA context object. 16536 * 16537 * This function gets an xritag for the iocb. If there is no unused xritag 16538 * it will return 0xffff. 16539 * The function returns the allocated xritag if successful, else returns zero. 16540 * Zero is not a valid xritag. 16541 * The caller is not required to hold any lock. 16542 **/ 16543 uint16_t 16544 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 16545 { 16546 uint16_t xri_index; 16547 16548 xri_index = lpfc_sli4_alloc_xri(phba); 16549 if (xri_index == NO_XRI) 16550 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 16551 "2004 Failed to allocate XRI.last XRITAG is %d" 16552 " Max XRI is %d, Used XRI is %d\n", 16553 xri_index, 16554 phba->sli4_hba.max_cfg_param.max_xri, 16555 phba->sli4_hba.max_cfg_param.xri_used); 16556 return xri_index; 16557 } 16558 16559 /** 16560 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 16561 * @phba: pointer to lpfc hba data structure. 16562 * @post_sgl_list: pointer to els sgl entry list. 16563 * @count: number of els sgl entries on the list. 16564 * 16565 * This routine is invoked to post a block of driver's sgl pages to the 16566 * HBA using non-embedded mailbox command. No Lock is held. This routine 16567 * is only called when the driver is loading and after all IO has been 16568 * stopped. 
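 *
 * Returns 0 on success, -ENOMEM if the mailbox or its DMA pages cannot be
 * allocated (or if the request would exceed one SLI4 page), and -ENXIO if
 * the POST_SGL_PAGES mailbox command is rejected by the port. An
 * illustrative call (a sketch; the list and count are assumed to have been
 * built by the caller from the ELS sglq entries) is:
 *
 *	rc = lpfc_sli4_post_sgl_list(phba, &sgl_list, cnt);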
16569 **/ 16570 static int 16571 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16572 struct list_head *post_sgl_list, 16573 int post_cnt) 16574 { 16575 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16576 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16577 struct sgl_page_pairs *sgl_pg_pairs; 16578 void *viraddr; 16579 LPFC_MBOXQ_t *mbox; 16580 uint32_t reqlen, alloclen, pg_pairs; 16581 uint32_t mbox_tmo; 16582 uint16_t xritag_start = 0; 16583 int rc = 0; 16584 uint32_t shdr_status, shdr_add_status; 16585 union lpfc_sli4_cfg_shdr *shdr; 16586 16587 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16588 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16589 if (reqlen > SLI4_PAGE_SIZE) { 16590 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16591 "2559 Block sgl registration required DMA " 16592 "size (%d) great than a page\n", reqlen); 16593 return -ENOMEM; 16594 } 16595 16596 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16597 if (!mbox) 16598 return -ENOMEM; 16599 16600 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16601 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16602 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16603 LPFC_SLI4_MBX_NEMBED); 16604 16605 if (alloclen < reqlen) { 16606 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16607 "0285 Allocated DMA memory size (%d) is " 16608 "less than the requested DMA memory " 16609 "size (%d)\n", alloclen, reqlen); 16610 lpfc_sli4_mbox_cmd_free(phba, mbox); 16611 return -ENOMEM; 16612 } 16613 /* Set up the SGL pages in the non-embedded DMA pages */ 16614 viraddr = mbox->sge_array->addr[0]; 16615 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16616 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16617 16618 pg_pairs = 0; 16619 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16620 /* Set up the sge entry */ 16621 sgl_pg_pairs->sgl_pg0_addr_lo = 16622 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16623 sgl_pg_pairs->sgl_pg0_addr_hi = 16624 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16625 sgl_pg_pairs->sgl_pg1_addr_lo = 16626 cpu_to_le32(putPaddrLow(0)); 16627 sgl_pg_pairs->sgl_pg1_addr_hi = 16628 cpu_to_le32(putPaddrHigh(0)); 16629 16630 /* Keep the first xritag on the list */ 16631 if (pg_pairs == 0) 16632 xritag_start = sglq_entry->sli4_xritag; 16633 sgl_pg_pairs++; 16634 pg_pairs++; 16635 } 16636 16637 /* Complete initialization and perform endian conversion. */ 16638 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16639 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16640 sgl->word0 = cpu_to_le32(sgl->word0); 16641 16642 if (!phba->sli4_hba.intr_enable) 16643 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16644 else { 16645 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16646 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16647 } 16648 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16649 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16650 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16651 if (rc != MBX_TIMEOUT) 16652 lpfc_sli4_mbox_cmd_free(phba, mbox); 16653 if (shdr_status || shdr_add_status || rc) { 16654 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16655 "2513 POST_SGL_BLOCK mailbox command failed " 16656 "status x%x add_status x%x mbx status x%x\n", 16657 shdr_status, shdr_add_status, rc); 16658 rc = -ENXIO; 16659 } 16660 return rc; 16661 } 16662 16663 /** 16664 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 16665 * @phba: pointer to lpfc hba data structure. 
16666 * @nblist: pointer to nvme buffer list. 16667 * @count: number of scsi buffers on the list. 16668 * 16669 * This routine is invoked to post a block of @count scsi sgl pages from a 16670 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 16671 * No Lock is held. 16672 * 16673 **/ 16674 static int 16675 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 16676 int count) 16677 { 16678 struct lpfc_io_buf *lpfc_ncmd; 16679 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16680 struct sgl_page_pairs *sgl_pg_pairs; 16681 void *viraddr; 16682 LPFC_MBOXQ_t *mbox; 16683 uint32_t reqlen, alloclen, pg_pairs; 16684 uint32_t mbox_tmo; 16685 uint16_t xritag_start = 0; 16686 int rc = 0; 16687 uint32_t shdr_status, shdr_add_status; 16688 dma_addr_t pdma_phys_bpl1; 16689 union lpfc_sli4_cfg_shdr *shdr; 16690 16691 /* Calculate the requested length of the dma memory */ 16692 reqlen = count * sizeof(struct sgl_page_pairs) + 16693 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16694 if (reqlen > SLI4_PAGE_SIZE) { 16695 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16696 "6118 Block sgl registration required DMA " 16697 "size (%d) great than a page\n", reqlen); 16698 return -ENOMEM; 16699 } 16700 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16701 if (!mbox) { 16702 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16703 "6119 Failed to allocate mbox cmd memory\n"); 16704 return -ENOMEM; 16705 } 16706 16707 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16708 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16709 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16710 reqlen, LPFC_SLI4_MBX_NEMBED); 16711 16712 if (alloclen < reqlen) { 16713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16714 "6120 Allocated DMA memory size (%d) is " 16715 "less than the requested DMA memory " 16716 "size (%d)\n", alloclen, reqlen); 16717 lpfc_sli4_mbox_cmd_free(phba, mbox); 16718 return -ENOMEM; 16719 } 16720 16721 /* Get the first SGE entry from the non-embedded DMA memory */ 16722 viraddr = mbox->sge_array->addr[0]; 16723 16724 /* Set up the SGL pages in the non-embedded DMA pages */ 16725 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16726 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16727 16728 pg_pairs = 0; 16729 list_for_each_entry(lpfc_ncmd, nblist, list) { 16730 /* Set up the sge entry */ 16731 sgl_pg_pairs->sgl_pg0_addr_lo = 16732 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 16733 sgl_pg_pairs->sgl_pg0_addr_hi = 16734 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 16735 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16736 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 16737 SGL_PAGE_SIZE; 16738 else 16739 pdma_phys_bpl1 = 0; 16740 sgl_pg_pairs->sgl_pg1_addr_lo = 16741 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16742 sgl_pg_pairs->sgl_pg1_addr_hi = 16743 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16744 /* Keep the first xritag on the list */ 16745 if (pg_pairs == 0) 16746 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 16747 sgl_pg_pairs++; 16748 pg_pairs++; 16749 } 16750 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16751 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16752 /* Perform endian conversion if necessary */ 16753 sgl->word0 = cpu_to_le32(sgl->word0); 16754 16755 if (!phba->sli4_hba.intr_enable) { 16756 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16757 } else { 16758 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16759 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16760 } 16761 shdr = (union 
lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 16762 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16763 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16764 if (rc != MBX_TIMEOUT) 16765 lpfc_sli4_mbox_cmd_free(phba, mbox); 16766 if (shdr_status || shdr_add_status || rc) { 16767 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16768 "6125 POST_SGL_BLOCK mailbox command failed " 16769 "status x%x add_status x%x mbx status x%x\n", 16770 shdr_status, shdr_add_status, rc); 16771 rc = -ENXIO; 16772 } 16773 return rc; 16774 } 16775 16776 /** 16777 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 16778 * @phba: pointer to lpfc hba data structure. 16779 * @post_nblist: pointer to the nvme buffer list. 16780 * 16781 * This routine walks a list of nvme buffers that was passed in. It attempts 16782 * to construct blocks of nvme buffer sgls which contains contiguous xris and 16783 * uses the non-embedded SGL block post mailbox commands to post to the port. 16784 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 16785 * embedded SGL post mailbox command for posting. The @post_nblist passed in 16786 * must be local list, thus no lock is needed when manipulate the list. 16787 * 16788 * Returns: 0 = failure, non-zero number of successfully posted buffers. 16789 **/ 16790 int 16791 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 16792 struct list_head *post_nblist, int sb_count) 16793 { 16794 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 16795 int status, sgl_size; 16796 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 16797 dma_addr_t pdma_phys_sgl1; 16798 int last_xritag = NO_XRI; 16799 int cur_xritag; 16800 LIST_HEAD(prep_nblist); 16801 LIST_HEAD(blck_nblist); 16802 LIST_HEAD(nvme_nblist); 16803 16804 /* sanity check */ 16805 if (sb_count <= 0) 16806 return -EINVAL; 16807 16808 sgl_size = phba->cfg_sg_dma_buf_size; 16809 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 16810 list_del_init(&lpfc_ncmd->list); 16811 block_cnt++; 16812 if ((last_xritag != NO_XRI) && 16813 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 16814 /* a hole in xri block, form a sgl posting block */ 16815 list_splice_init(&prep_nblist, &blck_nblist); 16816 post_cnt = block_cnt - 1; 16817 /* prepare list for next posting block */ 16818 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16819 block_cnt = 1; 16820 } else { 16821 /* prepare list for next posting block */ 16822 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16823 /* enough sgls for non-embed sgl mbox command */ 16824 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 16825 list_splice_init(&prep_nblist, &blck_nblist); 16826 post_cnt = block_cnt; 16827 block_cnt = 0; 16828 } 16829 } 16830 num_posting++; 16831 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16832 16833 /* end of repost sgl list condition for NVME buffers */ 16834 if (num_posting == sb_count) { 16835 if (post_cnt == 0) { 16836 /* last sgl posting block */ 16837 list_splice_init(&prep_nblist, &blck_nblist); 16838 post_cnt = block_cnt; 16839 } else if (block_cnt == 1) { 16840 /* last single sgl with non-contiguous xri */ 16841 if (sgl_size > SGL_PAGE_SIZE) 16842 pdma_phys_sgl1 = 16843 lpfc_ncmd->dma_phys_sgl + 16844 SGL_PAGE_SIZE; 16845 else 16846 pdma_phys_sgl1 = 0; 16847 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16848 status = lpfc_sli4_post_sgl( 16849 phba, lpfc_ncmd->dma_phys_sgl, 16850 pdma_phys_sgl1, cur_xritag); 16851 if (status) { 16852 /* Post error. Buffer unavailable. 
*/ 16853 lpfc_ncmd->flags |= 16854 LPFC_SBUF_NOT_POSTED; 16855 } else { 16856 /* Post success. Bffer available. */ 16857 lpfc_ncmd->flags &= 16858 ~LPFC_SBUF_NOT_POSTED; 16859 lpfc_ncmd->status = IOSTAT_SUCCESS; 16860 num_posted++; 16861 } 16862 /* success, put on NVME buffer sgl list */ 16863 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16864 } 16865 } 16866 16867 /* continue until a nembed page worth of sgls */ 16868 if (post_cnt == 0) 16869 continue; 16870 16871 /* post block of NVME buffer list sgls */ 16872 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 16873 post_cnt); 16874 16875 /* don't reset xirtag due to hole in xri block */ 16876 if (block_cnt == 0) 16877 last_xritag = NO_XRI; 16878 16879 /* reset NVME buffer post count for next round of posting */ 16880 post_cnt = 0; 16881 16882 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 16883 while (!list_empty(&blck_nblist)) { 16884 list_remove_head(&blck_nblist, lpfc_ncmd, 16885 struct lpfc_io_buf, list); 16886 if (status) { 16887 /* Post error. Mark buffer unavailable. */ 16888 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 16889 } else { 16890 /* Post success, Mark buffer available. */ 16891 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 16892 lpfc_ncmd->status = IOSTAT_SUCCESS; 16893 num_posted++; 16894 } 16895 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16896 } 16897 } 16898 /* Push NVME buffers with sgl posted to the available list */ 16899 lpfc_io_buf_replenish(phba, &nvme_nblist); 16900 16901 return num_posted; 16902 } 16903 16904 /** 16905 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16906 * @phba: pointer to lpfc_hba struct that the frame was received on 16907 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16908 * 16909 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16910 * valid type of frame that the LPFC driver will handle. This function will 16911 * return a zero if the frame is a valid frame or a non zero value when the 16912 * frame does not pass the check. 
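 *
 * The check is purely header based: the R_CTL routing value must be one of
 * the data, link service, or link control values listed below (a VFT tagged
 * frame is unwrapped and the inner header re-checked), and the TYPE field
 * must be BLS, ELS, CT, FCP or NVME. Anything else is logged and dropped.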
16913 **/ 16914 static int 16915 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16916 { 16917 /* make rctl_names static to save stack space */ 16918 struct fc_vft_header *fc_vft_hdr; 16919 uint32_t *header = (uint32_t *) fc_hdr; 16920 16921 switch (fc_hdr->fh_r_ctl) { 16922 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16923 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16924 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16925 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16926 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16927 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16928 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16929 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16930 case FC_RCTL_ELS_REQ: /* extended link services request */ 16931 case FC_RCTL_ELS_REP: /* extended link services reply */ 16932 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16933 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16934 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16935 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16936 case FC_RCTL_BA_RMC: /* remove connection */ 16937 case FC_RCTL_BA_ACC: /* basic accept */ 16938 case FC_RCTL_BA_RJT: /* basic reject */ 16939 case FC_RCTL_BA_PRMT: 16940 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16941 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16942 case FC_RCTL_P_RJT: /* port reject */ 16943 case FC_RCTL_F_RJT: /* fabric reject */ 16944 case FC_RCTL_P_BSY: /* port busy */ 16945 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16946 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16947 case FC_RCTL_LCR: /* link credit reset */ 16948 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16949 case FC_RCTL_END: /* end */ 16950 break; 16951 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16952 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16953 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16954 return lpfc_fc_frame_check(phba, fc_hdr); 16955 default: 16956 goto drop; 16957 } 16958 16959 switch (fc_hdr->fh_type) { 16960 case FC_TYPE_BLS: 16961 case FC_TYPE_ELS: 16962 case FC_TYPE_FCP: 16963 case FC_TYPE_CT: 16964 case FC_TYPE_NVME: 16965 break; 16966 case FC_TYPE_IP: 16967 case FC_TYPE_ILS: 16968 default: 16969 goto drop; 16970 } 16971 16972 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16973 "2538 Received frame rctl:x%x, type:x%x, " 16974 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16975 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16976 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16977 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16978 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16979 be32_to_cpu(header[6])); 16980 return 0; 16981 drop: 16982 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16983 "2539 Dropped frame rctl:x%x type:x%x\n", 16984 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16985 return 1; 16986 } 16987 16988 /** 16989 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16990 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16991 * 16992 * This function processes the FC header to retrieve the VFI from the VF 16993 * header, if one exists. This function will return the VFI if one exists 16994 * or 0 if no VSAN Header exists. 
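 *
 * Note: lpfc_fc_frame_to_vport() below compares this value against each
 * vport's vfi when matching an unsolicited frame to a vport.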
16995 **/ 16996 static uint32_t 16997 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16998 { 16999 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 17000 17001 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 17002 return 0; 17003 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 17004 } 17005 17006 /** 17007 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 17008 * @phba: Pointer to the HBA structure to search for the vport on 17009 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 17010 * @fcfi: The FC Fabric ID that the frame came from 17011 * 17012 * This function searches the @phba for a vport that matches the content of the 17013 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 17014 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 17015 * returns the matching vport pointer or NULL if unable to match frame to a 17016 * vport. 17017 **/ 17018 static struct lpfc_vport * 17019 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 17020 uint16_t fcfi, uint32_t did) 17021 { 17022 struct lpfc_vport **vports; 17023 struct lpfc_vport *vport = NULL; 17024 int i; 17025 17026 if (did == Fabric_DID) 17027 return phba->pport; 17028 if ((phba->pport->fc_flag & FC_PT2PT) && 17029 !(phba->link_state == LPFC_HBA_READY)) 17030 return phba->pport; 17031 17032 vports = lpfc_create_vport_work_array(phba); 17033 if (vports != NULL) { 17034 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 17035 if (phba->fcf.fcfi == fcfi && 17036 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 17037 vports[i]->fc_myDID == did) { 17038 vport = vports[i]; 17039 break; 17040 } 17041 } 17042 } 17043 lpfc_destroy_vport_work_array(phba, vports); 17044 return vport; 17045 } 17046 17047 /** 17048 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 17049 * @vport: The vport to work on. 17050 * 17051 * This function updates the receive sequence time stamp for this vport. The 17052 * receive sequence time stamp indicates the time that the last frame of the 17053 * the sequence that has been idle for the longest amount of time was received. 17054 * the driver uses this time stamp to indicate if any received sequences have 17055 * timed out. 17056 **/ 17057 static void 17058 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 17059 { 17060 struct lpfc_dmabuf *h_buf; 17061 struct hbq_dmabuf *dmabuf = NULL; 17062 17063 /* get the oldest sequence on the rcv list */ 17064 h_buf = list_get_first(&vport->rcv_buffer_list, 17065 struct lpfc_dmabuf, list); 17066 if (!h_buf) 17067 return; 17068 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17069 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 17070 } 17071 17072 /** 17073 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 17074 * @vport: The vport that the received sequences were sent to. 17075 * 17076 * This function cleans up all outstanding received sequences. This is called 17077 * by the driver when a link event or user action invalidates all the received 17078 * sequences. 
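 *
 * Both the header and the data dmabufs of every pending sequence on the
 * vport's rcv_buffer_list are returned through lpfc_in_buf_free().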
17079 **/ 17080 void 17081 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 17082 { 17083 struct lpfc_dmabuf *h_buf, *hnext; 17084 struct lpfc_dmabuf *d_buf, *dnext; 17085 struct hbq_dmabuf *dmabuf = NULL; 17086 17087 /* start with the oldest sequence on the rcv list */ 17088 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17089 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17090 list_del_init(&dmabuf->hbuf.list); 17091 list_for_each_entry_safe(d_buf, dnext, 17092 &dmabuf->dbuf.list, list) { 17093 list_del_init(&d_buf->list); 17094 lpfc_in_buf_free(vport->phba, d_buf); 17095 } 17096 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17097 } 17098 } 17099 17100 /** 17101 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 17102 * @vport: The vport that the received sequences were sent to. 17103 * 17104 * This function determines whether any received sequences have timed out by 17105 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 17106 * indicates that there is at least one timed out sequence this routine will 17107 * go through the received sequences one at a time from most inactive to most 17108 * active to determine which ones need to be cleaned up. Once it has determined 17109 * that a sequence needs to be cleaned up it will simply free up the resources 17110 * without sending an abort. 17111 **/ 17112 void 17113 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 17114 { 17115 struct lpfc_dmabuf *h_buf, *hnext; 17116 struct lpfc_dmabuf *d_buf, *dnext; 17117 struct hbq_dmabuf *dmabuf = NULL; 17118 unsigned long timeout; 17119 int abort_count = 0; 17120 17121 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17122 vport->rcv_buffer_time_stamp); 17123 if (list_empty(&vport->rcv_buffer_list) || 17124 time_before(jiffies, timeout)) 17125 return; 17126 /* start with the oldest sequence on the rcv list */ 17127 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17128 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17129 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17130 dmabuf->time_stamp); 17131 if (time_before(jiffies, timeout)) 17132 break; 17133 abort_count++; 17134 list_del_init(&dmabuf->hbuf.list); 17135 list_for_each_entry_safe(d_buf, dnext, 17136 &dmabuf->dbuf.list, list) { 17137 list_del_init(&d_buf->list); 17138 lpfc_in_buf_free(vport->phba, d_buf); 17139 } 17140 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17141 } 17142 if (abort_count) 17143 lpfc_update_rcv_time_stamp(vport); 17144 } 17145 17146 /** 17147 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 17148 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 17149 * 17150 * This function searches through the existing incomplete sequences that have 17151 * been sent to this @vport. If the frame matches one of the incomplete 17152 * sequences then the dbuf in the @dmabuf is added to the list of frames that 17153 * make up that sequence. If no sequence is found that matches this frame then 17154 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 17155 * This function returns a pointer to the first dmabuf in the sequence list that 17156 * the frame was linked to. 
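 *
 * Frames are matched to a pending sequence by the SEQ_ID, OX_ID and S_ID
 * fields of the FC header, and are kept on the sequence's dbuf list in
 * ascending SEQ_CNT order. If no insertion point can be found for the
 * frame, NULL is returned.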
17157 **/
17158 static struct hbq_dmabuf *
17159 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17160 {
17161 struct fc_frame_header *new_hdr;
17162 struct fc_frame_header *temp_hdr;
17163 struct lpfc_dmabuf *d_buf;
17164 struct lpfc_dmabuf *h_buf;
17165 struct hbq_dmabuf *seq_dmabuf = NULL;
17166 struct hbq_dmabuf *temp_dmabuf = NULL;
17167 uint8_t found = 0;
17168
17169 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17170 dmabuf->time_stamp = jiffies;
17171 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17172
17173 /* Use the hdr_buf to find the sequence that this frame belongs to */
17174 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17175 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17176 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17177 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17178 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17179 continue;
17180 /* found a pending sequence that matches this frame */
17181 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17182 break;
17183 }
17184 if (!seq_dmabuf) {
17185 /*
17186 * This indicates first frame received for this sequence.
17187 * Queue the buffer on the vport's rcv_buffer_list.
17188 */
17189 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17190 lpfc_update_rcv_time_stamp(vport);
17191 return dmabuf;
17192 }
17193 temp_hdr = seq_dmabuf->hbuf.virt;
17194 if (be16_to_cpu(new_hdr->fh_seq_cnt) <
17195 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17196 list_del_init(&seq_dmabuf->hbuf.list);
17197 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
17198 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17199 lpfc_update_rcv_time_stamp(vport);
17200 return dmabuf;
17201 }
17202 /* move this sequence to the tail to indicate a young sequence */
17203 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
17204 seq_dmabuf->time_stamp = jiffies;
17205 lpfc_update_rcv_time_stamp(vport);
17206 if (list_empty(&seq_dmabuf->dbuf.list)) {
17207 temp_hdr = dmabuf->hbuf.virt;
17208 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
17209 return seq_dmabuf;
17210 }
17211 /* find the correct place in the sequence to insert this frame */
17212 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
17213 while (!found) {
17214 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17215 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
17216 /*
17217 * If the frame's sequence count is greater than the frame on
17218 * the list then insert the frame right after this frame
17219 */
17220 if (be16_to_cpu(new_hdr->fh_seq_cnt) >
17221 be16_to_cpu(temp_hdr->fh_seq_cnt)) {
17222 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
17223 found = 1;
17224 break;
17225 }
17226
17227 if (&d_buf->list == &seq_dmabuf->dbuf.list)
17228 break;
17229 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
17230 }
17231
17232 if (found)
17233 return seq_dmabuf;
17234 return NULL;
17235 }
17236
17237 /**
17238 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
17239 * @vport: pointer to a virtual port
17240 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17241 *
17242 * This function tries to abort the partially assembled sequence described
17243 * by the information from the basic abort @dmabuf. It checks to see whether
17244 * such a partially assembled sequence is held by the driver. If so, it shall
17245 * free up all the frames from the partially assembled sequence.
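 *
 * The pending sequence is located with the same SEQ_ID/OX_ID/S_ID match
 * used by lpfc_fc_frame_add() when the frames were first queued.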
17246 *
17247 * Return
17248 * true -- if there is a matching partially assembled sequence present and
17249 * all the frames were freed with the sequence;
17250 * false -- if there is no matching partially assembled sequence present so
17251 * nothing was aborted in the lower layer driver.
17252 **/
17253 static bool
17254 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
17255 struct hbq_dmabuf *dmabuf)
17256 {
17257 struct fc_frame_header *new_hdr;
17258 struct fc_frame_header *temp_hdr;
17259 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
17260 struct hbq_dmabuf *seq_dmabuf = NULL;
17261
17262 /* Use the hdr_buf to find the sequence that matches this frame */
17263 INIT_LIST_HEAD(&dmabuf->dbuf.list);
17264 INIT_LIST_HEAD(&dmabuf->hbuf.list);
17265 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17266 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
17267 temp_hdr = (struct fc_frame_header *)h_buf->virt;
17268 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
17269 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
17270 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
17271 continue;
17272 /* found a pending sequence that matches this frame */
17273 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
17274 break;
17275 }
17276
17277 /* Free up all the frames from the partially assembled sequence */
17278 if (seq_dmabuf) {
17279 list_for_each_entry_safe(d_buf, n_buf,
17280 &seq_dmabuf->dbuf.list, list) {
17281 list_del_init(&d_buf->list);
17282 lpfc_in_buf_free(vport->phba, d_buf);
17283 }
17284 return true;
17285 }
17286 return false;
17287 }
17288
17289 /**
17290 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
17291 * @vport: pointer to a virtual port
17292 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17293 *
17294 * This function tries to abort the sequence that was assembled and passed to
17295 * the upper level protocol, described by the information from the basic
17296 * abort @dmabuf. It checks to see whether such a pending context exists at
17297 * the upper level protocol. If so, it shall clean up the pending context.
17298 *
17299 * Return
17300 * true -- if a matching pending context of the sequence was cleaned up
17301 * at the ulp;
17302 * false -- if no matching pending context of the sequence is present
17303 * at the ulp.
17304 **/
17305 static bool
17306 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
17307 {
17308 struct lpfc_hba *phba = vport->phba;
17309 int handled;
17310
17311 /* Accepting abort at ulp with SLI4 only */
17312 if (phba->sli_rev < LPFC_SLI_REV4)
17313 return false;
17314
17315 /* Give all interested upper level protocols a chance to handle the abort */
17316 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
17317 if (handled)
17318 return true;
17319
17320 return false;
17321 }
17322
17323 /**
17324 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17325 * @phba: Pointer to HBA context object.
17326 * @cmd_iocbq: pointer to the command iocbq structure.
17327 * @rsp_iocbq: pointer to the response iocbq structure.
17328 *
17329 * This function handles the sequence abort response iocb command complete
17330 * event. It properly releases the memory allocated to the sequence abort
17331 * accept iocb.
17332 **/ 17333 static void 17334 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 17335 struct lpfc_iocbq *cmd_iocbq, 17336 struct lpfc_iocbq *rsp_iocbq) 17337 { 17338 struct lpfc_nodelist *ndlp; 17339 17340 if (cmd_iocbq) { 17341 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 17342 lpfc_nlp_put(ndlp); 17343 lpfc_nlp_not_used(ndlp); 17344 lpfc_sli_release_iocbq(phba, cmd_iocbq); 17345 } 17346 17347 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17348 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17349 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17350 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17351 rsp_iocbq->iocb.ulpStatus, 17352 rsp_iocbq->iocb.un.ulpWord[4]); 17353 } 17354 17355 /** 17356 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17357 * @phba: Pointer to HBA context object. 17358 * @xri: xri id in transaction. 17359 * 17360 * This function validates the xri maps to the known range of XRIs allocated an 17361 * used by the driver. 17362 **/ 17363 uint16_t 17364 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17365 uint16_t xri) 17366 { 17367 uint16_t i; 17368 17369 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17370 if (xri == phba->sli4_hba.xri_ids[i]) 17371 return i; 17372 } 17373 return NO_XRI; 17374 } 17375 17376 /** 17377 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17378 * @phba: Pointer to HBA context object. 17379 * @fc_hdr: pointer to a FC frame header. 17380 * 17381 * This function sends a basic response to a previous unsol sequence abort 17382 * event after aborting the sequence handling. 17383 **/ 17384 void 17385 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17386 struct fc_frame_header *fc_hdr, bool aborted) 17387 { 17388 struct lpfc_hba *phba = vport->phba; 17389 struct lpfc_iocbq *ctiocb = NULL; 17390 struct lpfc_nodelist *ndlp; 17391 uint16_t oxid, rxid, xri, lxri; 17392 uint32_t sid, fctl; 17393 IOCB_t *icmd; 17394 int rc; 17395 17396 if (!lpfc_is_link_up(phba)) 17397 return; 17398 17399 sid = sli4_sid_from_fc_hdr(fc_hdr); 17400 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17401 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17402 17403 ndlp = lpfc_findnode_did(vport, sid); 17404 if (!ndlp) { 17405 ndlp = lpfc_nlp_init(vport, sid); 17406 if (!ndlp) { 17407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17408 "1268 Failed to allocate ndlp for " 17409 "oxid:x%x SID:x%x\n", oxid, sid); 17410 return; 17411 } 17412 /* Put ndlp onto pport node list */ 17413 lpfc_enqueue_node(vport, ndlp); 17414 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17415 /* re-setup ndlp without removing from node list */ 17416 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17417 if (!ndlp) { 17418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17419 "3275 Failed to active ndlp found " 17420 "for oxid:x%x SID:x%x\n", oxid, sid); 17421 return; 17422 } 17423 } 17424 17425 /* Allocate buffer for rsp iocb */ 17426 ctiocb = lpfc_sli_get_iocbq(phba); 17427 if (!ctiocb) 17428 return; 17429 17430 /* Extract the F_CTL field from FC_HDR */ 17431 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17432 17433 icmd = &ctiocb->iocb; 17434 icmd->un.xseq64.bdl.bdeSize = 0; 17435 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17436 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17437 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17438 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17439 17440 /* Fill in the rest of iocb fields */ 17441 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17442 icmd->ulpBdeCount = 0; 17443 icmd->ulpLe = 1; 17444 icmd->ulpClass = CLASS3; 17445 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17446 ctiocb->context1 = lpfc_nlp_get(ndlp); 17447 17448 ctiocb->iocb_cmpl = NULL; 17449 ctiocb->vport = phba->pport; 17450 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17451 ctiocb->sli4_lxritag = NO_XRI; 17452 ctiocb->sli4_xritag = NO_XRI; 17453 17454 if (fctl & FC_FC_EX_CTX) 17455 /* Exchange responder sent the abort so we 17456 * own the oxid. 17457 */ 17458 xri = oxid; 17459 else 17460 xri = rxid; 17461 lxri = lpfc_sli4_xri_inrange(phba, xri); 17462 if (lxri != NO_XRI) 17463 lpfc_set_rrq_active(phba, ndlp, lxri, 17464 (xri == oxid) ? rxid : oxid, 0); 17465 /* For BA_ABTS from exchange responder, if the logical xri with 17466 * the oxid maps to the FCP XRI range, the port no longer has 17467 * that exchange context, send a BLS_RJT. Override the IOCB for 17468 * a BA_RJT. 17469 */ 17470 if ((fctl & FC_FC_EX_CTX) && 17471 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17472 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17473 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17474 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17475 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17476 } 17477 17478 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17479 * the driver no longer has that exchange, send a BLS_RJT. Override 17480 * the IOCB for a BA_RJT. 17481 */ 17482 if (aborted == false) { 17483 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17484 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17485 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17486 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17487 } 17488 17489 if (fctl & FC_FC_EX_CTX) { 17490 /* ABTS sent by responder to CT exchange, construction 17491 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17492 * field and RX_ID from ABTS for RX_ID field. 17493 */ 17494 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17495 } else { 17496 /* ABTS sent by initiator to CT exchange, construction 17497 * of BA_ACC will need to allocate a new XRI as for the 17498 * XRI_TAG field. 17499 */ 17500 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17501 } 17502 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17503 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17504 17505 /* Xmit CT abts response on exchange <xid> */ 17506 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17507 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17508 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17509 17510 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17511 if (rc == IOCB_ERROR) { 17512 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17513 "2925 Failed to issue CT ABTS RSP x%x on " 17514 "xri x%x, Data x%x\n", 17515 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17516 phba->link_state); 17517 lpfc_nlp_put(ndlp); 17518 ctiocb->context1 = NULL; 17519 lpfc_sli_release_iocbq(phba, ctiocb); 17520 } 17521 } 17522 17523 /** 17524 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17525 * @vport: Pointer to the vport on which this sequence was received 17526 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17527 * 17528 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17529 * receive sequence is only partially assembed by the driver, it shall abort 17530 * the partially assembled frames for the sequence. 
Otherwise, if the
17531 * unsolicited receive sequence has been completely assembled and passed to
17532 * the Upper Layer Protocol (ULP), it then marks the per-oxid status for the
17533 * unsolicited sequence as aborted. After that, it will respond to the abort
17534 * with a basic accept (BA_ACC) or basic reject (BA_RJT) as appropriate.
17535 **/
17536 static void
17537 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
17538 struct hbq_dmabuf *dmabuf)
17539 {
17540 struct lpfc_hba *phba = vport->phba;
17541 struct fc_frame_header fc_hdr;
17542 uint32_t fctl;
17543 bool aborted;
17544
17545 /* Make a copy of fc_hdr before the dmabuf is released */
17546 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
17547 fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
17548
17549 if (fctl & FC_FC_EX_CTX) {
17550 /* ABTS by responder to exchange, no cleanup needed */
17551 aborted = true;
17552 } else {
17553 /* ABTS by initiator to exchange, need to do cleanup */
17554 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
17555 if (aborted == false)
17556 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
17557 }
17558 lpfc_in_buf_free(phba, &dmabuf->dbuf);
17559
17560 if (phba->nvmet_support) {
17561 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
17562 return;
17563 }
17564
17565 /* Respond with BA_ACC or BA_RJT accordingly */
17566 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
17567 }
17568
17569 /**
17570 * lpfc_seq_complete - Indicates if a sequence is complete
17571 * @dmabuf: pointer to a dmabuf that describes the FC sequence
17572 *
17573 * This function checks the sequence, starting with the frame described by
17574 * @dmabuf, to see if all the frames associated with this sequence are present.
17575 * The frames associated with this sequence are linked to the @dmabuf using the
17576 * dbuf list. This function looks for three major things. 1) That the first
17577 * frame has a sequence count of zero. 2) That there is a frame with the last
17578 * frame of the sequence bit set. 3) That there are no holes in the sequence
17579 * count. The function will return 1 when the sequence is complete, otherwise
17580 * it will return 0.
17581 **/
17582 static int
17583 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
17584 {
17585 struct fc_frame_header *hdr;
17586 struct lpfc_dmabuf *d_buf;
17587 struct hbq_dmabuf *seq_dmabuf;
17588 uint32_t fctl;
17589 int seq_count = 0;
17590
17591 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17592 /* make sure the first frame of the sequence has a sequence count of zero */
17593 if (hdr->fh_seq_cnt != seq_count)
17594 return 0;
17595 fctl = (hdr->fh_f_ctl[0] << 16 |
17596 hdr->fh_f_ctl[1] << 8 |
17597 hdr->fh_f_ctl[2]);
17598 /* If last frame of sequence we can return success. */
17599 if (fctl & FC_FC_END_SEQ)
17600 return 1;
17601 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
17602 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
17603 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17604 /* If there is a hole in the sequence count then fail. */
17605 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
17606 return 0;
17607 fctl = (hdr->fh_f_ctl[0] << 16 |
17608 hdr->fh_f_ctl[1] << 8 |
17609 hdr->fh_f_ctl[2]);
/* If last frame of sequence we can return success.
*/ 17610 if (fctl & FC_FC_END_SEQ) 17611 return 1; 17612 } 17613 return 0; 17614 } 17615 17616 /** 17617 * lpfc_prep_seq - Prep sequence for ULP processing 17618 * @vport: Pointer to the vport on which this sequence was received 17619 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17620 * 17621 * This function takes a sequence, described by a list of frames, and creates 17622 * a list of iocbq structures to describe the sequence. This iocbq list will be 17623 * used to issue to the generic unsolicited sequence handler. This routine 17624 * returns a pointer to the first iocbq in the list. If the function is unable 17625 * to allocate an iocbq then it throw out the received frames that were not 17626 * able to be described and return a pointer to the first iocbq. If unable to 17627 * allocate any iocbqs (including the first) this function will return NULL. 17628 **/ 17629 static struct lpfc_iocbq * 17630 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 17631 { 17632 struct hbq_dmabuf *hbq_buf; 17633 struct lpfc_dmabuf *d_buf, *n_buf; 17634 struct lpfc_iocbq *first_iocbq, *iocbq; 17635 struct fc_frame_header *fc_hdr; 17636 uint32_t sid; 17637 uint32_t len, tot_len; 17638 struct ulp_bde64 *pbde; 17639 17640 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17641 /* remove from receive buffer list */ 17642 list_del_init(&seq_dmabuf->hbuf.list); 17643 lpfc_update_rcv_time_stamp(vport); 17644 /* get the Remote Port's SID */ 17645 sid = sli4_sid_from_fc_hdr(fc_hdr); 17646 tot_len = 0; 17647 /* Get an iocbq struct to fill in. */ 17648 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 17649 if (first_iocbq) { 17650 /* Initialize the first IOCB. */ 17651 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 17652 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 17653 first_iocbq->vport = vport; 17654 17655 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 17656 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 17657 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 17658 first_iocbq->iocb.un.rcvels.parmRo = 17659 sli4_did_from_fc_hdr(fc_hdr); 17660 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 17661 } else 17662 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 17663 first_iocbq->iocb.ulpContext = NO_XRI; 17664 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17665 be16_to_cpu(fc_hdr->fh_ox_id); 17666 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 17667 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17668 vport->phba->vpi_ids[vport->vpi]; 17669 /* put the first buffer into the first IOCBq */ 17670 tot_len = bf_get(lpfc_rcqe_length, 17671 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17672 17673 first_iocbq->context2 = &seq_dmabuf->dbuf; 17674 first_iocbq->context3 = NULL; 17675 first_iocbq->iocb.ulpBdeCount = 1; 17676 if (tot_len > LPFC_DATA_BUF_SIZE) 17677 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17678 LPFC_DATA_BUF_SIZE; 17679 else 17680 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17681 17682 first_iocbq->iocb.un.rcvels.remoteID = sid; 17683 17684 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17685 } 17686 iocbq = first_iocbq; 17687 /* 17688 * Each IOCBq can have two Buffers assigned, so go through the list 17689 * of buffers for this sequence and save two buffers in each IOCBq 17690 */ 17691 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17692 if (!iocbq) { 17693 lpfc_in_buf_free(vport->phba, d_buf); 17694 continue; 17695 } 17696 if (!iocbq->context3) { 17697 iocbq->context3 = d_buf; 17698 iocbq->iocb.ulpBdeCount++; 17699 /* We need to get the size out of the right CQE */ 17700 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17701 len = bf_get(lpfc_rcqe_length, 17702 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17703 pbde = (struct ulp_bde64 *) 17704 &iocbq->iocb.unsli3.sli3Words[4]; 17705 if (len > LPFC_DATA_BUF_SIZE) 17706 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17707 else 17708 pbde->tus.f.bdeSize = len; 17709 17710 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17711 tot_len += len; 17712 } else { 17713 iocbq = lpfc_sli_get_iocbq(vport->phba); 17714 if (!iocbq) { 17715 if (first_iocbq) { 17716 first_iocbq->iocb.ulpStatus = 17717 IOSTAT_FCP_RSP_ERROR; 17718 first_iocbq->iocb.un.ulpWord[4] = 17719 IOERR_NO_RESOURCES; 17720 } 17721 lpfc_in_buf_free(vport->phba, d_buf); 17722 continue; 17723 } 17724 /* We need to get the size out of the right CQE */ 17725 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17726 len = bf_get(lpfc_rcqe_length, 17727 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17728 iocbq->context2 = d_buf; 17729 iocbq->context3 = NULL; 17730 iocbq->iocb.ulpBdeCount = 1; 17731 if (len > LPFC_DATA_BUF_SIZE) 17732 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17733 LPFC_DATA_BUF_SIZE; 17734 else 17735 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17736 17737 tot_len += len; 17738 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17739 17740 iocbq->iocb.un.rcvels.remoteID = sid; 17741 list_add_tail(&iocbq->list, &first_iocbq->list); 17742 } 17743 } 17744 return first_iocbq; 17745 } 17746 17747 static void 17748 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17749 struct hbq_dmabuf *seq_dmabuf) 17750 { 17751 struct fc_frame_header *fc_hdr; 17752 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17753 struct lpfc_hba *phba = vport->phba; 17754 17755 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17756 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17757 if (!iocbq) { 17758 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17759 "2707 Ring %d handler: Failed to allocate " 17760 "iocb Rctl x%x Type x%x received\n", 17761 LPFC_ELS_RING, 17762 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17763 return; 17764 } 17765 if (!lpfc_complete_unsol_iocb(phba, 17766 phba->sli4_hba.els_wq->pring, 17767 iocbq, fc_hdr->fh_r_ctl, 17768 fc_hdr->fh_type)) 17769 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17770 "2540 Ring %d handler: unexpected Rctl " 17771 "x%x Type x%x received\n", 17772 LPFC_ELS_RING, 17773 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17774 17775 /* Free iocb created in lpfc_prep_seq */ 17776 list_for_each_entry_safe(curr_iocb, next_iocb, 17777 &iocbq->list, list) { 17778 list_del_init(&curr_iocb->list); 17779 lpfc_sli_release_iocbq(phba, curr_iocb); 17780 } 17781 lpfc_sli_release_iocbq(phba, iocbq); 17782 } 17783 17784 static void 17785 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17786 struct lpfc_iocbq *rspiocb) 17787 { 17788 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17789 17790 if (pcmd && pcmd->virt) 17791 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17792 kfree(pcmd); 17793 lpfc_sli_release_iocbq(phba, cmdiocb); 17794 lpfc_drain_txq(phba); 17795 } 17796 17797 static void 17798 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17799 struct hbq_dmabuf *dmabuf) 17800 { 17801 struct fc_frame_header *fc_hdr; 17802 struct lpfc_hba *phba = vport->phba; 17803 struct lpfc_iocbq *iocbq = NULL; 17804 union lpfc_wqe *wqe; 17805 struct lpfc_dmabuf *pcmd = NULL; 17806 uint32_t frame_len; 17807 int rc; 17808 unsigned long iflags; 17809 17810 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17811 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17812 17813 /* Send the received frame back */ 17814 iocbq = lpfc_sli_get_iocbq(phba); 17815 if (!iocbq) { 17816 /* Queue cq event and wakeup worker thread to process it */ 17817 spin_lock_irqsave(&phba->hbalock, iflags); 17818 list_add_tail(&dmabuf->cq_event.list, 17819 &phba->sli4_hba.sp_queue_event); 17820 phba->hba_flag |= HBA_SP_QUEUE_EVT; 17821 spin_unlock_irqrestore(&phba->hbalock, iflags); 17822 lpfc_worker_wake_up(phba); 17823 return; 17824 } 17825 17826 /* Allocate buffer for command payload */ 17827 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17828 if (pcmd) 17829 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17830 &pcmd->phys); 17831 if (!pcmd || !pcmd->virt) 17832 goto exit; 17833 17834 INIT_LIST_HEAD(&pcmd->list); 17835 17836 /* copyin the payload */ 17837 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17838 17839 /* fill in BDE's for command */ 17840 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17841 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17842 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17843 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17844 17845 iocbq->context2 = pcmd; 17846 iocbq->vport = vport; 17847 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17848 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17849 17850 /* 17851 * Setup rest of the iocb as though it were a WQE 17852 * Build the SEND_FRAME WQE 17853 */ 17854 wqe = (union lpfc_wqe *)&iocbq->iocb; 17855 17856 wqe->send_frame.frame_len = frame_len; 17857 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17858 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17859 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17860 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17861 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17862 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17863 17864 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17865 iocbq->iocb.ulpLe = 1; 17866 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17867 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17868 if (rc == IOCB_ERROR) 17869 goto exit; 17870 17871 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17872 return; 17873 17874 exit: 17875 lpfc_printf_log(phba, 
KERN_WARNING, LOG_SLI, 17876 "2023 Unable to process MDS loopback frame\n"); 17877 if (pcmd && pcmd->virt) 17878 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17879 kfree(pcmd); 17880 if (iocbq) 17881 lpfc_sli_release_iocbq(phba, iocbq); 17882 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17883 } 17884 17885 /** 17886 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17887 * @phba: Pointer to HBA context object. 17888 * 17889 * This function is called with no lock held. This function processes all 17890 * the received buffers and gives it to upper layers when a received buffer 17891 * indicates that it is the final frame in the sequence. The interrupt 17892 * service routine processes received buffers at interrupt contexts. 17893 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17894 * appropriate receive function when the final frame in a sequence is received. 17895 **/ 17896 void 17897 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17898 struct hbq_dmabuf *dmabuf) 17899 { 17900 struct hbq_dmabuf *seq_dmabuf; 17901 struct fc_frame_header *fc_hdr; 17902 struct lpfc_vport *vport; 17903 uint32_t fcfi; 17904 uint32_t did; 17905 17906 /* Process each received buffer */ 17907 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17908 17909 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 17910 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 17911 vport = phba->pport; 17912 /* Handle MDS Loopback frames */ 17913 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17914 return; 17915 } 17916 17917 /* check to see if this a valid type of frame */ 17918 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17919 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17920 return; 17921 } 17922 17923 if ((bf_get(lpfc_cqe_code, 17924 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17925 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17926 &dmabuf->cq_event.cqe.rcqe_cmpl); 17927 else 17928 fcfi = bf_get(lpfc_rcqe_fcf_id, 17929 &dmabuf->cq_event.cqe.rcqe_cmpl); 17930 17931 /* d_id this frame is directed to */ 17932 did = sli4_did_from_fc_hdr(fc_hdr); 17933 17934 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17935 if (!vport) { 17936 /* throw out the frame */ 17937 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17938 return; 17939 } 17940 17941 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17942 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17943 (did != Fabric_DID)) { 17944 /* 17945 * Throw out the frame if we are not pt2pt. 17946 * The pt2pt protocol allows for discovery frames 17947 * to be received without a registered VPI. 17948 */ 17949 if (!(vport->fc_flag & FC_PT2PT) || 17950 (phba->link_state == LPFC_HBA_READY)) { 17951 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17952 return; 17953 } 17954 } 17955 17956 /* Handle the basic abort sequence (BA_ABTS) event */ 17957 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17958 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17959 return; 17960 } 17961 17962 /* Link this frame */ 17963 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17964 if (!seq_dmabuf) { 17965 /* unable to add frame to vport - throw it out */ 17966 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17967 return; 17968 } 17969 /* If not last frame in sequence continue processing frames. 
*/ 17970 if (!lpfc_seq_complete(seq_dmabuf)) 17971 return; 17972 17973 /* Send the complete sequence to the upper layer protocol */ 17974 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17975 } 17976 17977 /** 17978 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17979 * @phba: pointer to lpfc hba data structure. 17980 * 17981 * This routine is invoked to post rpi header templates to the 17982 * HBA consistent with the SLI-4 interface spec. This routine 17983 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17984 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17985 * 17986 * This routine does not require any locks. It's usage is expected 17987 * to be driver load or reset recovery when the driver is 17988 * sequential. 17989 * 17990 * Return codes 17991 * 0 - successful 17992 * -EIO - The mailbox failed to complete successfully. 17993 * When this error occurs, the driver is not guaranteed 17994 * to have any rpi regions posted to the device and 17995 * must either attempt to repost the regions or take a 17996 * fatal error. 17997 **/ 17998 int 17999 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 18000 { 18001 struct lpfc_rpi_hdr *rpi_page; 18002 uint32_t rc = 0; 18003 uint16_t lrpi = 0; 18004 18005 /* SLI4 ports that support extents do not require RPI headers. */ 18006 if (!phba->sli4_hba.rpi_hdrs_in_use) 18007 goto exit; 18008 if (phba->sli4_hba.extents_in_use) 18009 return -EIO; 18010 18011 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 18012 /* 18013 * Assign the rpi headers a physical rpi only if the driver 18014 * has not initialized those resources. A port reset only 18015 * needs the headers posted. 18016 */ 18017 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 18018 LPFC_RPI_RSRC_RDY) 18019 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18020 18021 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 18022 if (rc != MBX_SUCCESS) { 18023 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18024 "2008 Error %d posting all rpi " 18025 "headers\n", rc); 18026 rc = -EIO; 18027 break; 18028 } 18029 } 18030 18031 exit: 18032 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 18033 LPFC_RPI_RSRC_RDY); 18034 return rc; 18035 } 18036 18037 /** 18038 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 18039 * @phba: pointer to lpfc hba data structure. 18040 * @rpi_page: pointer to the rpi memory region. 18041 * 18042 * This routine is invoked to post a single rpi header to the 18043 * HBA consistent with the SLI-4 interface spec. This memory region 18044 * maps up to 64 rpi context regions. 18045 * 18046 * Return codes 18047 * 0 - successful 18048 * -ENOMEM - No available memory 18049 * -EIO - The mailbox failed to complete successfully. 18050 **/ 18051 int 18052 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 18053 { 18054 LPFC_MBOXQ_t *mboxq; 18055 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 18056 uint32_t rc = 0; 18057 uint32_t shdr_status, shdr_add_status; 18058 union lpfc_sli4_cfg_shdr *shdr; 18059 18060 /* SLI4 ports that support extents do not require RPI headers. */ 18061 if (!phba->sli4_hba.rpi_hdrs_in_use) 18062 return rc; 18063 if (phba->sli4_hba.extents_in_use) 18064 return -EIO; 18065 18066 /* The port is notified of the header region via a mailbox command. 
*/
18067 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18068 if (!mboxq) {
18069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18070 "2001 Unable to allocate memory for issuing "
18071 "SLI_CONFIG_SPECIAL mailbox command\n");
18072 return -ENOMEM;
18073 }
18074
18075 /* Post all rpi memory regions to the port. */
18076 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
18077 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
18078 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
18079 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
18080 sizeof(struct lpfc_sli4_cfg_mhdr),
18081 LPFC_SLI4_MBX_EMBED);
18082
18083
18084 /* Post the physical rpi to the port for this rpi header. */
18085 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
18086 rpi_page->start_rpi);
18087 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
18088 hdr_tmpl, rpi_page->page_count);
18089
18090 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
18091 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
18092 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18093 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
18094 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18095 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18096 if (rc != MBX_TIMEOUT)
18097 mempool_free(mboxq, phba->mbox_mem_pool);
18098 if (shdr_status || shdr_add_status || rc) {
18099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18100 "2514 POST_RPI_HDR mailbox failed with "
18101 "status x%x add_status x%x, mbx status x%x\n",
18102 shdr_status, shdr_add_status, rc);
18103 rc = -ENXIO;
18104 } else {
18105 /*
18106 * The next_rpi stores the next logical modulo-64 rpi value used
18107 * to post physical rpis in subsequent rpi postings.
18108 */
18109 spin_lock_irq(&phba->hbalock);
18110 phba->sli4_hba.next_rpi = rpi_page->next_rpi;
18111 spin_unlock_irq(&phba->hbalock);
18112 }
18113 return rc;
18114 }
18115
18116 /**
18117 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
18118 * @phba: pointer to lpfc hba data structure.
18119 *
18120 * This routine is invoked to allocate the next available rpi from the
18121 * driver's rpi bitmask. If the number of remaining rpis drops below
18122 * LPFC_RPI_LOW_WATER_MARK, an additional rpi header region is created
18123 * and posted to the port.
18124 *
18125 * Returns
18126 * A valid rpi defined as rpi_base <= rpi < max_rpi if successful
18127 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
18128 **/
18129 int
18130 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
18131 {
18132 unsigned long rpi;
18133 uint16_t max_rpi, rpi_limit;
18134 uint16_t rpi_remaining, lrpi = 0;
18135 struct lpfc_rpi_hdr *rpi_hdr;
18136 unsigned long iflag;
18137
18138 /*
18139 * Fetch the next logical rpi. Because this index is logical,
18140 * the driver starts at 0 each time.
18141 */
18142 spin_lock_irqsave(&phba->hbalock, iflag);
18143 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
18144 rpi_limit = phba->sli4_hba.next_rpi;
18145
18146 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
18147 if (rpi >= rpi_limit)
18148 rpi = LPFC_RPI_ALLOC_ERROR;
18149 else {
18150 set_bit(rpi, phba->sli4_hba.rpi_bmask);
18151 phba->sli4_hba.max_cfg_param.rpi_used++;
18152 phba->sli4_hba.rpi_count++;
18153 }
18154 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
18155 "0001 rpi:%x max:%x lim:%x\n",
18156 (int) rpi, max_rpi, rpi_limit);
18157
18158 /*
18159 * Don't try to allocate more rpi header regions if the device limit
18160 * has been exhausted.
18161 */
18162 if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
18163 (phba->sli4_hba.rpi_count >= max_rpi)) {
18164 spin_unlock_irqrestore(&phba->hbalock, iflag);
18165 return rpi;
18166 }
18167
18168 /*
18169 * RPI header postings are not required for SLI4 ports capable of
18170 * extents.
18171 */
18172 if (!phba->sli4_hba.rpi_hdrs_in_use) {
18173 spin_unlock_irqrestore(&phba->hbalock, iflag);
18174 return rpi;
18175 }
18176
18177 /*
18178 * If the driver is running low on rpi resources, allocate another
18179 * page now. Note that the next_rpi value is used because
18180 * it represents how many are actually in use whereas max_rpi notes
18181 * the maximum supported by the device.
18182 */
18183 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
18184 spin_unlock_irqrestore(&phba->hbalock, iflag);
18185 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
18186 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
18187 if (!rpi_hdr) {
18188 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18189 "2002 Error Could not grow rpi "
18190 "count\n");
18191 } else {
18192 lrpi = rpi_hdr->start_rpi;
18193 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
18194 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
18195 }
18196 }
18197
18198 return rpi;
18199 }
18200
18201 /**
18202 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
18203 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to be released.
18204 *
18205 * This routine is invoked to release an rpi to the pool of
18206 * available rpis maintained by the driver. The caller is expected
 * to hold the hbalock when calling this routine.
18207 **/
18208 static void
18209 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18210 {
18211 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
18212 phba->sli4_hba.rpi_count--;
18213 phba->sli4_hba.max_cfg_param.rpi_used--;
18214 }
18215 }
18216
18217 /**
18218 * lpfc_sli4_free_rpi - Release an rpi for reuse.
18219 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to be released.
18220 *
18221 * This routine is invoked to release an rpi to the pool of
18222 * available rpis maintained by the driver.
18223 **/
18224 void
18225 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
18226 {
18227 spin_lock_irq(&phba->hbalock);
18228 __lpfc_sli4_free_rpi(phba, rpi);
18229 spin_unlock_irq(&phba->hbalock);
18230 }
18231
18232 /**
18233 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
18234 * @phba: pointer to lpfc hba data structure.
18235 *
18236 * This routine is invoked to remove the memory region that
18237 * provided rpi via a bitmask.
18238 **/
18239 void
18240 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
18241 {
18242 kfree(phba->sli4_hba.rpi_bmask);
18243 kfree(phba->sli4_hba.rpi_ids);
18244 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
18245 }
18246
18247 /**
18248 * lpfc_sli4_resume_rpi - Resume an rpi with the port
18249 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: optional mailbox completion handler.
 * @arg: completion context returned through the mailbox ctx_buf.
18250 *
18251 * This routine is invoked to issue a RESUME_RPI mailbox command for the
18252 * rpi associated with @ndlp. It returns 0 when the command was issued,
 * -ENOMEM if no mailbox could be allocated, and -EIO if the issue failed.
18253 **/
18254 int
18255 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
18256 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
18257 {
18258 LPFC_MBOXQ_t *mboxq;
18259 struct lpfc_hba *phba = ndlp->phba;
18260 int rc;
18261
18262 /* Allocate a mailbox for the RESUME_RPI command. */
18263 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18264 if (!mboxq)
18265 return -ENOMEM;
18266
18267 /* Construct the RESUME_RPI mailbox command and issue it to the port.
*/ 18268 lpfc_resume_rpi(mboxq, ndlp); 18269 if (cmpl) { 18270 mboxq->mbox_cmpl = cmpl; 18271 mboxq->ctx_buf = arg; 18272 mboxq->ctx_ndlp = ndlp; 18273 } else 18274 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 18275 mboxq->vport = ndlp->vport; 18276 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18277 if (rc == MBX_NOT_FINISHED) { 18278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18279 "2010 Resume RPI Mailbox failed " 18280 "status %d, mbxStatus x%x\n", rc, 18281 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18282 mempool_free(mboxq, phba->mbox_mem_pool); 18283 return -EIO; 18284 } 18285 return 0; 18286 } 18287 18288 /** 18289 * lpfc_sli4_init_vpi - Initialize a vpi with the port 18290 * @vport: Pointer to the vport for which the vpi is being initialized 18291 * 18292 * This routine is invoked to activate a vpi with the port. 18293 * 18294 * Returns: 18295 * 0 success 18296 * -Evalue otherwise 18297 **/ 18298 int 18299 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 18300 { 18301 LPFC_MBOXQ_t *mboxq; 18302 int rc = 0; 18303 int retval = MBX_SUCCESS; 18304 uint32_t mbox_tmo; 18305 struct lpfc_hba *phba = vport->phba; 18306 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18307 if (!mboxq) 18308 return -ENOMEM; 18309 lpfc_init_vpi(phba, mboxq, vport->vpi); 18310 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18311 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18312 if (rc != MBX_SUCCESS) { 18313 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18314 "2022 INIT VPI Mailbox failed " 18315 "status %d, mbxStatus x%x\n", rc, 18316 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18317 retval = -EIO; 18318 } 18319 if (rc != MBX_TIMEOUT) 18320 mempool_free(mboxq, vport->phba->mbox_mem_pool); 18321 18322 return retval; 18323 } 18324 18325 /** 18326 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 18327 * @phba: pointer to lpfc hba data structure. 18328 * @mboxq: Pointer to mailbox object. 18329 * 18330 * This routine is invoked to manually add a single FCF record. The caller 18331 * must pass a completely initialized FCF_Record. This routine takes 18332 * care of the nonembedded mailbox operations. 18333 **/ 18334 static void 18335 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 18336 { 18337 void *virt_addr; 18338 union lpfc_sli4_cfg_shdr *shdr; 18339 uint32_t shdr_status, shdr_add_status; 18340 18341 virt_addr = mboxq->sge_array->addr[0]; 18342 /* The IOCTL status is embedded in the mailbox subheader. */ 18343 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18344 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18345 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18346 18347 if ((shdr_status || shdr_add_status) && 18348 (shdr_status != STATUS_FCF_IN_USE)) 18349 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18350 "2558 ADD_FCF_RECORD mailbox failed with " 18351 "status x%x add_status x%x\n", 18352 shdr_status, shdr_add_status); 18353 18354 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18355 } 18356 18357 /** 18358 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18359 * @phba: pointer to lpfc hba data structure. 18360 * @fcf_record: pointer to the initialized fcf record to add. 18361 * 18362 * This routine is invoked to manually add a single FCF record. The caller 18363 * must pass a completely initialized FCF_Record. This routine takes 18364 * care of the nonembedded mailbox operations. 
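 *
 * Illustrative use (sketch only; phba, fcf_record and fcf_index stand for
 * caller-provided context): the record would typically be built first with
 * lpfc_sli4_build_dflt_fcf_record() below and then added, e.g.
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);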
18365 **/ 18366 int 18367 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18368 { 18369 int rc = 0; 18370 LPFC_MBOXQ_t *mboxq; 18371 uint8_t *bytep; 18372 void *virt_addr; 18373 struct lpfc_mbx_sge sge; 18374 uint32_t alloc_len, req_len; 18375 uint32_t fcfindex; 18376 18377 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18378 if (!mboxq) { 18379 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18380 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18381 return -ENOMEM; 18382 } 18383 18384 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18385 sizeof(uint32_t); 18386 18387 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18388 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18389 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18390 req_len, LPFC_SLI4_MBX_NEMBED); 18391 if (alloc_len < req_len) { 18392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18393 "2523 Allocated DMA memory size (x%x) is " 18394 "less than the requested DMA memory " 18395 "size (x%x)\n", alloc_len, req_len); 18396 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18397 return -ENOMEM; 18398 } 18399 18400 /* 18401 * Get the first SGE entry from the non-embedded DMA memory. This 18402 * routine only uses a single SGE. 18403 */ 18404 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18405 virt_addr = mboxq->sge_array->addr[0]; 18406 /* 18407 * Configure the FCF record for FCFI 0. This is the driver's 18408 * hardcoded default and gets used in nonFIP mode. 18409 */ 18410 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18411 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18412 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18413 18414 /* 18415 * Copy the fcf_index and the FCF Record Data. The data starts after 18416 * the FCoE header plus word10. The data copy needs to be endian 18417 * correct. 18418 */ 18419 bytep += sizeof(uint32_t); 18420 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18421 mboxq->vport = phba->pport; 18422 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18423 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18424 if (rc == MBX_NOT_FINISHED) { 18425 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18426 "2515 ADD_FCF_RECORD mailbox failed with " 18427 "status 0x%x\n", rc); 18428 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18429 rc = -EIO; 18430 } else 18431 rc = 0; 18432 18433 return rc; 18434 } 18435 18436 /** 18437 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18438 * @phba: pointer to lpfc hba data structure. 18439 * @fcf_record: pointer to the fcf record to write the default data. 18440 * @fcf_index: FCF table entry index. 18441 * 18442 * This routine is invoked to build the driver's default FCF record. The 18443 * values used are hardcoded. This routine handles memory initialization. 
18444 * 18445 **/ 18446 void 18447 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 18448 struct fcf_record *fcf_record, 18449 uint16_t fcf_index) 18450 { 18451 memset(fcf_record, 0, sizeof(struct fcf_record)); 18452 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 18453 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 18454 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 18455 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 18456 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 18457 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 18458 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 18459 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 18460 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 18461 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 18462 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 18463 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 18464 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 18465 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 18466 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 18467 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 18468 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 18469 /* Set the VLAN bit map */ 18470 if (phba->valid_vlan) { 18471 fcf_record->vlan_bitmap[phba->vlan_id / 8] 18472 = 1 << (phba->vlan_id % 8); 18473 } 18474 } 18475 18476 /** 18477 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 18478 * @phba: pointer to lpfc hba data structure. 18479 * @fcf_index: FCF table entry offset. 18480 * 18481 * This routine is invoked to scan the entire FCF table by reading FCF 18482 * record and processing it one at a time starting from the @fcf_index 18483 * for initial FCF discovery or fast FCF failover rediscovery. 18484 * 18485 * Return 0 if the mailbox command is submitted successfully, none 0 18486 * otherwise. 
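 *
 * The mailbox is issued asynchronously; lpfc_mbx_cmpl_fcf_scan_read_fcf_rec
 * handles the completion, and the FCF_TS_INPROG flag is held in hba_flag
 * while the table scan is in progress.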
18487 **/ 18488 int 18489 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18490 { 18491 int rc = 0, error; 18492 LPFC_MBOXQ_t *mboxq; 18493 18494 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18495 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18496 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18497 if (!mboxq) { 18498 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18499 "2000 Failed to allocate mbox for " 18500 "READ_FCF cmd\n"); 18501 error = -ENOMEM; 18502 goto fail_fcf_scan; 18503 } 18504 /* Construct the read FCF record mailbox command */ 18505 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18506 if (rc) { 18507 error = -EINVAL; 18508 goto fail_fcf_scan; 18509 } 18510 /* Issue the mailbox command asynchronously */ 18511 mboxq->vport = phba->pport; 18512 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18513 18514 spin_lock_irq(&phba->hbalock); 18515 phba->hba_flag |= FCF_TS_INPROG; 18516 spin_unlock_irq(&phba->hbalock); 18517 18518 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18519 if (rc == MBX_NOT_FINISHED) 18520 error = -EIO; 18521 else { 18522 /* Reset eligible FCF count for new scan */ 18523 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18524 phba->fcf.eligible_fcf_cnt = 0; 18525 error = 0; 18526 } 18527 fail_fcf_scan: 18528 if (error) { 18529 if (mboxq) 18530 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18531 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18532 spin_lock_irq(&phba->hbalock); 18533 phba->hba_flag &= ~FCF_TS_INPROG; 18534 spin_unlock_irq(&phba->hbalock); 18535 } 18536 return error; 18537 } 18538 18539 /** 18540 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18541 * @phba: pointer to lpfc hba data structure. 18542 * @fcf_index: FCF table entry offset. 18543 * 18544 * This routine is invoked to read an FCF record indicated by @fcf_index 18545 * and to use it for FLOGI roundrobin FCF failover. 18546 * 18547 * Return 0 if the mailbox command is submitted successfully, none 0 18548 * otherwise. 18549 **/ 18550 int 18551 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18552 { 18553 int rc = 0, error; 18554 LPFC_MBOXQ_t *mboxq; 18555 18556 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18557 if (!mboxq) { 18558 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18559 "2763 Failed to allocate mbox for " 18560 "READ_FCF cmd\n"); 18561 error = -ENOMEM; 18562 goto fail_fcf_read; 18563 } 18564 /* Construct the read FCF record mailbox command */ 18565 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18566 if (rc) { 18567 error = -EINVAL; 18568 goto fail_fcf_read; 18569 } 18570 /* Issue the mailbox command asynchronously */ 18571 mboxq->vport = phba->pport; 18572 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18573 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18574 if (rc == MBX_NOT_FINISHED) 18575 error = -EIO; 18576 else 18577 error = 0; 18578 18579 fail_fcf_read: 18580 if (error && mboxq) 18581 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18582 return error; 18583 } 18584 18585 /** 18586 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18587 * @phba: pointer to lpfc hba data structure. 18588 * @fcf_index: FCF table entry offset. 18589 * 18590 * This routine is invoked to read an FCF record indicated by @fcf_index to 18591 * determine whether it's eligible for FLOGI roundrobin failover list. 18592 * 18593 * Return 0 if the mailbox command is submitted successfully, none 0 18594 * otherwise. 
18595 **/ 18596 int 18597 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18598 { 18599 int rc = 0, error; 18600 LPFC_MBOXQ_t *mboxq; 18601 18602 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18603 if (!mboxq) { 18604 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18605 "2758 Failed to allocate mbox for " 18606 "READ_FCF cmd\n"); 18607 error = -ENOMEM; 18608 goto fail_fcf_read; 18609 } 18610 /* Construct the read FCF record mailbox command */ 18611 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18612 if (rc) { 18613 error = -EINVAL; 18614 goto fail_fcf_read; 18615 } 18616 /* Issue the mailbox command asynchronously */ 18617 mboxq->vport = phba->pport; 18618 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 18619 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18620 if (rc == MBX_NOT_FINISHED) 18621 error = -EIO; 18622 else 18623 error = 0; 18624 18625 fail_fcf_read: 18626 if (error && mboxq) 18627 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18628 return error; 18629 } 18630 18631 /** 18632 * lpfc_check_next_fcf_pri_level 18633 * phba pointer to the lpfc_hba struct for this port. 18634 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 18635 * routine when the rr_bmask is empty. The FCF indecies are put into the 18636 * rr_bmask based on their priority level. Starting from the highest priority 18637 * to the lowest. The most likely FCF candidate will be in the highest 18638 * priority group. When this routine is called it searches the fcf_pri list for 18639 * next lowest priority group and repopulates the rr_bmask with only those 18640 * fcf_indexes. 18641 * returns: 18642 * 1=success 0=failure 18643 **/ 18644 static int 18645 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 18646 { 18647 uint16_t next_fcf_pri; 18648 uint16_t last_index; 18649 struct lpfc_fcf_pri *fcf_pri; 18650 int rc; 18651 int ret = 0; 18652 18653 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 18654 LPFC_SLI4_FCF_TBL_INDX_MAX); 18655 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18656 "3060 Last IDX %d\n", last_index); 18657 18658 /* Verify the priority list has 2 or more entries */ 18659 spin_lock_irq(&phba->hbalock); 18660 if (list_empty(&phba->fcf.fcf_pri_list) || 18661 list_is_singular(&phba->fcf.fcf_pri_list)) { 18662 spin_unlock_irq(&phba->hbalock); 18663 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18664 "3061 Last IDX %d\n", last_index); 18665 return 0; /* Empty rr list */ 18666 } 18667 spin_unlock_irq(&phba->hbalock); 18668 18669 next_fcf_pri = 0; 18670 /* 18671 * Clear the rr_bmask and set all of the bits that are at this 18672 * priority. 18673 */ 18674 memset(phba->fcf.fcf_rr_bmask, 0, 18675 sizeof(*phba->fcf.fcf_rr_bmask)); 18676 spin_lock_irq(&phba->hbalock); 18677 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18678 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 18679 continue; 18680 /* 18681 * the 1st priority that has not FLOGI failed 18682 * will be the highest. 18683 */ 18684 if (!next_fcf_pri) 18685 next_fcf_pri = fcf_pri->fcf_rec.priority; 18686 spin_unlock_irq(&phba->hbalock); 18687 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18688 rc = lpfc_sli4_fcf_rr_index_set(phba, 18689 fcf_pri->fcf_rec.fcf_index); 18690 if (rc) 18691 return 0; 18692 } 18693 spin_lock_irq(&phba->hbalock); 18694 } 18695 /* 18696 * if next_fcf_pri was not set above and the list is not empty then 18697 * we have failed flogis on all of them. So reset flogi failed 18698 * and start at the beginning. 
18699 */ 18700 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18701 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18702 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18703 /* 18704 * the 1st priority that has not FLOGI failed 18705 * will be the highest. 18706 */ 18707 if (!next_fcf_pri) 18708 next_fcf_pri = fcf_pri->fcf_rec.priority; 18709 spin_unlock_irq(&phba->hbalock); 18710 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18711 rc = lpfc_sli4_fcf_rr_index_set(phba, 18712 fcf_pri->fcf_rec.fcf_index); 18713 if (rc) 18714 return 0; 18715 } 18716 spin_lock_irq(&phba->hbalock); 18717 } 18718 } else 18719 ret = 1; 18720 spin_unlock_irq(&phba->hbalock); 18721 18722 return ret; 18723 } 18724 /** 18725 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18726 * @phba: pointer to lpfc hba data structure. 18727 * 18728 * This routine is to get the next eligible FCF record index in a round 18729 * robin fashion. If the next eligible FCF record index equals to the 18730 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18731 * shall be returned, otherwise, the next eligible FCF record's index 18732 * shall be returned. 18733 **/ 18734 uint16_t 18735 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18736 { 18737 uint16_t next_fcf_index; 18738 18739 initial_priority: 18740 /* Search start from next bit of currently registered FCF index */ 18741 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18742 18743 next_priority: 18744 /* Determine the next fcf index to check */ 18745 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18746 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18747 LPFC_SLI4_FCF_TBL_INDX_MAX, 18748 next_fcf_index); 18749 18750 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18751 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18752 /* 18753 * If we have wrapped then we need to clear the bits that 18754 * have been tested so that we can detect when we should 18755 * change the priority level. 18756 */ 18757 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18758 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18759 } 18760 18761 18762 /* Check roundrobin failover list empty condition */ 18763 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18764 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18765 /* 18766 * If next fcf index is not found check if there are lower 18767 * Priority level fcf's in the fcf_priority list. 18768 * Set up the rr_bmask with all of the avaiable fcf bits 18769 * at that level and continue the selection process. 18770 */ 18771 if (lpfc_check_next_fcf_pri_level(phba)) 18772 goto initial_priority; 18773 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18774 "2844 No roundrobin failover FCF available\n"); 18775 18776 return LPFC_FCOE_FCF_NEXT_NONE; 18777 } 18778 18779 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18780 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18781 LPFC_FCF_FLOGI_FAILED) { 18782 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18783 return LPFC_FCOE_FCF_NEXT_NONE; 18784 18785 goto next_priority; 18786 } 18787 18788 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18789 "2845 Get next roundrobin failover FCF (x%x)\n", 18790 next_fcf_index); 18791 18792 return next_fcf_index; 18793 } 18794 18795 /** 18796 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18797 * @phba: pointer to lpfc hba data structure. 
18798 * 18799 * This routine sets the FCF record index in to the eligible bmask for 18800 * roundrobin failover search. It checks to make sure that the index 18801 * does not go beyond the range of the driver allocated bmask dimension 18802 * before setting the bit. 18803 * 18804 * Returns 0 if the index bit successfully set, otherwise, it returns 18805 * -EINVAL. 18806 **/ 18807 int 18808 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18809 { 18810 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18811 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18812 "2610 FCF (x%x) reached driver's book " 18813 "keeping dimension:x%x\n", 18814 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18815 return -EINVAL; 18816 } 18817 /* Set the eligible FCF record index bmask */ 18818 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18819 18820 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18821 "2790 Set FCF (x%x) to roundrobin FCF failover " 18822 "bmask\n", fcf_index); 18823 18824 return 0; 18825 } 18826 18827 /** 18828 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18829 * @phba: pointer to lpfc hba data structure. 18830 * 18831 * This routine clears the FCF record index from the eligible bmask for 18832 * roundrobin failover search. It checks to make sure that the index 18833 * does not go beyond the range of the driver allocated bmask dimension 18834 * before clearing the bit. 18835 **/ 18836 void 18837 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18838 { 18839 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18840 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18841 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18842 "2762 FCF (x%x) reached driver's book " 18843 "keeping dimension:x%x\n", 18844 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18845 return; 18846 } 18847 /* Clear the eligible FCF record index bmask */ 18848 spin_lock_irq(&phba->hbalock); 18849 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18850 list) { 18851 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18852 list_del_init(&fcf_pri->list); 18853 break; 18854 } 18855 } 18856 spin_unlock_irq(&phba->hbalock); 18857 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18858 18859 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18860 "2791 Clear FCF (x%x) from roundrobin failover " 18861 "bmask\n", fcf_index); 18862 } 18863 18864 /** 18865 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18866 * @phba: pointer to lpfc hba data structure. 18867 * 18868 * This routine is the completion routine for the rediscover FCF table mailbox 18869 * command. If the mailbox command returned failure, it will try to stop the 18870 * FCF rediscover wait timer. 
18871 **/ 18872 static void 18873 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18874 { 18875 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18876 uint32_t shdr_status, shdr_add_status; 18877 18878 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18879 18880 shdr_status = bf_get(lpfc_mbox_hdr_status, 18881 &redisc_fcf->header.cfg_shdr.response); 18882 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18883 &redisc_fcf->header.cfg_shdr.response); 18884 if (shdr_status || shdr_add_status) { 18885 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18886 "2746 Requesting for FCF rediscovery failed " 18887 "status x%x add_status x%x\n", 18888 shdr_status, shdr_add_status); 18889 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18890 spin_lock_irq(&phba->hbalock); 18891 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18892 spin_unlock_irq(&phba->hbalock); 18893 /* 18894 * CVL event triggered FCF rediscover request failed, 18895 * last resort to re-try current registered FCF entry. 18896 */ 18897 lpfc_retry_pport_discovery(phba); 18898 } else { 18899 spin_lock_irq(&phba->hbalock); 18900 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18901 spin_unlock_irq(&phba->hbalock); 18902 /* 18903 * DEAD FCF event triggered FCF rediscover request 18904 * failed, last resort to fail over as a link down 18905 * to FCF registration. 18906 */ 18907 lpfc_sli4_fcf_dead_failthrough(phba); 18908 } 18909 } else { 18910 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18911 "2775 Start FCF rediscover quiescent timer\n"); 18912 /* 18913 * Start FCF rediscovery wait timer for pending FCF 18914 * before rescan FCF record table. 18915 */ 18916 lpfc_fcf_redisc_wait_start_timer(phba); 18917 } 18918 18919 mempool_free(mbox, phba->mbox_mem_pool); 18920 } 18921 18922 /** 18923 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18924 * @phba: pointer to lpfc hba data structure. 18925 * 18926 * This routine is invoked to request for rediscovery of the entire FCF table 18927 * by the port. 18928 **/ 18929 int 18930 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18931 { 18932 LPFC_MBOXQ_t *mbox; 18933 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18934 int rc, length; 18935 18936 /* Cancel retry delay timers to all vports before FCF rediscover */ 18937 lpfc_cancel_all_vport_retry_delay_timer(phba); 18938 18939 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18940 if (!mbox) { 18941 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18942 "2745 Failed to allocate mbox for " 18943 "requesting FCF rediscover.\n"); 18944 return -ENOMEM; 18945 } 18946 18947 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18948 sizeof(struct lpfc_sli4_cfg_mhdr)); 18949 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18950 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18951 length, LPFC_SLI4_MBX_EMBED); 18952 18953 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18954 /* Set count to 0 for invalidating the entire FCF database */ 18955 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18956 18957 /* Issue the mailbox command asynchronously */ 18958 mbox->vport = phba->pport; 18959 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18960 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18961 18962 if (rc == MBX_NOT_FINISHED) { 18963 mempool_free(mbox, phba->mbox_mem_pool); 18964 return -EIO; 18965 } 18966 return 0; 18967 } 18968 18969 /** 18970 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18971 * @phba: pointer to lpfc hba data structure. 
18972 * 18973 * This function is the failover routine as a last resort to the FCF DEAD 18974 * event when driver failed to perform fast FCF failover. 18975 **/ 18976 void 18977 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18978 { 18979 uint32_t link_state; 18980 18981 /* 18982 * Last resort as FCF DEAD event failover will treat this as 18983 * a link down, but save the link state because we don't want 18984 * it to be changed to Link Down unless it is already down. 18985 */ 18986 link_state = phba->link_state; 18987 lpfc_linkdown(phba); 18988 phba->link_state = link_state; 18989 18990 /* Unregister FCF if no devices connected to it */ 18991 lpfc_unregister_unused_fcf(phba); 18992 } 18993 18994 /** 18995 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18996 * @phba: pointer to lpfc hba data structure. 18997 * @rgn23_data: pointer to configure region 23 data. 18998 * 18999 * This function gets SLI3 port configure region 23 data through memory dump 19000 * mailbox command. When it successfully retrieves data, the size of the data 19001 * will be returned, otherwise, 0 will be returned. 19002 **/ 19003 static uint32_t 19004 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19005 { 19006 LPFC_MBOXQ_t *pmb = NULL; 19007 MAILBOX_t *mb; 19008 uint32_t offset = 0; 19009 int rc; 19010 19011 if (!rgn23_data) 19012 return 0; 19013 19014 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19015 if (!pmb) { 19016 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19017 "2600 failed to allocate mailbox memory\n"); 19018 return 0; 19019 } 19020 mb = &pmb->u.mb; 19021 19022 do { 19023 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 19024 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 19025 19026 if (rc != MBX_SUCCESS) { 19027 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19028 "2601 failed to read config " 19029 "region 23, rc 0x%x Status 0x%x\n", 19030 rc, mb->mbxStatus); 19031 mb->un.varDmp.word_cnt = 0; 19032 } 19033 /* 19034 * dump mem may return a zero when finished or we got a 19035 * mailbox error, either way we are done. 19036 */ 19037 if (mb->un.varDmp.word_cnt == 0) 19038 break; 19039 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19040 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19041 19042 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19043 rgn23_data + offset, 19044 mb->un.varDmp.word_cnt); 19045 offset += mb->un.varDmp.word_cnt; 19046 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19047 19048 mempool_free(pmb, phba->mbox_mem_pool); 19049 return offset; 19050 } 19051 19052 /** 19053 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 19054 * @phba: pointer to lpfc hba data structure. 19055 * @rgn23_data: pointer to configure region 23 data. 19056 * 19057 * This function gets SLI4 port configure region 23 data through memory dump 19058 * mailbox command. When it successfully retrieves data, the size of the data 19059 * will be returned, otherwise, 0 will be returned. 
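 *
 * Minimal usage sketch (illustrative; this mirrors how lpfc_sli_read_link_ste
 * below consumes the helper, with assumed local variable names):
 *
 *	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	if (rgn23_data) {
 *		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
 *		if (data_size)
 *			... parse the region 23 TLVs in rgn23_data ...
 *		kfree(rgn23_data);
 *	}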
19060 **/ 19061 static uint32_t 19062 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19063 { 19064 LPFC_MBOXQ_t *mboxq = NULL; 19065 struct lpfc_dmabuf *mp = NULL; 19066 struct lpfc_mqe *mqe; 19067 uint32_t data_length = 0; 19068 int rc; 19069 19070 if (!rgn23_data) 19071 return 0; 19072 19073 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19074 if (!mboxq) { 19075 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19076 "3105 failed to allocate mailbox memory\n"); 19077 return 0; 19078 } 19079 19080 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19081 goto out; 19082 mqe = &mboxq->u.mqe; 19083 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 19084 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19085 if (rc) 19086 goto out; 19087 data_length = mqe->un.mb_words[5]; 19088 if (data_length == 0) 19089 goto out; 19090 if (data_length > DMP_RGN23_SIZE) { 19091 data_length = 0; 19092 goto out; 19093 } 19094 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 19095 out: 19096 mempool_free(mboxq, phba->mbox_mem_pool); 19097 if (mp) { 19098 lpfc_mbuf_free(phba, mp->virt, mp->phys); 19099 kfree(mp); 19100 } 19101 return data_length; 19102 } 19103 19104 /** 19105 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 19106 * @phba: pointer to lpfc hba data structure. 19107 * 19108 * This function read region 23 and parse TLV for port status to 19109 * decide if the user disaled the port. If the TLV indicates the 19110 * port is disabled, the hba_flag is set accordingly. 19111 **/ 19112 void 19113 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 19114 { 19115 uint8_t *rgn23_data = NULL; 19116 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 19117 uint32_t offset = 0; 19118 19119 /* Get adapter Region 23 data */ 19120 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 19121 if (!rgn23_data) 19122 goto out; 19123 19124 if (phba->sli_rev < LPFC_SLI_REV4) 19125 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 19126 else { 19127 if_type = bf_get(lpfc_sli_intf_if_type, 19128 &phba->sli4_hba.sli_intf); 19129 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 19130 goto out; 19131 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 19132 } 19133 19134 if (!data_size) 19135 goto out; 19136 19137 /* Check the region signature first */ 19138 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19140 "2619 Config region 23 has bad signature\n"); 19141 goto out; 19142 } 19143 offset += 4; 19144 19145 /* Check the data structure version */ 19146 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19147 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19148 "2620 Config region 23 has bad version\n"); 19149 goto out; 19150 } 19151 offset += 4; 19152 19153 /* Parse TLV entries in the region */ 19154 while (offset < data_size) { 19155 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 19156 break; 19157 /* 19158 * If the TLV is not driver specific TLV or driver id is 19159 * not linux driver id, skip the record. 19160 */ 19161 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 19162 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 19163 (rgn23_data[offset + 3] != 0)) { 19164 offset += rgn23_data[offset + 1] * 4 + 4; 19165 continue; 19166 } 19167 19168 /* Driver found a driver specific TLV in the config region */ 19169 sub_tlv_len = rgn23_data[offset + 1] * 4; 19170 offset += 4; 19171 tlv_offset = 0; 19172 19173 /* 19174 * Search for configured port state sub-TLV. 
19175 */ 19176 while ((offset < data_size) && 19177 (tlv_offset < sub_tlv_len)) { 19178 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 19179 offset += 4; 19180 tlv_offset += 4; 19181 break; 19182 } 19183 if (rgn23_data[offset] != PORT_STE_TYPE) { 19184 offset += rgn23_data[offset + 1] * 4 + 4; 19185 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 19186 continue; 19187 } 19188 19189 /* This HBA contains PORT_STE configured */ 19190 if (!rgn23_data[offset + 2]) 19191 phba->hba_flag |= LINK_DISABLED; 19192 19193 goto out; 19194 } 19195 } 19196 19197 out: 19198 kfree(rgn23_data); 19199 return; 19200 } 19201 19202 /** 19203 * lpfc_wr_object - write an object to the firmware 19204 * @phba: HBA structure that indicates port to create a queue on. 19205 * @dmabuf_list: list of dmabufs to write to the port. 19206 * @size: the total byte value of the objects to write to the port. 19207 * @offset: the current offset to be used to start the transfer. 19208 * 19209 * This routine will create a wr_object mailbox command to send to the port. 19210 * the mailbox command will be constructed using the dma buffers described in 19211 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 19212 * BDEs that the imbedded mailbox can support. The @offset variable will be 19213 * used to indicate the starting offset of the transfer and will also return 19214 * the offset after the write object mailbox has completed. @size is used to 19215 * determine the end of the object and whether the eof bit should be set. 19216 * 19217 * Return 0 is successful and offset will contain the the new offset to use 19218 * for the next write. 19219 * Return negative value for error cases. 19220 **/ 19221 int 19222 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 19223 uint32_t size, uint32_t *offset) 19224 { 19225 struct lpfc_mbx_wr_object *wr_object; 19226 LPFC_MBOXQ_t *mbox; 19227 int rc = 0, i = 0; 19228 uint32_t shdr_status, shdr_add_status, shdr_change_status; 19229 uint32_t mbox_tmo; 19230 struct lpfc_dmabuf *dmabuf; 19231 uint32_t written = 0; 19232 bool check_change_status = false; 19233 19234 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19235 if (!mbox) 19236 return -ENOMEM; 19237 19238 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 19239 LPFC_MBOX_OPCODE_WRITE_OBJECT, 19240 sizeof(struct lpfc_mbx_wr_object) - 19241 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 19242 19243 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 19244 wr_object->u.request.write_offset = *offset; 19245 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 19246 wr_object->u.request.object_name[0] = 19247 cpu_to_le32(wr_object->u.request.object_name[0]); 19248 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 19249 list_for_each_entry(dmabuf, dmabuf_list, list) { 19250 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 19251 break; 19252 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 19253 wr_object->u.request.bde[i].addrHigh = 19254 putPaddrHigh(dmabuf->phys); 19255 if (written + SLI4_PAGE_SIZE >= size) { 19256 wr_object->u.request.bde[i].tus.f.bdeSize = 19257 (size - written); 19258 written += (size - written); 19259 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 19260 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 19261 check_change_status = true; 19262 } else { 19263 wr_object->u.request.bde[i].tus.f.bdeSize = 19264 SLI4_PAGE_SIZE; 19265 written += SLI4_PAGE_SIZE; 19266 } 19267 i++; 19268 } 19269 
wr_object->u.request.bde_count = i;
19270 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19271 if (!phba->sli4_hba.intr_enable)
19272 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19273 else {
19274 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19275 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19276 }
19277 /* The IOCTL status is embedded in the mailbox subheader. */
19278 shdr_status = bf_get(lpfc_mbox_hdr_status,
19279 &wr_object->header.cfg_shdr.response);
19280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19281 &wr_object->header.cfg_shdr.response);
19282 if (check_change_status) {
19283 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19284 &wr_object->u.response);
19285 switch (shdr_change_status) {
19286 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19287 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19288 "3198 Firmware write complete: System "
19289 "reboot required to instantiate\n");
19290 break;
19291 case (LPFC_CHANGE_STATUS_FW_RESET):
19292 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19293 "3199 Firmware write complete: Firmware"
19294 " reset required to instantiate\n");
19295 break;
19296 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19298 "3200 Firmware write complete: Port "
19299 "Migration or PCI Reset required to "
19300 "instantiate\n");
19301 break;
19302 case (LPFC_CHANGE_STATUS_PCI_RESET):
19303 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19304 "3201 Firmware write complete: PCI "
19305 "Reset required to instantiate\n");
19306 break;
19307 default:
19308 break;
19309 }
19310 }
19311 if (rc != MBX_TIMEOUT)
19312 mempool_free(mbox, phba->mbox_mem_pool);
19313 if (shdr_status || shdr_add_status || rc) {
19314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19315 "3025 Write Object mailbox failed with "
19316 "status x%x add_status x%x, mbx status x%x\n",
19317 shdr_status, shdr_add_status, rc);
19318 rc = -ENXIO;
19319 *offset = shdr_add_status;
19320 } else
19321 *offset += wr_object->u.response.actual_write_length;
19322 return rc;
19323 }
19324 
19325 /**
19326 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19327 * @vport: pointer to vport data structure.
19328 *
19329 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19330 * and REG_VPI mailbox commands associated with the vport. This function
19331 * is called when the driver wants to restart discovery of the vport due to
19332 * a Clear Virtual Link event.
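 *
 * Assumed call pattern (illustrative, not a verbatim call site): a CVL
 * handler that is about to redo discovery on @vport would first do
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *
 * so that stale REG_LOGIN64/REG_VPI completions cannot race with the new
 * discovery cycle that is started afterwards.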
19333 **/ 19334 void 19335 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19336 { 19337 struct lpfc_hba *phba = vport->phba; 19338 LPFC_MBOXQ_t *mb, *nextmb; 19339 struct lpfc_dmabuf *mp; 19340 struct lpfc_nodelist *ndlp; 19341 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19342 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19343 LIST_HEAD(mbox_cmd_list); 19344 uint8_t restart_loop; 19345 19346 /* Clean up internally queued mailbox commands with the vport */ 19347 spin_lock_irq(&phba->hbalock); 19348 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19349 if (mb->vport != vport) 19350 continue; 19351 19352 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19353 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19354 continue; 19355 19356 list_del(&mb->list); 19357 list_add_tail(&mb->list, &mbox_cmd_list); 19358 } 19359 /* Clean up active mailbox command with the vport */ 19360 mb = phba->sli.mbox_active; 19361 if (mb && (mb->vport == vport)) { 19362 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19363 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19364 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19365 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19366 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19367 /* Put reference count for delayed processing */ 19368 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19369 /* Unregister the RPI when mailbox complete */ 19370 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19371 } 19372 } 19373 /* Cleanup any mailbox completions which are not yet processed */ 19374 do { 19375 restart_loop = 0; 19376 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19377 /* 19378 * If this mailox is already processed or it is 19379 * for another vport ignore it. 19380 */ 19381 if ((mb->vport != vport) || 19382 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19383 continue; 19384 19385 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19386 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19387 continue; 19388 19389 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19390 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19391 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19392 /* Unregister the RPI when mailbox complete */ 19393 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19394 restart_loop = 1; 19395 spin_unlock_irq(&phba->hbalock); 19396 spin_lock(shost->host_lock); 19397 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19398 spin_unlock(shost->host_lock); 19399 spin_lock_irq(&phba->hbalock); 19400 break; 19401 } 19402 } 19403 } while (restart_loop); 19404 19405 spin_unlock_irq(&phba->hbalock); 19406 19407 /* Release the cleaned-up mailbox commands */ 19408 while (!list_empty(&mbox_cmd_list)) { 19409 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19410 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19411 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 19412 if (mp) { 19413 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19414 kfree(mp); 19415 } 19416 mb->ctx_buf = NULL; 19417 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19418 mb->ctx_ndlp = NULL; 19419 if (ndlp) { 19420 spin_lock(shost->host_lock); 19421 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19422 spin_unlock(shost->host_lock); 19423 lpfc_nlp_put(ndlp); 19424 } 19425 } 19426 mempool_free(mb, phba->mbox_mem_pool); 19427 } 19428 19429 /* Release the ndlp with the cleaned-up active mailbox command */ 19430 if (act_mbx_ndlp) { 19431 spin_lock(shost->host_lock); 19432 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19433 spin_unlock(shost->host_lock); 19434 lpfc_nlp_put(act_mbx_ndlp); 19435 } 19436 } 19437 19438 /** 19439 * lpfc_drain_txq - Drain the txq 19440 * @phba: Pointer to 
HBA context object. 19441 * 19442 * This function attempt to submit IOCBs on the txq 19443 * to the adapter. For SLI4 adapters, the txq contains 19444 * ELS IOCBs that have been deferred because the there 19445 * are no SGLs. This congestion can occur with large 19446 * vport counts during node discovery. 19447 **/ 19448 19449 uint32_t 19450 lpfc_drain_txq(struct lpfc_hba *phba) 19451 { 19452 LIST_HEAD(completions); 19453 struct lpfc_sli_ring *pring; 19454 struct lpfc_iocbq *piocbq = NULL; 19455 unsigned long iflags = 0; 19456 char *fail_msg = NULL; 19457 struct lpfc_sglq *sglq; 19458 union lpfc_wqe128 wqe; 19459 uint32_t txq_cnt = 0; 19460 struct lpfc_queue *wq; 19461 19462 if (phba->link_flag & LS_MDS_LOOPBACK) { 19463 /* MDS WQE are posted only to first WQ*/ 19464 wq = phba->sli4_hba.hdwq[0].fcp_wq; 19465 if (unlikely(!wq)) 19466 return 0; 19467 pring = wq->pring; 19468 } else { 19469 wq = phba->sli4_hba.els_wq; 19470 if (unlikely(!wq)) 19471 return 0; 19472 pring = lpfc_phba_elsring(phba); 19473 } 19474 19475 if (unlikely(!pring) || list_empty(&pring->txq)) 19476 return 0; 19477 19478 spin_lock_irqsave(&pring->ring_lock, iflags); 19479 list_for_each_entry(piocbq, &pring->txq, list) { 19480 txq_cnt++; 19481 } 19482 19483 if (txq_cnt > pring->txq_max) 19484 pring->txq_max = txq_cnt; 19485 19486 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19487 19488 while (!list_empty(&pring->txq)) { 19489 spin_lock_irqsave(&pring->ring_lock, iflags); 19490 19491 piocbq = lpfc_sli_ringtx_get(phba, pring); 19492 if (!piocbq) { 19493 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19495 "2823 txq empty and txq_cnt is %d\n ", 19496 txq_cnt); 19497 break; 19498 } 19499 sglq = __lpfc_sli_get_els_sglq(phba, piocbq); 19500 if (!sglq) { 19501 __lpfc_sli_ringtx_put(phba, pring, piocbq); 19502 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19503 break; 19504 } 19505 txq_cnt--; 19506 19507 /* The xri and iocb resources secured, 19508 * attempt to issue request 19509 */ 19510 piocbq->sli4_lxritag = sglq->sli4_lxritag; 19511 piocbq->sli4_xritag = sglq->sli4_xritag; 19512 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 19513 fail_msg = "to convert bpl to sgl"; 19514 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19515 fail_msg = "to convert iocb to wqe"; 19516 else if (lpfc_sli4_wq_put(wq, &wqe)) 19517 fail_msg = " - Wq is full"; 19518 else 19519 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19520 19521 if (fail_msg) { 19522 /* Failed means we can't issue and need to cancel */ 19523 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19524 "2822 IOCB failed %s iotag 0x%x " 19525 "xri 0x%x\n", 19526 fail_msg, 19527 piocbq->iotag, piocbq->sli4_xritag); 19528 list_add_tail(&piocbq->list, &completions); 19529 } 19530 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19531 } 19532 19533 /* Cancel all the IOCBs that cannot be issued */ 19534 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 19535 IOERR_SLI_ABORTED); 19536 19537 return txq_cnt; 19538 } 19539 19540 /** 19541 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 19542 * @phba: Pointer to HBA context object. 19543 * @pwqe: Pointer to command WQE. 19544 * @sglq: Pointer to the scatter gather queue object. 19545 * 19546 * This routine converts the bpl or bde that is in the WQE 19547 * to a sgl list for the sli4 hardware. The physical address 19548 * of the bpl/bde is converted back to a virtual address. 19549 * If the WQE contains a BPL then the list of BDE's is 19550 * converted to sli4_sge's. 
If the WQE contains a single 19551 * BDE then it is converted to a single sli_sge. 19552 * The WQE is still in cpu endianness so the contents of 19553 * the bpl can be used without byte swapping. 19554 * 19555 * Returns valid XRI = Success, NO_XRI = Failure. 19556 */ 19557 static uint16_t 19558 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19559 struct lpfc_sglq *sglq) 19560 { 19561 uint16_t xritag = NO_XRI; 19562 struct ulp_bde64 *bpl = NULL; 19563 struct ulp_bde64 bde; 19564 struct sli4_sge *sgl = NULL; 19565 struct lpfc_dmabuf *dmabuf; 19566 union lpfc_wqe128 *wqe; 19567 int numBdes = 0; 19568 int i = 0; 19569 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19570 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19571 uint32_t cmd; 19572 19573 if (!pwqeq || !sglq) 19574 return xritag; 19575 19576 sgl = (struct sli4_sge *)sglq->sgl; 19577 wqe = &pwqeq->wqe; 19578 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19579 19580 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19581 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19582 return sglq->sli4_xritag; 19583 numBdes = pwqeq->rsvd2; 19584 if (numBdes) { 19585 /* The addrHigh and addrLow fields within the WQE 19586 * have not been byteswapped yet so there is no 19587 * need to swap them back. 19588 */ 19589 if (pwqeq->context3) 19590 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19591 else 19592 return xritag; 19593 19594 bpl = (struct ulp_bde64 *)dmabuf->virt; 19595 if (!bpl) 19596 return xritag; 19597 19598 for (i = 0; i < numBdes; i++) { 19599 /* Should already be byte swapped. */ 19600 sgl->addr_hi = bpl->addrHigh; 19601 sgl->addr_lo = bpl->addrLow; 19602 19603 sgl->word2 = le32_to_cpu(sgl->word2); 19604 if ((i+1) == numBdes) 19605 bf_set(lpfc_sli4_sge_last, sgl, 1); 19606 else 19607 bf_set(lpfc_sli4_sge_last, sgl, 0); 19608 /* swap the size field back to the cpu so we 19609 * can assign it to the sgl. 19610 */ 19611 bde.tus.w = le32_to_cpu(bpl->tus.w); 19612 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19613 /* The offsets in the sgl need to be accumulated 19614 * separately for the request and reply lists. 19615 * The request is always first, the reply follows. 19616 */ 19617 switch (cmd) { 19618 case CMD_GEN_REQUEST64_WQE: 19619 /* add up the reply sg entries */ 19620 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19621 inbound++; 19622 /* first inbound? reset the offset */ 19623 if (inbound == 1) 19624 offset = 0; 19625 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19626 bf_set(lpfc_sli4_sge_type, sgl, 19627 LPFC_SGE_TYPE_DATA); 19628 offset += bde.tus.f.bdeSize; 19629 break; 19630 case CMD_FCP_TRSP64_WQE: 19631 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19632 bf_set(lpfc_sli4_sge_type, sgl, 19633 LPFC_SGE_TYPE_DATA); 19634 break; 19635 case CMD_FCP_TSEND64_WQE: 19636 case CMD_FCP_TRECEIVE64_WQE: 19637 bf_set(lpfc_sli4_sge_type, sgl, 19638 bpl->tus.f.bdeFlags); 19639 if (i < 3) 19640 offset = 0; 19641 else 19642 offset += bde.tus.f.bdeSize; 19643 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19644 break; 19645 } 19646 sgl->word2 = cpu_to_le32(sgl->word2); 19647 bpl++; 19648 sgl++; 19649 } 19650 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19651 /* The addrHigh and addrLow fields of the BDE have not 19652 * been byteswapped yet so they need to be swapped 19653 * before putting them in the sgl. 
19654 */ 19655 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19656 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19657 sgl->word2 = le32_to_cpu(sgl->word2); 19658 bf_set(lpfc_sli4_sge_last, sgl, 1); 19659 sgl->word2 = cpu_to_le32(sgl->word2); 19660 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19661 } 19662 return sglq->sli4_xritag; 19663 } 19664 19665 /** 19666 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19667 * @phba: Pointer to HBA context object. 19668 * @ring_number: Base sli ring number 19669 * @pwqe: Pointer to command WQE. 19670 **/ 19671 int 19672 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19673 struct lpfc_iocbq *pwqe) 19674 { 19675 union lpfc_wqe128 *wqe = &pwqe->wqe; 19676 struct lpfc_nvmet_rcv_ctx *ctxp; 19677 struct lpfc_queue *wq; 19678 struct lpfc_sglq *sglq; 19679 struct lpfc_sli_ring *pring; 19680 unsigned long iflags; 19681 uint32_t ret = 0; 19682 19683 /* NVME_LS and NVME_LS ABTS requests. */ 19684 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19685 pring = phba->sli4_hba.nvmels_wq->pring; 19686 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19687 qp, wq_access); 19688 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19689 if (!sglq) { 19690 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19691 return WQE_BUSY; 19692 } 19693 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19694 pwqe->sli4_xritag = sglq->sli4_xritag; 19695 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19696 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19697 return WQE_ERROR; 19698 } 19699 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19700 pwqe->sli4_xritag); 19701 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19702 if (ret) { 19703 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19704 return ret; 19705 } 19706 19707 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19708 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19709 return 0; 19710 } 19711 19712 /* NVME_FCREQ and NVME_ABTS requests */ 19713 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19714 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19715 wq = qp->nvme_wq; 19716 pring = wq->pring; 19717 19718 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19719 19720 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19721 qp, wq_access); 19722 ret = lpfc_sli4_wq_put(wq, wqe); 19723 if (ret) { 19724 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19725 return ret; 19726 } 19727 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19728 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19729 return 0; 19730 } 19731 19732 /* NVMET requests */ 19733 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19734 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 19735 wq = qp->nvme_wq; 19736 pring = wq->pring; 19737 19738 ctxp = pwqe->context2; 19739 sglq = ctxp->ctxbuf->sglq; 19740 if (pwqe->sli4_xritag == NO_XRI) { 19741 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19742 pwqe->sli4_xritag = sglq->sli4_xritag; 19743 } 19744 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19745 pwqe->sli4_xritag); 19746 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19747 19748 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19749 qp, wq_access); 19750 ret = lpfc_sli4_wq_put(wq, wqe); 19751 if (ret) { 19752 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19753 return ret; 19754 } 19755 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19756 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19757 return 0; 19758 } 19759 return WQE_ERROR; 19760 } 19761 19762 #ifdef LPFC_MXP_STAT 19763 /** 19764 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 19765 * @phba: pointer to lpfc hba data structure. 19766 * @hwqid: belong to which HWQ. 19767 * 19768 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 19769 * 15 seconds after a test case is running. 19770 * 19771 * The user should call lpfc_debugfs_multixripools_write before running a test 19772 * case to clear stat_snapshot_taken. Then the user starts a test case. During 19773 * test case is running, stat_snapshot_taken is incremented by 1 every time when 19774 * this routine is called from heartbeat timer. When stat_snapshot_taken is 19775 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 19776 **/ 19777 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 19778 { 19779 struct lpfc_sli4_hdw_queue *qp; 19780 struct lpfc_multixri_pool *multixri_pool; 19781 struct lpfc_pvt_pool *pvt_pool; 19782 struct lpfc_pbl_pool *pbl_pool; 19783 u32 txcmplq_cnt; 19784 19785 qp = &phba->sli4_hba.hdwq[hwqid]; 19786 multixri_pool = qp->p_multixri_pool; 19787 if (!multixri_pool) 19788 return; 19789 19790 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 19791 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19792 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19793 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19794 if (qp->nvme_wq) 19795 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 19796 19797 multixri_pool->stat_pbl_count = pbl_pool->count; 19798 multixri_pool->stat_pvt_count = pvt_pool->count; 19799 multixri_pool->stat_busy_count = txcmplq_cnt; 19800 } 19801 19802 multixri_pool->stat_snapshot_taken++; 19803 } 19804 #endif 19805 19806 /** 19807 * lpfc_adjust_pvt_pool_count - Adjust private pool count 19808 * @phba: pointer to lpfc hba data structure. 19809 * @hwqid: belong to which HWQ. 19810 * 19811 * This routine moves some XRIs from private to public pool when private pool 19812 * is not busy. 19813 **/ 19814 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 19815 { 19816 struct lpfc_multixri_pool *multixri_pool; 19817 u32 io_req_count; 19818 u32 prev_io_req_count; 19819 19820 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 19821 if (!multixri_pool) 19822 return; 19823 io_req_count = multixri_pool->io_req_count; 19824 prev_io_req_count = multixri_pool->prev_io_req_count; 19825 19826 if (prev_io_req_count != io_req_count) { 19827 /* Private pool is busy */ 19828 multixri_pool->prev_io_req_count = io_req_count; 19829 } else { 19830 /* Private pool is not busy. 19831 * Move XRIs from private to public pool. 
19832 */
19833 lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19834 }
19835 }
19836 
19837 /**
19838 * lpfc_adjust_high_watermark - Adjust high watermark
19839 * @phba: pointer to lpfc hba data structure.
19840 * @hwqid: belong to which HWQ.
19841 *
19842 * This routine sets the high watermark to the number of outstanding XRIs,
19843 * but makes sure the new value stays between xri_limit/2 and xri_limit.
19844 **/
19845 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19846 {
19847 u32 new_watermark;
19848 u32 watermark_max;
19849 u32 watermark_min;
19850 u32 xri_limit;
19851 u32 txcmplq_cnt;
19852 u32 abts_io_bufs;
19853 struct lpfc_multixri_pool *multixri_pool;
19854 struct lpfc_sli4_hdw_queue *qp;
19855 
19856 qp = &phba->sli4_hba.hdwq[hwqid];
19857 multixri_pool = qp->p_multixri_pool;
19858 if (!multixri_pool)
19859 return;
19860 xri_limit = multixri_pool->xri_limit;
19861 
19862 watermark_max = xri_limit;
19863 watermark_min = xri_limit / 2;
19864 
19865 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19866 abts_io_bufs = qp->abts_scsi_io_bufs;
19867 if (qp->nvme_wq) {
19868 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19869 abts_io_bufs += qp->abts_nvme_io_bufs;
19870 }
19871 
19872 new_watermark = txcmplq_cnt + abts_io_bufs;
19873 new_watermark = min(watermark_max, new_watermark);
19874 new_watermark = max(watermark_min, new_watermark);
19875 multixri_pool->pvt_pool.high_watermark = new_watermark;
19876 
19877 #ifdef LPFC_MXP_STAT
19878 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19879 new_watermark);
19880 #endif
19881 }
19882 
19883 /**
19884 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
19885 * @phba: pointer to lpfc hba data structure.
19886 * @hwqid: belong to which HWQ.
19887 *
19888 * This routine is called from the heartbeat timer when pvt_pool is idle.
19889 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
19890 * The first step moves (all - low_watermark) XRIs.
19891 * The second step moves the rest of XRIs.
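 *
 * Worked example (numbers are illustrative only): with pvt_pool->count at
 * 100 and low_watermark at 20, one idle pass keeps the first 20 bufs on the
 * private list and moves the other 80 to pbl_pool; if the pool stays idle,
 * the next pass takes the second step and moves the remaining 20 as well,
 * leaving pvt_pool->count at 0.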
19892 **/ 19893 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 19894 { 19895 struct lpfc_pbl_pool *pbl_pool; 19896 struct lpfc_pvt_pool *pvt_pool; 19897 struct lpfc_sli4_hdw_queue *qp; 19898 struct lpfc_io_buf *lpfc_ncmd; 19899 struct lpfc_io_buf *lpfc_ncmd_next; 19900 unsigned long iflag; 19901 struct list_head tmp_list; 19902 u32 tmp_count; 19903 19904 qp = &phba->sli4_hba.hdwq[hwqid]; 19905 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19906 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19907 tmp_count = 0; 19908 19909 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 19910 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 19911 19912 if (pvt_pool->count > pvt_pool->low_watermark) { 19913 /* Step 1: move (all - low_watermark) from pvt_pool 19914 * to pbl_pool 19915 */ 19916 19917 /* Move low watermark of bufs from pvt_pool to tmp_list */ 19918 INIT_LIST_HEAD(&tmp_list); 19919 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 19920 &pvt_pool->list, list) { 19921 list_move_tail(&lpfc_ncmd->list, &tmp_list); 19922 tmp_count++; 19923 if (tmp_count >= pvt_pool->low_watermark) 19924 break; 19925 } 19926 19927 /* Move all bufs from pvt_pool to pbl_pool */ 19928 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19929 19930 /* Move all bufs from tmp_list to pvt_pool */ 19931 list_splice(&tmp_list, &pvt_pool->list); 19932 19933 pbl_pool->count += (pvt_pool->count - tmp_count); 19934 pvt_pool->count = tmp_count; 19935 } else { 19936 /* Step 2: move the rest from pvt_pool to pbl_pool */ 19937 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19938 pbl_pool->count += pvt_pool->count; 19939 pvt_pool->count = 0; 19940 } 19941 19942 spin_unlock(&pvt_pool->lock); 19943 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19944 } 19945 19946 /** 19947 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19948 * @phba: pointer to lpfc hba data structure 19949 * @pbl_pool: specified public free XRI pool 19950 * @pvt_pool: specified private free XRI pool 19951 * @count: number of XRIs to move 19952 * 19953 * This routine tries to move some free common bufs from the specified pbl_pool 19954 * to the specified pvt_pool. It might move less than count XRIs if there's not 19955 * enough in public pool. 
19956 * 19957 * Return: 19958 * true - if XRIs are successfully moved from the specified pbl_pool to the 19959 * specified pvt_pool 19960 * false - if the specified pbl_pool is empty or locked by someone else 19961 **/ 19962 static bool 19963 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19964 struct lpfc_pbl_pool *pbl_pool, 19965 struct lpfc_pvt_pool *pvt_pool, u32 count) 19966 { 19967 struct lpfc_io_buf *lpfc_ncmd; 19968 struct lpfc_io_buf *lpfc_ncmd_next; 19969 unsigned long iflag; 19970 int ret; 19971 19972 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 19973 if (ret) { 19974 if (pbl_pool->count) { 19975 /* Move a batch of XRIs from public to private pool */ 19976 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 19977 list_for_each_entry_safe(lpfc_ncmd, 19978 lpfc_ncmd_next, 19979 &pbl_pool->list, 19980 list) { 19981 list_move_tail(&lpfc_ncmd->list, 19982 &pvt_pool->list); 19983 pvt_pool->count++; 19984 pbl_pool->count--; 19985 count--; 19986 if (count == 0) 19987 break; 19988 } 19989 19990 spin_unlock(&pvt_pool->lock); 19991 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19992 return true; 19993 } 19994 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19995 } 19996 19997 return false; 19998 } 19999 20000 /** 20001 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 20002 * @phba: pointer to lpfc hba data structure. 20003 * @hwqid: belong to which HWQ. 20004 * @count: number of XRIs to move 20005 * 20006 * This routine tries to find some free common bufs in one of public pools with 20007 * Round Robin method. The search always starts from local hwqid, then the next 20008 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 20009 * a batch of free common bufs are moved to private pool on hwqid. 20010 * It might move less than count XRIs if there's not enough in public pool. 
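 *
 * Search-order example (illustrative): with cfg_hdw_queue == 4, hwqid == 1
 * and rrb_next_hwqid == 2, the local pbl_pool of HWQ 1 is tried first; if
 * it is empty or its lock is contended, the loop then probes HWQ 3, 0, 1
 * and finally 2 before giving up, and rrb_next_hwqid is left at whichever
 * HWQ the walk stopped on.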
20011 **/ 20012 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) 20013 { 20014 struct lpfc_multixri_pool *multixri_pool; 20015 struct lpfc_multixri_pool *next_multixri_pool; 20016 struct lpfc_pvt_pool *pvt_pool; 20017 struct lpfc_pbl_pool *pbl_pool; 20018 struct lpfc_sli4_hdw_queue *qp; 20019 u32 next_hwqid; 20020 u32 hwq_count; 20021 int ret; 20022 20023 qp = &phba->sli4_hba.hdwq[hwqid]; 20024 multixri_pool = qp->p_multixri_pool; 20025 pvt_pool = &multixri_pool->pvt_pool; 20026 pbl_pool = &multixri_pool->pbl_pool; 20027 20028 /* Check if local pbl_pool is available */ 20029 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); 20030 if (ret) { 20031 #ifdef LPFC_MXP_STAT 20032 multixri_pool->local_pbl_hit_count++; 20033 #endif 20034 return; 20035 } 20036 20037 hwq_count = phba->cfg_hdw_queue; 20038 20039 /* Get the next hwqid which was found last time */ 20040 next_hwqid = multixri_pool->rrb_next_hwqid; 20041 20042 do { 20043 /* Go to next hwq */ 20044 next_hwqid = (next_hwqid + 1) % hwq_count; 20045 20046 next_multixri_pool = 20047 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; 20048 pbl_pool = &next_multixri_pool->pbl_pool; 20049 20050 /* Check if the public free xri pool is available */ 20051 ret = _lpfc_move_xri_pbl_to_pvt( 20052 phba, qp, pbl_pool, pvt_pool, count); 20053 20054 /* Exit while-loop if success or all hwqid are checked */ 20055 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); 20056 20057 /* Starting point for the next time */ 20058 multixri_pool->rrb_next_hwqid = next_hwqid; 20059 20060 if (!ret) { 20061 /* stats: all public pools are empty*/ 20062 multixri_pool->pbl_empty_count++; 20063 } 20064 20065 #ifdef LPFC_MXP_STAT 20066 if (ret) { 20067 if (next_hwqid == hwqid) 20068 multixri_pool->local_pbl_hit_count++; 20069 else 20070 multixri_pool->other_pbl_hit_count++; 20071 } 20072 #endif 20073 } 20074 20075 /** 20076 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark 20077 * @phba: pointer to lpfc hba data structure. 20078 * @qp: belong to which HWQ. 20079 * 20080 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than 20081 * low watermark. 20082 **/ 20083 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) 20084 { 20085 struct lpfc_multixri_pool *multixri_pool; 20086 struct lpfc_pvt_pool *pvt_pool; 20087 20088 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 20089 pvt_pool = &multixri_pool->pvt_pool; 20090 20091 if (pvt_pool->count < pvt_pool->low_watermark) 20092 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 20093 } 20094 20095 /** 20096 * lpfc_release_io_buf - Return one IO buf back to free pool 20097 * @phba: pointer to lpfc hba data structure. 20098 * @lpfc_ncmd: IO buf to be returned. 20099 * @qp: belong to which HWQ. 20100 * 20101 * This routine returns one IO buf back to free pool. If this is an urgent IO, 20102 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, 20103 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and 20104 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to 20105 * lpfc_io_buf_list_put. 
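 *
 * Decision sketch (illustrative numbers): with low_watermark 10,
 * high_watermark 40 and xri_limit 60, a returned XRI goes back to pvt_pool
 * when pvt_pool->count < 10, or when the total still owned
 * (pvt_pool->count + txcmplq_cnt + abts_io_bufs) is below 60 and
 * pvt_pool->count < 40; in every other case it is parked in pbl_pool.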
20106 **/ 20107 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 20108 struct lpfc_sli4_hdw_queue *qp) 20109 { 20110 unsigned long iflag; 20111 struct lpfc_pbl_pool *pbl_pool; 20112 struct lpfc_pvt_pool *pvt_pool; 20113 struct lpfc_epd_pool *epd_pool; 20114 u32 txcmplq_cnt; 20115 u32 xri_owned; 20116 u32 xri_limit; 20117 u32 abts_io_bufs; 20118 20119 /* MUST zero fields if buffer is reused by another protocol */ 20120 lpfc_ncmd->nvmeCmd = NULL; 20121 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; 20122 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; 20123 20124 if (phba->cfg_xri_rebalancing) { 20125 if (lpfc_ncmd->expedite) { 20126 /* Return to expedite pool */ 20127 epd_pool = &phba->epd_pool; 20128 spin_lock_irqsave(&epd_pool->lock, iflag); 20129 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 20130 epd_pool->count++; 20131 spin_unlock_irqrestore(&epd_pool->lock, iflag); 20132 return; 20133 } 20134 20135 /* Avoid invalid access if an IO sneaks in and is being rejected 20136 * just _after_ xri pools are destroyed in lpfc_offline. 20137 * Nothing much can be done at this point. 20138 */ 20139 if (!qp->p_multixri_pool) 20140 return; 20141 20142 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20143 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20144 20145 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 20146 abts_io_bufs = qp->abts_scsi_io_bufs; 20147 if (qp->nvme_wq) { 20148 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 20149 abts_io_bufs += qp->abts_nvme_io_bufs; 20150 } 20151 20152 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 20153 xri_limit = qp->p_multixri_pool->xri_limit; 20154 20155 #ifdef LPFC_MXP_STAT 20156 if (xri_owned <= xri_limit) 20157 qp->p_multixri_pool->below_limit_count++; 20158 else 20159 qp->p_multixri_pool->above_limit_count++; 20160 #endif 20161 20162 /* XRI goes to either public or private free xri pool 20163 * based on watermark and xri_limit 20164 */ 20165 if ((pvt_pool->count < pvt_pool->low_watermark) || 20166 (xri_owned < xri_limit && 20167 pvt_pool->count < pvt_pool->high_watermark)) { 20168 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 20169 qp, free_pvt_pool); 20170 list_add_tail(&lpfc_ncmd->list, 20171 &pvt_pool->list); 20172 pvt_pool->count++; 20173 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 20174 } else { 20175 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 20176 qp, free_pub_pool); 20177 list_add_tail(&lpfc_ncmd->list, 20178 &pbl_pool->list); 20179 pbl_pool->count++; 20180 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20181 } 20182 } else { 20183 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 20184 qp, free_xri); 20185 list_add_tail(&lpfc_ncmd->list, 20186 &qp->lpfc_io_buf_list_put); 20187 qp->put_io_bufs++; 20188 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 20189 iflag); 20190 } 20191 } 20192 20193 /** 20194 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 20195 * @phba: pointer to lpfc hba data structure. 20196 * @pvt_pool: pointer to private pool data structure. 20197 * @ndlp: pointer to lpfc nodelist data structure. 20198 * 20199 * This routine tries to get one free IO buf from private pool. 
20200 *
20201 * Return:
20202 * pointer to one free IO buf - if private pool is not empty
20203 * NULL - if private pool is empty
20204 **/
20205 static struct lpfc_io_buf *
20206 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
20207 struct lpfc_sli4_hdw_queue *qp,
20208 struct lpfc_pvt_pool *pvt_pool,
20209 struct lpfc_nodelist *ndlp)
20210 {
20211 struct lpfc_io_buf *lpfc_ncmd;
20212 struct lpfc_io_buf *lpfc_ncmd_next;
20213 unsigned long iflag;
20214 
20215 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
20216 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20217 &pvt_pool->list, list) {
20218 if (lpfc_test_rrq_active(
20219 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
20220 continue;
20221 list_del(&lpfc_ncmd->list);
20222 pvt_pool->count--;
20223 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20224 return lpfc_ncmd;
20225 }
20226 spin_unlock_irqrestore(&pvt_pool->lock, iflag);
20227 
20228 return NULL;
20229 }
20230 
20231 /**
20232 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
20233 * @phba: pointer to lpfc hba data structure.
20234 *
20235 * This routine tries to get one free IO buf from expedite pool.
20236 *
20237 * Return:
20238 * pointer to one free IO buf - if expedite pool is not empty
20239 * NULL - if expedite pool is empty
20240 **/
20241 static struct lpfc_io_buf *
20242 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
20243 {
20244 struct lpfc_io_buf *lpfc_ncmd;
20245 struct lpfc_io_buf *lpfc_ncmd_next;
20246 unsigned long iflag;
20247 struct lpfc_epd_pool *epd_pool;
20248 
20249 epd_pool = &phba->epd_pool;
20250 lpfc_ncmd = NULL;
20251 
20252 spin_lock_irqsave(&epd_pool->lock, iflag);
20253 if (epd_pool->count > 0) {
20254 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
20255 &epd_pool->list, list) {
20256 list_del(&lpfc_ncmd->list);
20257 epd_pool->count--;
20258 break;
20259 }
20260 }
20261 spin_unlock_irqrestore(&epd_pool->lock, iflag);
20262 
20263 return lpfc_ncmd;
20264 }
20265 
20266 /**
20267 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
20268 * @phba: pointer to lpfc hba data structure.
20269 * @ndlp: pointer to lpfc nodelist data structure.
20270 * @hwqid: index of the HWQ to allocate from.
20271 * @expedite: 1 means this request is urgent.
20272 *
20273 * This routine will do the following actions and then return a pointer to
20274 * one free IO buf.
20275 *
20276 * 1. If the private free xri pool is empty, move some XRIs from public to
20277 * private pool.
20278 * 2. Get one XRI from private free xri pool.
20279 * 3. If we fail to get one from pvt_pool and this is an expedite request,
20280 * get one free xri from expedite pool.
20281 *
20282 * Note: ndlp is only used on SCSI side for RRQ testing.
20283 * The caller should pass NULL for ndlp on NVME side.
20284 *
20285 * Return:
20286 * pointer to one free IO buf - if one was found
20287 * NULL - if no free IO buf is available
20288 **/
20289 static struct lpfc_io_buf *
20290 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
20291 struct lpfc_nodelist *ndlp,
20292 int hwqid, int expedite)
20293 {
20294 struct lpfc_sli4_hdw_queue *qp;
20295 struct lpfc_multixri_pool *multixri_pool;
20296 struct lpfc_pvt_pool *pvt_pool;
20297 struct lpfc_io_buf *lpfc_ncmd;
20298 
20299 qp = &phba->sli4_hba.hdwq[hwqid];
20300 lpfc_ncmd = NULL;
20301 multixri_pool = qp->p_multixri_pool;
20302 pvt_pool = &multixri_pool->pvt_pool;
20303 multixri_pool->io_req_count++;
20304 
20305 /* If pvt_pool is empty, move some XRIs from public to private pool */
20306 if (pvt_pool->count == 0)
20307 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
20308 
20309 /* Get one XRI from private free xri pool */
20310 lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
20311 
20312 if (lpfc_ncmd) {
20313 lpfc_ncmd->hdwq = qp;
20314 lpfc_ncmd->hdwq_no = hwqid;
20315 } else if (expedite) {
20316 /* If we fail to get one from pvt_pool and this is an expedite
20317 * request, get one free xri from expedite pool.
20318 */
20319 lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
20320 }
20321 
20322 return lpfc_ncmd;
20323 }
20324 
/* Remove and return the first usable IO buf from hdwq[idx]'s get list,
 * skipping XRIs with an active RRQ and buffers flagged LPFC_SBUF_NOT_POSTED.
 */
20325 static inline struct lpfc_io_buf *
20326 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
20327 {
20328 struct lpfc_sli4_hdw_queue *qp;
20329 struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
20330 
20331 qp = &phba->sli4_hba.hdwq[idx];
20332 list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
20333 &qp->lpfc_io_buf_list_get, list) {
20334 if (lpfc_test_rrq_active(phba, ndlp,
20335 lpfc_cmd->cur_iocbq.sli4_lxritag))
20336 continue;
20337 
20338 if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
20339 continue;
20340 
20341 list_del_init(&lpfc_cmd->list);
20342 qp->get_io_bufs--;
20343 lpfc_cmd->hdwq = qp;
20344 lpfc_cmd->hdwq_no = idx;
20345 return lpfc_cmd;
20346 }
20347 return NULL;
20348 }
20349 
20350 /**
20351 * lpfc_get_io_buf - Get one IO buffer from free pool
20352 * @phba: The HBA for which this call is being executed.
20353 * @ndlp: pointer to lpfc nodelist data structure.
20354 * @hwqid: index of the HWQ to allocate from.
20355 * @expedite: 1 means this request is urgent.
20356 *
20357 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
20358 * it removes an IO buffer from the multiXRI pools; if cfg_xri_rebalancing==0,
20359 * it removes one from the head of the @hwqid HWQ's io_buf list and returns it.
20360 *
20361 * Note: ndlp is only used on SCSI side for RRQ testing.
20362 * The caller should pass NULL for ndlp on NVME side.
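 *
 * Hypothetical caller sketch (illustrative only, not a verbatim excerpt from
 * this driver's submission paths). The SCSI side passes ndlp so XRIs with an
 * active RRQ are skipped; the NVME side passes NULL:
 *
 *	lpfc_ncmd = lpfc_get_io_buf(phba, NULL, hwqid, expedite);
 *	if (!lpfc_ncmd)
 *		return NULL;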
20363 *
20364 * Return codes:
20365 * NULL - no free IO buffer available
20366 * Pointer to lpfc_io_buf - Success
20367 **/
20368 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
20369 struct lpfc_nodelist *ndlp,
20370 u32 hwqid, int expedite)
20371 {
20372 struct lpfc_sli4_hdw_queue *qp;
20373 unsigned long iflag;
20374 struct lpfc_io_buf *lpfc_cmd;
20375 
20376 qp = &phba->sli4_hba.hdwq[hwqid];
20377 lpfc_cmd = NULL;
20378 
20379 if (phba->cfg_xri_rebalancing) {
20380 lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
20381 phba, ndlp, hwqid, expedite);
20382 } else {
20383 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
20384 qp, alloc_xri_get);
20385 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
20386 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20387 if (!lpfc_cmd) {
20388 lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
20389 qp, alloc_xri_put);
20390 list_splice(&qp->lpfc_io_buf_list_put,
20391 &qp->lpfc_io_buf_list_get);
20392 qp->get_io_bufs += qp->put_io_bufs;
20393 INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
20394 qp->put_io_bufs = 0;
20395 spin_unlock(&qp->io_buf_list_put_lock);
20396 if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
20397 expedite)
20398 lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
20399 }
20400 spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
20401 }
20402 
20403 return lpfc_cmd;
20404 }
20405