1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/interrupt.h> 27 #include <linux/delay.h> 28 #include <linux/slab.h> 29 #include <linux/lockdep.h> 30 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_cmnd.h> 33 #include <scsi/scsi_device.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_transport_fc.h> 36 #include <scsi/fc/fc_fs.h> 37 #include <linux/aer.h> 38 #ifdef CONFIG_X86 39 #include <asm/set_memory.h> 40 #endif 41 42 #include <linux/nvme-fc-driver.h> 43 44 #include "lpfc_hw4.h" 45 #include "lpfc_hw.h" 46 #include "lpfc_sli.h" 47 #include "lpfc_sli4.h" 48 #include "lpfc_nl.h" 49 #include "lpfc_disc.h" 50 #include "lpfc.h" 51 #include "lpfc_scsi.h" 52 #include "lpfc_nvme.h" 53 #include "lpfc_nvmet.h" 54 #include "lpfc_crtn.h" 55 #include "lpfc_logmsg.h" 56 #include "lpfc_compat.h" 57 #include "lpfc_debugfs.h" 58 #include "lpfc_vport.h" 59 #include "lpfc_version.h" 60 61 /* There are only four IOCB completion types. */ 62 typedef enum _lpfc_iocb_type { 63 LPFC_UNKNOWN_IOCB, 64 LPFC_UNSOL_IOCB, 65 LPFC_SOL_IOCB, 66 LPFC_ABORT_IOCB 67 } lpfc_iocb_type; 68 69 70 /* Provide function prototypes local to this module. 
 */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if @q is invalid, or -EBUSY if no entries are available
 * on @q.
 * The caller is expected to hold the hbalock when calling this routine.
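 *
 * A minimal usage sketch (illustrative only; "wq" and "wqe128" are
 * hypothetical locals, and the caller must already hold the appropriate
 * lock and have built the WQE):
 *
 *	rc = lpfc_sli4_wq_put(wq, &wqe128);
 *	if (rc)
 *		goto out_busy;
 *
 * A non-zero return means the WQE was not posted and no doorbell was rung.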
136 **/ 137 static int 138 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) 139 { 140 union lpfc_wqe *temp_wqe; 141 struct lpfc_register doorbell; 142 uint32_t host_index; 143 uint32_t idx; 144 uint32_t i = 0; 145 uint8_t *tmp; 146 u32 if_type; 147 148 /* sanity check on queue memory */ 149 if (unlikely(!q)) 150 return -ENOMEM; 151 temp_wqe = lpfc_sli4_qe(q, q->host_index); 152 153 /* If the host has not yet processed the next entry then we are done */ 154 idx = ((q->host_index + 1) % q->entry_count); 155 if (idx == q->hba_index) { 156 q->WQ_overflow++; 157 return -EBUSY; 158 } 159 q->WQ_posted++; 160 /* set consumption flag every once in a while */ 161 if (!((q->host_index + 1) % q->notify_interval)) 162 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 163 else 164 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); 165 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 166 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 167 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 168 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 169 /* write to DPP aperture taking advatage of Combined Writes */ 170 tmp = (uint8_t *)temp_wqe; 171 #ifdef __raw_writeq 172 for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) 173 __raw_writeq(*((uint64_t *)(tmp + i)), 174 q->dpp_regaddr + i); 175 #else 176 for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) 177 __raw_writel(*((uint32_t *)(tmp + i)), 178 q->dpp_regaddr + i); 179 #endif 180 } 181 /* ensure WQE bcopy and DPP flushed before doorbell write */ 182 wmb(); 183 184 /* Update the host index before invoking device */ 185 host_index = q->host_index; 186 187 q->host_index = idx; 188 189 /* Ring Doorbell */ 190 doorbell.word0 = 0; 191 if (q->db_format == LPFC_DB_LIST_FORMAT) { 192 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 193 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); 194 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); 195 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, 196 q->dpp_id); 197 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, 198 q->queue_id); 199 } else { 200 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 201 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 202 203 /* Leave bits <23:16> clear for if_type 6 dpp */ 204 if_type = bf_get(lpfc_sli_intf_if_type, 205 &q->phba->sli4_hba.sli_intf); 206 if (if_type != LPFC_SLI_INTF_IF_TYPE_6) 207 bf_set(lpfc_wq_db_list_fm_index, &doorbell, 208 host_index); 209 } 210 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 211 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 212 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 213 } else { 214 return -EINVAL; 215 } 216 writel(doorbell.word0, q->db_regaddr); 217 218 return 0; 219 } 220 221 /** 222 * lpfc_sli4_wq_release - Updates internal hba index for WQ 223 * @q: The Work Queue to operate on. 224 * @index: The index to advance the hba index to. 225 * 226 * This routine will update the HBA index of a queue to reflect consumption of 227 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 228 * an entry the host calls this function to update the queue's internal 229 * pointers. This routine returns the number of entries that were consumed by 230 * the HBA. 
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
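 *
 * A typical consumer drains the queue in a loop and then rings the EQ
 * doorbell for everything it consumed, as lpfc_sli4_eq_flush() and
 * lpfc_sli4_process_eq() below do (sketch):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq))) {
 *		__lpfc_sli4_consume_eqe(phba, eq, eqe);
 *		count++;
 *	}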
322 **/ 323 static struct lpfc_eqe * 324 lpfc_sli4_eq_get(struct lpfc_queue *q) 325 { 326 struct lpfc_eqe *eqe; 327 328 /* sanity check on queue memory */ 329 if (unlikely(!q)) 330 return NULL; 331 eqe = lpfc_sli4_qe(q, q->host_index); 332 333 /* If the next EQE is not valid then we are done */ 334 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) 335 return NULL; 336 337 /* 338 * insert barrier for instruction interlock : data from the hardware 339 * must have the valid bit checked before it can be copied and acted 340 * upon. Speculative instructions were allowing a bcopy at the start 341 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately 342 * after our return, to copy data before the valid bit check above 343 * was done. As such, some of the copied data was stale. The barrier 344 * ensures the check is before any data is copied. 345 */ 346 mb(); 347 return eqe; 348 } 349 350 /** 351 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ 352 * @q: The Event Queue to disable interrupts 353 * 354 **/ 355 void 356 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) 357 { 358 struct lpfc_register doorbell; 359 360 doorbell.word0 = 0; 361 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 362 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 363 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 364 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 365 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 366 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 367 } 368 369 /** 370 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ 371 * @q: The Event Queue to disable interrupts 372 * 373 **/ 374 void 375 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) 376 { 377 struct lpfc_register doorbell; 378 379 doorbell.word0 = 0; 380 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 381 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 382 } 383 384 /** 385 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state 386 * @phba: adapter with EQ 387 * @q: The Event Queue that the host has completed processing for. 388 * @count: Number of elements that have been consumed 389 * @arm: Indicates whether the host wants to arms this CQ. 390 * 391 * This routine will notify the HBA, by ringing the doorbell, that count 392 * number of EQEs have been processed. The @arm parameter indicates whether 393 * the queue should be rearmed when ringing the doorbell. 
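 *
 * For example, the EQ flush path below drops every pending entry and then
 * re-arms the queue with:
 *
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);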
394 **/ 395 void 396 lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 397 uint32_t count, bool arm) 398 { 399 struct lpfc_register doorbell; 400 401 /* sanity check on queue memory */ 402 if (unlikely(!q || (count == 0 && !arm))) 403 return; 404 405 /* ring doorbell for number popped */ 406 doorbell.word0 = 0; 407 if (arm) { 408 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 409 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 410 } 411 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); 412 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 413 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 414 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 415 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 416 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 417 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 418 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 419 readl(q->phba->sli4_hba.EQDBregaddr); 420 } 421 422 /** 423 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state 424 * @phba: adapter with EQ 425 * @q: The Event Queue that the host has completed processing for. 426 * @count: Number of elements that have been consumed 427 * @arm: Indicates whether the host wants to arms this CQ. 428 * 429 * This routine will notify the HBA, by ringing the doorbell, that count 430 * number of EQEs have been processed. The @arm parameter indicates whether 431 * the queue should be rearmed when ringing the doorbell. 432 **/ 433 void 434 lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 435 uint32_t count, bool arm) 436 { 437 struct lpfc_register doorbell; 438 439 /* sanity check on queue memory */ 440 if (unlikely(!q || (count == 0 && !arm))) 441 return; 442 443 /* ring doorbell for number popped */ 444 doorbell.word0 = 0; 445 if (arm) 446 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); 447 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); 448 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 449 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 450 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 451 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 452 readl(q->phba->sli4_hba.EQDBregaddr); 453 } 454 455 static void 456 __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 457 struct lpfc_eqe *eqe) 458 { 459 if (!phba->sli4_hba.pc_sli4_params.eqav) 460 bf_set_le32(lpfc_eqe_valid, eqe, 0); 461 462 eq->host_index = ((eq->host_index + 1) % eq->entry_count); 463 464 /* if the index wrapped around, toggle the valid bit */ 465 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) 466 eq->qe_valid = (eq->qe_valid) ? 
0 : 1; 467 } 468 469 static void 470 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 471 { 472 struct lpfc_eqe *eqe; 473 uint32_t count = 0; 474 475 /* walk all the EQ entries and drop on the floor */ 476 eqe = lpfc_sli4_eq_get(eq); 477 while (eqe) { 478 __lpfc_sli4_consume_eqe(phba, eq, eqe); 479 count++; 480 eqe = lpfc_sli4_eq_get(eq); 481 } 482 483 /* Clear and re-arm the EQ */ 484 phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM); 485 } 486 487 static int 488 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) 489 { 490 struct lpfc_eqe *eqe; 491 int count = 0, consumed = 0; 492 493 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) 494 goto rearm_and_exit; 495 496 eqe = lpfc_sli4_eq_get(eq); 497 while (eqe) { 498 lpfc_sli4_hba_handle_eqe(phba, eq, eqe); 499 __lpfc_sli4_consume_eqe(phba, eq, eqe); 500 501 consumed++; 502 if (!(++count % eq->max_proc_limit)) 503 break; 504 505 if (!(count % eq->notify_interval)) { 506 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, 507 LPFC_QUEUE_NOARM); 508 consumed = 0; 509 } 510 511 eqe = lpfc_sli4_eq_get(eq); 512 } 513 eq->EQ_processed += count; 514 515 /* Track the max number of EQEs processed in 1 intr */ 516 if (count > eq->EQ_max_eqe) 517 eq->EQ_max_eqe = count; 518 519 eq->queue_claimed = 0; 520 521 rearm_and_exit: 522 /* Always clear and re-arm the EQ */ 523 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM); 524 525 return count; 526 } 527 528 /** 529 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 530 * @q: The Completion Queue to get the first valid CQE from 531 * 532 * This routine will get the first valid Completion Queue Entry from @q, update 533 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 534 * the Queue (no more work to do), or the Queue is full of CQEs that have been 535 * processed, but not popped back to the HBA then this routine will return NULL. 536 **/ 537 static struct lpfc_cqe * 538 lpfc_sli4_cq_get(struct lpfc_queue *q) 539 { 540 struct lpfc_cqe *cqe; 541 542 /* sanity check on queue memory */ 543 if (unlikely(!q)) 544 return NULL; 545 cqe = lpfc_sli4_qe(q, q->host_index); 546 547 /* If the next CQE is not valid then we are done */ 548 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) 549 return NULL; 550 551 /* 552 * insert barrier for instruction interlock : data from the hardware 553 * must have the valid bit checked before it can be copied and acted 554 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative 555 * instructions allowing action on content before valid bit checked, 556 * add barrier here as well. May not be needed as "content" is a 557 * single 32-bit entity here (vs multi word structure for cq's). 558 */ 559 mb(); 560 return cqe; 561 } 562 563 static void 564 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 565 struct lpfc_cqe *cqe) 566 { 567 if (!phba->sli4_hba.pc_sli4_params.cqav) 568 bf_set_le32(lpfc_cqe_valid, cqe, 0); 569 570 cq->host_index = ((cq->host_index + 1) % cq->entry_count); 571 572 /* if the index wrapped around, toggle the valid bit */ 573 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) 574 cq->qe_valid = (cq->qe_valid) ? 0 : 1; 575 } 576 577 /** 578 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. 579 * @phba: the adapter with the CQ 580 * @q: The Completion Queue that the host has completed processing for. 
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
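 *
 * A minimal usage sketch (illustrative only; "hrqe" and "drqe" are
 * hypothetical, already-initialized RQEs describing posted buffers):
 *
 *	put_index = lpfc_sli4_rq_put(hq, dq, &hrqe, &drqe);
 *
 * A negative put_index means the buffer pair was not posted.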
651 **/ 652 int 653 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 654 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 655 { 656 struct lpfc_rqe *temp_hrqe; 657 struct lpfc_rqe *temp_drqe; 658 struct lpfc_register doorbell; 659 int hq_put_index; 660 int dq_put_index; 661 662 /* sanity check on queue memory */ 663 if (unlikely(!hq) || unlikely(!dq)) 664 return -ENOMEM; 665 hq_put_index = hq->host_index; 666 dq_put_index = dq->host_index; 667 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index); 668 temp_drqe = lpfc_sli4_qe(dq, dq_put_index); 669 670 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 671 return -EINVAL; 672 if (hq_put_index != dq_put_index) 673 return -EINVAL; 674 /* If the host has not yet processed the next entry then we are done */ 675 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) 676 return -EBUSY; 677 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 678 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 679 680 /* Update the host index to point to the next slot */ 681 hq->host_index = ((hq_put_index + 1) % hq->entry_count); 682 dq->host_index = ((dq_put_index + 1) % dq->entry_count); 683 hq->RQ_buf_posted++; 684 685 /* Ring The Header Receive Queue Doorbell */ 686 if (!(hq->host_index % hq->notify_interval)) { 687 doorbell.word0 = 0; 688 if (hq->db_format == LPFC_DB_RING_FORMAT) { 689 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 690 hq->notify_interval); 691 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 692 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 693 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 694 hq->notify_interval); 695 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 696 hq->host_index); 697 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 698 } else { 699 return -EINVAL; 700 } 701 writel(doorbell.word0, hq->db_regaddr); 702 } 703 return hq_put_index; 704 } 705 706 /** 707 * lpfc_sli4_rq_release - Updates internal hba index for RQ 708 * @q: The Header Receive Queue to operate on. 709 * 710 * This routine will update the HBA index of a queue to reflect consumption of 711 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 712 * consumed an entry the host calls this function to update the queue's 713 * internal pointers. This routine returns the number of entries that were 714 * consumed by the HBA. 715 **/ 716 static uint32_t 717 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 718 { 719 /* sanity check on queue memory */ 720 if (unlikely(!hq) || unlikely(!dq)) 721 return 0; 722 723 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 724 return 0; 725 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 726 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 727 return 1; 728 } 729 730 /** 731 * lpfc_cmd_iocb - Get next command iocb entry in the ring 732 * @phba: Pointer to HBA context object. 733 * @pring: Pointer to driver SLI ring object. 734 * 735 * This function returns pointer to next command iocb entry 736 * in the command ring. The caller must hold hbalock to prevent 737 * other threads consume the next command iocb. 738 * SLI-2/SLI-3 provide different sized iocbs. 739 **/ 740 static inline IOCB_t * 741 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 742 { 743 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 744 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 745 } 746 747 /** 748 * lpfc_resp_iocb - Get next response iocb entry in the ring 749 * @phba: Pointer to HBA context object. 
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
852 */ 853 if ((!ndlp) && rrq->ndlp) 854 ndlp = rrq->ndlp; 855 856 if (!ndlp) 857 goto out; 858 859 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 860 rrq->send_rrq = 0; 861 rrq->xritag = 0; 862 rrq->rrq_stop_time = 0; 863 } 864 out: 865 mempool_free(rrq, phba->rrq_pool); 866 } 867 868 /** 869 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 870 * @phba: Pointer to HBA context object. 871 * 872 * This function is called with hbalock held. This function 873 * Checks if stop_time (ratov from setting rrq active) has 874 * been reached, if it has and the send_rrq flag is set then 875 * it will call lpfc_send_rrq. If the send_rrq flag is not set 876 * then it will just call the routine to clear the rrq and 877 * free the rrq resource. 878 * The timer is set to the next rrq that is going to expire before 879 * leaving the routine. 880 * 881 **/ 882 void 883 lpfc_handle_rrq_active(struct lpfc_hba *phba) 884 { 885 struct lpfc_node_rrq *rrq; 886 struct lpfc_node_rrq *nextrrq; 887 unsigned long next_time; 888 unsigned long iflags; 889 LIST_HEAD(send_rrq); 890 891 spin_lock_irqsave(&phba->hbalock, iflags); 892 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 893 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 894 list_for_each_entry_safe(rrq, nextrrq, 895 &phba->active_rrq_list, list) { 896 if (time_after(jiffies, rrq->rrq_stop_time)) 897 list_move(&rrq->list, &send_rrq); 898 else if (time_before(rrq->rrq_stop_time, next_time)) 899 next_time = rrq->rrq_stop_time; 900 } 901 spin_unlock_irqrestore(&phba->hbalock, iflags); 902 if ((!list_empty(&phba->active_rrq_list)) && 903 (!(phba->pport->load_flag & FC_UNLOADING))) 904 mod_timer(&phba->rrq_tmr, next_time); 905 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 906 list_del(&rrq->list); 907 if (!rrq->send_rrq) { 908 /* this call will free the rrq */ 909 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 910 } else if (lpfc_send_rrq(phba, rrq)) { 911 /* if we send the rrq then the completion handler 912 * will clear the bit in the xribitmap. 913 */ 914 lpfc_clr_rrq_active(phba, rrq->xritag, 915 rrq); 916 } 917 } 918 } 919 920 /** 921 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 922 * @vport: Pointer to vport context object. 923 * @xri: The xri used in the exchange. 924 * @did: The targets DID for this exchange. 925 * 926 * returns NULL = rrq not found in the phba->active_rrq_list. 927 * rrq = rrq for this xri and target. 928 **/ 929 struct lpfc_node_rrq * 930 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 931 { 932 struct lpfc_hba *phba = vport->phba; 933 struct lpfc_node_rrq *rrq; 934 struct lpfc_node_rrq *nextrrq; 935 unsigned long iflags; 936 937 if (phba->sli_rev != LPFC_SLI_REV4) 938 return NULL; 939 spin_lock_irqsave(&phba->hbalock, iflags); 940 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 941 if (rrq->vport == vport && rrq->xritag == xri && 942 rrq->nlp_DID == did){ 943 list_del(&rrq->list); 944 spin_unlock_irqrestore(&phba->hbalock, iflags); 945 return rrq; 946 } 947 } 948 spin_unlock_irqrestore(&phba->hbalock, iflags); 949 return NULL; 950 } 951 952 /** 953 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 954 * @vport: Pointer to vport context object. 955 * @ndlp: Pointer to the lpfc_node_list structure. 956 * If ndlp is NULL Remove all active RRQs for this vport from the 957 * phba->active_rrq_list and clear the rrq. 958 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
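 *
 * For example, a teardown path that must drop every RRQ still tracked for
 * the vport would call (sketch):
 *
 *	lpfc_cleanup_vports_rrqs(vport, NULL);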
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * Returns 0 if the rrq was activated for this xri, or
 * < 0 if there was no memory or the ndlp was invalid.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
1049 */ 1050 if (NLP_CHK_FREE_REQ(ndlp)) 1051 goto out; 1052 1053 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 1054 goto out; 1055 1056 if (!ndlp->active_rrqs_xri_bitmap) 1057 goto out; 1058 1059 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 1060 goto out; 1061 1062 spin_unlock_irqrestore(&phba->hbalock, iflags); 1063 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 1064 if (!rrq) { 1065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1066 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 1067 " DID:0x%x Send:%d\n", 1068 xritag, rxid, ndlp->nlp_DID, send_rrq); 1069 return -EINVAL; 1070 } 1071 if (phba->cfg_enable_rrq == 1) 1072 rrq->send_rrq = send_rrq; 1073 else 1074 rrq->send_rrq = 0; 1075 rrq->xritag = xritag; 1076 rrq->rrq_stop_time = jiffies + 1077 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 1078 rrq->ndlp = ndlp; 1079 rrq->nlp_DID = ndlp->nlp_DID; 1080 rrq->vport = ndlp->vport; 1081 rrq->rxid = rxid; 1082 spin_lock_irqsave(&phba->hbalock, iflags); 1083 empty = list_empty(&phba->active_rrq_list); 1084 list_add_tail(&rrq->list, &phba->active_rrq_list); 1085 phba->hba_flag |= HBA_RRQ_ACTIVE; 1086 if (empty) 1087 lpfc_worker_wake_up(phba); 1088 spin_unlock_irqrestore(&phba->hbalock, iflags); 1089 return 0; 1090 out: 1091 spin_unlock_irqrestore(&phba->hbalock, iflags); 1092 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1093 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 1094 " DID:0x%x Send:%d\n", 1095 xritag, rxid, ndlp->nlp_DID, send_rrq); 1096 return -EINVAL; 1097 } 1098 1099 /** 1100 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool 1101 * @phba: Pointer to HBA context object. 1102 * @piocb: Pointer to the iocbq. 1103 * 1104 * The driver calls this function with either the nvme ls ring lock 1105 * or the fc els ring lock held depending on the iocb usage. This function 1106 * gets a new driver sglq object from the sglq list. If the list is not empty 1107 * then it is successful, it returns pointer to the newly allocated sglq 1108 * object else it returns NULL. 1109 **/ 1110 static struct lpfc_sglq * 1111 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1112 { 1113 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; 1114 struct lpfc_sglq *sglq = NULL; 1115 struct lpfc_sglq *start_sglq = NULL; 1116 struct lpfc_io_buf *lpfc_cmd; 1117 struct lpfc_nodelist *ndlp; 1118 struct lpfc_sli_ring *pring = NULL; 1119 int found = 0; 1120 1121 if (piocbq->iocb_flag & LPFC_IO_NVME_LS) 1122 pring = phba->sli4_hba.nvmels_wq->pring; 1123 else 1124 pring = lpfc_phba_elsring(phba); 1125 1126 lockdep_assert_held(&pring->ring_lock); 1127 1128 if (piocbq->iocb_flag & LPFC_IO_FCP) { 1129 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; 1130 ndlp = lpfc_cmd->rdata->pnode; 1131 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 1132 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { 1133 ndlp = piocbq->context_un.ndlp; 1134 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { 1135 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) 1136 ndlp = NULL; 1137 else 1138 ndlp = piocbq->context_un.ndlp; 1139 } else { 1140 ndlp = piocbq->context1; 1141 } 1142 1143 spin_lock(&phba->sli4_hba.sgl_list_lock); 1144 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); 1145 start_sglq = sglq; 1146 while (!found) { 1147 if (!sglq) 1148 break; 1149 if (ndlp && ndlp->active_rrqs_xri_bitmap && 1150 test_bit(sglq->sli4_lxritag, 1151 ndlp->active_rrqs_xri_bitmap)) { 1152 /* This xri has an rrq outstanding for this DID. 
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
1241 **/ 1242 static void 1243 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1244 { 1245 struct lpfc_sglq *sglq; 1246 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1247 unsigned long iflag = 0; 1248 struct lpfc_sli_ring *pring; 1249 1250 lockdep_assert_held(&phba->hbalock); 1251 1252 if (iocbq->sli4_xritag == NO_XRI) 1253 sglq = NULL; 1254 else 1255 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1256 1257 1258 if (sglq) { 1259 if (iocbq->iocb_flag & LPFC_IO_NVMET) { 1260 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1261 iflag); 1262 sglq->state = SGL_FREED; 1263 sglq->ndlp = NULL; 1264 list_add_tail(&sglq->list, 1265 &phba->sli4_hba.lpfc_nvmet_sgl_list); 1266 spin_unlock_irqrestore( 1267 &phba->sli4_hba.sgl_list_lock, iflag); 1268 goto out; 1269 } 1270 1271 pring = phba->sli4_hba.els_wq->pring; 1272 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1273 (sglq->state != SGL_XRI_ABORTED)) { 1274 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1275 iflag); 1276 list_add(&sglq->list, 1277 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1278 spin_unlock_irqrestore( 1279 &phba->sli4_hba.sgl_list_lock, iflag); 1280 } else { 1281 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1282 iflag); 1283 sglq->state = SGL_FREED; 1284 sglq->ndlp = NULL; 1285 list_add_tail(&sglq->list, 1286 &phba->sli4_hba.lpfc_els_sgl_list); 1287 spin_unlock_irqrestore( 1288 &phba->sli4_hba.sgl_list_lock, iflag); 1289 1290 /* Check if TXQ queue needs to be serviced */ 1291 if (!list_empty(&pring->txq)) 1292 lpfc_worker_wake_up(phba); 1293 } 1294 } 1295 1296 out: 1297 /* 1298 * Clean all volatile data fields, preserve iotag and node struct. 1299 */ 1300 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1301 iocbq->sli4_lxritag = NO_XRI; 1302 iocbq->sli4_xritag = NO_XRI; 1303 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | 1304 LPFC_IO_NVME_LS); 1305 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1306 } 1307 1308 1309 /** 1310 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1311 * @phba: Pointer to HBA context object. 1312 * @iocbq: Pointer to driver iocb object. 1313 * 1314 * This function is called with hbalock held to release driver 1315 * iocb object to the iocb pool. The iotag in the iocb object 1316 * does not change for each use of the iocb object. This function 1317 * clears all other fields of the iocb object when it is freed. 1318 **/ 1319 static void 1320 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1321 { 1322 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1323 1324 lockdep_assert_held(&phba->hbalock); 1325 1326 /* 1327 * Clean all volatile data fields, preserve iotag and node struct. 1328 */ 1329 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1330 iocbq->sli4_xritag = NO_XRI; 1331 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1332 } 1333 1334 /** 1335 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1336 * @phba: Pointer to HBA context object. 1337 * @iocbq: Pointer to driver iocb object. 1338 * 1339 * This function is called with hbalock held to release driver 1340 * iocb object to the iocb pool. The iotag in the iocb object 1341 * does not change for each use of the iocb object. This function 1342 * clears all other fields of the iocb object when it is freed. 
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB if it is a solicited iocb completion
 * LPFC_ABORT_IOCB if it is an abort iocb
 * LPFC_UNSOL_IOCB if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
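 *
 * For example, CMD_ELS_REQUEST64_CR maps to LPFC_SOL_IOCB, CMD_ABORT_XRI_CN
 * maps to LPFC_ABORT_IOCB, and CMD_RCV_ELS_REQ64_CX maps to LPFC_UNSOL_IOCB
 * (see the switch statement below).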
1419 **/ 1420 static lpfc_iocb_type 1421 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1422 { 1423 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1424 1425 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1426 return 0; 1427 1428 switch (iocb_cmnd) { 1429 case CMD_XMIT_SEQUENCE_CR: 1430 case CMD_XMIT_SEQUENCE_CX: 1431 case CMD_XMIT_BCAST_CN: 1432 case CMD_XMIT_BCAST_CX: 1433 case CMD_ELS_REQUEST_CR: 1434 case CMD_ELS_REQUEST_CX: 1435 case CMD_CREATE_XRI_CR: 1436 case CMD_CREATE_XRI_CX: 1437 case CMD_GET_RPI_CN: 1438 case CMD_XMIT_ELS_RSP_CX: 1439 case CMD_GET_RPI_CR: 1440 case CMD_FCP_IWRITE_CR: 1441 case CMD_FCP_IWRITE_CX: 1442 case CMD_FCP_IREAD_CR: 1443 case CMD_FCP_IREAD_CX: 1444 case CMD_FCP_ICMND_CR: 1445 case CMD_FCP_ICMND_CX: 1446 case CMD_FCP_TSEND_CX: 1447 case CMD_FCP_TRSP_CX: 1448 case CMD_FCP_TRECEIVE_CX: 1449 case CMD_FCP_AUTO_TRSP_CX: 1450 case CMD_ADAPTER_MSG: 1451 case CMD_ADAPTER_DUMP: 1452 case CMD_XMIT_SEQUENCE64_CR: 1453 case CMD_XMIT_SEQUENCE64_CX: 1454 case CMD_XMIT_BCAST64_CN: 1455 case CMD_XMIT_BCAST64_CX: 1456 case CMD_ELS_REQUEST64_CR: 1457 case CMD_ELS_REQUEST64_CX: 1458 case CMD_FCP_IWRITE64_CR: 1459 case CMD_FCP_IWRITE64_CX: 1460 case CMD_FCP_IREAD64_CR: 1461 case CMD_FCP_IREAD64_CX: 1462 case CMD_FCP_ICMND64_CR: 1463 case CMD_FCP_ICMND64_CX: 1464 case CMD_FCP_TSEND64_CX: 1465 case CMD_FCP_TRSP64_CX: 1466 case CMD_FCP_TRECEIVE64_CX: 1467 case CMD_GEN_REQUEST64_CR: 1468 case CMD_GEN_REQUEST64_CX: 1469 case CMD_XMIT_ELS_RSP64_CX: 1470 case DSSCMD_IWRITE64_CR: 1471 case DSSCMD_IWRITE64_CX: 1472 case DSSCMD_IREAD64_CR: 1473 case DSSCMD_IREAD64_CX: 1474 type = LPFC_SOL_IOCB; 1475 break; 1476 case CMD_ABORT_XRI_CN: 1477 case CMD_ABORT_XRI_CX: 1478 case CMD_CLOSE_XRI_CN: 1479 case CMD_CLOSE_XRI_CX: 1480 case CMD_XRI_ABORTED_CX: 1481 case CMD_ABORT_MXRI64_CN: 1482 case CMD_XMIT_BLS_RSP64_CX: 1483 type = LPFC_ABORT_IOCB; 1484 break; 1485 case CMD_RCV_SEQUENCE_CX: 1486 case CMD_RCV_ELS_REQ_CX: 1487 case CMD_RCV_SEQUENCE64_CX: 1488 case CMD_RCV_ELS_REQ64_CX: 1489 case CMD_ASYNC_STATUS: 1490 case CMD_IOCB_RCV_SEQ64_CX: 1491 case CMD_IOCB_RCV_ELS64_CX: 1492 case CMD_IOCB_RCV_CONT64_CX: 1493 case CMD_IOCB_RET_XRI64_CX: 1494 type = LPFC_UNSOL_IOCB; 1495 break; 1496 case CMD_IOCB_XMIT_MSEQ64_CR: 1497 case CMD_IOCB_XMIT_MSEQ64_CX: 1498 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1499 case CMD_IOCB_RCV_ELS_LIST64_CX: 1500 case CMD_IOCB_CLOSE_EXTENDED_CN: 1501 case CMD_IOCB_ABORT_EXTENDED_CN: 1502 case CMD_IOCB_RET_HBQE64_CN: 1503 case CMD_IOCB_FCP_IBIDIR64_CR: 1504 case CMD_IOCB_FCP_IBIDIR64_CX: 1505 case CMD_IOCB_FCP_ITASKMGT64_CX: 1506 case CMD_IOCB_LOGENTRY_CN: 1507 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1508 printk("%s - Unhandled SLI-3 Command x%x\n", 1509 __func__, iocb_cmnd); 1510 type = LPFC_UNKNOWN_IOCB; 1511 break; 1512 default: 1513 type = LPFC_UNKNOWN_IOCB; 1514 break; 1515 } 1516 1517 return type; 1518 } 1519 1520 /** 1521 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1522 * @phba: Pointer to HBA context object. 1523 * 1524 * This function is called from SLI initialization code 1525 * to configure every ring of the HBA's SLI interface. The 1526 * caller is not required to hold any lock. This function issues 1527 * a config_ring mailbox command for each ring. 1528 * This function returns zero if successful else returns a negative 1529 * error code. 
1530 **/ 1531 static int 1532 lpfc_sli_ring_map(struct lpfc_hba *phba) 1533 { 1534 struct lpfc_sli *psli = &phba->sli; 1535 LPFC_MBOXQ_t *pmb; 1536 MAILBOX_t *pmbox; 1537 int i, rc, ret = 0; 1538 1539 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1540 if (!pmb) 1541 return -ENOMEM; 1542 pmbox = &pmb->u.mb; 1543 phba->link_state = LPFC_INIT_MBX_CMDS; 1544 for (i = 0; i < psli->num_rings; i++) { 1545 lpfc_config_ring(phba, i, pmb); 1546 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1547 if (rc != MBX_SUCCESS) { 1548 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1549 "0446 Adapter failed to init (%d), " 1550 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1551 "ring %d\n", 1552 rc, pmbox->mbxCommand, 1553 pmbox->mbxStatus, i); 1554 phba->link_state = LPFC_HBA_ERROR; 1555 ret = -ENXIO; 1556 break; 1557 } 1558 } 1559 mempool_free(pmb, phba->mbox_mem_pool); 1560 return ret; 1561 } 1562 1563 /** 1564 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1565 * @phba: Pointer to HBA context object. 1566 * @pring: Pointer to driver SLI ring object. 1567 * @piocb: Pointer to the driver iocb object. 1568 * 1569 * The driver calls this function with the hbalock held for SLI3 ports or 1570 * the ring lock held for SLI4 ports. The function adds the 1571 * new iocb to txcmplq of the given ring. This function always returns 1572 * 0. If this function is called for ELS ring, this function checks if 1573 * there is a vport associated with the ELS command. This function also 1574 * starts els_tmofunc timer if this is an ELS command. 1575 **/ 1576 static int 1577 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1578 struct lpfc_iocbq *piocb) 1579 { 1580 if (phba->sli_rev == LPFC_SLI_REV4) 1581 lockdep_assert_held(&pring->ring_lock); 1582 else 1583 lockdep_assert_held(&phba->hbalock); 1584 1585 BUG_ON(!piocb); 1586 1587 list_add_tail(&piocb->list, &pring->txcmplq); 1588 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1589 pring->txcmplq_cnt++; 1590 1591 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1592 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1593 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1594 BUG_ON(!piocb->vport); 1595 if (!(piocb->vport->load_flag & FC_UNLOADING)) 1596 mod_timer(&piocb->vport->els_tmofunc, 1597 jiffies + 1598 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1599 } 1600 1601 return 0; 1602 } 1603 1604 /** 1605 * lpfc_sli_ringtx_get - Get first element of the txq 1606 * @phba: Pointer to HBA context object. 1607 * @pring: Pointer to driver SLI ring object. 1608 * 1609 * This function is called with hbalock held to get next 1610 * iocb in txq of the given ring. If there is any iocb in 1611 * the txq, the function returns first iocb in the list after 1612 * removing the iocb from the list, else it returns NULL. 1613 **/ 1614 struct lpfc_iocbq * 1615 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1616 { 1617 struct lpfc_iocbq *cmd_iocb; 1618 1619 lockdep_assert_held(&phba->hbalock); 1620 1621 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1622 return cmd_iocb; 1623 } 1624 1625 /** 1626 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1627 * @phba: Pointer to HBA context object. 1628 * @pring: Pointer to driver SLI ring object. 1629 * 1630 * This function is called with hbalock held and the caller must post the 1631 * iocb without releasing the lock. If the caller releases the lock, 1632 * iocb slot returned by the function is not guaranteed to be available. 
1633 * The function returns pointer to the next available iocb slot if there 1634 * is available slot in the ring, else it returns NULL. 1635 * If the get index of the ring is ahead of the put index, the function 1636 * will post an error attention event to the worker thread to take the 1637 * HBA to offline state. 1638 **/ 1639 static IOCB_t * 1640 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1641 { 1642 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1643 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1644 1645 lockdep_assert_held(&phba->hbalock); 1646 1647 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1648 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1649 pring->sli.sli3.next_cmdidx = 0; 1650 1651 if (unlikely(pring->sli.sli3.local_getidx == 1652 pring->sli.sli3.next_cmdidx)) { 1653 1654 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1655 1656 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1657 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1658 "0315 Ring %d issue: portCmdGet %d " 1659 "is bigger than cmd ring %d\n", 1660 pring->ringno, 1661 pring->sli.sli3.local_getidx, 1662 max_cmd_idx); 1663 1664 phba->link_state = LPFC_HBA_ERROR; 1665 /* 1666 * All error attention handlers are posted to 1667 * worker thread 1668 */ 1669 phba->work_ha |= HA_ERATT; 1670 phba->work_hs = HS_FFER3; 1671 1672 lpfc_worker_wake_up(phba); 1673 1674 return NULL; 1675 } 1676 1677 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1678 return NULL; 1679 } 1680 1681 return lpfc_cmd_iocb(phba, pring); 1682 } 1683 1684 /** 1685 * lpfc_sli_next_iotag - Get an iotag for the iocb 1686 * @phba: Pointer to HBA context object. 1687 * @iocbq: Pointer to driver iocb object. 1688 * 1689 * This function gets an iotag for the iocb. If there is no unused iotag and 1690 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1691 * array and assigns a new iotag. 1692 * The function returns the allocated iotag if successful, else returns zero. 1693 * Zero is not a valid iotag. 1694 * The caller is not required to hold any lock. 
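 *
 * A minimal allocation sketch (illustrative only; how the failure is
 * handled is up to the caller):
 *
 *	iotag = lpfc_sli_next_iotag(phba, piocb);
 *	if (!iotag)
 *		return IOCB_ERROR;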
1695 **/ 1696 uint16_t 1697 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1698 { 1699 struct lpfc_iocbq **new_arr; 1700 struct lpfc_iocbq **old_arr; 1701 size_t new_len; 1702 struct lpfc_sli *psli = &phba->sli; 1703 uint16_t iotag; 1704 1705 spin_lock_irq(&phba->hbalock); 1706 iotag = psli->last_iotag; 1707 if(++iotag < psli->iocbq_lookup_len) { 1708 psli->last_iotag = iotag; 1709 psli->iocbq_lookup[iotag] = iocbq; 1710 spin_unlock_irq(&phba->hbalock); 1711 iocbq->iotag = iotag; 1712 return iotag; 1713 } else if (psli->iocbq_lookup_len < (0xffff 1714 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1715 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1716 spin_unlock_irq(&phba->hbalock); 1717 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1718 GFP_KERNEL); 1719 if (new_arr) { 1720 spin_lock_irq(&phba->hbalock); 1721 old_arr = psli->iocbq_lookup; 1722 if (new_len <= psli->iocbq_lookup_len) { 1723 /* highly unprobable case */ 1724 kfree(new_arr); 1725 iotag = psli->last_iotag; 1726 if(++iotag < psli->iocbq_lookup_len) { 1727 psli->last_iotag = iotag; 1728 psli->iocbq_lookup[iotag] = iocbq; 1729 spin_unlock_irq(&phba->hbalock); 1730 iocbq->iotag = iotag; 1731 return iotag; 1732 } 1733 spin_unlock_irq(&phba->hbalock); 1734 return 0; 1735 } 1736 if (psli->iocbq_lookup) 1737 memcpy(new_arr, old_arr, 1738 ((psli->last_iotag + 1) * 1739 sizeof (struct lpfc_iocbq *))); 1740 psli->iocbq_lookup = new_arr; 1741 psli->iocbq_lookup_len = new_len; 1742 psli->last_iotag = iotag; 1743 psli->iocbq_lookup[iotag] = iocbq; 1744 spin_unlock_irq(&phba->hbalock); 1745 iocbq->iotag = iotag; 1746 kfree(old_arr); 1747 return iotag; 1748 } 1749 } else 1750 spin_unlock_irq(&phba->hbalock); 1751 1752 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1753 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1754 psli->last_iotag); 1755 1756 return 0; 1757 } 1758 1759 /** 1760 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1761 * @phba: Pointer to HBA context object. 1762 * @pring: Pointer to driver SLI ring object. 1763 * @iocb: Pointer to iocb slot in the ring. 1764 * @nextiocb: Pointer to driver iocb object which need to be 1765 * posted to firmware. 1766 * 1767 * This function is called with hbalock held to post a new iocb to 1768 * the firmware. This function copies the new iocb to ring iocb slot and 1769 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1770 * a completion call back for this iocb else the function will free the 1771 * iocb object. 1772 **/ 1773 static void 1774 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1775 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1776 { 1777 lockdep_assert_held(&phba->hbalock); 1778 /* 1779 * Set up an iotag 1780 */ 1781 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1782 1783 1784 if (pring->ringno == LPFC_ELS_RING) { 1785 lpfc_debugfs_slow_ring_trc(phba, 1786 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1787 *(((uint32_t *) &nextiocb->iocb) + 4), 1788 *(((uint32_t *) &nextiocb->iocb) + 6), 1789 *(((uint32_t *) &nextiocb->iocb) + 7)); 1790 } 1791 1792 /* 1793 * Issue iocb command to adapter 1794 */ 1795 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1796 wmb(); 1797 pring->stats.iocb_cmd++; 1798 1799 /* 1800 * If there is no completion routine to call, we can release the 1801 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1802 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1803 */ 1804 if (nextiocb->iocb_cmpl) 1805 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1806 else 1807 __lpfc_sli_release_iocbq(phba, nextiocb); 1808 1809 /* 1810 * Let the HBA know what IOCB slot will be the next one the 1811 * driver will put a command into. 1812 */ 1813 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1814 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1815 } 1816 1817 /** 1818 * lpfc_sli_update_full_ring - Update the chip attention register 1819 * @phba: Pointer to HBA context object. 1820 * @pring: Pointer to driver SLI ring object. 1821 * 1822 * The caller is not required to hold any lock for calling this function. 1823 * This function updates the chip attention bits for the ring to inform firmware 1824 * that there are pending work to be done for this ring and requests an 1825 * interrupt when there is space available in the ring. This function is 1826 * called when the driver is unable to post more iocbs to the ring due 1827 * to unavailability of space in the ring. 1828 **/ 1829 static void 1830 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1831 { 1832 int ringno = pring->ringno; 1833 1834 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1835 1836 wmb(); 1837 1838 /* 1839 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1840 * The HBA will tell us when an IOCB entry is available. 1841 */ 1842 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1843 readl(phba->CAregaddr); /* flush */ 1844 1845 pring->stats.iocb_cmd_full++; 1846 } 1847 1848 /** 1849 * lpfc_sli_update_ring - Update chip attention register 1850 * @phba: Pointer to HBA context object. 1851 * @pring: Pointer to driver SLI ring object. 1852 * 1853 * This function updates the chip attention register bit for the 1854 * given ring to inform HBA that there is more work to be done 1855 * in this ring. The caller is not required to hold any lock. 1856 **/ 1857 static void 1858 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1859 { 1860 int ringno = pring->ringno; 1861 1862 /* 1863 * Tell the HBA that there is work to do in this ring. 1864 */ 1865 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1866 wmb(); 1867 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1868 readl(phba->CAregaddr); /* flush */ 1869 } 1870 } 1871 1872 /** 1873 * lpfc_sli_resume_iocb - Process iocbs in the txq 1874 * @phba: Pointer to HBA context object. 1875 * @pring: Pointer to driver SLI ring object. 1876 * 1877 * This function is called with hbalock held to post pending iocbs 1878 * in the txq to the firmware. This function is called when driver 1879 * detects space available in the ring. 1880 **/ 1881 static void 1882 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1883 { 1884 IOCB_t *iocb; 1885 struct lpfc_iocbq *nextiocb; 1886 1887 lockdep_assert_held(&phba->hbalock); 1888 1889 /* 1890 * Check to see if: 1891 * (a) there is anything on the txq to send 1892 * (b) link is up 1893 * (c) link attention events can be processed (fcp ring only) 1894 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1895 */ 1896 1897 if (lpfc_is_link_up(phba) && 1898 (!list_empty(&pring->txq)) && 1899 (pring->ringno != LPFC_FCP_RING || 1900 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1901 1902 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1903 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1904 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1905 1906 if (iocb) 1907 lpfc_sli_update_ring(phba, pring); 1908 else 1909 lpfc_sli_update_full_ring(phba, pring); 1910 } 1911 1912 return; 1913 } 1914 1915 /** 1916 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1917 * @phba: Pointer to HBA context object. 1918 * @hbqno: HBQ number. 1919 * 1920 * This function is called with hbalock held to get the next 1921 * available slot for the given HBQ. If there is free slot 1922 * available for the HBQ it will return pointer to the next available 1923 * HBQ entry else it will return NULL. 1924 **/ 1925 static struct lpfc_hbq_entry * 1926 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1927 { 1928 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1929 1930 lockdep_assert_held(&phba->hbalock); 1931 1932 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1933 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1934 hbqp->next_hbqPutIdx = 0; 1935 1936 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1937 uint32_t raw_index = phba->hbq_get[hbqno]; 1938 uint32_t getidx = le32_to_cpu(raw_index); 1939 1940 hbqp->local_hbqGetIdx = getidx; 1941 1942 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1943 lpfc_printf_log(phba, KERN_ERR, 1944 LOG_SLI | LOG_VPORT, 1945 "1802 HBQ %d: local_hbqGetIdx " 1946 "%u is > than hbqp->entry_count %u\n", 1947 hbqno, hbqp->local_hbqGetIdx, 1948 hbqp->entry_count); 1949 1950 phba->link_state = LPFC_HBA_ERROR; 1951 return NULL; 1952 } 1953 1954 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1955 return NULL; 1956 } 1957 1958 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1959 hbqp->hbqPutIdx; 1960 } 1961 1962 /** 1963 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1964 * @phba: Pointer to HBA context object. 1965 * 1966 * This function is called with no lock held to free all the 1967 * hbq buffers while uninitializing the SLI interface. It also 1968 * frees the HBQ buffers returned by the firmware but not yet 1969 * processed by the upper layers. 1970 **/ 1971 void 1972 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1973 { 1974 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1975 struct hbq_dmabuf *hbq_buf; 1976 unsigned long flags; 1977 int i, hbq_count; 1978 1979 hbq_count = lpfc_sli_hbq_count(); 1980 /* Return all memory used by all HBQs */ 1981 spin_lock_irqsave(&phba->hbalock, flags); 1982 for (i = 0; i < hbq_count; ++i) { 1983 list_for_each_entry_safe(dmabuf, next_dmabuf, 1984 &phba->hbqs[i].hbq_buffer_list, list) { 1985 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1986 list_del(&hbq_buf->dbuf.list); 1987 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1988 } 1989 phba->hbqs[i].buffer_count = 0; 1990 } 1991 1992 /* Mark the HBQs not in use */ 1993 phba->hbq_in_use = 0; 1994 spin_unlock_irqrestore(&phba->hbalock, flags); 1995 } 1996 1997 /** 1998 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1999 * @phba: Pointer to HBA context object. 2000 * @hbqno: HBQ number. 2001 * @hbq_buf: Pointer to HBQ buffer. 2002 * 2003 * This function is called with the hbalock held to post a 2004 * hbq buffer to the firmware. If the function finds an empty 2005 * slot in the HBQ, it will post the buffer. 
The function will return 2006 * pointer to the hbq entry if it successfully post the buffer 2007 * else it will return NULL. 2008 **/ 2009 static int 2010 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 2011 struct hbq_dmabuf *hbq_buf) 2012 { 2013 lockdep_assert_held(&phba->hbalock); 2014 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 2015 } 2016 2017 /** 2018 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 2019 * @phba: Pointer to HBA context object. 2020 * @hbqno: HBQ number. 2021 * @hbq_buf: Pointer to HBQ buffer. 2022 * 2023 * This function is called with the hbalock held to post a hbq buffer to the 2024 * firmware. If the function finds an empty slot in the HBQ, it will post the 2025 * buffer and place it on the hbq_buffer_list. The function will return zero if 2026 * it successfully post the buffer else it will return an error. 2027 **/ 2028 static int 2029 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 2030 struct hbq_dmabuf *hbq_buf) 2031 { 2032 struct lpfc_hbq_entry *hbqe; 2033 dma_addr_t physaddr = hbq_buf->dbuf.phys; 2034 2035 lockdep_assert_held(&phba->hbalock); 2036 /* Get next HBQ entry slot to use */ 2037 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 2038 if (hbqe) { 2039 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2040 2041 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 2042 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 2043 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; 2044 hbqe->bde.tus.f.bdeFlags = 0; 2045 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 2046 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 2047 /* Sync SLIM */ 2048 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 2049 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 2050 /* flush */ 2051 readl(phba->hbq_put + hbqno); 2052 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 2053 return 0; 2054 } else 2055 return -ENOMEM; 2056 } 2057 2058 /** 2059 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 2060 * @phba: Pointer to HBA context object. 2061 * @hbqno: HBQ number. 2062 * @hbq_buf: Pointer to HBQ buffer. 2063 * 2064 * This function is called with the hbalock held to post an RQE to the SLI4 2065 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 2066 * the hbq_buffer_list and return zero, otherwise it will return an error. 2067 **/ 2068 static int 2069 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 2070 struct hbq_dmabuf *hbq_buf) 2071 { 2072 int rc; 2073 struct lpfc_rqe hrqe; 2074 struct lpfc_rqe drqe; 2075 struct lpfc_queue *hrq; 2076 struct lpfc_queue *drq; 2077 2078 if (hbqno != LPFC_ELS_HBQ) 2079 return 1; 2080 hrq = phba->sli4_hba.hdr_rq; 2081 drq = phba->sli4_hba.dat_rq; 2082 2083 lockdep_assert_held(&phba->hbalock); 2084 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 2085 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 2086 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 2087 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 2088 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 2089 if (rc < 0) 2090 return rc; 2091 hbq_buf->tag = (rc | (hbqno << 16)); 2092 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 2093 return 0; 2094 } 2095 2096 /* HBQ for ELS and CT traffic. 
*/ 2097 static struct lpfc_hbq_init lpfc_els_hbq = { 2098 .rn = 1, 2099 .entry_count = 256, 2100 .mask_count = 0, 2101 .profile = 0, 2102 .ring_mask = (1 << LPFC_ELS_RING), 2103 .buffer_count = 0, 2104 .init_count = 40, 2105 .add_count = 40, 2106 }; 2107 2108 /* Array of HBQs */ 2109 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2110 &lpfc_els_hbq, 2111 }; 2112 2113 /** 2114 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2115 * @phba: Pointer to HBA context object. 2116 * @hbqno: HBQ number. 2117 * @count: Number of HBQ buffers to be posted. 2118 * 2119 * This function is called with no lock held to post more hbq buffers to the 2120 * given HBQ. The function returns the number of HBQ buffers successfully 2121 * posted. 2122 **/ 2123 static int 2124 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2125 { 2126 uint32_t i, posted = 0; 2127 unsigned long flags; 2128 struct hbq_dmabuf *hbq_buffer; 2129 LIST_HEAD(hbq_buf_list); 2130 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2131 return 0; 2132 2133 if ((phba->hbqs[hbqno].buffer_count + count) > 2134 lpfc_hbq_defs[hbqno]->entry_count) 2135 count = lpfc_hbq_defs[hbqno]->entry_count - 2136 phba->hbqs[hbqno].buffer_count; 2137 if (!count) 2138 return 0; 2139 /* Allocate HBQ entries */ 2140 for (i = 0; i < count; i++) { 2141 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2142 if (!hbq_buffer) 2143 break; 2144 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2145 } 2146 /* Check whether HBQ is still in use */ 2147 spin_lock_irqsave(&phba->hbalock, flags); 2148 if (!phba->hbq_in_use) 2149 goto err; 2150 while (!list_empty(&hbq_buf_list)) { 2151 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2152 dbuf.list); 2153 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2154 (hbqno << 16)); 2155 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2156 phba->hbqs[hbqno].buffer_count++; 2157 posted++; 2158 } else 2159 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2160 } 2161 spin_unlock_irqrestore(&phba->hbalock, flags); 2162 return posted; 2163 err: 2164 spin_unlock_irqrestore(&phba->hbalock, flags); 2165 while (!list_empty(&hbq_buf_list)) { 2166 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2167 dbuf.list); 2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2169 } 2170 return 0; 2171 } 2172 2173 /** 2174 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2175 * @phba: Pointer to HBA context object. 2176 * @qno: HBQ number. 2177 * 2178 * This function posts more buffers to the HBQ. This function 2179 * is called with no lock held. The function returns the number of HBQ entries 2180 * successfully allocated. 2181 **/ 2182 int 2183 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2184 { 2185 if (phba->sli_rev == LPFC_SLI_REV4) 2186 return 0; 2187 else 2188 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2189 lpfc_hbq_defs[qno]->add_count); 2190 } 2191 2192 /** 2193 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2194 * @phba: Pointer to HBA context object. 2195 * @qno: HBQ queue number. 2196 * 2197 * This function is called from SLI initialization code path with 2198 * no lock held to post initial HBQ buffers to firmware. The 2199 * function returns the number of HBQ entries successfully allocated. 
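 *
 * Both the initial and incremental fill paths funnel into
 * lpfc_sli_hbqbuf_fill_hbqs() above, which stamps every posted buffer with a
 * tag packing the HBQ number into the upper 16 bits and the running buffer
 * count into the lower 16 bits; lpfc_sli_hbqbuf_find() later recovers the HBQ
 * number with (tag >> 16). A minimal standalone sketch of that encoding
 * (illustrative only, not driver code; the explicit index mask is a
 * simplification):
 *
 *        #include <stdint.h>
 *        #include <stdio.h>
 *
 *        static uint32_t hbq_tag_encode(uint32_t hbqno, uint32_t index)
 *        {
 *                return (hbqno << 16) | (index & 0xffff);
 *        }
 *
 *        int main(void)
 *        {
 *                uint32_t tag = hbq_tag_encode(0, 37);
 *
 *                printf("hbqno=%u index=%u\n", tag >> 16, tag & 0xffff);
 *                return 0;
 *        }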
2200 **/ 2201 static int 2202 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2203 { 2204 if (phba->sli_rev == LPFC_SLI_REV4) 2205 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2206 lpfc_hbq_defs[qno]->entry_count); 2207 else 2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2209 lpfc_hbq_defs[qno]->init_count); 2210 } 2211 2212 /** 2213 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2214 * @rb_list: Pointer to the HBQ buffer list to remove the 2215 * first buffer from. 2216 * 2217 * This function removes the first hbq buffer on an hbq list and returns a 2218 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2219 **/ 2220 static struct hbq_dmabuf * 2221 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2222 { 2223 struct lpfc_dmabuf *d_buf; 2224 2225 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2226 if (!d_buf) 2227 return NULL; 2228 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2229 } 2230 2231 /** 2232 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2233 * @phba: Pointer to HBA context object. 2234 * @hrq: Pointer to the header receive queue to take the buffer from. 2235 * 2236 * This function removes the first RQ buffer on an RQ buffer list and returns a 2237 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2238 **/ 2239 static struct rqb_dmabuf * 2240 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2241 { 2242 struct lpfc_dmabuf *h_buf; 2243 struct lpfc_rqb *rqbp; 2244 2245 rqbp = hrq->rqbp; 2246 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2247 struct lpfc_dmabuf, list); 2248 if (!h_buf) 2249 return NULL; 2250 rqbp->buffer_count--; 2251 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2252 } 2253 2254 /** 2255 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2256 * @phba: Pointer to HBA context object. 2257 * @tag: Tag of the hbq buffer. 2258 * 2259 * This function searches for the hbq buffer associated with the given tag in 2260 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2261 * otherwise it returns NULL. 2262 **/ 2263 static struct hbq_dmabuf * 2264 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2265 { 2266 struct lpfc_dmabuf *d_buf; 2267 struct hbq_dmabuf *hbq_buf; 2268 uint32_t hbqno; 2269 2270 hbqno = tag >> 16; 2271 if (hbqno >= LPFC_MAX_HBQS) 2272 return NULL; 2273 2274 spin_lock_irq(&phba->hbalock); 2275 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2276 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2277 if (hbq_buf->tag == tag) { 2278 spin_unlock_irq(&phba->hbalock); 2279 return hbq_buf; 2280 } 2281 } 2282 spin_unlock_irq(&phba->hbalock); 2283 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2284 "1803 Bad hbq tag. Data: x%x x%x\n", 2285 tag, phba->hbqs[tag >> 16].buffer_count); 2286 return NULL; 2287 } 2288 2289 /** 2290 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2291 * @phba: Pointer to HBA context object. 2292 * @hbq_buffer: Pointer to HBQ buffer. 2293 * 2294 * This function is called with the hbalock held. This function gives back 2295 * the hbq buffer to firmware. If the HBQ does not have space to 2296 * post the buffer, it will free the buffer.
2297 **/ 2298 void 2299 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2300 { 2301 uint32_t hbqno; 2302 2303 if (hbq_buffer) { 2304 hbqno = hbq_buffer->tag >> 16; 2305 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2306 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2307 } 2308 } 2309 2310 /** 2311 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2312 * @mbxCommand: mailbox command code. 2313 * 2314 * This function is called by the mailbox event handler function to verify 2315 * that the completed mailbox command is a legitimate mailbox command. If the 2316 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2317 * and the mailbox event handler will take the HBA offline. 2318 **/ 2319 static int 2320 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2321 { 2322 uint8_t ret; 2323 2324 switch (mbxCommand) { 2325 case MBX_LOAD_SM: 2326 case MBX_READ_NV: 2327 case MBX_WRITE_NV: 2328 case MBX_WRITE_VPARMS: 2329 case MBX_RUN_BIU_DIAG: 2330 case MBX_INIT_LINK: 2331 case MBX_DOWN_LINK: 2332 case MBX_CONFIG_LINK: 2333 case MBX_CONFIG_RING: 2334 case MBX_RESET_RING: 2335 case MBX_READ_CONFIG: 2336 case MBX_READ_RCONFIG: 2337 case MBX_READ_SPARM: 2338 case MBX_READ_STATUS: 2339 case MBX_READ_RPI: 2340 case MBX_READ_XRI: 2341 case MBX_READ_REV: 2342 case MBX_READ_LNK_STAT: 2343 case MBX_REG_LOGIN: 2344 case MBX_UNREG_LOGIN: 2345 case MBX_CLEAR_LA: 2346 case MBX_DUMP_MEMORY: 2347 case MBX_DUMP_CONTEXT: 2348 case MBX_RUN_DIAGS: 2349 case MBX_RESTART: 2350 case MBX_UPDATE_CFG: 2351 case MBX_DOWN_LOAD: 2352 case MBX_DEL_LD_ENTRY: 2353 case MBX_RUN_PROGRAM: 2354 case MBX_SET_MASK: 2355 case MBX_SET_VARIABLE: 2356 case MBX_UNREG_D_ID: 2357 case MBX_KILL_BOARD: 2358 case MBX_CONFIG_FARP: 2359 case MBX_BEACON: 2360 case MBX_LOAD_AREA: 2361 case MBX_RUN_BIU_DIAG64: 2362 case MBX_CONFIG_PORT: 2363 case MBX_READ_SPARM64: 2364 case MBX_READ_RPI64: 2365 case MBX_REG_LOGIN64: 2366 case MBX_READ_TOPOLOGY: 2367 case MBX_WRITE_WWN: 2368 case MBX_SET_DEBUG: 2369 case MBX_LOAD_EXP_ROM: 2370 case MBX_ASYNCEVT_ENABLE: 2371 case MBX_REG_VPI: 2372 case MBX_UNREG_VPI: 2373 case MBX_HEARTBEAT: 2374 case MBX_PORT_CAPABILITIES: 2375 case MBX_PORT_IOV_CONTROL: 2376 case MBX_SLI4_CONFIG: 2377 case MBX_SLI4_REQ_FTRS: 2378 case MBX_REG_FCFI: 2379 case MBX_UNREG_FCFI: 2380 case MBX_REG_VFI: 2381 case MBX_UNREG_VFI: 2382 case MBX_INIT_VPI: 2383 case MBX_INIT_VFI: 2384 case MBX_RESUME_RPI: 2385 case MBX_READ_EVENT_LOG_STATUS: 2386 case MBX_READ_EVENT_LOG: 2387 case MBX_SECURITY_MGMT: 2388 case MBX_AUTH_PORT: 2389 case MBX_ACCESS_VDATA: 2390 ret = mbxCommand; 2391 break; 2392 default: 2393 ret = MBX_SHUTDOWN; 2394 break; 2395 } 2396 return ret; 2397 } 2398 2399 /** 2400 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2401 * @phba: Pointer to HBA context object. 2402 * @pmboxq: Pointer to mailbox command. 2403 * 2404 * This is completion handler function for mailbox commands issued from 2405 * lpfc_sli_issue_mbox_wait function. This function is called by the 2406 * mailbox event handler function with no lock held. This function 2407 * will wake up thread waiting on the wait queue pointed by context1 2408 * of the mailbox. 2409 **/ 2410 void 2411 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2412 { 2413 unsigned long drvr_flag; 2414 struct completion *pmbox_done; 2415 2416 /* 2417 * If pmbox_done is empty, the driver thread gave up waiting and 2418 * continued running. 
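 *
 * The waiting side of this handshake (lpfc_sli_issue_mbox_wait) is expected
 * to look roughly like the hedged sketch below. The field names come from
 * this handler, but the exact driver code may differ and 'timeout' is a
 * placeholder variable:
 *
 *        DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *        pmboxq->context3 = &mbox_done;
 *        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *        if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *                wait_for_completion_timeout(&mbox_done,
 *                                            msecs_to_jiffies(timeout * 1000));
 *        pmboxq->context3 = NULL;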
2419 */ 2420 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2421 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2422 pmbox_done = (struct completion *)pmboxq->context3; 2423 if (pmbox_done) 2424 complete(pmbox_done); 2425 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2426 return; 2427 } 2428 2429 2430 /** 2431 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2432 * @phba: Pointer to HBA context object. 2433 * @pmb: Pointer to mailbox object. 2434 * 2435 * This function is the default mailbox completion handler. It 2436 * frees the memory resources associated with the completed mailbox 2437 * command. If the completed command is a REG_LOGIN mailbox command, 2438 * this function will issue a UREG_LOGIN to re-claim the RPI. 2439 **/ 2440 void 2441 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2442 { 2443 struct lpfc_vport *vport = pmb->vport; 2444 struct lpfc_dmabuf *mp; 2445 struct lpfc_nodelist *ndlp; 2446 struct Scsi_Host *shost; 2447 uint16_t rpi, vpi; 2448 int rc; 2449 2450 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 2451 2452 if (mp) { 2453 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2454 kfree(mp); 2455 } 2456 2457 /* 2458 * If a REG_LOGIN succeeded after node is destroyed or node 2459 * is in re-discovery driver need to cleanup the RPI. 2460 */ 2461 if (!(phba->pport->load_flag & FC_UNLOADING) && 2462 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2463 !pmb->u.mb.mbxStatus) { 2464 rpi = pmb->u.mb.un.varWords[0]; 2465 vpi = pmb->u.mb.un.varRegLogin.vpi; 2466 lpfc_unreg_login(phba, vpi, rpi, pmb); 2467 pmb->vport = vport; 2468 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2469 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2470 if (rc != MBX_NOT_FINISHED) 2471 return; 2472 } 2473 2474 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2475 !(phba->pport->load_flag & FC_UNLOADING) && 2476 !pmb->u.mb.mbxStatus) { 2477 shost = lpfc_shost_from_vport(vport); 2478 spin_lock_irq(shost->host_lock); 2479 vport->vpi_state |= LPFC_VPI_REGISTERED; 2480 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2481 spin_unlock_irq(shost->host_lock); 2482 } 2483 2484 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2485 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2486 lpfc_nlp_put(ndlp); 2487 pmb->ctx_buf = NULL; 2488 pmb->ctx_ndlp = NULL; 2489 } 2490 2491 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2492 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2493 2494 /* Check to see if there are any deferred events to process */ 2495 if (ndlp) { 2496 lpfc_printf_vlog( 2497 vport, 2498 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2499 "1438 UNREG cmpl deferred mbox x%x " 2500 "on NPort x%x Data: x%x x%x %p\n", 2501 ndlp->nlp_rpi, ndlp->nlp_DID, 2502 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2503 2504 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2505 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 2506 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2507 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2508 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2509 } else { 2510 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2511 } 2512 pmb->ctx_ndlp = NULL; 2513 } 2514 } 2515 2516 /* Check security permission status on INIT_LINK mailbox command */ 2517 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2518 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2520 "2860 SLI authentication is required " 2521 "for INIT_LINK but has not done yet\n"); 2522 2523 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2524 lpfc_sli4_mbox_cmd_free(phba, pmb); 2525 else 2526 mempool_free(pmb, 
phba->mbox_mem_pool); 2527 } 2528 /** 2529 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2530 * @phba: Pointer to HBA context object. 2531 * @pmb: Pointer to mailbox object. 2532 * 2533 * This function is the unreg rpi mailbox completion handler. It 2534 * frees the memory resources associated with the completed mailbox 2535 * command. An additional reference is put on the ndlp to prevent 2536 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2537 * the unreg mailbox command completes; this routine puts the 2538 * reference back. 2539 * 2540 **/ 2541 void 2542 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2543 { 2544 struct lpfc_vport *vport = pmb->vport; 2545 struct lpfc_nodelist *ndlp; 2546 2547 ndlp = pmb->ctx_ndlp; 2548 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2549 if (phba->sli_rev == LPFC_SLI_REV4 && 2550 (bf_get(lpfc_sli_intf_if_type, 2551 &phba->sli4_hba.sli_intf) >= 2552 LPFC_SLI_INTF_IF_TYPE_2)) { 2553 if (ndlp) { 2554 lpfc_printf_vlog( 2555 vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2556 "0010 UNREG_LOGIN vpi:%x " 2557 "rpi:%x DID:%x defer x%x flg x%x " 2558 "map:%x %p\n", 2559 vport->vpi, ndlp->nlp_rpi, 2560 ndlp->nlp_DID, ndlp->nlp_defer_did, 2561 ndlp->nlp_flag, 2562 ndlp->nlp_usg_map, ndlp); 2563 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2564 lpfc_nlp_put(ndlp); 2565 2566 /* Check to see if there are any deferred 2567 * events to process 2568 */ 2569 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2570 (ndlp->nlp_defer_did != 2571 NLP_EVT_NOTHING_PENDING)) { 2572 lpfc_printf_vlog( 2573 vport, KERN_INFO, LOG_DISCOVERY, 2574 "4111 UNREG cmpl deferred " 2575 "clr x%x on " 2576 "NPort x%x Data: x%x %p\n", 2577 ndlp->nlp_rpi, ndlp->nlp_DID, 2578 ndlp->nlp_defer_did, ndlp); 2579 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2580 ndlp->nlp_defer_did = 2581 NLP_EVT_NOTHING_PENDING; 2582 lpfc_issue_els_plogi( 2583 vport, ndlp->nlp_DID, 0); 2584 } else { 2585 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2586 } 2587 } 2588 } 2589 } 2590 2591 mempool_free(pmb, phba->mbox_mem_pool); 2592 } 2593 2594 /** 2595 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2596 * @phba: Pointer to HBA context object. 2597 * 2598 * This function is called with no lock held. This function processes all 2599 * the completed mailbox commands and gives them to the upper layers. The interrupt 2600 * service routine processes the mailbox completion interrupt and adds completed 2601 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2602 * The worker thread calls lpfc_sli_handle_mb_event, which will return the 2603 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2604 * function returns the mailbox commands to the upper layer by calling the 2605 * completion handler function of each mailbox.
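 *
 * Every handler invoked from here has the same shape as lpfc_sli_def_mbox_cmpl
 * above. A hedged sketch of a minimal private handler - the function name and
 * log text are hypothetical, the rest is taken from this file (a handler for
 * an MBX_SLI4_CONFIG mailbox would free with lpfc_sli4_mbox_cmd_free instead,
 * as the default handler does):
 *
 *        static void my_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 *        {
 *                if (pmb->u.mb.mbxStatus)
 *                        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 *                                        "mbox cmd x%x failed, status x%x\n",
 *                                        pmb->u.mb.mbxCommand,
 *                                        pmb->u.mb.mbxStatus);
 *                mempool_free(pmb, phba->mbox_mem_pool);
 *        }
 *
 * and it is installed before the command is issued:
 *
 *        pmb->mbox_cmpl = my_mbox_cmpl;
 *        lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);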
2606 **/ 2607 int 2608 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2609 { 2610 MAILBOX_t *pmbox; 2611 LPFC_MBOXQ_t *pmb; 2612 int rc; 2613 LIST_HEAD(cmplq); 2614 2615 phba->sli.slistat.mbox_event++; 2616 2617 /* Get all completed mailboxe buffers into the cmplq */ 2618 spin_lock_irq(&phba->hbalock); 2619 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2620 spin_unlock_irq(&phba->hbalock); 2621 2622 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2623 do { 2624 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2625 if (pmb == NULL) 2626 break; 2627 2628 pmbox = &pmb->u.mb; 2629 2630 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2631 if (pmb->vport) { 2632 lpfc_debugfs_disc_trc(pmb->vport, 2633 LPFC_DISC_TRC_MBOX_VPORT, 2634 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2635 (uint32_t)pmbox->mbxCommand, 2636 pmbox->un.varWords[0], 2637 pmbox->un.varWords[1]); 2638 } 2639 else { 2640 lpfc_debugfs_disc_trc(phba->pport, 2641 LPFC_DISC_TRC_MBOX, 2642 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2643 (uint32_t)pmbox->mbxCommand, 2644 pmbox->un.varWords[0], 2645 pmbox->un.varWords[1]); 2646 } 2647 } 2648 2649 /* 2650 * It is a fatal error if unknown mbox command completion. 2651 */ 2652 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2653 MBX_SHUTDOWN) { 2654 /* Unknown mailbox command compl */ 2655 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2656 "(%d):0323 Unknown Mailbox command " 2657 "x%x (x%x/x%x) Cmpl\n", 2658 pmb->vport ? pmb->vport->vpi : 0, 2659 pmbox->mbxCommand, 2660 lpfc_sli_config_mbox_subsys_get(phba, 2661 pmb), 2662 lpfc_sli_config_mbox_opcode_get(phba, 2663 pmb)); 2664 phba->link_state = LPFC_HBA_ERROR; 2665 phba->work_hs = HS_FFER3; 2666 lpfc_handle_eratt(phba); 2667 continue; 2668 } 2669 2670 if (pmbox->mbxStatus) { 2671 phba->sli.slistat.mbox_stat_err++; 2672 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2673 /* Mbox cmd cmpl error - RETRYing */ 2674 lpfc_printf_log(phba, KERN_INFO, 2675 LOG_MBOX | LOG_SLI, 2676 "(%d):0305 Mbox cmd cmpl " 2677 "error - RETRYing Data: x%x " 2678 "(x%x/x%x) x%x x%x x%x\n", 2679 pmb->vport ? pmb->vport->vpi : 0, 2680 pmbox->mbxCommand, 2681 lpfc_sli_config_mbox_subsys_get(phba, 2682 pmb), 2683 lpfc_sli_config_mbox_opcode_get(phba, 2684 pmb), 2685 pmbox->mbxStatus, 2686 pmbox->un.varWords[0], 2687 pmb->vport->port_state); 2688 pmbox->mbxStatus = 0; 2689 pmbox->mbxOwner = OWN_HOST; 2690 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2691 if (rc != MBX_NOT_FINISHED) 2692 continue; 2693 } 2694 } 2695 2696 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2697 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2698 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2699 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2700 "x%x x%x x%x\n", 2701 pmb->vport ? pmb->vport->vpi : 0, 2702 pmbox->mbxCommand, 2703 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2704 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2705 pmb->mbox_cmpl, 2706 *((uint32_t *) pmbox), 2707 pmbox->un.varWords[0], 2708 pmbox->un.varWords[1], 2709 pmbox->un.varWords[2], 2710 pmbox->un.varWords[3], 2711 pmbox->un.varWords[4], 2712 pmbox->un.varWords[5], 2713 pmbox->un.varWords[6], 2714 pmbox->un.varWords[7], 2715 pmbox->un.varWords[8], 2716 pmbox->un.varWords[9], 2717 pmbox->un.varWords[10]); 2718 2719 if (pmb->mbox_cmpl) 2720 pmb->mbox_cmpl(phba,pmb); 2721 } while (1); 2722 return 0; 2723 } 2724 2725 /** 2726 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2727 * @phba: Pointer to HBA context object. 2728 * @pring: Pointer to driver SLI ring object. 2729 * @tag: buffer tag. 
2730 * 2731 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2732 * is set in the tag the buffer is posted for a particular exchange, 2733 * the function will return the buffer without replacing the buffer. 2734 * If the buffer is for unsolicited ELS or CT traffic, this function 2735 * returns the buffer and also posts another buffer to the firmware. 2736 **/ 2737 static struct lpfc_dmabuf * 2738 lpfc_sli_get_buff(struct lpfc_hba *phba, 2739 struct lpfc_sli_ring *pring, 2740 uint32_t tag) 2741 { 2742 struct hbq_dmabuf *hbq_entry; 2743 2744 if (tag & QUE_BUFTAG_BIT) 2745 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2746 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2747 if (!hbq_entry) 2748 return NULL; 2749 return &hbq_entry->dbuf; 2750 } 2751 2752 /** 2753 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2754 * @phba: Pointer to HBA context object. 2755 * @pring: Pointer to driver SLI ring object. 2756 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2757 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2758 * @fch_type: the type for the first frame of the sequence. 2759 * 2760 * This function is called with no lock held. This function uses the r_ctl and 2761 * type of the received sequence to find the correct callback function to call 2762 * to process the sequence. 2763 **/ 2764 static int 2765 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2766 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2767 uint32_t fch_type) 2768 { 2769 int i; 2770 2771 switch (fch_type) { 2772 case FC_TYPE_NVME: 2773 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2774 return 1; 2775 default: 2776 break; 2777 } 2778 2779 /* unSolicited Responses */ 2780 if (pring->prt[0].profile) { 2781 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2782 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2783 saveq); 2784 return 1; 2785 } 2786 /* We must search, based on rctl / type 2787 for the right routine */ 2788 for (i = 0; i < pring->num_mask; i++) { 2789 if ((pring->prt[i].rctl == fch_r_ctl) && 2790 (pring->prt[i].type == fch_type)) { 2791 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2792 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2793 (phba, pring, saveq); 2794 return 1; 2795 } 2796 } 2797 return 0; 2798 } 2799 2800 /** 2801 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2802 * @phba: Pointer to HBA context object. 2803 * @pring: Pointer to driver SLI ring object. 2804 * @saveq: Pointer to the unsolicited iocb. 2805 * 2806 * This function is called with no lock held by the ring event handler 2807 * when there is an unsolicited iocb posted to the response ring by the 2808 * firmware. This function gets the buffer associated with the iocbs 2809 * and calls the event handler for the ring. This function handles both 2810 * qring buffers and hbq buffers. 2811 * When the function returns 1 the caller can free the iocb object otherwise 2812 * upper layer functions will free the iocb objects. 
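 *
 * The rctl/type dispatch performed by lpfc_complete_unsol_iocb() above is a
 * linear table match. A standalone model of that matching (illustrative only;
 * the numeric values are placeholders rather than real FC_RCTL or FC_TYPE
 * constants):
 *
 *        #include <stdint.h>
 *        #include <stdio.h>
 *
 *        struct unsol_handler {
 *                uint32_t rctl;
 *                uint32_t type;
 *                void (*handler)(const char *what);
 *        };
 *
 *        static void els_handler(const char *what)
 *        {
 *                printf("ELS handler: %s\n", what);
 *        }
 *
 *        // First entry whose rctl and type both match wins.
 *        static int dispatch(const struct unsol_handler *tbl, int n,
 *                            uint32_t rctl, uint32_t type, const char *what)
 *        {
 *                int i;
 *
 *                for (i = 0; i < n; i++) {
 *                        if (tbl[i].rctl == rctl && tbl[i].type == type) {
 *                                tbl[i].handler(what);
 *                                return 1;
 *                        }
 *                }
 *                return 0;        // caller logs an unexpected Rctl and Type
 *        }
 *
 *        int main(void)
 *        {
 *                struct unsol_handler tbl[] = { { 0x22, 0x01, els_handler } };
 *
 *                dispatch(tbl, 1, 0x22, 0x01, "incoming sequence");
 *                return 0;
 *        }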
2813 **/ 2814 static int 2815 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2816 struct lpfc_iocbq *saveq) 2817 { 2818 IOCB_t * irsp; 2819 WORD5 * w5p; 2820 uint32_t Rctl, Type; 2821 struct lpfc_iocbq *iocbq; 2822 struct lpfc_dmabuf *dmzbuf; 2823 2824 irsp = &(saveq->iocb); 2825 2826 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2827 if (pring->lpfc_sli_rcv_async_status) 2828 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2829 else 2830 lpfc_printf_log(phba, 2831 KERN_WARNING, 2832 LOG_SLI, 2833 "0316 Ring %d handler: unexpected " 2834 "ASYNC_STATUS iocb received evt_code " 2835 "0x%x\n", 2836 pring->ringno, 2837 irsp->un.asyncstat.evt_code); 2838 return 1; 2839 } 2840 2841 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2842 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2843 if (irsp->ulpBdeCount > 0) { 2844 dmzbuf = lpfc_sli_get_buff(phba, pring, 2845 irsp->un.ulpWord[3]); 2846 lpfc_in_buf_free(phba, dmzbuf); 2847 } 2848 2849 if (irsp->ulpBdeCount > 1) { 2850 dmzbuf = lpfc_sli_get_buff(phba, pring, 2851 irsp->unsli3.sli3Words[3]); 2852 lpfc_in_buf_free(phba, dmzbuf); 2853 } 2854 2855 if (irsp->ulpBdeCount > 2) { 2856 dmzbuf = lpfc_sli_get_buff(phba, pring, 2857 irsp->unsli3.sli3Words[7]); 2858 lpfc_in_buf_free(phba, dmzbuf); 2859 } 2860 2861 return 1; 2862 } 2863 2864 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2865 if (irsp->ulpBdeCount != 0) { 2866 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2867 irsp->un.ulpWord[3]); 2868 if (!saveq->context2) 2869 lpfc_printf_log(phba, 2870 KERN_ERR, 2871 LOG_SLI, 2872 "0341 Ring %d Cannot find buffer for " 2873 "an unsolicited iocb. tag 0x%x\n", 2874 pring->ringno, 2875 irsp->un.ulpWord[3]); 2876 } 2877 if (irsp->ulpBdeCount == 2) { 2878 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2879 irsp->unsli3.sli3Words[7]); 2880 if (!saveq->context3) 2881 lpfc_printf_log(phba, 2882 KERN_ERR, 2883 LOG_SLI, 2884 "0342 Ring %d Cannot find buffer for an" 2885 " unsolicited iocb. tag 0x%x\n", 2886 pring->ringno, 2887 irsp->unsli3.sli3Words[7]); 2888 } 2889 list_for_each_entry(iocbq, &saveq->list, list) { 2890 irsp = &(iocbq->iocb); 2891 if (irsp->ulpBdeCount != 0) { 2892 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2893 irsp->un.ulpWord[3]); 2894 if (!iocbq->context2) 2895 lpfc_printf_log(phba, 2896 KERN_ERR, 2897 LOG_SLI, 2898 "0343 Ring %d Cannot find " 2899 "buffer for an unsolicited iocb" 2900 ". tag 0x%x\n", pring->ringno, 2901 irsp->un.ulpWord[3]); 2902 } 2903 if (irsp->ulpBdeCount == 2) { 2904 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2905 irsp->unsli3.sli3Words[7]); 2906 if (!iocbq->context3) 2907 lpfc_printf_log(phba, 2908 KERN_ERR, 2909 LOG_SLI, 2910 "0344 Ring %d Cannot find " 2911 "buffer for an unsolicited " 2912 "iocb. 
tag 0x%x\n", 2913 pring->ringno, 2914 irsp->unsli3.sli3Words[7]); 2915 } 2916 } 2917 } 2918 if (irsp->ulpBdeCount != 0 && 2919 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2920 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2921 int found = 0; 2922 2923 /* search continue save q for same XRI */ 2924 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2925 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2926 saveq->iocb.unsli3.rcvsli3.ox_id) { 2927 list_add_tail(&saveq->list, &iocbq->list); 2928 found = 1; 2929 break; 2930 } 2931 } 2932 if (!found) 2933 list_add_tail(&saveq->clist, 2934 &pring->iocb_continue_saveq); 2935 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2936 list_del_init(&iocbq->clist); 2937 saveq = iocbq; 2938 irsp = &(saveq->iocb); 2939 } else 2940 return 0; 2941 } 2942 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2943 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2944 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2945 Rctl = FC_RCTL_ELS_REQ; 2946 Type = FC_TYPE_ELS; 2947 } else { 2948 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2949 Rctl = w5p->hcsw.Rctl; 2950 Type = w5p->hcsw.Type; 2951 2952 /* Firmware Workaround */ 2953 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2954 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2955 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2956 Rctl = FC_RCTL_ELS_REQ; 2957 Type = FC_TYPE_ELS; 2958 w5p->hcsw.Rctl = Rctl; 2959 w5p->hcsw.Type = Type; 2960 } 2961 } 2962 2963 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2964 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2965 "0313 Ring %d handler: unexpected Rctl x%x " 2966 "Type x%x received\n", 2967 pring->ringno, Rctl, Type); 2968 2969 return 1; 2970 } 2971 2972 /** 2973 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2974 * @phba: Pointer to HBA context object. 2975 * @pring: Pointer to driver SLI ring object. 2976 * @prspiocb: Pointer to response iocb object. 2977 * 2978 * This function looks up the iocb_lookup table to get the command iocb 2979 * corresponding to the given response iocb using the iotag of the 2980 * response iocb. The driver calls this function with the hbalock held 2981 * for SLI3 ports or the ring lock held for SLI4 ports. 2982 * This function returns the command iocb object if it finds the command 2983 * iocb else returns NULL. 
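 *
 * Before a firmware-reported tag is trusted, the lookup performs two checks:
 * a range check against last_iotag and a check that the command is still
 * marked LPFC_IO_ON_TXCMPLQ. A standalone sketch of just those checks
 * (illustrative only; slots up to 'last' are assumed populated):
 *
 *        #include <stdint.h>
 *        #include <stdio.h>
 *
 *        #define ON_TXCMPLQ 0x1        // stands in for LPFC_IO_ON_TXCMPLQ
 *
 *        struct cmd { uint32_t flags; };
 *
 *        static struct cmd *lookup(struct cmd **tbl, uint16_t last,
 *                                  uint16_t tag)
 *        {
 *                if (tag == 0 || tag > last)
 *                        return NULL;
 *                if (!(tbl[tag]->flags & ON_TXCMPLQ))
 *                        return NULL;
 *                return tbl[tag];
 *        }
 *
 *        int main(void)
 *        {
 *                struct cmd outstanding = { ON_TXCMPLQ };
 *                struct cmd *tbl[2] = { NULL, &outstanding };
 *
 *                printf("%s\n", lookup(tbl, 1, 1) ? "found" : "missing");
 *                printf("%s\n", lookup(tbl, 1, 9) ? "found" : "missing");
 *                return 0;
 *        }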
2984 **/ 2985 static struct lpfc_iocbq * 2986 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2987 struct lpfc_sli_ring *pring, 2988 struct lpfc_iocbq *prspiocb) 2989 { 2990 struct lpfc_iocbq *cmd_iocb = NULL; 2991 uint16_t iotag; 2992 spinlock_t *temp_lock = NULL; 2993 unsigned long iflag = 0; 2994 2995 if (phba->sli_rev == LPFC_SLI_REV4) 2996 temp_lock = &pring->ring_lock; 2997 else 2998 temp_lock = &phba->hbalock; 2999 3000 spin_lock_irqsave(temp_lock, iflag); 3001 iotag = prspiocb->iocb.ulpIoTag; 3002 3003 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3004 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3005 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3006 /* remove from txcmpl queue list */ 3007 list_del_init(&cmd_iocb->list); 3008 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3009 pring->txcmplq_cnt--; 3010 spin_unlock_irqrestore(temp_lock, iflag); 3011 return cmd_iocb; 3012 } 3013 } 3014 3015 spin_unlock_irqrestore(temp_lock, iflag); 3016 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3017 "0317 iotag x%x is out of " 3018 "range: max iotag x%x wd0 x%x\n", 3019 iotag, phba->sli.last_iotag, 3020 *(((uint32_t *) &prspiocb->iocb) + 7)); 3021 return NULL; 3022 } 3023 3024 /** 3025 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3026 * @phba: Pointer to HBA context object. 3027 * @pring: Pointer to driver SLI ring object. 3028 * @iotag: IOCB tag. 3029 * 3030 * This function looks up the iocb_lookup table to get the command iocb 3031 * corresponding to the given iotag. The driver calls this function with 3032 * the ring lock held because this function is an SLI4 port only helper. 3033 * This function returns the command iocb object if it finds the command 3034 * iocb else returns NULL. 3035 **/ 3036 static struct lpfc_iocbq * 3037 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3038 struct lpfc_sli_ring *pring, uint16_t iotag) 3039 { 3040 struct lpfc_iocbq *cmd_iocb = NULL; 3041 spinlock_t *temp_lock = NULL; 3042 unsigned long iflag = 0; 3043 3044 if (phba->sli_rev == LPFC_SLI_REV4) 3045 temp_lock = &pring->ring_lock; 3046 else 3047 temp_lock = &phba->hbalock; 3048 3049 spin_lock_irqsave(temp_lock, iflag); 3050 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3051 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3052 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3053 /* remove from txcmpl queue list */ 3054 list_del_init(&cmd_iocb->list); 3055 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3056 pring->txcmplq_cnt--; 3057 spin_unlock_irqrestore(temp_lock, iflag); 3058 return cmd_iocb; 3059 } 3060 } 3061 3062 spin_unlock_irqrestore(temp_lock, iflag); 3063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3064 "0372 iotag x%x lookup error: max iotag (x%x) " 3065 "iocb_flag x%x\n", 3066 iotag, phba->sli.last_iotag, 3067 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3068 return NULL; 3069 } 3070 3071 /** 3072 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3073 * @phba: Pointer to HBA context object. 3074 * @pring: Pointer to driver SLI ring object. 3075 * @saveq: Pointer to the response iocb to be processed. 3076 * 3077 * This function is called by the ring event handler for non-fcp 3078 * rings when there is a new response iocb in the response ring. 3079 * The caller is not required to hold any locks. This function 3080 * gets the command iocb associated with the response iocb and 3081 * calls the completion handler for the command iocb. If there 3082 * is no completion handler, the function will free the resources 3083 * associated with command iocb. 
If the response iocb is for 3084 * an already aborted command iocb, the status of the completion 3085 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3086 * This function always returns 1. 3087 **/ 3088 static int 3089 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3090 struct lpfc_iocbq *saveq) 3091 { 3092 struct lpfc_iocbq *cmdiocbp; 3093 int rc = 1; 3094 unsigned long iflag; 3095 3096 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3097 if (cmdiocbp) { 3098 if (cmdiocbp->iocb_cmpl) { 3099 /* 3100 * If an ELS command failed send an event to mgmt 3101 * application. 3102 */ 3103 if (saveq->iocb.ulpStatus && 3104 (pring->ringno == LPFC_ELS_RING) && 3105 (cmdiocbp->iocb.ulpCommand == 3106 CMD_ELS_REQUEST64_CR)) 3107 lpfc_send_els_failure_event(phba, 3108 cmdiocbp, saveq); 3109 3110 /* 3111 * Post all ELS completions to the worker thread. 3112 * All other are passed to the completion callback. 3113 */ 3114 if (pring->ringno == LPFC_ELS_RING) { 3115 if ((phba->sli_rev < LPFC_SLI_REV4) && 3116 (cmdiocbp->iocb_flag & 3117 LPFC_DRIVER_ABORTED)) { 3118 spin_lock_irqsave(&phba->hbalock, 3119 iflag); 3120 cmdiocbp->iocb_flag &= 3121 ~LPFC_DRIVER_ABORTED; 3122 spin_unlock_irqrestore(&phba->hbalock, 3123 iflag); 3124 saveq->iocb.ulpStatus = 3125 IOSTAT_LOCAL_REJECT; 3126 saveq->iocb.un.ulpWord[4] = 3127 IOERR_SLI_ABORTED; 3128 3129 /* Firmware could still be in progress 3130 * of DMAing payload, so don't free data 3131 * buffer till after a hbeat. 3132 */ 3133 spin_lock_irqsave(&phba->hbalock, 3134 iflag); 3135 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3136 spin_unlock_irqrestore(&phba->hbalock, 3137 iflag); 3138 } 3139 if (phba->sli_rev == LPFC_SLI_REV4) { 3140 if (saveq->iocb_flag & 3141 LPFC_EXCHANGE_BUSY) { 3142 /* Set cmdiocb flag for the 3143 * exchange busy so sgl (xri) 3144 * will not be released until 3145 * the abort xri is received 3146 * from hba. 3147 */ 3148 spin_lock_irqsave( 3149 &phba->hbalock, iflag); 3150 cmdiocbp->iocb_flag |= 3151 LPFC_EXCHANGE_BUSY; 3152 spin_unlock_irqrestore( 3153 &phba->hbalock, iflag); 3154 } 3155 if (cmdiocbp->iocb_flag & 3156 LPFC_DRIVER_ABORTED) { 3157 /* 3158 * Clear LPFC_DRIVER_ABORTED 3159 * bit in case it was driver 3160 * initiated abort. 3161 */ 3162 spin_lock_irqsave( 3163 &phba->hbalock, iflag); 3164 cmdiocbp->iocb_flag &= 3165 ~LPFC_DRIVER_ABORTED; 3166 spin_unlock_irqrestore( 3167 &phba->hbalock, iflag); 3168 cmdiocbp->iocb.ulpStatus = 3169 IOSTAT_LOCAL_REJECT; 3170 cmdiocbp->iocb.un.ulpWord[4] = 3171 IOERR_ABORT_REQUESTED; 3172 /* 3173 * For SLI4, irsiocb contains 3174 * NO_XRI in sli_xritag, it 3175 * shall not affect releasing 3176 * sgl (xri) process. 3177 */ 3178 saveq->iocb.ulpStatus = 3179 IOSTAT_LOCAL_REJECT; 3180 saveq->iocb.un.ulpWord[4] = 3181 IOERR_SLI_ABORTED; 3182 spin_lock_irqsave( 3183 &phba->hbalock, iflag); 3184 saveq->iocb_flag |= 3185 LPFC_DELAY_MEM_FREE; 3186 spin_unlock_irqrestore( 3187 &phba->hbalock, iflag); 3188 } 3189 } 3190 } 3191 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3192 } else 3193 lpfc_sli_release_iocbq(phba, cmdiocbp); 3194 } else { 3195 /* 3196 * Unknown initiating command based on the response iotag. 3197 * This could be the case on the ELS ring because of 3198 * lpfc_els_abort(). 
3199 */ 3200 if (pring->ringno != LPFC_ELS_RING) { 3201 /* 3202 * Ring <ringno> handler: unexpected completion IoTag 3203 * <IoTag> 3204 */ 3205 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3206 "0322 Ring %d handler: " 3207 "unexpected completion IoTag x%x " 3208 "Data: x%x x%x x%x x%x\n", 3209 pring->ringno, 3210 saveq->iocb.ulpIoTag, 3211 saveq->iocb.ulpStatus, 3212 saveq->iocb.un.ulpWord[4], 3213 saveq->iocb.ulpCommand, 3214 saveq->iocb.ulpContext); 3215 } 3216 } 3217 3218 return rc; 3219 } 3220 3221 /** 3222 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3223 * @phba: Pointer to HBA context object. 3224 * @pring: Pointer to driver SLI ring object. 3225 * 3226 * This function is called from the iocb ring event handlers when 3227 * put pointer is ahead of the get pointer for a ring. This function signal 3228 * an error attention condition to the worker thread and the worker 3229 * thread will transition the HBA to offline state. 3230 **/ 3231 static void 3232 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3233 { 3234 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3235 /* 3236 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3237 * rsp ring <portRspMax> 3238 */ 3239 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3240 "0312 Ring %d handler: portRspPut %d " 3241 "is bigger than rsp ring %d\n", 3242 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3243 pring->sli.sli3.numRiocb); 3244 3245 phba->link_state = LPFC_HBA_ERROR; 3246 3247 /* 3248 * All error attention handlers are posted to 3249 * worker thread 3250 */ 3251 phba->work_ha |= HA_ERATT; 3252 phba->work_hs = HS_FFER3; 3253 3254 lpfc_worker_wake_up(phba); 3255 3256 return; 3257 } 3258 3259 /** 3260 * lpfc_poll_eratt - Error attention polling timer timeout handler 3261 * @ptr: Pointer to address of HBA context object. 3262 * 3263 * This function is invoked by the Error Attention polling timer when the 3264 * timer times out. It will check the SLI Error Attention register for 3265 * possible attention events. If so, it will post an Error Attention event 3266 * and wake up worker thread to process it. Otherwise, it will set up the 3267 * Error Attention polling timer for the next poll. 3268 **/ 3269 void lpfc_poll_eratt(struct timer_list *t) 3270 { 3271 struct lpfc_hba *phba; 3272 uint32_t eratt = 0; 3273 uint64_t sli_intr, cnt; 3274 3275 phba = from_timer(phba, t, eratt_poll); 3276 3277 /* Here we will also keep track of interrupts per sec of the hba */ 3278 sli_intr = phba->sli.slistat.sli_intr; 3279 3280 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3281 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3282 sli_intr); 3283 else 3284 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3285 3286 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3287 do_div(cnt, phba->eratt_poll_interval); 3288 phba->sli.slistat.sli_ips = cnt; 3289 3290 phba->sli.slistat.sli_prev_intr = sli_intr; 3291 3292 /* Check chip HA register for error event */ 3293 eratt = lpfc_sli_check_eratt(phba); 3294 3295 if (eratt) 3296 /* Tell the worker thread there is work to do */ 3297 lpfc_worker_wake_up(phba); 3298 else 3299 /* Restart the timer for next eratt poll */ 3300 mod_timer(&phba->eratt_poll, 3301 jiffies + 3302 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3303 return; 3304 } 3305 3306 3307 /** 3308 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3309 * @phba: Pointer to HBA context object. 
3310 * @pring: Pointer to driver SLI ring object. 3311 * @mask: Host attention register mask for this ring. 3312 * 3313 * This function is called from the interrupt context when there is a ring 3314 * event for the fcp ring. The caller does not hold any lock. 3315 * The function processes each response iocb in the response ring until it 3316 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3317 * LE bit set. The function will call the completion handler of the command iocb 3318 * if the response iocb indicates a completion for a command iocb or it is 3319 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3320 * function if this is an unsolicited iocb. 3321 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3322 * to check it explicitly. 3323 */ 3324 int 3325 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3326 struct lpfc_sli_ring *pring, uint32_t mask) 3327 { 3328 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3329 IOCB_t *irsp = NULL; 3330 IOCB_t *entry = NULL; 3331 struct lpfc_iocbq *cmdiocbq = NULL; 3332 struct lpfc_iocbq rspiocbq; 3333 uint32_t status; 3334 uint32_t portRspPut, portRspMax; 3335 int rc = 1; 3336 lpfc_iocb_type type; 3337 unsigned long iflag; 3338 uint32_t rsp_cmpl = 0; 3339 3340 spin_lock_irqsave(&phba->hbalock, iflag); 3341 pring->stats.iocb_event++; 3342 3343 /* 3344 * The next available response entry should never exceed the maximum 3345 * entries. If it does, treat it as an adapter hardware error. 3346 */ 3347 portRspMax = pring->sli.sli3.numRiocb; 3348 portRspPut = le32_to_cpu(pgp->rspPutInx); 3349 if (unlikely(portRspPut >= portRspMax)) { 3350 lpfc_sli_rsp_pointers_error(phba, pring); 3351 spin_unlock_irqrestore(&phba->hbalock, iflag); 3352 return 1; 3353 } 3354 if (phba->fcp_ring_in_use) { 3355 spin_unlock_irqrestore(&phba->hbalock, iflag); 3356 return 1; 3357 } else 3358 phba->fcp_ring_in_use = 1; 3359 3360 rmb(); 3361 while (pring->sli.sli3.rspidx != portRspPut) { 3362 /* 3363 * Fetch an entry off the ring and copy it into a local data 3364 * structure. The copy involves a byte-swap since the 3365 * network byte order and pci byte orders are different. 3366 */ 3367 entry = lpfc_resp_iocb(phba, pring); 3368 phba->last_completion_time = jiffies; 3369 3370 if (++pring->sli.sli3.rspidx >= portRspMax) 3371 pring->sli.sli3.rspidx = 0; 3372 3373 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3374 (uint32_t *) &rspiocbq.iocb, 3375 phba->iocb_rsp_size); 3376 INIT_LIST_HEAD(&(rspiocbq.list)); 3377 irsp = &rspiocbq.iocb; 3378 3379 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3380 pring->stats.iocb_rsp++; 3381 rsp_cmpl++; 3382 3383 if (unlikely(irsp->ulpStatus)) { 3384 /* 3385 * If resource errors reported from HBA, reduce 3386 * queuedepths of the SCSI device. 
3387 */ 3388 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3389 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3390 IOERR_NO_RESOURCES)) { 3391 spin_unlock_irqrestore(&phba->hbalock, iflag); 3392 phba->lpfc_rampdown_queue_depth(phba); 3393 spin_lock_irqsave(&phba->hbalock, iflag); 3394 } 3395 3396 /* Rsp ring <ringno> error: IOCB */ 3397 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3398 "0336 Rsp Ring %d error: IOCB Data: " 3399 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3400 pring->ringno, 3401 irsp->un.ulpWord[0], 3402 irsp->un.ulpWord[1], 3403 irsp->un.ulpWord[2], 3404 irsp->un.ulpWord[3], 3405 irsp->un.ulpWord[4], 3406 irsp->un.ulpWord[5], 3407 *(uint32_t *)&irsp->un1, 3408 *((uint32_t *)&irsp->un1 + 1)); 3409 } 3410 3411 switch (type) { 3412 case LPFC_ABORT_IOCB: 3413 case LPFC_SOL_IOCB: 3414 /* 3415 * Idle exchange closed via ABTS from port. No iocb 3416 * resources need to be recovered. 3417 */ 3418 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3419 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3420 "0333 IOCB cmd 0x%x" 3421 " processed. Skipping" 3422 " completion\n", 3423 irsp->ulpCommand); 3424 break; 3425 } 3426 3427 spin_unlock_irqrestore(&phba->hbalock, iflag); 3428 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3429 &rspiocbq); 3430 spin_lock_irqsave(&phba->hbalock, iflag); 3431 if (unlikely(!cmdiocbq)) 3432 break; 3433 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3434 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3435 if (cmdiocbq->iocb_cmpl) { 3436 spin_unlock_irqrestore(&phba->hbalock, iflag); 3437 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3438 &rspiocbq); 3439 spin_lock_irqsave(&phba->hbalock, iflag); 3440 } 3441 break; 3442 case LPFC_UNSOL_IOCB: 3443 spin_unlock_irqrestore(&phba->hbalock, iflag); 3444 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3445 spin_lock_irqsave(&phba->hbalock, iflag); 3446 break; 3447 default: 3448 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3449 char adaptermsg[LPFC_MAX_ADPTMSG]; 3450 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3451 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3452 MAX_MSG_DATA); 3453 dev_warn(&((phba->pcidev)->dev), 3454 "lpfc%d: %s\n", 3455 phba->brd_no, adaptermsg); 3456 } else { 3457 /* Unknown IOCB command */ 3458 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3459 "0334 Unknown IOCB command " 3460 "Data: x%x, x%x x%x x%x x%x\n", 3461 type, irsp->ulpCommand, 3462 irsp->ulpStatus, 3463 irsp->ulpIoTag, 3464 irsp->ulpContext); 3465 } 3466 break; 3467 } 3468 3469 /* 3470 * The response IOCB has been processed. Update the ring 3471 * pointer in SLIM. If the port response put pointer has not 3472 * been updated, sync the pgp->rspPutInx and fetch the new port 3473 * response put pointer. 
3474 */ 3475 writel(pring->sli.sli3.rspidx, 3476 &phba->host_gp[pring->ringno].rspGetInx); 3477 3478 if (pring->sli.sli3.rspidx == portRspPut) 3479 portRspPut = le32_to_cpu(pgp->rspPutInx); 3480 } 3481 3482 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3483 pring->stats.iocb_rsp_full++; 3484 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3485 writel(status, phba->CAregaddr); 3486 readl(phba->CAregaddr); 3487 } 3488 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3489 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3490 pring->stats.iocb_cmd_empty++; 3491 3492 /* Force update of the local copy of cmdGetInx */ 3493 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3494 lpfc_sli_resume_iocb(phba, pring); 3495 3496 if ((pring->lpfc_sli_cmd_available)) 3497 (pring->lpfc_sli_cmd_available) (phba, pring); 3498 3499 } 3500 3501 phba->fcp_ring_in_use = 0; 3502 spin_unlock_irqrestore(&phba->hbalock, iflag); 3503 return rc; 3504 } 3505 3506 /** 3507 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3508 * @phba: Pointer to HBA context object. 3509 * @pring: Pointer to driver SLI ring object. 3510 * @rspiocbp: Pointer to driver response IOCB object. 3511 * 3512 * This function is called from the worker thread when there is a slow-path 3513 * response IOCB to process. This function chains all the response iocbs until 3514 * seeing the iocb with the LE bit set. The function will call 3515 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3516 * completion of a command iocb. The function will call the 3517 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3518 * The function frees the resources or calls the completion handler if this 3519 * iocb is an abort completion. The function returns NULL when the response 3520 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3521 * this function shall chain the iocb on to the iocb_continueq and return the 3522 * response iocb passed in. 3523 **/ 3524 static struct lpfc_iocbq * 3525 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3526 struct lpfc_iocbq *rspiocbp) 3527 { 3528 struct lpfc_iocbq *saveq; 3529 struct lpfc_iocbq *cmdiocbp; 3530 struct lpfc_iocbq *next_iocb; 3531 IOCB_t *irsp = NULL; 3532 uint32_t free_saveq; 3533 uint8_t iocb_cmd_type; 3534 lpfc_iocb_type type; 3535 unsigned long iflag; 3536 int rc; 3537 3538 spin_lock_irqsave(&phba->hbalock, iflag); 3539 /* First add the response iocb to the countinueq list */ 3540 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3541 pring->iocb_continueq_cnt++; 3542 3543 /* Now, determine whether the list is completed for processing */ 3544 irsp = &rspiocbp->iocb; 3545 if (irsp->ulpLe) { 3546 /* 3547 * By default, the driver expects to free all resources 3548 * associated with this iocb completion. 3549 */ 3550 free_saveq = 1; 3551 saveq = list_get_first(&pring->iocb_continueq, 3552 struct lpfc_iocbq, list); 3553 irsp = &(saveq->iocb); 3554 list_del_init(&pring->iocb_continueq); 3555 pring->iocb_continueq_cnt = 0; 3556 3557 pring->stats.iocb_rsp++; 3558 3559 /* 3560 * If resource errors reported from HBA, reduce 3561 * queuedepths of the SCSI device. 
3562 */ 3563 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3564 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3565 IOERR_NO_RESOURCES)) { 3566 spin_unlock_irqrestore(&phba->hbalock, iflag); 3567 phba->lpfc_rampdown_queue_depth(phba); 3568 spin_lock_irqsave(&phba->hbalock, iflag); 3569 } 3570 3571 if (irsp->ulpStatus) { 3572 /* Rsp ring <ringno> error: IOCB */ 3573 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3574 "0328 Rsp Ring %d error: " 3575 "IOCB Data: " 3576 "x%x x%x x%x x%x " 3577 "x%x x%x x%x x%x " 3578 "x%x x%x x%x x%x " 3579 "x%x x%x x%x x%x\n", 3580 pring->ringno, 3581 irsp->un.ulpWord[0], 3582 irsp->un.ulpWord[1], 3583 irsp->un.ulpWord[2], 3584 irsp->un.ulpWord[3], 3585 irsp->un.ulpWord[4], 3586 irsp->un.ulpWord[5], 3587 *(((uint32_t *) irsp) + 6), 3588 *(((uint32_t *) irsp) + 7), 3589 *(((uint32_t *) irsp) + 8), 3590 *(((uint32_t *) irsp) + 9), 3591 *(((uint32_t *) irsp) + 10), 3592 *(((uint32_t *) irsp) + 11), 3593 *(((uint32_t *) irsp) + 12), 3594 *(((uint32_t *) irsp) + 13), 3595 *(((uint32_t *) irsp) + 14), 3596 *(((uint32_t *) irsp) + 15)); 3597 } 3598 3599 /* 3600 * Fetch the IOCB command type and call the correct completion 3601 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3602 * get freed back to the lpfc_iocb_list by the discovery 3603 * kernel thread. 3604 */ 3605 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3606 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3607 switch (type) { 3608 case LPFC_SOL_IOCB: 3609 spin_unlock_irqrestore(&phba->hbalock, iflag); 3610 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3611 spin_lock_irqsave(&phba->hbalock, iflag); 3612 break; 3613 3614 case LPFC_UNSOL_IOCB: 3615 spin_unlock_irqrestore(&phba->hbalock, iflag); 3616 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3617 spin_lock_irqsave(&phba->hbalock, iflag); 3618 if (!rc) 3619 free_saveq = 0; 3620 break; 3621 3622 case LPFC_ABORT_IOCB: 3623 cmdiocbp = NULL; 3624 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { 3625 spin_unlock_irqrestore(&phba->hbalock, iflag); 3626 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3627 saveq); 3628 spin_lock_irqsave(&phba->hbalock, iflag); 3629 } 3630 if (cmdiocbp) { 3631 /* Call the specified completion routine */ 3632 if (cmdiocbp->iocb_cmpl) { 3633 spin_unlock_irqrestore(&phba->hbalock, 3634 iflag); 3635 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3636 saveq); 3637 spin_lock_irqsave(&phba->hbalock, 3638 iflag); 3639 } else 3640 __lpfc_sli_release_iocbq(phba, 3641 cmdiocbp); 3642 } 3643 break; 3644 3645 case LPFC_UNKNOWN_IOCB: 3646 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3647 char adaptermsg[LPFC_MAX_ADPTMSG]; 3648 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3649 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3650 MAX_MSG_DATA); 3651 dev_warn(&((phba->pcidev)->dev), 3652 "lpfc%d: %s\n", 3653 phba->brd_no, adaptermsg); 3654 } else { 3655 /* Unknown IOCB command */ 3656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3657 "0335 Unknown IOCB " 3658 "command Data: x%x " 3659 "x%x x%x x%x\n", 3660 irsp->ulpCommand, 3661 irsp->ulpStatus, 3662 irsp->ulpIoTag, 3663 irsp->ulpContext); 3664 } 3665 break; 3666 } 3667 3668 if (free_saveq) { 3669 list_for_each_entry_safe(rspiocbp, next_iocb, 3670 &saveq->list, list) { 3671 list_del_init(&rspiocbp->list); 3672 __lpfc_sli_release_iocbq(phba, rspiocbp); 3673 } 3674 __lpfc_sli_release_iocbq(phba, saveq); 3675 } 3676 rspiocbp = NULL; 3677 } 3678 spin_unlock_irqrestore(&phba->hbalock, iflag); 3679 return rspiocbp; 3680 } 3681 3682 /** 3683 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path 
iocbs 3684 * @phba: Pointer to HBA context object. 3685 * @pring: Pointer to driver SLI ring object. 3686 * @mask: Host attention register mask for this ring. 3687 * 3688 * This routine wraps the actual slow_ring event process routine from the 3689 * API jump table function pointer from the lpfc_hba struct. 3690 **/ 3691 void 3692 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3693 struct lpfc_sli_ring *pring, uint32_t mask) 3694 { 3695 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3696 } 3697 3698 /** 3699 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3700 * @phba: Pointer to HBA context object. 3701 * @pring: Pointer to driver SLI ring object. 3702 * @mask: Host attention register mask for this ring. 3703 * 3704 * This function is called from the worker thread when there is a ring event 3705 * for non-fcp rings. The caller does not hold any lock. The function will 3706 * remove each response iocb in the response ring and calls the handle 3707 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3708 **/ 3709 static void 3710 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3711 struct lpfc_sli_ring *pring, uint32_t mask) 3712 { 3713 struct lpfc_pgp *pgp; 3714 IOCB_t *entry; 3715 IOCB_t *irsp = NULL; 3716 struct lpfc_iocbq *rspiocbp = NULL; 3717 uint32_t portRspPut, portRspMax; 3718 unsigned long iflag; 3719 uint32_t status; 3720 3721 pgp = &phba->port_gp[pring->ringno]; 3722 spin_lock_irqsave(&phba->hbalock, iflag); 3723 pring->stats.iocb_event++; 3724 3725 /* 3726 * The next available response entry should never exceed the maximum 3727 * entries. If it does, treat it as an adapter hardware error. 3728 */ 3729 portRspMax = pring->sli.sli3.numRiocb; 3730 portRspPut = le32_to_cpu(pgp->rspPutInx); 3731 if (portRspPut >= portRspMax) { 3732 /* 3733 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3734 * rsp ring <portRspMax> 3735 */ 3736 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3737 "0303 Ring %d handler: portRspPut %d " 3738 "is bigger than rsp ring %d\n", 3739 pring->ringno, portRspPut, portRspMax); 3740 3741 phba->link_state = LPFC_HBA_ERROR; 3742 spin_unlock_irqrestore(&phba->hbalock, iflag); 3743 3744 phba->work_hs = HS_FFER3; 3745 lpfc_handle_eratt(phba); 3746 3747 return; 3748 } 3749 3750 rmb(); 3751 while (pring->sli.sli3.rspidx != portRspPut) { 3752 /* 3753 * Build a completion list and call the appropriate handler. 3754 * The process is to get the next available response iocb, get 3755 * a free iocb from the list, copy the response data into the 3756 * free iocb, insert to the continuation list, and update the 3757 * next response index to slim. This process makes response 3758 * iocb's in the ring available to DMA as fast as possible but 3759 * pays a penalty for a copy operation. Since the iocb is 3760 * only 32 bytes, this penalty is considered small relative to 3761 * the PCI reads for register values and a slim write. When 3762 * the ulpLe field is set, the entire Command has been 3763 * received. 3764 */ 3765 entry = lpfc_resp_iocb(phba, pring); 3766 3767 phba->last_completion_time = jiffies; 3768 rspiocbp = __lpfc_sli_get_iocbq(phba); 3769 if (rspiocbp == NULL) { 3770 printk(KERN_ERR "%s: out of buffers! 
Failing " 3771 "completion.\n", __func__); 3772 break; 3773 } 3774 3775 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3776 phba->iocb_rsp_size); 3777 irsp = &rspiocbp->iocb; 3778 3779 if (++pring->sli.sli3.rspidx >= portRspMax) 3780 pring->sli.sli3.rspidx = 0; 3781 3782 if (pring->ringno == LPFC_ELS_RING) { 3783 lpfc_debugfs_slow_ring_trc(phba, 3784 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3785 *(((uint32_t *) irsp) + 4), 3786 *(((uint32_t *) irsp) + 6), 3787 *(((uint32_t *) irsp) + 7)); 3788 } 3789 3790 writel(pring->sli.sli3.rspidx, 3791 &phba->host_gp[pring->ringno].rspGetInx); 3792 3793 spin_unlock_irqrestore(&phba->hbalock, iflag); 3794 /* Handle the response IOCB */ 3795 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3796 spin_lock_irqsave(&phba->hbalock, iflag); 3797 3798 /* 3799 * If the port response put pointer has not been updated, sync 3800 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3801 * response put pointer. 3802 */ 3803 if (pring->sli.sli3.rspidx == portRspPut) { 3804 portRspPut = le32_to_cpu(pgp->rspPutInx); 3805 } 3806 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3807 3808 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3809 /* At least one response entry has been freed */ 3810 pring->stats.iocb_rsp_full++; 3811 /* SET RxRE_RSP in Chip Att register */ 3812 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3813 writel(status, phba->CAregaddr); 3814 readl(phba->CAregaddr); /* flush */ 3815 } 3816 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3817 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3818 pring->stats.iocb_cmd_empty++; 3819 3820 /* Force update of the local copy of cmdGetInx */ 3821 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3822 lpfc_sli_resume_iocb(phba, pring); 3823 3824 if ((pring->lpfc_sli_cmd_available)) 3825 (pring->lpfc_sli_cmd_available) (phba, pring); 3826 3827 } 3828 3829 spin_unlock_irqrestore(&phba->hbalock, iflag); 3830 return; 3831 } 3832 3833 /** 3834 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3835 * @phba: Pointer to HBA context object. 3836 * @pring: Pointer to driver SLI ring object. 3837 * @mask: Host attention register mask for this ring. 3838 * 3839 * This function is called from the worker thread when there is a pending 3840 * ELS response iocb on the driver internal slow-path response iocb worker 3841 * queue. The caller does not hold any lock. The function will remove each 3842 * response iocb from the response worker queue and calls the handle 3843 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
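 * Processing is bounded: after 64 queue events in a single invocation the
 * loop exits to avoid soft lockups, leaving any remaining events on the
 * slow-path event queue.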
3844 **/ 3845 static void 3846 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3847 struct lpfc_sli_ring *pring, uint32_t mask) 3848 { 3849 struct lpfc_iocbq *irspiocbq; 3850 struct hbq_dmabuf *dmabuf; 3851 struct lpfc_cq_event *cq_event; 3852 unsigned long iflag; 3853 int count = 0; 3854 3855 spin_lock_irqsave(&phba->hbalock, iflag); 3856 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3857 spin_unlock_irqrestore(&phba->hbalock, iflag); 3858 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3859 /* Get the response iocb from the head of work queue */ 3860 spin_lock_irqsave(&phba->hbalock, iflag); 3861 list_remove_head(&phba->sli4_hba.sp_queue_event, 3862 cq_event, struct lpfc_cq_event, list); 3863 spin_unlock_irqrestore(&phba->hbalock, iflag); 3864 3865 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3866 case CQE_CODE_COMPL_WQE: 3867 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3868 cq_event); 3869 /* Translate ELS WCQE to response IOCBQ */ 3870 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3871 irspiocbq); 3872 if (irspiocbq) 3873 lpfc_sli_sp_handle_rspiocb(phba, pring, 3874 irspiocbq); 3875 count++; 3876 break; 3877 case CQE_CODE_RECEIVE: 3878 case CQE_CODE_RECEIVE_V1: 3879 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3880 cq_event); 3881 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3882 count++; 3883 break; 3884 default: 3885 break; 3886 } 3887 3888 /* Limit the number of events to 64 to avoid soft lockups */ 3889 if (count == 64) 3890 break; 3891 } 3892 } 3893 3894 /** 3895 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3896 * @phba: Pointer to HBA context object. 3897 * @pring: Pointer to driver SLI ring object. 3898 * 3899 * This function aborts all iocbs in the given ring and frees all the iocb 3900 * objects in txq. This function issues an abort iocb for all the iocb commands 3901 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3902 * the return of this function. The caller is not required to hold any locks. 3903 **/ 3904 void 3905 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3906 { 3907 LIST_HEAD(completions); 3908 struct lpfc_iocbq *iocb, *next_iocb; 3909 3910 if (pring->ringno == LPFC_ELS_RING) { 3911 lpfc_fabric_abort_hba(phba); 3912 } 3913 3914 /* Error everything on txq and txcmplq 3915 * First do the txq. 3916 */ 3917 if (phba->sli_rev >= LPFC_SLI_REV4) { 3918 spin_lock_irq(&pring->ring_lock); 3919 list_splice_init(&pring->txq, &completions); 3920 pring->txq_cnt = 0; 3921 spin_unlock_irq(&pring->ring_lock); 3922 3923 spin_lock_irq(&phba->hbalock); 3924 /* Next issue ABTS for everything on the txcmplq */ 3925 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3926 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3927 spin_unlock_irq(&phba->hbalock); 3928 } else { 3929 spin_lock_irq(&phba->hbalock); 3930 list_splice_init(&pring->txq, &completions); 3931 pring->txq_cnt = 0; 3932 3933 /* Next issue ABTS for everything on the txcmplq */ 3934 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3935 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3936 spin_unlock_irq(&phba->hbalock); 3937 } 3938 3939 /* Cancel all the IOCBs from the completions list */ 3940 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3941 IOERR_SLI_ABORTED); 3942 } 3943 3944 /** 3945 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3946 * @phba: Pointer to HBA context object. 3947 * @pring: Pointer to driver SLI ring object. 
3948 * 3949 * This function aborts all iocbs in FCP rings and frees all the iocb 3950 * objects in txq. This function issues an abort iocb for all the iocb commands 3951 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3952 * the return of this function. The caller is not required to hold any locks. 3953 **/ 3954 void 3955 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3956 { 3957 struct lpfc_sli *psli = &phba->sli; 3958 struct lpfc_sli_ring *pring; 3959 uint32_t i; 3960 3961 /* Look on all the FCP Rings for the iotag */ 3962 if (phba->sli_rev >= LPFC_SLI_REV4) { 3963 for (i = 0; i < phba->cfg_hdw_queue; i++) { 3964 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 3965 lpfc_sli_abort_iocb_ring(phba, pring); 3966 } 3967 } else { 3968 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3969 lpfc_sli_abort_iocb_ring(phba, pring); 3970 } 3971 } 3972 3973 /** 3974 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3975 * @phba: Pointer to HBA context object. 3976 * 3977 * This function flushes all iocbs in the fcp ring and frees all the iocb 3978 * objects in txq and txcmplq. This function will not issue abort iocbs 3979 * for all the iocb commands in txcmplq, they will just be returned with 3980 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3981 * slot has been permanently disabled. 3982 **/ 3983 void 3984 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3985 { 3986 LIST_HEAD(txq); 3987 LIST_HEAD(txcmplq); 3988 struct lpfc_sli *psli = &phba->sli; 3989 struct lpfc_sli_ring *pring; 3990 uint32_t i; 3991 struct lpfc_iocbq *piocb, *next_iocb; 3992 3993 spin_lock_irq(&phba->hbalock); 3994 /* Indicate the I/O queues are flushed */ 3995 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3996 spin_unlock_irq(&phba->hbalock); 3997 3998 /* Look on all the FCP Rings for the iotag */ 3999 if (phba->sli_rev >= LPFC_SLI_REV4) { 4000 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4001 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 4002 4003 spin_lock_irq(&pring->ring_lock); 4004 /* Retrieve everything on txq */ 4005 list_splice_init(&pring->txq, &txq); 4006 list_for_each_entry_safe(piocb, next_iocb, 4007 &pring->txcmplq, list) 4008 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4009 /* Retrieve everything on the txcmplq */ 4010 list_splice_init(&pring->txcmplq, &txcmplq); 4011 pring->txq_cnt = 0; 4012 pring->txcmplq_cnt = 0; 4013 spin_unlock_irq(&pring->ring_lock); 4014 4015 /* Flush the txq */ 4016 lpfc_sli_cancel_iocbs(phba, &txq, 4017 IOSTAT_LOCAL_REJECT, 4018 IOERR_SLI_DOWN); 4019 /* Flush the txcmpq */ 4020 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4021 IOSTAT_LOCAL_REJECT, 4022 IOERR_SLI_DOWN); 4023 } 4024 } else { 4025 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4026 4027 spin_lock_irq(&phba->hbalock); 4028 /* Retrieve everything on txq */ 4029 list_splice_init(&pring->txq, &txq); 4030 list_for_each_entry_safe(piocb, next_iocb, 4031 &pring->txcmplq, list) 4032 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4033 /* Retrieve everything on the txcmplq */ 4034 list_splice_init(&pring->txcmplq, &txcmplq); 4035 pring->txq_cnt = 0; 4036 pring->txcmplq_cnt = 0; 4037 spin_unlock_irq(&phba->hbalock); 4038 4039 /* Flush the txq */ 4040 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4041 IOERR_SLI_DOWN); 4042 /* Flush the txcmpq */ 4043 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4044 IOERR_SLI_DOWN); 4045 } 4046 } 4047 4048 /** 4049 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 4050 * @phba: Pointer to HBA context object. 
4051 *
4052 * This function flushes all wqes in the nvme rings and frees all resources
4053 * in the txcmplq. This function does not issue abort wqes for the IO
4054 * commands in txcmplq, they will just be returned with
4055 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4056 * slot has been permanently disabled.
4057 **/
4058 void
4059 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
4060 {
4061 LIST_HEAD(txcmplq);
4062 struct lpfc_sli_ring *pring;
4063 uint32_t i;
4064 struct lpfc_iocbq *piocb, *next_iocb;
4065
4066 if ((phba->sli_rev < LPFC_SLI_REV4) ||
4067 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
4068 return;
4069
4070 /* Hint to other driver operations that a flush is in progress. */
4071 spin_lock_irq(&phba->hbalock);
4072 phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
4073 spin_unlock_irq(&phba->hbalock);
4074
4075 /* Cycle through all NVME rings and complete each IO with
4076 * a local driver reason code. This is a flush so no
4077 * abort exchange to FW.
4078 */
4079 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4080 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4081
4082 spin_lock_irq(&pring->ring_lock);
4083 list_for_each_entry_safe(piocb, next_iocb,
4084 &pring->txcmplq, list)
4085 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4086 /* Retrieve everything on the txcmplq */
4087 list_splice_init(&pring->txcmplq, &txcmplq);
4088 pring->txcmplq_cnt = 0;
4089 spin_unlock_irq(&pring->ring_lock);
4090
4091 /* Flush the txcmplq */
4092 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4093 IOSTAT_LOCAL_REJECT,
4094 IOERR_SLI_DOWN);
4095 }
4096 }
4097
4098 /**
4099 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4100 * @phba: Pointer to HBA context object.
4101 * @mask: Bit mask to be checked.
4102 *
4103 * This function reads the host status register and compares
4104 * with the provided bit mask to check if the HBA completed
4105 * the restart. This function will wait in a loop for the
4106 * HBA to complete restart. If the HBA does not restart within
4107 * 15 iterations, the function will reset the HBA again. The
4108 * function returns 1 when the HBA fails to restart, otherwise it
4109 * returns zero.
4110 **/
4111 static int
4112 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4113 {
4114 uint32_t status;
4115 int i = 0;
4116 int retval = 0;
4117
4118 /* Read the HBA Host Status Register */
4119 if (lpfc_readl(phba->HSregaddr, &status))
4120 return 1;
4121
4122 /*
4123 * Check status register every 10ms for 5 retries, then every
4124 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4125 * every 2.5 sec for 4.
4126 * Break out of the loop if errors occurred during init.
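 * Roughly: 5 polls at 10ms, 5 at 500ms and 5 at 2.5 sec (about 15 seconds)
 * before the restart at iteration 15, then up to 5 more 2.5 sec polls,
 * i.e. just under 30 seconds worst case before giving up.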
4127 */
4128 while (((status & mask) != mask) &&
4129 !(status & HS_FFERM) &&
4130 i++ < 20) {
4131
4132 if (i <= 5)
4133 msleep(10);
4134 else if (i <= 10)
4135 msleep(500);
4136 else
4137 msleep(2500);
4138
4139 if (i == 15) {
4140 /* Do post */
4141 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4142 lpfc_sli_brdrestart(phba);
4143 }
4144 /* Read the HBA Host Status Register */
4145 if (lpfc_readl(phba->HSregaddr, &status)) {
4146 retval = 1;
4147 break;
4148 }
4149 }
4150
4151 /* Check to see if any errors occurred during init */
4152 if ((status & HS_FFERM) || (i >= 20)) {
4153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4154 "2751 Adapter failed to restart, "
4155 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4156 status,
4157 readl(phba->MBslimaddr + 0xa8),
4158 readl(phba->MBslimaddr + 0xac));
4159 phba->link_state = LPFC_HBA_ERROR;
4160 retval = 1;
4161 }
4162
4163 return retval;
4164 }
4165
4166 /**
4167 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4168 * @phba: Pointer to HBA context object.
4169 * @mask: Bit mask to be checked.
4170 *
4171 * This function checks the host status register to check if the HBA is
4172 * ready. This function will wait in a loop for the HBA to be ready.
4173 * If the HBA is not ready, the function will reset the HBA PCI
4174 * function again. The function returns 1 when the HBA fails to become
4175 * ready, otherwise it returns zero.
4176 **/
4177 static int
4178 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4179 {
4180 uint32_t status;
4181 int retval = 0;
4182
4183 /* Read the HBA Host Status Register */
4184 status = lpfc_sli4_post_status_check(phba);
4185
4186 if (status) {
4187 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4188 lpfc_sli_brdrestart(phba);
4189 status = lpfc_sli4_post_status_check(phba);
4190 }
4191
4192 /* Check to see if any errors occurred during init */
4193 if (status) {
4194 phba->link_state = LPFC_HBA_ERROR;
4195 retval = 1;
4196 } else
4197 phba->sli4_hba.intr_enable = 0;
4198
4199 return retval;
4200 }
4201
4202 /**
4203 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4204 * @phba: Pointer to HBA context object.
4205 * @mask: Bit mask to be checked.
4206 *
4207 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4208 * from the API jump table function pointer from the lpfc_hba struct.
4209 **/
4210 int
4211 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4212 {
4213 return phba->lpfc_sli_brdready(phba, mask);
4214 }
4215
4216 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4217
4218 /**
4219 * lpfc_reset_barrier - Make HBA ready for HBA reset
4220 * @phba: Pointer to HBA context object.
4221 *
4222 * This function is called before resetting an HBA. It is called with the
4223 * hbalock held and requests the HBA to quiesce DMAs before a reset.
4224 **/
4225 void lpfc_reset_barrier(struct lpfc_hba *phba)
4226 {
4227 uint32_t __iomem *resp_buf;
4228 uint32_t __iomem *mbox_buf;
4229 volatile uint32_t mbox;
4230 uint32_t hc_copy, ha_copy, resp_data;
4231 int i;
4232 uint8_t hdrtype;
4233
4234 lockdep_assert_held(&phba->hbalock);
4235
4236 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4237 if (hdrtype != 0x80 ||
4238 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4239 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4240 return;
4241
4242 /*
4243 * Tell the other part of the chip to suspend temporarily all
4244 * its DMA activity.
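 * This is done by writing an MBX_KILL_BOARD mailbox word, marked as owned
 * by the chip, directly into SLIM after pre-loading the following SLIM
 * word with BARRIER_TEST_PATTERN; the chip acknowledges by writing back
 * the complement of that pattern, which is polled for below.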
4245 */ 4246 resp_buf = phba->MBslimaddr; 4247 4248 /* Disable the error attention */ 4249 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4250 return; 4251 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4252 readl(phba->HCregaddr); /* flush */ 4253 phba->link_flag |= LS_IGNORE_ERATT; 4254 4255 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4256 return; 4257 if (ha_copy & HA_ERATT) { 4258 /* Clear Chip error bit */ 4259 writel(HA_ERATT, phba->HAregaddr); 4260 phba->pport->stopped = 1; 4261 } 4262 4263 mbox = 0; 4264 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4265 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4266 4267 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4268 mbox_buf = phba->MBslimaddr; 4269 writel(mbox, mbox_buf); 4270 4271 for (i = 0; i < 50; i++) { 4272 if (lpfc_readl((resp_buf + 1), &resp_data)) 4273 return; 4274 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4275 mdelay(1); 4276 else 4277 break; 4278 } 4279 resp_data = 0; 4280 if (lpfc_readl((resp_buf + 1), &resp_data)) 4281 return; 4282 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4283 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4284 phba->pport->stopped) 4285 goto restore_hc; 4286 else 4287 goto clear_errat; 4288 } 4289 4290 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4291 resp_data = 0; 4292 for (i = 0; i < 500; i++) { 4293 if (lpfc_readl(resp_buf, &resp_data)) 4294 return; 4295 if (resp_data != mbox) 4296 mdelay(1); 4297 else 4298 break; 4299 } 4300 4301 clear_errat: 4302 4303 while (++i < 500) { 4304 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4305 return; 4306 if (!(ha_copy & HA_ERATT)) 4307 mdelay(1); 4308 else 4309 break; 4310 } 4311 4312 if (readl(phba->HAregaddr) & HA_ERATT) { 4313 writel(HA_ERATT, phba->HAregaddr); 4314 phba->pport->stopped = 1; 4315 } 4316 4317 restore_hc: 4318 phba->link_flag &= ~LS_IGNORE_ERATT; 4319 writel(hc_copy, phba->HCregaddr); 4320 readl(phba->HCregaddr); /* flush */ 4321 } 4322 4323 /** 4324 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4325 * @phba: Pointer to HBA context object. 4326 * 4327 * This function issues a kill_board mailbox command and waits for 4328 * the error attention interrupt. This function is called for stopping 4329 * the firmware processing. The caller is not required to hold any 4330 * locks. This function calls lpfc_hba_down_post function to free 4331 * any pending commands after the kill. The function will return 1 when it 4332 * fails to kill the board else will return 0. 
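 * Note that KILL_BOARD generates no mailbox completion; after issuing the
 * command this routine polls the host attention register for ERATT every
 * 100ms for up to 3 seconds before treating the board state as undefined.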
4333 **/ 4334 int 4335 lpfc_sli_brdkill(struct lpfc_hba *phba) 4336 { 4337 struct lpfc_sli *psli; 4338 LPFC_MBOXQ_t *pmb; 4339 uint32_t status; 4340 uint32_t ha_copy; 4341 int retval; 4342 int i = 0; 4343 4344 psli = &phba->sli; 4345 4346 /* Kill HBA */ 4347 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4348 "0329 Kill HBA Data: x%x x%x\n", 4349 phba->pport->port_state, psli->sli_flag); 4350 4351 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4352 if (!pmb) 4353 return 1; 4354 4355 /* Disable the error attention */ 4356 spin_lock_irq(&phba->hbalock); 4357 if (lpfc_readl(phba->HCregaddr, &status)) { 4358 spin_unlock_irq(&phba->hbalock); 4359 mempool_free(pmb, phba->mbox_mem_pool); 4360 return 1; 4361 } 4362 status &= ~HC_ERINT_ENA; 4363 writel(status, phba->HCregaddr); 4364 readl(phba->HCregaddr); /* flush */ 4365 phba->link_flag |= LS_IGNORE_ERATT; 4366 spin_unlock_irq(&phba->hbalock); 4367 4368 lpfc_kill_board(phba, pmb); 4369 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4370 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4371 4372 if (retval != MBX_SUCCESS) { 4373 if (retval != MBX_BUSY) 4374 mempool_free(pmb, phba->mbox_mem_pool); 4375 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4376 "2752 KILL_BOARD command failed retval %d\n", 4377 retval); 4378 spin_lock_irq(&phba->hbalock); 4379 phba->link_flag &= ~LS_IGNORE_ERATT; 4380 spin_unlock_irq(&phba->hbalock); 4381 return 1; 4382 } 4383 4384 spin_lock_irq(&phba->hbalock); 4385 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4386 spin_unlock_irq(&phba->hbalock); 4387 4388 mempool_free(pmb, phba->mbox_mem_pool); 4389 4390 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4391 * attention every 100ms for 3 seconds. If we don't get ERATT after 4392 * 3 seconds we still set HBA_ERROR state because the status of the 4393 * board is now undefined. 4394 */ 4395 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4396 return 1; 4397 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4398 mdelay(100); 4399 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4400 return 1; 4401 } 4402 4403 del_timer_sync(&psli->mbox_tmo); 4404 if (ha_copy & HA_ERATT) { 4405 writel(HA_ERATT, phba->HAregaddr); 4406 phba->pport->stopped = 1; 4407 } 4408 spin_lock_irq(&phba->hbalock); 4409 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4410 psli->mbox_active = NULL; 4411 phba->link_flag &= ~LS_IGNORE_ERATT; 4412 spin_unlock_irq(&phba->hbalock); 4413 4414 lpfc_hba_down_post(phba); 4415 phba->link_state = LPFC_HBA_ERROR; 4416 4417 return ha_copy & HA_ERATT ? 0 : 1; 4418 } 4419 4420 /** 4421 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4422 * @phba: Pointer to HBA context object. 4423 * 4424 * This function resets the HBA by writing HC_INITFF to the control 4425 * register. After the HBA resets, this function resets all the iocb ring 4426 * indices. This function disables PCI layer parity checking during 4427 * the reset. 4428 * This function returns 0 always. 4429 * The caller is not required to hold any locks. 4430 **/ 4431 int 4432 lpfc_sli_brdreset(struct lpfc_hba *phba) 4433 { 4434 struct lpfc_sli *psli; 4435 struct lpfc_sli_ring *pring; 4436 uint16_t cfg_value; 4437 int i; 4438 4439 psli = &phba->sli; 4440 4441 /* Reset HBA */ 4442 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4443 "0325 Reset HBA Data: x%x x%x\n", 4444 (phba->pport) ? 
phba->pport->port_state : 0, 4445 psli->sli_flag); 4446 4447 /* perform board reset */ 4448 phba->fc_eventTag = 0; 4449 phba->link_events = 0; 4450 if (phba->pport) { 4451 phba->pport->fc_myDID = 0; 4452 phba->pport->fc_prevDID = 0; 4453 } 4454 4455 /* Turn off parity checking and serr during the physical reset */ 4456 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) 4457 return -EIO; 4458 4459 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4460 (cfg_value & 4461 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4462 4463 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4464 4465 /* Now toggle INITFF bit in the Host Control Register */ 4466 writel(HC_INITFF, phba->HCregaddr); 4467 mdelay(1); 4468 readl(phba->HCregaddr); /* flush */ 4469 writel(0, phba->HCregaddr); 4470 readl(phba->HCregaddr); /* flush */ 4471 4472 /* Restore PCI cmd register */ 4473 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4474 4475 /* Initialize relevant SLI info */ 4476 for (i = 0; i < psli->num_rings; i++) { 4477 pring = &psli->sli3_ring[i]; 4478 pring->flag = 0; 4479 pring->sli.sli3.rspidx = 0; 4480 pring->sli.sli3.next_cmdidx = 0; 4481 pring->sli.sli3.local_getidx = 0; 4482 pring->sli.sli3.cmdidx = 0; 4483 pring->missbufcnt = 0; 4484 } 4485 4486 phba->link_state = LPFC_WARM_START; 4487 return 0; 4488 } 4489 4490 /** 4491 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4492 * @phba: Pointer to HBA context object. 4493 * 4494 * This function resets a SLI4 HBA. This function disables PCI layer parity 4495 * checking during resets the device. The caller is not required to hold 4496 * any locks. 4497 * 4498 * This function returns 0 always. 4499 **/ 4500 int 4501 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4502 { 4503 struct lpfc_sli *psli = &phba->sli; 4504 uint16_t cfg_value; 4505 int rc = 0; 4506 4507 /* Reset HBA */ 4508 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4509 "0295 Reset HBA Data: x%x x%x x%x\n", 4510 phba->pport->port_state, psli->sli_flag, 4511 phba->hba_flag); 4512 4513 /* perform board reset */ 4514 phba->fc_eventTag = 0; 4515 phba->link_events = 0; 4516 phba->pport->fc_myDID = 0; 4517 phba->pport->fc_prevDID = 0; 4518 4519 spin_lock_irq(&phba->hbalock); 4520 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4521 phba->fcf.fcf_flag = 0; 4522 spin_unlock_irq(&phba->hbalock); 4523 4524 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4525 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4526 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4527 return rc; 4528 } 4529 4530 /* Now physically reset the device */ 4531 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4532 "0389 Performing PCI function reset!\n"); 4533 4534 /* Turn off parity checking and serr during the physical reset */ 4535 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { 4536 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4537 "3205 PCI read Config failed\n"); 4538 return -EIO; 4539 } 4540 4541 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4542 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4543 4544 /* Perform FCoE PCI function reset before freeing queue memory */ 4545 rc = lpfc_pci_function_reset(phba); 4546 4547 /* Restore PCI cmd register */ 4548 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4549 4550 return rc; 4551 } 4552 4553 /** 4554 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4555 * @phba: Pointer to HBA context object. 4556 * 4557 * This function is called in the SLI initialization code path to 4558 * restart the HBA. The caller is not required to hold any lock. 
4559 * This function writes MBX_RESTART mailbox command to the SLIM and 4560 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4561 * function to free any pending commands. The function enables 4562 * POST only during the first initialization. The function returns zero. 4563 * The function does not guarantee completion of MBX_RESTART mailbox 4564 * command before the return of this function. 4565 **/ 4566 static int 4567 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4568 { 4569 MAILBOX_t *mb; 4570 struct lpfc_sli *psli; 4571 volatile uint32_t word0; 4572 void __iomem *to_slim; 4573 uint32_t hba_aer_enabled; 4574 4575 spin_lock_irq(&phba->hbalock); 4576 4577 /* Take PCIe device Advanced Error Reporting (AER) state */ 4578 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4579 4580 psli = &phba->sli; 4581 4582 /* Restart HBA */ 4583 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4584 "0337 Restart HBA Data: x%x x%x\n", 4585 (phba->pport) ? phba->pport->port_state : 0, 4586 psli->sli_flag); 4587 4588 word0 = 0; 4589 mb = (MAILBOX_t *) &word0; 4590 mb->mbxCommand = MBX_RESTART; 4591 mb->mbxHc = 1; 4592 4593 lpfc_reset_barrier(phba); 4594 4595 to_slim = phba->MBslimaddr; 4596 writel(*(uint32_t *) mb, to_slim); 4597 readl(to_slim); /* flush */ 4598 4599 /* Only skip post after fc_ffinit is completed */ 4600 if (phba->pport && phba->pport->port_state) 4601 word0 = 1; /* This is really setting up word1 */ 4602 else 4603 word0 = 0; /* This is really setting up word1 */ 4604 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4605 writel(*(uint32_t *) mb, to_slim); 4606 readl(to_slim); /* flush */ 4607 4608 lpfc_sli_brdreset(phba); 4609 if (phba->pport) 4610 phba->pport->stopped = 0; 4611 phba->link_state = LPFC_INIT_START; 4612 phba->hba_flag = 0; 4613 spin_unlock_irq(&phba->hbalock); 4614 4615 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4616 psli->stats_start = ktime_get_seconds(); 4617 4618 /* Give the INITFF and Post time to settle. */ 4619 mdelay(100); 4620 4621 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4622 if (hba_aer_enabled) 4623 pci_disable_pcie_error_reporting(phba->pcidev); 4624 4625 lpfc_hba_down_post(phba); 4626 4627 return 0; 4628 } 4629 4630 /** 4631 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4632 * @phba: Pointer to HBA context object. 4633 * 4634 * This function is called in the SLI initialization code path to restart 4635 * a SLI4 HBA. The caller is not required to hold any lock. 4636 * At the end of the function, it calls lpfc_hba_down_post function to 4637 * free any pending commands. 
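 * Unlike the SLI3 restart path, this routine also tears down the SLI4
 * queues via lpfc_sli4_queue_destroy() after the reset completes.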
4638 **/
4639 static int
4640 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4641 {
4642 struct lpfc_sli *psli = &phba->sli;
4643 uint32_t hba_aer_enabled;
4644 int rc;
4645
4646 /* Restart HBA */
4647 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4648 "0296 Restart HBA Data: x%x x%x\n",
4649 phba->pport->port_state, psli->sli_flag);
4650
4651 /* Take PCIe device Advanced Error Reporting (AER) state */
4652 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4653
4654 rc = lpfc_sli4_brdreset(phba);
4655 if (rc)
4656 return rc;
4657
4658 spin_lock_irq(&phba->hbalock);
4659 phba->pport->stopped = 0;
4660 phba->link_state = LPFC_INIT_START;
4661 phba->hba_flag = 0;
4662 spin_unlock_irq(&phba->hbalock);
4663
4664 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4665 psli->stats_start = ktime_get_seconds();
4666
4667 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4668 if (hba_aer_enabled)
4669 pci_disable_pcie_error_reporting(phba->pcidev);
4670
4671 lpfc_hba_down_post(phba);
4672 lpfc_sli4_queue_destroy(phba);
4673
4674 return rc;
4675 }
4676
4677 /**
4678 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4679 * @phba: Pointer to HBA context object.
4680 *
4681 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4682 * API jump table function pointer from the lpfc_hba struct.
4683 **/
4684 int
4685 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4686 {
4687 return phba->lpfc_sli_brdrestart(phba);
4688 }
4689
4690 /**
4691 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
4692 * @phba: Pointer to HBA context object.
4693 *
4694 * This function is called after an HBA restart to wait for successful
4695 * restart of the HBA. Successful restart of the HBA is indicated by
4696 * the HS_FFRDY and HS_MBRDY bits. If the HBA is still not ready after
4697 * 150 polling iterations, the function will restart the HBA again. The
4698 * function returns zero on success, else a negative error code.
4699 **/
4700 int
4701 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4702 {
4703 uint32_t status, i = 0;
4704
4705 /* Read the HBA Host Status Register */
4706 if (lpfc_readl(phba->HSregaddr, &status))
4707 return -EIO;
4708
4709 /* Check status register to see what current state is */
4710 i = 0;
4711 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4712
4713 /* Check every 10ms for 10 retries, then every 100ms for 90
4714 * retries, then every 1 sec for 50 retries, for a total of
4715 * ~60 seconds, before resetting the board again and then
4716 * checking every 1 sec for 50 more retries. Up to 60 seconds
4717 * may be needed before the board is ready because Falcon FIPS
4718 * zeroization must complete; any board reset in between
4719 * restarts zeroization and further delays board readiness.
4720 */ 4721 if (i++ >= 200) { 4722 /* Adapter failed to init, timeout, status reg 4723 <status> */ 4724 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4725 "0436 Adapter failed to init, " 4726 "timeout, status reg x%x, " 4727 "FW Data: A8 x%x AC x%x\n", status, 4728 readl(phba->MBslimaddr + 0xa8), 4729 readl(phba->MBslimaddr + 0xac)); 4730 phba->link_state = LPFC_HBA_ERROR; 4731 return -ETIMEDOUT; 4732 } 4733 4734 /* Check to see if any errors occurred during init */ 4735 if (status & HS_FFERM) { 4736 /* ERROR: During chipset initialization */ 4737 /* Adapter failed to init, chipset, status reg 4738 <status> */ 4739 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4740 "0437 Adapter failed to init, " 4741 "chipset, status reg x%x, " 4742 "FW Data: A8 x%x AC x%x\n", status, 4743 readl(phba->MBslimaddr + 0xa8), 4744 readl(phba->MBslimaddr + 0xac)); 4745 phba->link_state = LPFC_HBA_ERROR; 4746 return -EIO; 4747 } 4748 4749 if (i <= 10) 4750 msleep(10); 4751 else if (i <= 100) 4752 msleep(100); 4753 else 4754 msleep(1000); 4755 4756 if (i == 150) { 4757 /* Do post */ 4758 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4759 lpfc_sli_brdrestart(phba); 4760 } 4761 /* Read the HBA Host Status Register */ 4762 if (lpfc_readl(phba->HSregaddr, &status)) 4763 return -EIO; 4764 } 4765 4766 /* Check to see if any errors occurred during init */ 4767 if (status & HS_FFERM) { 4768 /* ERROR: During chipset initialization */ 4769 /* Adapter failed to init, chipset, status reg <status> */ 4770 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4771 "0438 Adapter failed to init, chipset, " 4772 "status reg x%x, " 4773 "FW Data: A8 x%x AC x%x\n", status, 4774 readl(phba->MBslimaddr + 0xa8), 4775 readl(phba->MBslimaddr + 0xac)); 4776 phba->link_state = LPFC_HBA_ERROR; 4777 return -EIO; 4778 } 4779 4780 /* Clear all interrupt enable conditions */ 4781 writel(0, phba->HCregaddr); 4782 readl(phba->HCregaddr); /* flush */ 4783 4784 /* setup host attn register */ 4785 writel(0xffffffff, phba->HAregaddr); 4786 readl(phba->HAregaddr); /* flush */ 4787 return 0; 4788 } 4789 4790 /** 4791 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4792 * 4793 * This function calculates and returns the number of HBQs required to be 4794 * configured. 4795 **/ 4796 int 4797 lpfc_sli_hbq_count(void) 4798 { 4799 return ARRAY_SIZE(lpfc_hbq_defs); 4800 } 4801 4802 /** 4803 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4804 * 4805 * This function adds the number of hbq entries in every HBQ to get 4806 * the total number of hbq entries required for the HBA and returns 4807 * the total count. 4808 **/ 4809 static int 4810 lpfc_sli_hbq_entry_count(void) 4811 { 4812 int hbq_count = lpfc_sli_hbq_count(); 4813 int count = 0; 4814 int i; 4815 4816 for (i = 0; i < hbq_count; ++i) 4817 count += lpfc_hbq_defs[i]->entry_count; 4818 return count; 4819 } 4820 4821 /** 4822 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4823 * 4824 * This function calculates amount of memory required for all hbq entries 4825 * to be configured and returns the total memory required. 4826 **/ 4827 int 4828 lpfc_sli_hbq_size(void) 4829 { 4830 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4831 } 4832 4833 /** 4834 * lpfc_sli_hbq_setup - configure and initialize HBQs 4835 * @phba: Pointer to HBA context object. 4836 * 4837 * This function is called during the SLI initialization to configure 4838 * all the HBQs and post buffers to the HBQ. The caller is not 4839 * required to hold any locks. 
This function will return zero if successful 4840 * else it will return negative error code. 4841 **/ 4842 static int 4843 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4844 { 4845 int hbq_count = lpfc_sli_hbq_count(); 4846 LPFC_MBOXQ_t *pmb; 4847 MAILBOX_t *pmbox; 4848 uint32_t hbqno; 4849 uint32_t hbq_entry_index; 4850 4851 /* Get a Mailbox buffer to setup mailbox 4852 * commands for HBA initialization 4853 */ 4854 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4855 4856 if (!pmb) 4857 return -ENOMEM; 4858 4859 pmbox = &pmb->u.mb; 4860 4861 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4862 phba->link_state = LPFC_INIT_MBX_CMDS; 4863 phba->hbq_in_use = 1; 4864 4865 hbq_entry_index = 0; 4866 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4867 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4868 phba->hbqs[hbqno].hbqPutIdx = 0; 4869 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4870 phba->hbqs[hbqno].entry_count = 4871 lpfc_hbq_defs[hbqno]->entry_count; 4872 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4873 hbq_entry_index, pmb); 4874 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4875 4876 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4877 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4878 mbxStatus <status>, ring <num> */ 4879 4880 lpfc_printf_log(phba, KERN_ERR, 4881 LOG_SLI | LOG_VPORT, 4882 "1805 Adapter failed to init. " 4883 "Data: x%x x%x x%x\n", 4884 pmbox->mbxCommand, 4885 pmbox->mbxStatus, hbqno); 4886 4887 phba->link_state = LPFC_HBA_ERROR; 4888 mempool_free(pmb, phba->mbox_mem_pool); 4889 return -ENXIO; 4890 } 4891 } 4892 phba->hbq_count = hbq_count; 4893 4894 mempool_free(pmb, phba->mbox_mem_pool); 4895 4896 /* Initially populate or replenish the HBQs */ 4897 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4898 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4899 return 0; 4900 } 4901 4902 /** 4903 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4904 * @phba: Pointer to HBA context object. 4905 * 4906 * This function is called during the SLI initialization to configure 4907 * all the HBQs and post buffers to the HBQ. The caller is not 4908 * required to hold any locks. This function will return zero if successful 4909 * else it will return negative error code. 4910 **/ 4911 static int 4912 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4913 { 4914 phba->hbq_in_use = 1; 4915 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4916 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4917 phba->hbq_count = 1; 4918 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4919 /* Initially populate or replenish the HBQs */ 4920 return 0; 4921 } 4922 4923 /** 4924 * lpfc_sli_config_port - Issue config port mailbox command 4925 * @phba: Pointer to HBA context object. 4926 * @sli_mode: sli mode - 2/3 4927 * 4928 * This function is called by the sli initialization code path 4929 * to issue config_port mailbox command. This function restarts the 4930 * HBA firmware and issues a config_port mailbox command to configure 4931 * the SLI interface in the sli mode specified by sli_mode 4932 * variable. The caller is not required to hold any locks. 4933 * The function returns 0 if successful, else returns negative error 4934 * code. 
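 * The restart/CONFIG_PORT sequence is attempted at most twice; -ERESTART
 * from the pre-CONFIG_PORT preparation triggers the retry, while any other
 * failure ends the loop. A minimal, illustrative call sequence (mirroring
 * lpfc_sli_hba_setup() further below, with error handling omitted) is:
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		rc = lpfc_sli_config_port(phba, 2);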
4935 **/ 4936 int 4937 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4938 { 4939 LPFC_MBOXQ_t *pmb; 4940 uint32_t resetcount = 0, rc = 0, done = 0; 4941 4942 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4943 if (!pmb) { 4944 phba->link_state = LPFC_HBA_ERROR; 4945 return -ENOMEM; 4946 } 4947 4948 phba->sli_rev = sli_mode; 4949 while (resetcount < 2 && !done) { 4950 spin_lock_irq(&phba->hbalock); 4951 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4952 spin_unlock_irq(&phba->hbalock); 4953 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4954 lpfc_sli_brdrestart(phba); 4955 rc = lpfc_sli_chipset_init(phba); 4956 if (rc) 4957 break; 4958 4959 spin_lock_irq(&phba->hbalock); 4960 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4961 spin_unlock_irq(&phba->hbalock); 4962 resetcount++; 4963 4964 /* Call pre CONFIG_PORT mailbox command initialization. A 4965 * value of 0 means the call was successful. Any other 4966 * nonzero value is a failure, but if ERESTART is returned, 4967 * the driver may reset the HBA and try again. 4968 */ 4969 rc = lpfc_config_port_prep(phba); 4970 if (rc == -ERESTART) { 4971 phba->link_state = LPFC_LINK_UNKNOWN; 4972 continue; 4973 } else if (rc) 4974 break; 4975 4976 phba->link_state = LPFC_INIT_MBX_CMDS; 4977 lpfc_config_port(phba, pmb); 4978 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4979 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4980 LPFC_SLI3_HBQ_ENABLED | 4981 LPFC_SLI3_CRP_ENABLED | 4982 LPFC_SLI3_DSS_ENABLED); 4983 if (rc != MBX_SUCCESS) { 4984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4985 "0442 Adapter failed to init, mbxCmd x%x " 4986 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4987 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4988 spin_lock_irq(&phba->hbalock); 4989 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4990 spin_unlock_irq(&phba->hbalock); 4991 rc = -ENXIO; 4992 } else { 4993 /* Allow asynchronous mailbox command to go through */ 4994 spin_lock_irq(&phba->hbalock); 4995 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4996 spin_unlock_irq(&phba->hbalock); 4997 done = 1; 4998 4999 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 5000 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 5001 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5002 "3110 Port did not grant ASABT\n"); 5003 } 5004 } 5005 if (!done) { 5006 rc = -EINVAL; 5007 goto do_prep_failed; 5008 } 5009 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 5010 if (!pmb->u.mb.un.varCfgPort.cMA) { 5011 rc = -ENXIO; 5012 goto do_prep_failed; 5013 } 5014 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5015 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5016 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5017 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5018 phba->max_vpi : phba->max_vports; 5019 5020 } else 5021 phba->max_vpi = 0; 5022 phba->fips_level = 0; 5023 phba->fips_spec_rev = 0; 5024 if (pmb->u.mb.un.varCfgPort.gdss) { 5025 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5026 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5027 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5028 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5029 "2850 Security Crypto Active. 
FIPS x%d " 5030 "(Spec Rev: x%d)", 5031 phba->fips_level, phba->fips_spec_rev); 5032 } 5033 if (pmb->u.mb.un.varCfgPort.sec_err) { 5034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5035 "2856 Config Port Security Crypto " 5036 "Error: x%x ", 5037 pmb->u.mb.un.varCfgPort.sec_err); 5038 } 5039 if (pmb->u.mb.un.varCfgPort.gerbm) 5040 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5041 if (pmb->u.mb.un.varCfgPort.gcrp) 5042 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5043 5044 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5045 phba->port_gp = phba->mbox->us.s3_pgp.port; 5046 5047 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5048 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5049 phba->cfg_enable_bg = 0; 5050 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5051 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5052 "0443 Adapter did not grant " 5053 "BlockGuard\n"); 5054 } 5055 } 5056 } else { 5057 phba->hbq_get = NULL; 5058 phba->port_gp = phba->mbox->us.s2.port; 5059 phba->max_vpi = 0; 5060 } 5061 do_prep_failed: 5062 mempool_free(pmb, phba->mbox_mem_pool); 5063 return rc; 5064 } 5065 5066 5067 /** 5068 * lpfc_sli_hba_setup - SLI initialization function 5069 * @phba: Pointer to HBA context object. 5070 * 5071 * This function is the main SLI initialization function. This function 5072 * is called by the HBA initialization code, HBA reset code and HBA 5073 * error attention handler code. Caller is not required to hold any 5074 * locks. This function issues config_port mailbox command to configure 5075 * the SLI, setup iocb rings and HBQ rings. In the end the function 5076 * calls the config_port_post function to issue init_link mailbox 5077 * command and to start the discovery. The function will return zero 5078 * if successful, else it will return negative error code. 5079 **/ 5080 int 5081 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5082 { 5083 uint32_t rc; 5084 int mode = 3, i; 5085 int longs; 5086 5087 switch (phba->cfg_sli_mode) { 5088 case 2: 5089 if (phba->cfg_enable_npiv) { 5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5091 "1824 NPIV enabled: Override sli_mode " 5092 "parameter (%d) to auto (0).\n", 5093 phba->cfg_sli_mode); 5094 break; 5095 } 5096 mode = 2; 5097 break; 5098 case 0: 5099 case 3: 5100 break; 5101 default: 5102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5103 "1819 Unrecognized sli_mode parameter: %d.\n", 5104 phba->cfg_sli_mode); 5105 5106 break; 5107 } 5108 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5109 5110 rc = lpfc_sli_config_port(phba, mode); 5111 5112 if (rc && phba->cfg_sli_mode == 3) 5113 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5114 "1820 Unable to select SLI-3. 
" 5115 "Not supported by adapter.\n"); 5116 if (rc && mode != 2) 5117 rc = lpfc_sli_config_port(phba, 2); 5118 else if (rc && mode == 2) 5119 rc = lpfc_sli_config_port(phba, 3); 5120 if (rc) 5121 goto lpfc_sli_hba_setup_error; 5122 5123 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5124 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5125 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5126 if (!rc) { 5127 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5128 "2709 This device supports " 5129 "Advanced Error Reporting (AER)\n"); 5130 spin_lock_irq(&phba->hbalock); 5131 phba->hba_flag |= HBA_AER_ENABLED; 5132 spin_unlock_irq(&phba->hbalock); 5133 } else { 5134 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5135 "2708 This device does not support " 5136 "Advanced Error Reporting (AER): %d\n", 5137 rc); 5138 phba->cfg_aer_support = 0; 5139 } 5140 } 5141 5142 if (phba->sli_rev == 3) { 5143 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5144 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5145 } else { 5146 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5147 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5148 phba->sli3_options = 0; 5149 } 5150 5151 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5152 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5153 phba->sli_rev, phba->max_vpi); 5154 rc = lpfc_sli_ring_map(phba); 5155 5156 if (rc) 5157 goto lpfc_sli_hba_setup_error; 5158 5159 /* Initialize VPIs. */ 5160 if (phba->sli_rev == LPFC_SLI_REV3) { 5161 /* 5162 * The VPI bitmask and physical ID array are allocated 5163 * and initialized once only - at driver load. A port 5164 * reset doesn't need to reinitialize this memory. 5165 */ 5166 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5167 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5168 phba->vpi_bmask = kcalloc(longs, 5169 sizeof(unsigned long), 5170 GFP_KERNEL); 5171 if (!phba->vpi_bmask) { 5172 rc = -ENOMEM; 5173 goto lpfc_sli_hba_setup_error; 5174 } 5175 5176 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5177 sizeof(uint16_t), 5178 GFP_KERNEL); 5179 if (!phba->vpi_ids) { 5180 kfree(phba->vpi_bmask); 5181 rc = -ENOMEM; 5182 goto lpfc_sli_hba_setup_error; 5183 } 5184 for (i = 0; i < phba->max_vpi; i++) 5185 phba->vpi_ids[i] = i; 5186 } 5187 } 5188 5189 /* Init HBQs */ 5190 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5191 rc = lpfc_sli_hbq_setup(phba); 5192 if (rc) 5193 goto lpfc_sli_hba_setup_error; 5194 } 5195 spin_lock_irq(&phba->hbalock); 5196 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5197 spin_unlock_irq(&phba->hbalock); 5198 5199 rc = lpfc_config_port_post(phba); 5200 if (rc) 5201 goto lpfc_sli_hba_setup_error; 5202 5203 return rc; 5204 5205 lpfc_sli_hba_setup_error: 5206 phba->link_state = LPFC_HBA_ERROR; 5207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5208 "0445 Firmware initialization failed\n"); 5209 return rc; 5210 } 5211 5212 /** 5213 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5214 * @phba: Pointer to HBA context object. 5215 * @mboxq: mailbox pointer. 5216 * This function issue a dump mailbox command to read config region 5217 * 23 and parse the records in the region and populate driver 5218 * data structure. 
5219 **/ 5220 static int 5221 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5222 { 5223 LPFC_MBOXQ_t *mboxq; 5224 struct lpfc_dmabuf *mp; 5225 struct lpfc_mqe *mqe; 5226 uint32_t data_length; 5227 int rc; 5228 5229 /* Program the default value of vlan_id and fc_map */ 5230 phba->valid_vlan = 0; 5231 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5232 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5233 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5234 5235 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5236 if (!mboxq) 5237 return -ENOMEM; 5238 5239 mqe = &mboxq->u.mqe; 5240 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5241 rc = -ENOMEM; 5242 goto out_free_mboxq; 5243 } 5244 5245 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 5246 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5247 5248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5249 "(%d):2571 Mailbox cmd x%x Status x%x " 5250 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5251 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5252 "CQ: x%x x%x x%x x%x\n", 5253 mboxq->vport ? mboxq->vport->vpi : 0, 5254 bf_get(lpfc_mqe_command, mqe), 5255 bf_get(lpfc_mqe_status, mqe), 5256 mqe->un.mb_words[0], mqe->un.mb_words[1], 5257 mqe->un.mb_words[2], mqe->un.mb_words[3], 5258 mqe->un.mb_words[4], mqe->un.mb_words[5], 5259 mqe->un.mb_words[6], mqe->un.mb_words[7], 5260 mqe->un.mb_words[8], mqe->un.mb_words[9], 5261 mqe->un.mb_words[10], mqe->un.mb_words[11], 5262 mqe->un.mb_words[12], mqe->un.mb_words[13], 5263 mqe->un.mb_words[14], mqe->un.mb_words[15], 5264 mqe->un.mb_words[16], mqe->un.mb_words[50], 5265 mboxq->mcqe.word0, 5266 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5267 mboxq->mcqe.trailer); 5268 5269 if (rc) { 5270 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5271 kfree(mp); 5272 rc = -EIO; 5273 goto out_free_mboxq; 5274 } 5275 data_length = mqe->un.mb_words[5]; 5276 if (data_length > DMP_RGN23_SIZE) { 5277 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5278 kfree(mp); 5279 rc = -EIO; 5280 goto out_free_mboxq; 5281 } 5282 5283 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5284 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5285 kfree(mp); 5286 rc = 0; 5287 5288 out_free_mboxq: 5289 mempool_free(mboxq, phba->mbox_mem_pool); 5290 return rc; 5291 } 5292 5293 /** 5294 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5295 * @phba: pointer to lpfc hba data structure. 5296 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5297 * @vpd: pointer to the memory to hold resulting port vpd data. 5298 * @vpd_size: On input, the number of bytes allocated to @vpd. 5299 * On output, the number of data bytes in @vpd. 5300 * 5301 * This routine executes a READ_REV SLI4 mailbox command. In 5302 * addition, this routine gets the port vpd data. 5303 * 5304 * Return codes 5305 * 0 - successful 5306 * -ENOMEM - could not allocated memory. 5307 **/ 5308 static int 5309 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5310 uint8_t *vpd, uint32_t *vpd_size) 5311 { 5312 int rc = 0; 5313 uint32_t dma_size; 5314 struct lpfc_dmabuf *dmabuf; 5315 struct lpfc_mqe *mqe; 5316 5317 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5318 if (!dmabuf) 5319 return -ENOMEM; 5320 5321 /* 5322 * Get a DMA buffer for the vpd data resulting from the READ_REV 5323 * mailbox command. 
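 * The buffer is sized from the caller-supplied *vpd_size and is released
 * with dma_free_coherent() before this routine returns, on both the
 * success and failure paths.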
5324 */ 5325 dma_size = *vpd_size; 5326 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5327 &dmabuf->phys, GFP_KERNEL); 5328 if (!dmabuf->virt) { 5329 kfree(dmabuf); 5330 return -ENOMEM; 5331 } 5332 5333 /* 5334 * The SLI4 implementation of READ_REV conflicts at word1, 5335 * bits 31:16 and SLI4 adds vpd functionality not present 5336 * in SLI3. This code corrects the conflicts. 5337 */ 5338 lpfc_read_rev(phba, mboxq); 5339 mqe = &mboxq->u.mqe; 5340 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5341 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5342 mqe->un.read_rev.word1 &= 0x0000FFFF; 5343 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5344 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5345 5346 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5347 if (rc) { 5348 dma_free_coherent(&phba->pcidev->dev, dma_size, 5349 dmabuf->virt, dmabuf->phys); 5350 kfree(dmabuf); 5351 return -EIO; 5352 } 5353 5354 /* 5355 * The available vpd length cannot be bigger than the 5356 * DMA buffer passed to the port. Catch the less than 5357 * case and update the caller's size. 5358 */ 5359 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5360 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5361 5362 memcpy(vpd, dmabuf->virt, *vpd_size); 5363 5364 dma_free_coherent(&phba->pcidev->dev, dma_size, 5365 dmabuf->virt, dmabuf->phys); 5366 kfree(dmabuf); 5367 return 0; 5368 } 5369 5370 /** 5371 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes 5372 * @phba: pointer to lpfc hba data structure. 5373 * 5374 * This routine retrieves SLI4 device physical port name this PCI function 5375 * is attached to. 5376 * 5377 * Return codes 5378 * 0 - successful 5379 * otherwise - failed to retrieve controller attributes 5380 **/ 5381 static int 5382 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) 5383 { 5384 LPFC_MBOXQ_t *mboxq; 5385 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5386 struct lpfc_controller_attribute *cntl_attr; 5387 void *virtaddr = NULL; 5388 uint32_t alloclen, reqlen; 5389 uint32_t shdr_status, shdr_add_status; 5390 union lpfc_sli4_cfg_shdr *shdr; 5391 int rc; 5392 5393 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5394 if (!mboxq) 5395 return -ENOMEM; 5396 5397 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ 5398 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5399 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5400 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5401 LPFC_SLI4_MBX_NEMBED); 5402 5403 if (alloclen < reqlen) { 5404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5405 "3084 Allocated DMA memory size (%d) is " 5406 "less than the requested DMA memory size " 5407 "(%d)\n", alloclen, reqlen); 5408 rc = -ENOMEM; 5409 goto out_free_mboxq; 5410 } 5411 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5412 virtaddr = mboxq->sge_array->addr[0]; 5413 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5414 shdr = &mbx_cntl_attr->cfg_shdr; 5415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5417 if (shdr_status || shdr_add_status || rc) { 5418 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5419 "3085 Mailbox x%x (x%x/x%x) failed, " 5420 "rc:x%x, status:x%x, add_status:x%x\n", 5421 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5422 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5423 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5424 rc, shdr_status, shdr_add_status); 5425 rc = -ENXIO; 
5426 goto out_free_mboxq; 5427 } 5428 5429 cntl_attr = &mbx_cntl_attr->cntl_attr; 5430 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5431 phba->sli4_hba.lnk_info.lnk_tp = 5432 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5433 phba->sli4_hba.lnk_info.lnk_no = 5434 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5435 5436 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); 5437 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, 5438 sizeof(phba->BIOSVersion)); 5439 5440 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5441 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", 5442 phba->sli4_hba.lnk_info.lnk_tp, 5443 phba->sli4_hba.lnk_info.lnk_no, 5444 phba->BIOSVersion); 5445 out_free_mboxq: 5446 if (rc != MBX_TIMEOUT) { 5447 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5448 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5449 else 5450 mempool_free(mboxq, phba->mbox_mem_pool); 5451 } 5452 return rc; 5453 } 5454 5455 /** 5456 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5457 * @phba: pointer to lpfc hba data structure. 5458 * 5459 * This routine retrieves SLI4 device physical port name this PCI function 5460 * is attached to. 5461 * 5462 * Return codes 5463 * 0 - successful 5464 * otherwise - failed to retrieve physical port name 5465 **/ 5466 static int 5467 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5468 { 5469 LPFC_MBOXQ_t *mboxq; 5470 struct lpfc_mbx_get_port_name *get_port_name; 5471 uint32_t shdr_status, shdr_add_status; 5472 union lpfc_sli4_cfg_shdr *shdr; 5473 char cport_name = 0; 5474 int rc; 5475 5476 /* We assume nothing at this point */ 5477 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5478 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5479 5480 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5481 if (!mboxq) 5482 return -ENOMEM; 5483 /* obtain link type and link number via READ_CONFIG */ 5484 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5485 lpfc_sli4_read_config(phba); 5486 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5487 goto retrieve_ppname; 5488 5489 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5490 rc = lpfc_sli4_get_ctl_attr(phba); 5491 if (rc) 5492 goto out_free_mboxq; 5493 5494 retrieve_ppname: 5495 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5496 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5497 sizeof(struct lpfc_mbx_get_port_name) - 5498 sizeof(struct lpfc_sli4_cfg_mhdr), 5499 LPFC_SLI4_MBX_EMBED); 5500 get_port_name = &mboxq->u.mqe.un.get_port_name; 5501 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5502 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5503 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5504 phba->sli4_hba.lnk_info.lnk_tp); 5505 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5506 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5507 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5508 if (shdr_status || shdr_add_status || rc) { 5509 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5510 "3087 Mailbox x%x (x%x/x%x) failed: " 5511 "rc:x%x, status:x%x, add_status:x%x\n", 5512 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5513 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5514 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5515 rc, shdr_status, shdr_add_status); 5516 rc = -ENXIO; 5517 goto out_free_mboxq; 5518 } 5519 switch (phba->sli4_hba.lnk_info.lnk_no) { 5520 case LPFC_LINK_NUMBER_0: 5521 cport_name = 
bf_get(lpfc_mbx_get_port_name_name0, 5522 &get_port_name->u.response); 5523 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5524 break; 5525 case LPFC_LINK_NUMBER_1: 5526 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5527 &get_port_name->u.response); 5528 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5529 break; 5530 case LPFC_LINK_NUMBER_2: 5531 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5532 &get_port_name->u.response); 5533 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5534 break; 5535 case LPFC_LINK_NUMBER_3: 5536 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5537 &get_port_name->u.response); 5538 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5539 break; 5540 default: 5541 break; 5542 } 5543 5544 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5545 phba->Port[0] = cport_name; 5546 phba->Port[1] = '\0'; 5547 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5548 "3091 SLI get port name: %s\n", phba->Port); 5549 } 5550 5551 out_free_mboxq: 5552 if (rc != MBX_TIMEOUT) { 5553 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5554 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5555 else 5556 mempool_free(mboxq, phba->mbox_mem_pool); 5557 } 5558 return rc; 5559 } 5560 5561 /** 5562 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 5563 * @phba: pointer to lpfc hba data structure. 5564 * 5565 * This routine is called to explicitly arm the SLI4 device's completion and 5566 * event queues 5567 **/ 5568 static void 5569 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5570 { 5571 int qidx; 5572 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5573 struct lpfc_sli4_hdw_queue *qp; 5574 5575 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); 5576 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); 5577 if (sli4_hba->nvmels_cq) 5578 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, 5579 LPFC_QUEUE_REARM); 5580 5581 qp = sli4_hba->hdwq; 5582 if (sli4_hba->hdwq) { 5583 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 5584 sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0, 5585 LPFC_QUEUE_REARM); 5586 sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0, 5587 LPFC_QUEUE_REARM); 5588 } 5589 5590 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) 5591 sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq, 5592 0, LPFC_QUEUE_REARM); 5593 } 5594 5595 if (phba->nvmet_support) { 5596 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5597 sli4_hba->sli4_write_cq_db(phba, 5598 sli4_hba->nvmet_cqset[qidx], 0, 5599 LPFC_QUEUE_REARM); 5600 } 5601 } 5602 } 5603 5604 /** 5605 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5606 * @phba: Pointer to HBA context object. 5607 * @type: The resource extent type. 5608 * @extnt_count: buffer to hold port available extent count. 5609 * @extnt_size: buffer to hold element count per extent. 5610 * 5611 * This function calls the port and retrievs the number of available 5612 * extents and their size for a particular extent type. 5613 * 5614 * Returns: 0 if successful. Nonzero otherwise. 
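 *
 * Illustrative usage (a sketch; extent_total is a hypothetical local):
 *
 *	uint16_t ext_cnt, ext_size;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size);
 *	if (!rc)
 *		extent_total = ext_cnt * ext_size;
 *
 * where extent_total is the number of resource ids the port can provide
 * for that type, the same product lpfc_sli4_alloc_extent() uses to size
 * its id array.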
5615 **/ 5616 int 5617 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5618 uint16_t *extnt_count, uint16_t *extnt_size) 5619 { 5620 int rc = 0; 5621 uint32_t length; 5622 uint32_t mbox_tmo; 5623 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5624 LPFC_MBOXQ_t *mbox; 5625 5626 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5627 if (!mbox) 5628 return -ENOMEM; 5629 5630 /* Find out how many extents are available for this resource type */ 5631 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5632 sizeof(struct lpfc_sli4_cfg_mhdr)); 5633 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5634 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5635 length, LPFC_SLI4_MBX_EMBED); 5636 5637 /* Send an extents count of 0 - the GET doesn't use it. */ 5638 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5639 LPFC_SLI4_MBX_EMBED); 5640 if (unlikely(rc)) { 5641 rc = -EIO; 5642 goto err_exit; 5643 } 5644 5645 if (!phba->sli4_hba.intr_enable) 5646 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5647 else { 5648 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5649 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5650 } 5651 if (unlikely(rc)) { 5652 rc = -EIO; 5653 goto err_exit; 5654 } 5655 5656 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5657 if (bf_get(lpfc_mbox_hdr_status, 5658 &rsrc_info->header.cfg_shdr.response)) { 5659 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5660 "2930 Failed to get resource extents " 5661 "Status 0x%x Add'l Status 0x%x\n", 5662 bf_get(lpfc_mbox_hdr_status, 5663 &rsrc_info->header.cfg_shdr.response), 5664 bf_get(lpfc_mbox_hdr_add_status, 5665 &rsrc_info->header.cfg_shdr.response)); 5666 rc = -EIO; 5667 goto err_exit; 5668 } 5669 5670 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5671 &rsrc_info->u.rsp); 5672 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5673 &rsrc_info->u.rsp); 5674 5675 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5676 "3162 Retrieved extents type-%d from port: count:%d, " 5677 "size:%d\n", type, *extnt_count, *extnt_size); 5678 5679 err_exit: 5680 mempool_free(mbox, phba->mbox_mem_pool); 5681 return rc; 5682 } 5683 5684 /** 5685 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5686 * @phba: Pointer to HBA context object. 5687 * @type: The extent type to check. 5688 * 5689 * This function reads the current available extents from the port and checks 5690 * if the extent count or extent size has changed since the last access. 5691 * Callers use this routine post port reset to understand if there is a 5692 * extent reprovisioning requirement. 5693 * 5694 * Returns: 5695 * -Error: error indicates problem. 5696 * 1: Extent count or size has changed. 5697 * 0: No changes. 
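 *
 * Illustrative post-reset usage (a sketch of the caller's logic in
 * lpfc_sli4_alloc_resource_identifiers(): the check is run for every
 * extent type and any nonzero return causes all types to be deallocated
 * and reallocated):
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI);
 *	if (rc != 0)
 *		error++;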
5698 **/ 5699 static int 5700 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5701 { 5702 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5703 uint16_t size_diff, rsrc_ext_size; 5704 int rc = 0; 5705 struct lpfc_rsrc_blks *rsrc_entry; 5706 struct list_head *rsrc_blk_list = NULL; 5707 5708 size_diff = 0; 5709 curr_ext_cnt = 0; 5710 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5711 &rsrc_ext_cnt, 5712 &rsrc_ext_size); 5713 if (unlikely(rc)) 5714 return -EIO; 5715 5716 switch (type) { 5717 case LPFC_RSC_TYPE_FCOE_RPI: 5718 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5719 break; 5720 case LPFC_RSC_TYPE_FCOE_VPI: 5721 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5722 break; 5723 case LPFC_RSC_TYPE_FCOE_XRI: 5724 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5725 break; 5726 case LPFC_RSC_TYPE_FCOE_VFI: 5727 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5728 break; 5729 default: 5730 break; 5731 } 5732 5733 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5734 curr_ext_cnt++; 5735 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5736 size_diff++; 5737 } 5738 5739 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5740 rc = 1; 5741 5742 return rc; 5743 } 5744 5745 /** 5746 * lpfc_sli4_cfg_post_extnts - 5747 * @phba: Pointer to HBA context object. 5748 * @extnt_cnt - number of available extents. 5749 * @type - the extent type (rpi, xri, vfi, vpi). 5750 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5751 * @mbox - pointer to the caller's allocated mailbox structure. 5752 * 5753 * This function executes the extents allocation request. It also 5754 * takes care of the amount of memory needed to allocate or get the 5755 * allocated extents. It is the caller's responsibility to evaluate 5756 * the response. 5757 * 5758 * Returns: 5759 * -Error: Error value describes the condition found. 5760 * 0: if successful 5761 **/ 5762 static int 5763 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5764 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5765 { 5766 int rc = 0; 5767 uint32_t req_len; 5768 uint32_t emb_len; 5769 uint32_t alloc_len, mbox_tmo; 5770 5771 /* Calculate the total requested length of the dma memory */ 5772 req_len = extnt_cnt * sizeof(uint16_t); 5773 5774 /* 5775 * Calculate the size of an embedded mailbox. The uint32_t 5776 * accounts for extents-specific word. 5777 */ 5778 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5779 sizeof(uint32_t); 5780 5781 /* 5782 * Presume the allocation and response will fit into an embedded 5783 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
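 *
 * Worked sizing example (illustrative): each requested extent needs
 * sizeof(uint16_t) bytes of id space in the response, so extnt_cnt = 64
 * gives req_len = 64 * 2 = 128 bytes. Only when req_len exceeds emb_len
 * (the mailbox payload left after the header and the extents-specific
 * word) is the request rebuilt as a non-embedded mailbox whose length
 * also covers the cfg_shdr and that extra word.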
5784 */ 5785 *emb = LPFC_SLI4_MBX_EMBED; 5786 if (req_len > emb_len) { 5787 req_len = extnt_cnt * sizeof(uint16_t) + 5788 sizeof(union lpfc_sli4_cfg_shdr) + 5789 sizeof(uint32_t); 5790 *emb = LPFC_SLI4_MBX_NEMBED; 5791 } 5792 5793 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5794 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5795 req_len, *emb); 5796 if (alloc_len < req_len) { 5797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5798 "2982 Allocated DMA memory size (x%x) is " 5799 "less than the requested DMA memory " 5800 "size (x%x)\n", alloc_len, req_len); 5801 return -ENOMEM; 5802 } 5803 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5804 if (unlikely(rc)) 5805 return -EIO; 5806 5807 if (!phba->sli4_hba.intr_enable) 5808 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5809 else { 5810 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5811 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5812 } 5813 5814 if (unlikely(rc)) 5815 rc = -EIO; 5816 return rc; 5817 } 5818 5819 /** 5820 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5821 * @phba: Pointer to HBA context object. 5822 * @type: The resource extent type to allocate. 5823 * 5824 * This function allocates the number of elements for the specified 5825 * resource type. 5826 **/ 5827 static int 5828 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5829 { 5830 bool emb = false; 5831 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5832 uint16_t rsrc_id, rsrc_start, j, k; 5833 uint16_t *ids; 5834 int i, rc; 5835 unsigned long longs; 5836 unsigned long *bmask; 5837 struct lpfc_rsrc_blks *rsrc_blks; 5838 LPFC_MBOXQ_t *mbox; 5839 uint32_t length; 5840 struct lpfc_id_range *id_array = NULL; 5841 void *virtaddr = NULL; 5842 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5843 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5844 struct list_head *ext_blk_list; 5845 5846 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5847 &rsrc_cnt, 5848 &rsrc_size); 5849 if (unlikely(rc)) 5850 return -EIO; 5851 5852 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5853 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5854 "3009 No available Resource Extents " 5855 "for resource type 0x%x: Count: 0x%x, " 5856 "Size 0x%x\n", type, rsrc_cnt, 5857 rsrc_size); 5858 return -ENOMEM; 5859 } 5860 5861 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5862 "2903 Post resource extents type-0x%x: " 5863 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5864 5865 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5866 if (!mbox) 5867 return -ENOMEM; 5868 5869 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5870 if (unlikely(rc)) { 5871 rc = -EIO; 5872 goto err_exit; 5873 } 5874 5875 /* 5876 * Figure out where the response is located. Then get local pointers 5877 * to the response data. The port does not guarantee to respond to 5878 * all extents counts request so update the local variable with the 5879 * allocated count from the port. 
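 *
 * (An embedded response carries the id array directly in the MQE's
 * alloc_rsrc_extents payload; a non-embedded response is read from the
 * first SGE attached to the mailbox, mbox->sge_array->addr[0].)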
5880 */ 5881 if (emb == LPFC_SLI4_MBX_EMBED) { 5882 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5883 id_array = &rsrc_ext->u.rsp.id[0]; 5884 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5885 } else { 5886 virtaddr = mbox->sge_array->addr[0]; 5887 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5888 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5889 id_array = &n_rsrc->id; 5890 } 5891 5892 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5893 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5894 5895 /* 5896 * Based on the resource size and count, correct the base and max 5897 * resource values. 5898 */ 5899 length = sizeof(struct lpfc_rsrc_blks); 5900 switch (type) { 5901 case LPFC_RSC_TYPE_FCOE_RPI: 5902 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5903 sizeof(unsigned long), 5904 GFP_KERNEL); 5905 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5906 rc = -ENOMEM; 5907 goto err_exit; 5908 } 5909 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5910 sizeof(uint16_t), 5911 GFP_KERNEL); 5912 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5913 kfree(phba->sli4_hba.rpi_bmask); 5914 rc = -ENOMEM; 5915 goto err_exit; 5916 } 5917 5918 /* 5919 * The next_rpi was initialized with the maximum available 5920 * count but the port may allocate a smaller number. Catch 5921 * that case and update the next_rpi. 5922 */ 5923 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5924 5925 /* Initialize local ptrs for common extent processing later. */ 5926 bmask = phba->sli4_hba.rpi_bmask; 5927 ids = phba->sli4_hba.rpi_ids; 5928 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5929 break; 5930 case LPFC_RSC_TYPE_FCOE_VPI: 5931 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5932 GFP_KERNEL); 5933 if (unlikely(!phba->vpi_bmask)) { 5934 rc = -ENOMEM; 5935 goto err_exit; 5936 } 5937 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5938 GFP_KERNEL); 5939 if (unlikely(!phba->vpi_ids)) { 5940 kfree(phba->vpi_bmask); 5941 rc = -ENOMEM; 5942 goto err_exit; 5943 } 5944 5945 /* Initialize local ptrs for common extent processing later. */ 5946 bmask = phba->vpi_bmask; 5947 ids = phba->vpi_ids; 5948 ext_blk_list = &phba->lpfc_vpi_blk_list; 5949 break; 5950 case LPFC_RSC_TYPE_FCOE_XRI: 5951 phba->sli4_hba.xri_bmask = kcalloc(longs, 5952 sizeof(unsigned long), 5953 GFP_KERNEL); 5954 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5955 rc = -ENOMEM; 5956 goto err_exit; 5957 } 5958 phba->sli4_hba.max_cfg_param.xri_used = 0; 5959 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5960 sizeof(uint16_t), 5961 GFP_KERNEL); 5962 if (unlikely(!phba->sli4_hba.xri_ids)) { 5963 kfree(phba->sli4_hba.xri_bmask); 5964 rc = -ENOMEM; 5965 goto err_exit; 5966 } 5967 5968 /* Initialize local ptrs for common extent processing later. */ 5969 bmask = phba->sli4_hba.xri_bmask; 5970 ids = phba->sli4_hba.xri_ids; 5971 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5972 break; 5973 case LPFC_RSC_TYPE_FCOE_VFI: 5974 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5975 sizeof(unsigned long), 5976 GFP_KERNEL); 5977 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5978 rc = -ENOMEM; 5979 goto err_exit; 5980 } 5981 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5982 sizeof(uint16_t), 5983 GFP_KERNEL); 5984 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5985 kfree(phba->sli4_hba.vfi_bmask); 5986 rc = -ENOMEM; 5987 goto err_exit; 5988 } 5989 5990 /* Initialize local ptrs for common extent processing later. 
*/ 5991 bmask = phba->sli4_hba.vfi_bmask; 5992 ids = phba->sli4_hba.vfi_ids; 5993 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5994 break; 5995 default: 5996 /* Unsupported Opcode. Fail call. */ 5997 id_array = NULL; 5998 bmask = NULL; 5999 ids = NULL; 6000 ext_blk_list = NULL; 6001 goto err_exit; 6002 } 6003 6004 /* 6005 * Complete initializing the extent configuration with the 6006 * allocated ids assigned to this function. The bitmask serves 6007 * as an index into the array and manages the available ids. The 6008 * array just stores the ids communicated to the port via the wqes. 6009 */ 6010 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 6011 if ((i % 2) == 0) 6012 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 6013 &id_array[k]); 6014 else 6015 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 6016 &id_array[k]); 6017 6018 rsrc_blks = kzalloc(length, GFP_KERNEL); 6019 if (unlikely(!rsrc_blks)) { 6020 rc = -ENOMEM; 6021 kfree(bmask); 6022 kfree(ids); 6023 goto err_exit; 6024 } 6025 rsrc_blks->rsrc_start = rsrc_id; 6026 rsrc_blks->rsrc_size = rsrc_size; 6027 list_add_tail(&rsrc_blks->list, ext_blk_list); 6028 rsrc_start = rsrc_id; 6029 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 6030 phba->sli4_hba.io_xri_start = rsrc_start + 6031 lpfc_sli4_get_iocb_cnt(phba); 6032 } 6033 6034 while (rsrc_id < (rsrc_start + rsrc_size)) { 6035 ids[j] = rsrc_id; 6036 rsrc_id++; 6037 j++; 6038 } 6039 /* Entire word processed. Get next word.*/ 6040 if ((i % 2) == 1) 6041 k++; 6042 } 6043 err_exit: 6044 lpfc_sli4_mbox_cmd_free(phba, mbox); 6045 return rc; 6046 } 6047 6048 6049 6050 /** 6051 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6052 * @phba: Pointer to HBA context object. 6053 * @type: the extent's type. 6054 * 6055 * This function deallocates all extents of a particular resource type. 6056 * SLI4 does not allow for deallocating a particular extent range. It 6057 * is the caller's responsibility to release all kernel memory resources. 6058 **/ 6059 static int 6060 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6061 { 6062 int rc; 6063 uint32_t length, mbox_tmo = 0; 6064 LPFC_MBOXQ_t *mbox; 6065 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6066 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6067 6068 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6069 if (!mbox) 6070 return -ENOMEM; 6071 6072 /* 6073 * This function sends an embedded mailbox because it only sends the 6074 * the resource type. All extents of this type are released by the 6075 * port. 6076 */ 6077 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6078 sizeof(struct lpfc_sli4_cfg_mhdr)); 6079 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6080 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6081 length, LPFC_SLI4_MBX_EMBED); 6082 6083 /* Send an extents count of 0 - the dealloc doesn't use it. 
*/ 6084 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6085 LPFC_SLI4_MBX_EMBED); 6086 if (unlikely(rc)) { 6087 rc = -EIO; 6088 goto out_free_mbox; 6089 } 6090 if (!phba->sli4_hba.intr_enable) 6091 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6092 else { 6093 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6094 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6095 } 6096 if (unlikely(rc)) { 6097 rc = -EIO; 6098 goto out_free_mbox; 6099 } 6100 6101 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6102 if (bf_get(lpfc_mbox_hdr_status, 6103 &dealloc_rsrc->header.cfg_shdr.response)) { 6104 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6105 "2919 Failed to release resource extents " 6106 "for type %d - Status 0x%x Add'l Status 0x%x. " 6107 "Resource memory not released.\n", 6108 type, 6109 bf_get(lpfc_mbox_hdr_status, 6110 &dealloc_rsrc->header.cfg_shdr.response), 6111 bf_get(lpfc_mbox_hdr_add_status, 6112 &dealloc_rsrc->header.cfg_shdr.response)); 6113 rc = -EIO; 6114 goto out_free_mbox; 6115 } 6116 6117 /* Release kernel memory resources for the specific type. */ 6118 switch (type) { 6119 case LPFC_RSC_TYPE_FCOE_VPI: 6120 kfree(phba->vpi_bmask); 6121 kfree(phba->vpi_ids); 6122 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6123 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6124 &phba->lpfc_vpi_blk_list, list) { 6125 list_del_init(&rsrc_blk->list); 6126 kfree(rsrc_blk); 6127 } 6128 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6129 break; 6130 case LPFC_RSC_TYPE_FCOE_XRI: 6131 kfree(phba->sli4_hba.xri_bmask); 6132 kfree(phba->sli4_hba.xri_ids); 6133 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6134 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6135 list_del_init(&rsrc_blk->list); 6136 kfree(rsrc_blk); 6137 } 6138 break; 6139 case LPFC_RSC_TYPE_FCOE_VFI: 6140 kfree(phba->sli4_hba.vfi_bmask); 6141 kfree(phba->sli4_hba.vfi_ids); 6142 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6143 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6144 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6145 list_del_init(&rsrc_blk->list); 6146 kfree(rsrc_blk); 6147 } 6148 break; 6149 case LPFC_RSC_TYPE_FCOE_RPI: 6150 /* RPI bitmask and physical id array are cleaned up earlier. 
*/ 6151 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6152 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6153 list_del_init(&rsrc_blk->list); 6154 kfree(rsrc_blk); 6155 } 6156 break; 6157 default: 6158 break; 6159 } 6160 6161 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6162 6163 out_free_mbox: 6164 mempool_free(mbox, phba->mbox_mem_pool); 6165 return rc; 6166 } 6167
6168 static void 6169 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6170 uint32_t feature) 6171 { 6172 uint32_t len; 6173 6174 len = sizeof(struct lpfc_mbx_set_feature) - 6175 sizeof(struct lpfc_sli4_cfg_mhdr); 6176 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6177 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6178 LPFC_SLI4_MBX_EMBED); 6179 6180 switch (feature) { 6181 case LPFC_SET_UE_RECOVERY: 6182 bf_set(lpfc_mbx_set_feature_UER, 6183 &mbox->u.mqe.un.set_feature, 1); 6184 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6185 mbox->u.mqe.un.set_feature.param_len = 8; 6186 break; 6187 case LPFC_SET_MDS_DIAGS: 6188 bf_set(lpfc_mbx_set_feature_mds, 6189 &mbox->u.mqe.un.set_feature, 1); 6190 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6191 &mbox->u.mqe.un.set_feature, 1); 6192 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6193 mbox->u.mqe.un.set_feature.param_len = 8; 6194 break; 6195 } 6196 6197 return; 6198 } 6199
6200 /** 6201 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter 6202 * @phba: Pointer to HBA context object. 6203 * 6204 * Disable FW logging into host memory on the adapter. This must be 6205 * done before reading the logs from host memory. 6206 **/ 6207 void 6208 lpfc_ras_stop_fwlog(struct lpfc_hba *phba) 6209 { 6210 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6211 6212 ras_fwlog->ras_active = false; 6213 6214 /* Disable FW logging to host memory */ 6215 writel(LPFC_CTL_PDEV_CTL_DDL_RAS, 6216 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 6217 } 6218
6219 /** 6220 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6221 * @phba: Pointer to HBA context object. 6222 * 6223 * This function is called to free memory allocated for RAS FW logging 6224 * support in the driver. 6225 **/ 6226 void 6227 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) 6228 { 6229 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6230 struct lpfc_dmabuf *dmabuf, *next; 6231 6232 if (!list_empty(&ras_fwlog->fwlog_buff_list)) { 6233 list_for_each_entry_safe(dmabuf, next, 6234 &ras_fwlog->fwlog_buff_list, 6235 list) { 6236 list_del(&dmabuf->list); 6237 dma_free_coherent(&phba->pcidev->dev, 6238 LPFC_RAS_MAX_ENTRY_SIZE, 6239 dmabuf->virt, dmabuf->phys); 6240 kfree(dmabuf); 6241 } 6242 } 6243 6244 if (ras_fwlog->lwpd.virt) { 6245 dma_free_coherent(&phba->pcidev->dev, 6246 sizeof(uint32_t) * 2, 6247 ras_fwlog->lwpd.virt, 6248 ras_fwlog->lwpd.phys); 6249 ras_fwlog->lwpd.virt = NULL; 6250 } 6251 6252 ras_fwlog->ras_active = false; 6253 } 6254
6255 /** 6256 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support 6257 * @phba: Pointer to HBA context object. 6258 * @fwlog_buff_count: Count of buffers to be created. 6259 * 6260 * This routine allocates DMA memory for the Log Write Position Data (LWPD) 6261 * and for the log buffers that are posted to the adapter for FW log updates. 6262 * The buffer count is calculated from the module parameter ras_fwlog_buffsize; 6263 * the size of each buffer posted to the FW is 64K.
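 *
 * Illustrative layout (a sketch based on the allocation below): one small
 * coherent buffer of 2 * sizeof(uint32_t) bytes holds the LWPD, and
 * fwlog_buff_count buffers of LPFC_RAS_MAX_ENTRY_SIZE bytes each are
 * chained on fwlog_buff_list for the firmware to fill. If any allocation
 * fails, lpfc_sli4_ras_dma_free() unwinds whatever was set up.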
6264 **/ 6265 6266 static int 6267 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, 6268 uint32_t fwlog_buff_count) 6269 { 6270 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6271 struct lpfc_dmabuf *dmabuf; 6272 int rc = 0, i = 0; 6273 6274 /* Initialize List */ 6275 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); 6276 6277 /* Allocate memory for the LWPD */ 6278 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, 6279 sizeof(uint32_t) * 2, 6280 &ras_fwlog->lwpd.phys, 6281 GFP_KERNEL); 6282 if (!ras_fwlog->lwpd.virt) { 6283 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6284 "6185 LWPD Memory Alloc Failed\n"); 6285 6286 return -ENOMEM; 6287 } 6288 6289 ras_fwlog->fw_buffcount = fwlog_buff_count; 6290 for (i = 0; i < ras_fwlog->fw_buffcount; i++) { 6291 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 6292 GFP_KERNEL); 6293 if (!dmabuf) { 6294 rc = -ENOMEM; 6295 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6296 "6186 Memory Alloc failed FW logging"); 6297 goto free_mem; 6298 } 6299 6300 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6301 LPFC_RAS_MAX_ENTRY_SIZE, 6302 &dmabuf->phys, GFP_KERNEL); 6303 if (!dmabuf->virt) { 6304 kfree(dmabuf); 6305 rc = -ENOMEM; 6306 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6307 "6187 DMA Alloc Failed FW logging"); 6308 goto free_mem; 6309 } 6310 dmabuf->buffer_tag = i; 6311 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); 6312 } 6313 6314 free_mem: 6315 if (rc) 6316 lpfc_sli4_ras_dma_free(phba); 6317 6318 return rc; 6319 } 6320 6321 /** 6322 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command 6323 * @phba: pointer to lpfc hba data structure. 6324 * @pmboxq: pointer to the driver internal queue element for mailbox command. 6325 * 6326 * Completion handler for driver's RAS MBX command to the device. 6327 **/ 6328 static void 6329 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6330 { 6331 MAILBOX_t *mb; 6332 union lpfc_sli4_cfg_shdr *shdr; 6333 uint32_t shdr_status, shdr_add_status; 6334 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6335 6336 mb = &pmb->u.mb; 6337 6338 shdr = (union lpfc_sli4_cfg_shdr *) 6339 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6340 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6341 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6342 6343 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6344 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6345 "6188 FW LOG mailbox " 6346 "completed with status x%x add_status x%x," 6347 " mbx status x%x\n", 6348 shdr_status, shdr_add_status, mb->mbxStatus); 6349 6350 ras_fwlog->ras_hwsupport = false; 6351 goto disable_ras; 6352 } 6353 6354 ras_fwlog->ras_active = true; 6355 mempool_free(pmb, phba->mbox_mem_pool); 6356 6357 return; 6358 6359 disable_ras: 6360 /* Free RAS DMA memory */ 6361 lpfc_sli4_ras_dma_free(phba); 6362 mempool_free(pmb, phba->mbox_mem_pool); 6363 } 6364 6365 /** 6366 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6367 * @phba: pointer to lpfc hba data structure. 6368 * @fwlog_level: Logging verbosity level. 6369 * @fwlog_enable: Enable/Disable logging. 6370 * 6371 * Initialize memory and post mailbox command to enable FW logging in host 6372 * memory. 
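 *
 * Illustrative call (this is how lpfc_sli4_ras_setup() below invokes it):
 *
 *	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
 *				 LPFC_RAS_ENABLE_LOGGING);
 *
 * The buffer count handed to lpfc_sli4_ras_dma_alloc() is derived as
 * (LPFC_RAS_MIN_BUFF_POST_SIZE * cfg_ras_fwlog_buffsize) /
 * LPFC_RAS_MAX_ENTRY_SIZE.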
6373 **/ 6374 int 6375 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6376 uint32_t fwlog_level, 6377 uint32_t fwlog_enable) 6378 { 6379 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6380 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6381 struct lpfc_dmabuf *dmabuf; 6382 LPFC_MBOXQ_t *mbox; 6383 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6384 int rc = 0; 6385 6386 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6387 phba->cfg_ras_fwlog_buffsize); 6388 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6389 6390 /* 6391 * If re-enabling FW logging support use earlier allocated 6392 * DMA buffers while posting MBX command. 6393 **/ 6394 if (!ras_fwlog->lwpd.virt) { 6395 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6396 if (rc) { 6397 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6398 "6189 FW Log Memory Allocation Failed"); 6399 return rc; 6400 } 6401 } 6402 6403 /* Setup Mailbox command */ 6404 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6405 if (!mbox) { 6406 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6407 "6190 RAS MBX Alloc Failed"); 6408 rc = -ENOMEM; 6409 goto mem_free; 6410 } 6411 6412 ras_fwlog->fw_loglevel = fwlog_level; 6413 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6414 sizeof(struct lpfc_sli4_cfg_mhdr)); 6415 6416 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6417 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6418 len, LPFC_SLI4_MBX_EMBED); 6419 6420 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6421 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6422 fwlog_enable); 6423 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6424 ras_fwlog->fw_loglevel); 6425 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6426 ras_fwlog->fw_buffcount); 6427 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6428 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6429 6430 /* Update DMA buffer address */ 6431 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6432 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6433 6434 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6435 putPaddrLow(dmabuf->phys); 6436 6437 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6438 putPaddrHigh(dmabuf->phys); 6439 } 6440 6441 /* Update LPWD address */ 6442 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6443 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6444 6445 mbox->vport = phba->pport; 6446 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6447 6448 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6449 6450 if (rc == MBX_NOT_FINISHED) { 6451 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6452 "6191 FW-Log Mailbox failed. " 6453 "status %d mbxStatus : x%x", rc, 6454 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6455 mempool_free(mbox, phba->mbox_mem_pool); 6456 rc = -EIO; 6457 goto mem_free; 6458 } else 6459 rc = 0; 6460 mem_free: 6461 if (rc) 6462 lpfc_sli4_ras_dma_free(phba); 6463 6464 return rc; 6465 } 6466 6467 /** 6468 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6469 * @phba: Pointer to HBA context object. 6470 * 6471 * Check if RAS is supported on the adapter and initialize it. 
6472 **/ 6473 void 6474 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6475 { 6476 /* Check RAS FW Log needs to be enabled or not */ 6477 if (lpfc_check_fwlog_support(phba)) 6478 return; 6479 6480 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6481 LPFC_RAS_ENABLE_LOGGING); 6482 } 6483 6484 /** 6485 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6486 * @phba: Pointer to HBA context object. 6487 * 6488 * This function allocates all SLI4 resource identifiers. 6489 **/ 6490 int 6491 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6492 { 6493 int i, rc, error = 0; 6494 uint16_t count, base; 6495 unsigned long longs; 6496 6497 if (!phba->sli4_hba.rpi_hdrs_in_use) 6498 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6499 if (phba->sli4_hba.extents_in_use) { 6500 /* 6501 * The port supports resource extents. The XRI, VPI, VFI, RPI 6502 * resource extent count must be read and allocated before 6503 * provisioning the resource id arrays. 6504 */ 6505 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6506 LPFC_IDX_RSRC_RDY) { 6507 /* 6508 * Extent-based resources are set - the driver could 6509 * be in a port reset. Figure out if any corrective 6510 * actions need to be taken. 6511 */ 6512 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6513 LPFC_RSC_TYPE_FCOE_VFI); 6514 if (rc != 0) 6515 error++; 6516 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6517 LPFC_RSC_TYPE_FCOE_VPI); 6518 if (rc != 0) 6519 error++; 6520 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6521 LPFC_RSC_TYPE_FCOE_XRI); 6522 if (rc != 0) 6523 error++; 6524 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6525 LPFC_RSC_TYPE_FCOE_RPI); 6526 if (rc != 0) 6527 error++; 6528 6529 /* 6530 * It's possible that the number of resources 6531 * provided to this port instance changed between 6532 * resets. Detect this condition and reallocate 6533 * resources. Otherwise, there is no action. 6534 */ 6535 if (error) { 6536 lpfc_printf_log(phba, KERN_INFO, 6537 LOG_MBOX | LOG_INIT, 6538 "2931 Detected extent resource " 6539 "change. Reallocating all " 6540 "extents.\n"); 6541 rc = lpfc_sli4_dealloc_extent(phba, 6542 LPFC_RSC_TYPE_FCOE_VFI); 6543 rc = lpfc_sli4_dealloc_extent(phba, 6544 LPFC_RSC_TYPE_FCOE_VPI); 6545 rc = lpfc_sli4_dealloc_extent(phba, 6546 LPFC_RSC_TYPE_FCOE_XRI); 6547 rc = lpfc_sli4_dealloc_extent(phba, 6548 LPFC_RSC_TYPE_FCOE_RPI); 6549 } else 6550 return 0; 6551 } 6552 6553 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6554 if (unlikely(rc)) 6555 goto err_exit; 6556 6557 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6558 if (unlikely(rc)) 6559 goto err_exit; 6560 6561 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6562 if (unlikely(rc)) 6563 goto err_exit; 6564 6565 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6566 if (unlikely(rc)) 6567 goto err_exit; 6568 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6569 LPFC_IDX_RSRC_RDY); 6570 return rc; 6571 } else { 6572 /* 6573 * The port does not support resource extents. The XRI, VPI, 6574 * VFI, RPI resource ids were determined from READ_CONFIG. 6575 * Just allocate the bitmasks and provision the resource id 6576 * arrays. If a port reset is active, the resources don't 6577 * need any action - just exit. 6578 */ 6579 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6580 LPFC_IDX_RSRC_RDY) { 6581 lpfc_sli4_dealloc_resource_identifiers(phba); 6582 lpfc_sli4_remove_rpis(phba); 6583 } 6584 /* RPIs. 
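 * Each non-extent resource type below follows the same pattern: validate
 * the count reported by READ_CONFIG, allocate a bitmask sized in longs
 * plus an id array, then populate the ids as base + index.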
*/ 6585 count = phba->sli4_hba.max_cfg_param.max_rpi; 6586 if (count <= 0) { 6587 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6588 "3279 Invalid provisioning of " 6589 "rpi:%d\n", count); 6590 rc = -EINVAL; 6591 goto err_exit; 6592 } 6593 base = phba->sli4_hba.max_cfg_param.rpi_base; 6594 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6595 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6596 sizeof(unsigned long), 6597 GFP_KERNEL); 6598 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6599 rc = -ENOMEM; 6600 goto err_exit; 6601 } 6602 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6603 GFP_KERNEL); 6604 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6605 rc = -ENOMEM; 6606 goto free_rpi_bmask; 6607 } 6608 6609 for (i = 0; i < count; i++) 6610 phba->sli4_hba.rpi_ids[i] = base + i; 6611 6612 /* VPIs. */ 6613 count = phba->sli4_hba.max_cfg_param.max_vpi; 6614 if (count <= 0) { 6615 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6616 "3280 Invalid provisioning of " 6617 "vpi:%d\n", count); 6618 rc = -EINVAL; 6619 goto free_rpi_ids; 6620 } 6621 base = phba->sli4_hba.max_cfg_param.vpi_base; 6622 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6623 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6624 GFP_KERNEL); 6625 if (unlikely(!phba->vpi_bmask)) { 6626 rc = -ENOMEM; 6627 goto free_rpi_ids; 6628 } 6629 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6630 GFP_KERNEL); 6631 if (unlikely(!phba->vpi_ids)) { 6632 rc = -ENOMEM; 6633 goto free_vpi_bmask; 6634 } 6635 6636 for (i = 0; i < count; i++) 6637 phba->vpi_ids[i] = base + i; 6638 6639 /* XRIs. */ 6640 count = phba->sli4_hba.max_cfg_param.max_xri; 6641 if (count <= 0) { 6642 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6643 "3281 Invalid provisioning of " 6644 "xri:%d\n", count); 6645 rc = -EINVAL; 6646 goto free_vpi_ids; 6647 } 6648 base = phba->sli4_hba.max_cfg_param.xri_base; 6649 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6650 phba->sli4_hba.xri_bmask = kcalloc(longs, 6651 sizeof(unsigned long), 6652 GFP_KERNEL); 6653 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6654 rc = -ENOMEM; 6655 goto free_vpi_ids; 6656 } 6657 phba->sli4_hba.max_cfg_param.xri_used = 0; 6658 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6659 GFP_KERNEL); 6660 if (unlikely(!phba->sli4_hba.xri_ids)) { 6661 rc = -ENOMEM; 6662 goto free_xri_bmask; 6663 } 6664 6665 for (i = 0; i < count; i++) 6666 phba->sli4_hba.xri_ids[i] = base + i; 6667 6668 /* VFIs. */ 6669 count = phba->sli4_hba.max_cfg_param.max_vfi; 6670 if (count <= 0) { 6671 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6672 "3282 Invalid provisioning of " 6673 "vfi:%d\n", count); 6674 rc = -EINVAL; 6675 goto free_xri_ids; 6676 } 6677 base = phba->sli4_hba.max_cfg_param.vfi_base; 6678 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6679 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6680 sizeof(unsigned long), 6681 GFP_KERNEL); 6682 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6683 rc = -ENOMEM; 6684 goto free_xri_ids; 6685 } 6686 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6687 GFP_KERNEL); 6688 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6689 rc = -ENOMEM; 6690 goto free_vfi_bmask; 6691 } 6692 6693 for (i = 0; i < count; i++) 6694 phba->sli4_hba.vfi_ids[i] = base + i; 6695 6696 /* 6697 * Mark all resources ready. An HBA reset doesn't need 6698 * to reset the initialization. 
*/ 6700 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6701 LPFC_IDX_RSRC_RDY); 6702 return 0; 6703 } 6704 6705 free_vfi_bmask: 6706 kfree(phba->sli4_hba.vfi_bmask); 6707 phba->sli4_hba.vfi_bmask = NULL; 6708 free_xri_ids: 6709 kfree(phba->sli4_hba.xri_ids); 6710 phba->sli4_hba.xri_ids = NULL; 6711 free_xri_bmask: 6712 kfree(phba->sli4_hba.xri_bmask); 6713 phba->sli4_hba.xri_bmask = NULL; 6714 free_vpi_ids: 6715 kfree(phba->vpi_ids); 6716 phba->vpi_ids = NULL; 6717 free_vpi_bmask: 6718 kfree(phba->vpi_bmask); 6719 phba->vpi_bmask = NULL; 6720 free_rpi_ids: 6721 kfree(phba->sli4_hba.rpi_ids); 6722 phba->sli4_hba.rpi_ids = NULL; 6723 free_rpi_bmask: 6724 kfree(phba->sli4_hba.rpi_bmask); 6725 phba->sli4_hba.rpi_bmask = NULL; 6726 err_exit: 6727 return rc; 6728 } 6729
6730 /** 6731 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6732 * @phba: Pointer to HBA context object. 6733 * 6734 * This function releases all SLI4 resource identifiers: it deallocates the 6735 * port's resource extents when extents are in use, or frees the locally provisioned id arrays and bitmasks otherwise. 6736 **/ 6737 int 6738 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 6739 { 6740 if (phba->sli4_hba.extents_in_use) { 6741 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6742 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6743 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6744 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6745 } else { 6746 kfree(phba->vpi_bmask); 6747 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6748 kfree(phba->vpi_ids); 6749 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6750 kfree(phba->sli4_hba.xri_bmask); 6751 kfree(phba->sli4_hba.xri_ids); 6752 kfree(phba->sli4_hba.vfi_bmask); 6753 kfree(phba->sli4_hba.vfi_ids); 6754 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6755 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6756 } 6757 6758 return 0; 6759 } 6760
6761 /** 6762 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 6763 * @phba: Pointer to HBA context object. 6764 * @type: The resource extent type. 6765 * @extnt_cnt: buffer to hold port extent count response 6766 * @extnt_size: buffer to hold port extent size response. 6767 * 6768 * This function calls the port to read the host allocated extents 6769 * for a particular type. 6770 **/ 6771 int 6772 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 6773 uint16_t *extnt_cnt, uint16_t *extnt_size) 6774 { 6775 bool emb; 6776 int rc = 0; 6777 uint16_t curr_blks = 0; 6778 uint32_t req_len, emb_len; 6779 uint32_t alloc_len, mbox_tmo; 6780 struct list_head *blk_list_head; 6781 struct lpfc_rsrc_blks *rsrc_blk; 6782 LPFC_MBOXQ_t *mbox; 6783 void *virtaddr = NULL; 6784 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 6785 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 6786 union lpfc_sli4_cfg_shdr *shdr; 6787 6788 switch (type) { 6789 case LPFC_RSC_TYPE_FCOE_VPI: 6790 blk_list_head = &phba->lpfc_vpi_blk_list; 6791 break; 6792 case LPFC_RSC_TYPE_FCOE_XRI: 6793 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 6794 break; 6795 case LPFC_RSC_TYPE_FCOE_VFI: 6796 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 6797 break; 6798 case LPFC_RSC_TYPE_FCOE_RPI: 6799 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 6800 break; 6801 default: 6802 return -EIO; 6803 } 6804 6805 /* Count the number of extents currently allocated for this type.
*/ 6806 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6807 if (curr_blks == 0) { 6808 /* 6809 * The GET_ALLOCATED mailbox does not return the size, 6810 * just the count. The size should be just the size 6811 * stored in the current allocated block and all sizes 6812 * for an extent type are the same so set the return 6813 * value now. 6814 */ 6815 *extnt_size = rsrc_blk->rsrc_size; 6816 } 6817 curr_blks++; 6818 } 6819 6820 /* 6821 * Calculate the size of an embedded mailbox. The uint32_t 6822 * accounts for extents-specific word. 6823 */ 6824 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6825 sizeof(uint32_t); 6826 6827 /* 6828 * Presume the allocation and response will fit into an embedded 6829 * mailbox. If not true, reconfigure to a non-embedded mailbox. 6830 */ 6831 emb = LPFC_SLI4_MBX_EMBED; 6832 req_len = emb_len; 6833 if (req_len > emb_len) { 6834 req_len = curr_blks * sizeof(uint16_t) + 6835 sizeof(union lpfc_sli4_cfg_shdr) + 6836 sizeof(uint32_t); 6837 emb = LPFC_SLI4_MBX_NEMBED; 6838 } 6839 6840 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6841 if (!mbox) 6842 return -ENOMEM; 6843 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6844 6845 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6846 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6847 req_len, emb); 6848 if (alloc_len < req_len) { 6849 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6850 "2983 Allocated DMA memory size (x%x) is " 6851 "less than the requested DMA memory " 6852 "size (x%x)\n", alloc_len, req_len); 6853 rc = -ENOMEM; 6854 goto err_exit; 6855 } 6856 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6857 if (unlikely(rc)) { 6858 rc = -EIO; 6859 goto err_exit; 6860 } 6861 6862 if (!phba->sli4_hba.intr_enable) 6863 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6864 else { 6865 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6866 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6867 } 6868 6869 if (unlikely(rc)) { 6870 rc = -EIO; 6871 goto err_exit; 6872 } 6873 6874 /* 6875 * Figure out where the response is located. Then get local pointers 6876 * to the response data. The port does not guarantee to respond to 6877 * all extents counts request so update the local variable with the 6878 * allocated count from the port. 6879 */ 6880 if (emb == LPFC_SLI4_MBX_EMBED) { 6881 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6882 shdr = &rsrc_ext->header.cfg_shdr; 6883 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6884 } else { 6885 virtaddr = mbox->sge_array->addr[0]; 6886 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6887 shdr = &n_rsrc->cfg_shdr; 6888 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6889 } 6890 6891 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6892 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6893 "2984 Failed to read allocated resources " 6894 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6895 type, 6896 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6897 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6898 rc = -EIO; 6899 goto err_exit; 6900 } 6901 err_exit: 6902 lpfc_sli4_mbox_cmd_free(phba, mbox); 6903 return rc; 6904 } 6905 6906 /** 6907 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6908 * @phba: pointer to lpfc hba data structure. 6909 * @pring: Pointer to driver SLI ring object. 
6910 * @sgl_list: linked link of sgl buffers to post 6911 * @cnt: number of linked list buffers 6912 * 6913 * This routine walks the list of buffers that have been allocated and 6914 * repost them to the port by using SGL block post. This is needed after a 6915 * pci_function_reset/warm_start or start. It attempts to construct blocks 6916 * of buffer sgls which contains contiguous xris and uses the non-embedded 6917 * SGL block post mailbox commands to post them to the port. For single 6918 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6919 * mailbox command for posting. 6920 * 6921 * Returns: 0 = success, non-zero failure. 6922 **/ 6923 static int 6924 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6925 struct list_head *sgl_list, int cnt) 6926 { 6927 struct lpfc_sglq *sglq_entry = NULL; 6928 struct lpfc_sglq *sglq_entry_next = NULL; 6929 struct lpfc_sglq *sglq_entry_first = NULL; 6930 int status, total_cnt; 6931 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6932 int last_xritag = NO_XRI; 6933 LIST_HEAD(prep_sgl_list); 6934 LIST_HEAD(blck_sgl_list); 6935 LIST_HEAD(allc_sgl_list); 6936 LIST_HEAD(post_sgl_list); 6937 LIST_HEAD(free_sgl_list); 6938 6939 spin_lock_irq(&phba->hbalock); 6940 spin_lock(&phba->sli4_hba.sgl_list_lock); 6941 list_splice_init(sgl_list, &allc_sgl_list); 6942 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6943 spin_unlock_irq(&phba->hbalock); 6944 6945 total_cnt = cnt; 6946 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6947 &allc_sgl_list, list) { 6948 list_del_init(&sglq_entry->list); 6949 block_cnt++; 6950 if ((last_xritag != NO_XRI) && 6951 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6952 /* a hole in xri block, form a sgl posting block */ 6953 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6954 post_cnt = block_cnt - 1; 6955 /* prepare list for next posting block */ 6956 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6957 block_cnt = 1; 6958 } else { 6959 /* prepare list for next posting block */ 6960 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6961 /* enough sgls for non-embed sgl mbox command */ 6962 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6963 list_splice_init(&prep_sgl_list, 6964 &blck_sgl_list); 6965 post_cnt = block_cnt; 6966 block_cnt = 0; 6967 } 6968 } 6969 num_posted++; 6970 6971 /* keep track of last sgl's xritag */ 6972 last_xritag = sglq_entry->sli4_xritag; 6973 6974 /* end of repost sgl list condition for buffers */ 6975 if (num_posted == total_cnt) { 6976 if (post_cnt == 0) { 6977 list_splice_init(&prep_sgl_list, 6978 &blck_sgl_list); 6979 post_cnt = block_cnt; 6980 } else if (block_cnt == 1) { 6981 status = lpfc_sli4_post_sgl(phba, 6982 sglq_entry->phys, 0, 6983 sglq_entry->sli4_xritag); 6984 if (!status) { 6985 /* successful, put sgl to posted list */ 6986 list_add_tail(&sglq_entry->list, 6987 &post_sgl_list); 6988 } else { 6989 /* Failure, put sgl to free list */ 6990 lpfc_printf_log(phba, KERN_WARNING, 6991 LOG_SLI, 6992 "3159 Failed to post " 6993 "sgl, xritag:x%x\n", 6994 sglq_entry->sli4_xritag); 6995 list_add_tail(&sglq_entry->list, 6996 &free_sgl_list); 6997 total_cnt--; 6998 } 6999 } 7000 } 7001 7002 /* continue until a nembed page worth of sgls */ 7003 if (post_cnt == 0) 7004 continue; 7005 7006 /* post the buffer list sgls as a block */ 7007 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 7008 post_cnt); 7009 7010 if (!status) { 7011 /* success, put sgl list to posted sgl list */ 7012 list_splice_init(&blck_sgl_list, &post_sgl_list); 7013 } else { 7014 /* Failure, put 
sgl list to free sgl list */ 7015 sglq_entry_first = list_first_entry(&blck_sgl_list, 7016 struct lpfc_sglq, 7017 list); 7018 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 7019 "3160 Failed to post sgl-list, " 7020 "xritag:x%x-x%x\n", 7021 sglq_entry_first->sli4_xritag, 7022 (sglq_entry_first->sli4_xritag + 7023 post_cnt - 1)); 7024 list_splice_init(&blck_sgl_list, &free_sgl_list); 7025 total_cnt -= post_cnt; 7026 } 7027 7028 /* don't reset xirtag due to hole in xri block */ 7029 if (block_cnt == 0) 7030 last_xritag = NO_XRI; 7031 7032 /* reset sgl post count for next round of posting */ 7033 post_cnt = 0; 7034 } 7035 7036 /* free the sgls failed to post */ 7037 lpfc_free_sgl_list(phba, &free_sgl_list); 7038 7039 /* push sgls posted to the available list */ 7040 if (!list_empty(&post_sgl_list)) { 7041 spin_lock_irq(&phba->hbalock); 7042 spin_lock(&phba->sli4_hba.sgl_list_lock); 7043 list_splice_init(&post_sgl_list, sgl_list); 7044 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7045 spin_unlock_irq(&phba->hbalock); 7046 } else { 7047 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7048 "3161 Failure to post sgl to port.\n"); 7049 return -EIO; 7050 } 7051 7052 /* return the number of XRIs actually posted */ 7053 return total_cnt; 7054 } 7055 7056 /** 7057 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7058 * @phba: pointer to lpfc hba data structure. 7059 * 7060 * This routine walks the list of nvme buffers that have been allocated and 7061 * repost them to the port by using SGL block post. This is needed after a 7062 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7063 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7064 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7065 * 7066 * Returns: 0 = success, non-zero failure. 7067 **/ 7068 static int 7069 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7070 { 7071 LIST_HEAD(post_nblist); 7072 int num_posted, rc = 0; 7073 7074 /* get all NVME buffers need to repost to a local list */ 7075 lpfc_io_buf_flush(phba, &post_nblist); 7076 7077 /* post the list of nvme buffer sgls to port if available */ 7078 if (!list_empty(&post_nblist)) { 7079 num_posted = lpfc_sli4_post_io_sgl_list( 7080 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7081 /* failed to post any nvme buffer, return error */ 7082 if (num_posted == 0) 7083 rc = -EIO; 7084 } 7085 return rc; 7086 } 7087 7088 static void 7089 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7090 { 7091 uint32_t len; 7092 7093 len = sizeof(struct lpfc_mbx_set_host_data) - 7094 sizeof(struct lpfc_sli4_cfg_mhdr); 7095 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7096 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7097 LPFC_SLI4_MBX_EMBED); 7098 7099 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7100 mbox->u.mqe.un.set_host_data.param_len = 7101 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7102 snprintf(mbox->u.mqe.un.set_host_data.data, 7103 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7104 "Linux %s v"LPFC_DRIVER_VERSION, 7105 (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); 7106 } 7107 7108 int 7109 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7110 struct lpfc_queue *drq, int count, int idx) 7111 { 7112 int rc, i; 7113 struct lpfc_rqe hrqe; 7114 struct lpfc_rqe drqe; 7115 struct lpfc_rqb *rqbp; 7116 unsigned long flags; 7117 struct rqb_dmabuf *rqb_buffer; 7118 LIST_HEAD(rqb_buf_list); 7119 7120 spin_lock_irqsave(&phba->hbalock, flags); 7121 rqbp = hrq->rqbp; 7122 for (i = 0; i < count; i++) { 7123 /* IF RQ is already full, don't bother */ 7124 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7125 break; 7126 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7127 if (!rqb_buffer) 7128 break; 7129 rqb_buffer->hrq = hrq; 7130 rqb_buffer->drq = drq; 7131 rqb_buffer->idx = idx; 7132 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7133 } 7134 while (!list_empty(&rqb_buf_list)) { 7135 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7136 hbuf.list); 7137 7138 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7139 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7140 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7141 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7142 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7143 if (rc < 0) { 7144 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7145 "6421 Cannot post to HRQ %d: %x %x %x " 7146 "DRQ %x %x\n", 7147 hrq->queue_id, 7148 hrq->host_index, 7149 hrq->hba_index, 7150 hrq->entry_count, 7151 drq->host_index, 7152 drq->hba_index); 7153 rqbp->rqb_free_buffer(phba, rqb_buffer); 7154 } else { 7155 list_add_tail(&rqb_buffer->hbuf.list, 7156 &rqbp->rqb_buffer_list); 7157 rqbp->buffer_count++; 7158 } 7159 } 7160 spin_unlock_irqrestore(&phba->hbalock, flags); 7161 return 1; 7162 } 7163 7164 /** 7165 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7166 * @phba: Pointer to HBA context object. 7167 * 7168 * This function is the main SLI4 device initialization PCI function. This 7169 * function is called by the HBA initialization code, HBA reset code and 7170 * HBA error attention handler code. Caller is not required to hold any 7171 * locks. 7172 **/ 7173 int 7174 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7175 { 7176 int rc, i, cnt, len; 7177 LPFC_MBOXQ_t *mboxq; 7178 struct lpfc_mqe *mqe; 7179 uint8_t *vpd; 7180 uint32_t vpd_size; 7181 uint32_t ftr_rsp = 0; 7182 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7183 struct lpfc_vport *vport = phba->pport; 7184 struct lpfc_dmabuf *mp; 7185 struct lpfc_rqb *rqbp; 7186 7187 /* Perform a PCI function reset to start from clean */ 7188 rc = lpfc_pci_function_reset(phba); 7189 if (unlikely(rc)) 7190 return -ENODEV; 7191 7192 /* Check the HBA Host Status Register for readyness */ 7193 rc = lpfc_sli4_post_status_check(phba); 7194 if (unlikely(rc)) 7195 return -ENODEV; 7196 else { 7197 spin_lock_irq(&phba->hbalock); 7198 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7199 spin_unlock_irq(&phba->hbalock); 7200 } 7201 7202 /* 7203 * Allocate a single mailbox container for initializing the 7204 * port. 7205 */ 7206 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7207 if (!mboxq) 7208 return -ENOMEM; 7209 7210 /* Issue READ_REV to collect vpd and FW information. 
*/ 7211 vpd_size = SLI4_PAGE_SIZE; 7212 vpd = kzalloc(vpd_size, GFP_KERNEL); 7213 if (!vpd) { 7214 rc = -ENOMEM; 7215 goto out_free_mbox; 7216 } 7217 7218 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7219 if (unlikely(rc)) { 7220 kfree(vpd); 7221 goto out_free_mbox; 7222 } 7223 7224 mqe = &mboxq->u.mqe; 7225 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7226 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7227 phba->hba_flag |= HBA_FCOE_MODE; 7228 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7229 } else { 7230 phba->hba_flag &= ~HBA_FCOE_MODE; 7231 } 7232 7233 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7234 LPFC_DCBX_CEE_MODE) 7235 phba->hba_flag |= HBA_FIP_SUPPORT; 7236 else 7237 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7238 7239 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 7240 7241 if (phba->sli_rev != LPFC_SLI_REV4) { 7242 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7243 "0376 READ_REV Error. SLI Level %d " 7244 "FCoE enabled %d\n", 7245 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7246 rc = -EIO; 7247 kfree(vpd); 7248 goto out_free_mbox; 7249 } 7250 7251 /* 7252 * Continue initialization with default values even if driver failed 7253 * to read FCoE param config regions, only read parameters if the 7254 * board is FCoE 7255 */ 7256 if (phba->hba_flag & HBA_FCOE_MODE && 7257 lpfc_sli4_read_fcoe_params(phba)) 7258 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7259 "2570 Failed to read FCoE parameters\n"); 7260 7261 /* 7262 * Retrieve sli4 device physical port name, failure of doing it 7263 * is considered as non-fatal. 7264 */ 7265 rc = lpfc_sli4_retrieve_pport_name(phba); 7266 if (!rc) 7267 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7268 "3080 Successful retrieving SLI4 device " 7269 "physical port name: %s.\n", phba->Port); 7270 7271 rc = lpfc_sli4_get_ctl_attr(phba); 7272 if (!rc) 7273 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7274 "8351 Successful retrieving SLI4 device " 7275 "CTL ATTR\n"); 7276 7277 /* 7278 * Evaluate the read rev and vpd data. Populate the driver 7279 * state with the results. If this routine fails, the failure 7280 * is not fatal as the driver will use generic values. 7281 */ 7282 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7283 if (unlikely(!rc)) { 7284 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7285 "0377 Error %d parsing vpd. 
" 7286 "Using defaults.\n", rc); 7287 rc = 0; 7288 } 7289 kfree(vpd); 7290 7291 /* Save information as VPD data */ 7292 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7293 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7294 7295 /* 7296 * This is because first G7 ASIC doesn't support the standard 7297 * 0x5a NVME cmd descriptor type/subtype 7298 */ 7299 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7300 LPFC_SLI_INTF_IF_TYPE_6) && 7301 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7302 (phba->vpd.rev.smRev == 0) && 7303 (phba->cfg_nvme_embed_cmd == 1)) 7304 phba->cfg_nvme_embed_cmd = 0; 7305 7306 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7307 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7308 &mqe->un.read_rev); 7309 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7310 &mqe->un.read_rev); 7311 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7312 &mqe->un.read_rev); 7313 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7314 &mqe->un.read_rev); 7315 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7316 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7317 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7318 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7319 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7320 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7321 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7322 "(%d):0380 READ_REV Status x%x " 7323 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7324 mboxq->vport ? mboxq->vport->vpi : 0, 7325 bf_get(lpfc_mqe_status, mqe), 7326 phba->vpd.rev.opFwName, 7327 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7328 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7329 7330 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7331 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7332 if (phba->pport->cfg_lun_queue_depth > rc) { 7333 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7334 "3362 LUN queue depth changed from %d to %d\n", 7335 phba->pport->cfg_lun_queue_depth, rc); 7336 phba->pport->cfg_lun_queue_depth = rc; 7337 } 7338 7339 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7340 LPFC_SLI_INTF_IF_TYPE_0) { 7341 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7342 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7343 if (rc == MBX_SUCCESS) { 7344 phba->hba_flag |= HBA_RECOVERABLE_UE; 7345 /* Set 1Sec interval to detect UE */ 7346 phba->eratt_poll_interval = 1; 7347 phba->sli4_hba.ue_to_sr = bf_get( 7348 lpfc_mbx_set_feature_UESR, 7349 &mboxq->u.mqe.un.set_feature); 7350 phba->sli4_hba.ue_to_rp = bf_get( 7351 lpfc_mbx_set_feature_UERP, 7352 &mboxq->u.mqe.un.set_feature); 7353 } 7354 } 7355 7356 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7357 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7358 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7359 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7360 if (rc != MBX_SUCCESS) 7361 phba->mds_diags_support = 0; 7362 } 7363 7364 /* 7365 * Discover the port's supported feature set and match it against the 7366 * hosts requests. 7367 */ 7368 lpfc_request_features(phba, mboxq); 7369 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7370 if (unlikely(rc)) { 7371 rc = -EIO; 7372 goto out_free_mbox; 7373 } 7374 7375 /* 7376 * The port must support FCP initiator mode as this is the 7377 * only mode running in the host. 
7378 */ 7379 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7380 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7381 "0378 No support for fcpi mode.\n"); 7382 ftr_rsp++; 7383 } 7384 7385 /* Performance Hints are ONLY for FCoE */ 7386 if (phba->hba_flag & HBA_FCOE_MODE) { 7387 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7388 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7389 else 7390 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7391 } 7392 7393 /* 7394 * If the port cannot support the host's requested features 7395 * then turn off the global config parameters to disable the 7396 * feature in the driver. This is not a fatal error. 7397 */ 7398 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7399 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7400 phba->cfg_enable_bg = 0; 7401 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7402 ftr_rsp++; 7403 } 7404 } 7405 7406 if (phba->max_vpi && phba->cfg_enable_npiv && 7407 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7408 ftr_rsp++; 7409 7410 if (ftr_rsp) { 7411 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7412 "0379 Feature Mismatch Data: x%08x %08x " 7413 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7414 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7415 phba->cfg_enable_npiv, phba->max_vpi); 7416 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7417 phba->cfg_enable_bg = 0; 7418 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7419 phba->cfg_enable_npiv = 0; 7420 } 7421 7422 /* These SLI3 features are assumed in SLI4 */ 7423 spin_lock_irq(&phba->hbalock); 7424 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7425 spin_unlock_irq(&phba->hbalock); 7426 7427 /* 7428 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7429 * calls depends on these resources to complete port setup. 7430 */ 7431 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7432 if (rc) { 7433 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7434 "2920 Failed to alloc Resource IDs " 7435 "rc = x%x\n", rc); 7436 goto out_free_mbox; 7437 } 7438 7439 lpfc_set_host_data(phba, mboxq); 7440 7441 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7442 if (rc) { 7443 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7444 "2134 Failed to set host os driver version %x", 7445 rc); 7446 } 7447 7448 /* Read the port's service parameters. */ 7449 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7450 if (rc) { 7451 phba->link_state = LPFC_HBA_ERROR; 7452 rc = -ENOMEM; 7453 goto out_free_mbox; 7454 } 7455 7456 mboxq->vport = vport; 7457 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7458 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 7459 if (rc == MBX_SUCCESS) { 7460 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7461 rc = 0; 7462 } 7463 7464 /* 7465 * This memory was allocated by the lpfc_read_sparam routine. Release 7466 * it to the mbuf pool. 7467 */ 7468 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7469 kfree(mp); 7470 mboxq->ctx_buf = NULL; 7471 if (unlikely(rc)) { 7472 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7473 "0382 READ_SPARAM command failed " 7474 "status %d, mbxStatus x%x\n", 7475 rc, bf_get(lpfc_mqe_status, mqe)); 7476 phba->link_state = LPFC_HBA_ERROR; 7477 rc = -EIO; 7478 goto out_free_mbox; 7479 } 7480 7481 lpfc_update_vport_wwn(vport); 7482 7483 /* Update the fc_host data structures with new wwn. 
*/ 7484 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7485 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7486 7487 /* Create all the SLI4 queues */ 7488 rc = lpfc_sli4_queue_create(phba); 7489 if (rc) { 7490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7491 "3089 Failed to allocate queues\n"); 7492 rc = -ENODEV; 7493 goto out_free_mbox; 7494 } 7495 /* Set up all the queues to the device */ 7496 rc = lpfc_sli4_queue_setup(phba); 7497 if (unlikely(rc)) { 7498 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7499 "0381 Error %d during queue setup.\n ", rc); 7500 goto out_stop_timers; 7501 } 7502 /* Initialize the driver internal SLI layer lists. */ 7503 lpfc_sli4_setup(phba); 7504 lpfc_sli4_queue_init(phba); 7505 7506 /* update host els xri-sgl sizes and mappings */ 7507 rc = lpfc_sli4_els_sgl_update(phba); 7508 if (unlikely(rc)) { 7509 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7510 "1400 Failed to update xri-sgl size and " 7511 "mapping: %d\n", rc); 7512 goto out_destroy_queue; 7513 } 7514 7515 /* register the els sgl pool to the port */ 7516 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7517 phba->sli4_hba.els_xri_cnt); 7518 if (unlikely(rc < 0)) { 7519 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7520 "0582 Error %d during els sgl post " 7521 "operation\n", rc); 7522 rc = -ENODEV; 7523 goto out_destroy_queue; 7524 } 7525 phba->sli4_hba.els_xri_cnt = rc; 7526 7527 if (phba->nvmet_support) { 7528 /* update host nvmet xri-sgl sizes and mappings */ 7529 rc = lpfc_sli4_nvmet_sgl_update(phba); 7530 if (unlikely(rc)) { 7531 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7532 "6308 Failed to update nvmet-sgl size " 7533 "and mapping: %d\n", rc); 7534 goto out_destroy_queue; 7535 } 7536 7537 /* register the nvmet sgl pool to the port */ 7538 rc = lpfc_sli4_repost_sgl_list( 7539 phba, 7540 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7541 phba->sli4_hba.nvmet_xri_cnt); 7542 if (unlikely(rc < 0)) { 7543 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7544 "3117 Error %d during nvmet " 7545 "sgl post\n", rc); 7546 rc = -ENODEV; 7547 goto out_destroy_queue; 7548 } 7549 phba->sli4_hba.nvmet_xri_cnt = rc; 7550 7551 cnt = phba->cfg_iocb_cnt * 1024; 7552 /* We need 1 iocbq for every SGL, for IO processing */ 7553 cnt += phba->sli4_hba.nvmet_xri_cnt; 7554 } else { 7555 /* update host common xri-sgl sizes and mappings */ 7556 rc = lpfc_sli4_io_sgl_update(phba); 7557 if (unlikely(rc)) { 7558 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7559 "6082 Failed to update nvme-sgl size " 7560 "and mapping: %d\n", rc); 7561 goto out_destroy_queue; 7562 } 7563 7564 /* register the allocated common sgl pool to the port */ 7565 rc = lpfc_sli4_repost_io_sgl_list(phba); 7566 if (unlikely(rc)) { 7567 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7568 "6116 Error %d during nvme sgl post " 7569 "operation\n", rc); 7570 /* Some NVME buffers were moved to abort nvme list */ 7571 /* A pci function reset will repost them */ 7572 rc = -ENODEV; 7573 goto out_destroy_queue; 7574 } 7575 cnt = phba->cfg_iocb_cnt * 1024; 7576 } 7577 7578 if (!phba->sli.iocbq_lookup) { 7579 /* Initialize and populate the iocb list per host */ 7580 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7581 "2821 initialize iocb list %d total %d\n", 7582 phba->cfg_iocb_cnt, cnt); 7583 rc = lpfc_init_iocb_list(phba, cnt); 7584 if (rc) { 7585 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7586 "1413 Failed to init iocb list.\n"); 7587 goto out_destroy_queue; 7588 } 7589 } 7590 
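	/*
	 * NVME target mode: create the targetport and prime each MRQ
	 * header/data queue pair with receive buffers so the port has
	 * buffers available for unsolicited NVME commands.
	 */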
7591 if (phba->nvmet_support) 7592 lpfc_nvmet_create_targetport(phba); 7593 7594 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 7595 /* Post initial buffers to all RQs created */ 7596 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7597 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7598 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7599 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7600 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7601 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7602 rqbp->buffer_count = 0; 7603 7604 lpfc_post_rq_buffer( 7605 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7606 phba->sli4_hba.nvmet_mrq_data[i], 7607 phba->cfg_nvmet_mrq_post, i); 7608 } 7609 } 7610 7611 /* Post the rpi header region to the device. */ 7612 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7613 if (unlikely(rc)) { 7614 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7615 "0393 Error %d during rpi post operation\n", 7616 rc); 7617 rc = -ENODEV; 7618 goto out_destroy_queue; 7619 } 7620 lpfc_sli4_node_prep(phba); 7621 7622 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7623 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7624 /* 7625 * The FC Port needs to register FCFI (index 0) 7626 */ 7627 lpfc_reg_fcfi(phba, mboxq); 7628 mboxq->vport = phba->pport; 7629 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7630 if (rc != MBX_SUCCESS) 7631 goto out_unset_queue; 7632 rc = 0; 7633 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7634 &mboxq->u.mqe.un.reg_fcfi); 7635 } else { 7636 /* We are a NVME Target mode with MRQ > 1 */ 7637 7638 /* First register the FCFI */ 7639 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7640 mboxq->vport = phba->pport; 7641 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7642 if (rc != MBX_SUCCESS) 7643 goto out_unset_queue; 7644 rc = 0; 7645 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7646 &mboxq->u.mqe.un.reg_fcfi_mrq); 7647 7648 /* Next register the MRQs */ 7649 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7650 mboxq->vport = phba->pport; 7651 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7652 if (rc != MBX_SUCCESS) 7653 goto out_unset_queue; 7654 rc = 0; 7655 } 7656 /* Check if the port is configured to be disabled */ 7657 lpfc_sli_read_link_ste(phba); 7658 } 7659 7660 /* Don't post more new bufs if repost already recovered 7661 * the nvme sgls. 
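 * A non-zero io_xri_cnt indicates the earlier repost already registered
 * buffers with the port, so a fresh pool is only allocated when nothing
 * was recovered.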
7662 */ 7663 if (phba->nvmet_support == 0) { 7664 if (phba->sli4_hba.io_xri_cnt == 0) { 7665 len = lpfc_new_io_buf( 7666 phba, phba->sli4_hba.io_xri_max); 7667 if (len == 0) { 7668 rc = -ENOMEM; 7669 goto out_unset_queue; 7670 } 7671 7672 if (phba->cfg_xri_rebalancing) 7673 lpfc_create_multixri_pools(phba); 7674 } 7675 } else { 7676 phba->cfg_xri_rebalancing = 0; 7677 } 7678 7679 /* Allow asynchronous mailbox command to go through */ 7680 spin_lock_irq(&phba->hbalock); 7681 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7682 spin_unlock_irq(&phba->hbalock); 7683 7684 /* Post receive buffers to the device */ 7685 lpfc_sli4_rb_setup(phba); 7686 7687 /* Reset HBA FCF states after HBA reset */ 7688 phba->fcf.fcf_flag = 0; 7689 phba->fcf.current_rec.flag = 0; 7690 7691 /* Start the ELS watchdog timer */ 7692 mod_timer(&vport->els_tmofunc, 7693 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7694 7695 /* Start heart beat timer */ 7696 mod_timer(&phba->hb_tmofunc, 7697 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7698 phba->hb_outstanding = 0; 7699 phba->last_completion_time = jiffies; 7700 7701 /* start eq_delay heartbeat */ 7702 if (phba->cfg_auto_imax) 7703 queue_delayed_work(phba->wq, &phba->eq_delay_work, 7704 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 7705 7706 /* Start error attention (ERATT) polling timer */ 7707 mod_timer(&phba->eratt_poll, 7708 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7709 7710 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7711 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7712 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7713 if (!rc) { 7714 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7715 "2829 This device supports " 7716 "Advanced Error Reporting (AER)\n"); 7717 spin_lock_irq(&phba->hbalock); 7718 phba->hba_flag |= HBA_AER_ENABLED; 7719 spin_unlock_irq(&phba->hbalock); 7720 } else { 7721 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7722 "2830 This device does not support " 7723 "Advanced Error Reporting (AER)\n"); 7724 phba->cfg_aer_support = 0; 7725 } 7726 rc = 0; 7727 } 7728 7729 /* 7730 * The port is ready, set the host's link state to LINK_DOWN 7731 * in preparation for link interrupts. 
7732 */ 7733 spin_lock_irq(&phba->hbalock); 7734 phba->link_state = LPFC_LINK_DOWN; 7735 7736 /* Check if physical ports are trunked */ 7737 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 7738 phba->trunk_link.link0.state = LPFC_LINK_DOWN; 7739 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 7740 phba->trunk_link.link1.state = LPFC_LINK_DOWN; 7741 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 7742 phba->trunk_link.link2.state = LPFC_LINK_DOWN; 7743 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 7744 phba->trunk_link.link3.state = LPFC_LINK_DOWN; 7745 spin_unlock_irq(&phba->hbalock); 7746 7747 /* Arm the CQs and then EQs on device */ 7748 lpfc_sli4_arm_cqeq_intr(phba); 7749 7750 /* Indicate device interrupt mode */ 7751 phba->sli4_hba.intr_enable = 1; 7752 7753 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7754 (phba->hba_flag & LINK_DISABLED)) { 7755 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7756 "3103 Adapter Link is disabled.\n"); 7757 lpfc_down_link(phba, mboxq); 7758 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7759 if (rc != MBX_SUCCESS) { 7760 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7761 "3104 Adapter failed to issue " 7762 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7763 goto out_io_buff_free; 7764 } 7765 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7766 /* don't perform init_link on SLI4 FC port loopback test */ 7767 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7768 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7769 if (rc) 7770 goto out_io_buff_free; 7771 } 7772 } 7773 mempool_free(mboxq, phba->mbox_mem_pool); 7774 return rc; 7775 out_io_buff_free: 7776 /* Free allocated IO Buffers */ 7777 lpfc_io_free(phba); 7778 out_unset_queue: 7779 /* Unset all the queues set up in this routine when error out */ 7780 lpfc_sli4_queue_unset(phba); 7781 out_destroy_queue: 7782 lpfc_free_iocb_list(phba); 7783 lpfc_sli4_queue_destroy(phba); 7784 out_stop_timers: 7785 lpfc_stop_hba_timers(phba); 7786 out_free_mbox: 7787 mempool_free(mboxq, phba->mbox_mem_pool); 7788 return rc; 7789 } 7790 7791 /** 7792 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7793 * @ptr: context object - pointer to hba structure. 7794 * 7795 * This is the callback function for mailbox timer. The mailbox 7796 * timer is armed when a new mailbox command is issued and the timer 7797 * is deleted when the mailbox complete. The function is called by 7798 * the kernel timer code when a mailbox does not complete within 7799 * expected time. This function wakes up the worker thread to 7800 * process the mailbox timeout and returns. All the processing is 7801 * done by the worker thread function lpfc_mbox_timeout_handler. 7802 **/ 7803 void 7804 lpfc_mbox_timeout(struct timer_list *t) 7805 { 7806 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7807 unsigned long iflag; 7808 uint32_t tmo_posted; 7809 7810 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7811 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7812 if (!tmo_posted) 7813 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7814 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7815 7816 if (!tmo_posted) 7817 lpfc_worker_wake_up(phba); 7818 return; 7819 } 7820 7821 /** 7822 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7823 * are pending 7824 * @phba: Pointer to HBA context object. 7825 * 7826 * This function checks if any mailbox completions are present on the mailbox 7827 * completion queue. 
7828 **/ 7829 static bool 7830 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7831 { 7832 7833 uint32_t idx; 7834 struct lpfc_queue *mcq; 7835 struct lpfc_mcqe *mcqe; 7836 bool pending_completions = false; 7837 uint8_t qe_valid; 7838 7839 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7840 return false; 7841 7842 /* Check for completions on mailbox completion queue */ 7843 7844 mcq = phba->sli4_hba.mbx_cq; 7845 idx = mcq->hba_index; 7846 qe_valid = mcq->qe_valid; 7847 while (bf_get_le32(lpfc_cqe_valid, 7848 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { 7849 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); 7850 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7851 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7852 pending_completions = true; 7853 break; 7854 } 7855 idx = (idx + 1) % mcq->entry_count; 7856 if (mcq->hba_index == idx) 7857 break; 7858 7859 /* if the index wrapped around, toggle the valid bit */ 7860 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7861 qe_valid = (qe_valid) ? 0 : 1; 7862 } 7863 return pending_completions; 7864 7865 } 7866 7867 /** 7868 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7869 * that were missed. 7870 * @phba: Pointer to HBA context object. 7871 * 7872 * For sli4, it is possible to miss an interrupt. As such mbox completions 7873 * maybe missed causing erroneous mailbox timeouts to occur. This function 7874 * checks to see if mbox completions are on the mailbox completion queue 7875 * and will process all the completions associated with the eq for the 7876 * mailbox completion queue. 7877 **/ 7878 bool 7879 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7880 { 7881 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7882 uint32_t eqidx; 7883 struct lpfc_queue *fpeq = NULL; 7884 bool mbox_pending; 7885 7886 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7887 return false; 7888 7889 /* Find the eq associated with the mcq */ 7890 7891 if (sli4_hba->hdwq) 7892 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) 7893 if (sli4_hba->hdwq[eqidx].hba_eq->queue_id == 7894 sli4_hba->mbx_cq->assoc_qid) { 7895 fpeq = sli4_hba->hdwq[eqidx].hba_eq; 7896 break; 7897 } 7898 if (!fpeq) 7899 return false; 7900 7901 /* Turn off interrupts from this EQ */ 7902 7903 sli4_hba->sli4_eq_clr_intr(fpeq); 7904 7905 /* Check to see if a mbox completion is pending */ 7906 7907 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7908 7909 /* 7910 * If a mbox completion is pending, process all the events on EQ 7911 * associated with the mbox completion queue (this could include 7912 * mailbox commands, async events, els commands, receive queue data 7913 * and fcp commands) 7914 */ 7915 7916 if (mbox_pending) 7917 /* process and rearm the EQ */ 7918 lpfc_sli4_process_eq(phba, fpeq); 7919 else 7920 /* Always clear and re-arm the EQ */ 7921 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 7922 7923 return mbox_pending; 7924 7925 } 7926 7927 /** 7928 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7929 * @phba: Pointer to HBA context object. 7930 * 7931 * This function is called from worker thread when a mailbox command times out. 7932 * The caller is not required to hold any locks. This function will reset the 7933 * HBA and recover all the pending commands. 
7934 **/ 7935 void 7936 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7937 { 7938 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7939 MAILBOX_t *mb = NULL; 7940 7941 struct lpfc_sli *psli = &phba->sli; 7942 7943 /* If the mailbox completed, process the completion and return */ 7944 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7945 return; 7946 7947 if (pmbox != NULL) 7948 mb = &pmbox->u.mb; 7949 /* Check the pmbox pointer first. There is a race condition 7950 * between the mbox timeout handler getting executed in the 7951 * worklist and the mailbox actually completing. When this 7952 * race condition occurs, the mbox_active will be NULL. 7953 */ 7954 spin_lock_irq(&phba->hbalock); 7955 if (pmbox == NULL) { 7956 lpfc_printf_log(phba, KERN_WARNING, 7957 LOG_MBOX | LOG_SLI, 7958 "0353 Active Mailbox cleared - mailbox timeout " 7959 "exiting\n"); 7960 spin_unlock_irq(&phba->hbalock); 7961 return; 7962 } 7963 7964 /* Mbox cmd <mbxCommand> timeout */ 7965 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7966 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7967 mb->mbxCommand, 7968 phba->pport->port_state, 7969 phba->sli.sli_flag, 7970 phba->sli.mbox_active); 7971 spin_unlock_irq(&phba->hbalock); 7972 7973 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7974 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7975 * it to fail all outstanding SCSI IO. 7976 */ 7977 spin_lock_irq(&phba->pport->work_port_lock); 7978 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7979 spin_unlock_irq(&phba->pport->work_port_lock); 7980 spin_lock_irq(&phba->hbalock); 7981 phba->link_state = LPFC_LINK_UNKNOWN; 7982 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7983 spin_unlock_irq(&phba->hbalock); 7984 7985 lpfc_sli_abort_fcp_rings(phba); 7986 7987 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7988 "0345 Resetting board due to mailbox timeout\n"); 7989 7990 /* Reset the HBA device */ 7991 lpfc_reset_hba(phba); 7992 } 7993 7994 /** 7995 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7996 * @phba: Pointer to HBA context object. 7997 * @pmbox: Pointer to mailbox object. 7998 * @flag: Flag indicating how the mailbox need to be processed. 7999 * 8000 * This function is called by discovery code and HBA management code 8001 * to submit a mailbox command to firmware with SLI-3 interface spec. This 8002 * function gets the hbalock to protect the data structures. 8003 * The mailbox command can be submitted in polling mode, in which case 8004 * this function will wait in a polling loop for the completion of the 8005 * mailbox. 8006 * If the mailbox is submitted in no_wait mode (not polling) the 8007 * function will submit the command and returns immediately without waiting 8008 * for the mailbox completion. The no_wait is supported only when HBA 8009 * is in SLI2/SLI3 mode - interrupts are enabled. 8010 * The SLI interface allows only one mailbox pending at a time. If the 8011 * mailbox is issued in polling mode and there is already a mailbox 8012 * pending, then the function will return an error. If the mailbox is issued 8013 * in NO_WAIT mode and there is a mailbox pending already, the function 8014 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 8015 * The sli layer owns the mailbox object until the completion of mailbox 8016 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 8017 * return codes the caller owns the mailbox command after the return of 8018 * the function. 
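 *
 * Returns: MBX_SUCCESS, MBX_BUSY or MBX_NOT_FINISHED; in polling mode the
 * mailbox status reported by the adapter is returned once the command
 * completes.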
8019 **/ 8020 static int 8021 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 8022 uint32_t flag) 8023 { 8024 MAILBOX_t *mbx; 8025 struct lpfc_sli *psli = &phba->sli; 8026 uint32_t status, evtctr; 8027 uint32_t ha_copy, hc_copy; 8028 int i; 8029 unsigned long timeout; 8030 unsigned long drvr_flag = 0; 8031 uint32_t word0, ldata; 8032 void __iomem *to_slim; 8033 int processing_queue = 0; 8034 8035 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8036 if (!pmbox) { 8037 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8038 /* processing mbox queue from intr_handler */ 8039 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8040 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8041 return MBX_SUCCESS; 8042 } 8043 processing_queue = 1; 8044 pmbox = lpfc_mbox_get(phba); 8045 if (!pmbox) { 8046 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8047 return MBX_SUCCESS; 8048 } 8049 } 8050 8051 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 8052 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 8053 if(!pmbox->vport) { 8054 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8055 lpfc_printf_log(phba, KERN_ERR, 8056 LOG_MBOX | LOG_VPORT, 8057 "1806 Mbox x%x failed. No vport\n", 8058 pmbox->u.mb.mbxCommand); 8059 dump_stack(); 8060 goto out_not_finished; 8061 } 8062 } 8063 8064 /* If the PCI channel is in offline state, do not post mbox. */ 8065 if (unlikely(pci_channel_offline(phba->pcidev))) { 8066 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8067 goto out_not_finished; 8068 } 8069 8070 /* If HBA has a deferred error attention, fail the iocb. */ 8071 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8072 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8073 goto out_not_finished; 8074 } 8075 8076 psli = &phba->sli; 8077 8078 mbx = &pmbox->u.mb; 8079 status = MBX_SUCCESS; 8080 8081 if (phba->link_state == LPFC_HBA_ERROR) { 8082 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8083 8084 /* Mbox command <mbxCommand> cannot issue */ 8085 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8086 "(%d):0311 Mailbox command x%x cannot " 8087 "issue Data: x%x x%x\n", 8088 pmbox->vport ? pmbox->vport->vpi : 0, 8089 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8090 goto out_not_finished; 8091 } 8092 8093 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 8094 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8095 !(hc_copy & HC_MBINT_ENA)) { 8096 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8097 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8098 "(%d):2528 Mailbox command x%x cannot " 8099 "issue Data: x%x x%x\n", 8100 pmbox->vport ? pmbox->vport->vpi : 0, 8101 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8102 goto out_not_finished; 8103 } 8104 } 8105 8106 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8107 /* Polling for a mbox command when another one is already active 8108 * is not allowed in SLI. Also, the driver must have established 8109 * SLI2 mode to queue and process multiple mbox commands. 8110 */ 8111 8112 if (flag & MBX_POLL) { 8113 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8114 8115 /* Mbox command <mbxCommand> cannot issue */ 8116 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8117 "(%d):2529 Mailbox command x%x " 8118 "cannot issue Data: x%x x%x\n", 8119 pmbox->vport ? 
pmbox->vport->vpi : 0, 8120 pmbox->u.mb.mbxCommand, 8121 psli->sli_flag, flag); 8122 goto out_not_finished; 8123 } 8124 8125 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8126 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8127 /* Mbox command <mbxCommand> cannot issue */ 8128 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8129 "(%d):2530 Mailbox command x%x " 8130 "cannot issue Data: x%x x%x\n", 8131 pmbox->vport ? pmbox->vport->vpi : 0, 8132 pmbox->u.mb.mbxCommand, 8133 psli->sli_flag, flag); 8134 goto out_not_finished; 8135 } 8136 8137 /* Another mailbox command is still being processed, queue this 8138 * command to be processed later. 8139 */ 8140 lpfc_mbox_put(phba, pmbox); 8141 8142 /* Mbox cmd issue - BUSY */ 8143 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8144 "(%d):0308 Mbox cmd issue - BUSY Data: " 8145 "x%x x%x x%x x%x\n", 8146 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8147 mbx->mbxCommand, 8148 phba->pport ? phba->pport->port_state : 0xff, 8149 psli->sli_flag, flag); 8150 8151 psli->slistat.mbox_busy++; 8152 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8153 8154 if (pmbox->vport) { 8155 lpfc_debugfs_disc_trc(pmbox->vport, 8156 LPFC_DISC_TRC_MBOX_VPORT, 8157 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8158 (uint32_t)mbx->mbxCommand, 8159 mbx->un.varWords[0], mbx->un.varWords[1]); 8160 } 8161 else { 8162 lpfc_debugfs_disc_trc(phba->pport, 8163 LPFC_DISC_TRC_MBOX, 8164 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8165 (uint32_t)mbx->mbxCommand, 8166 mbx->un.varWords[0], mbx->un.varWords[1]); 8167 } 8168 8169 return MBX_BUSY; 8170 } 8171 8172 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8173 8174 /* If we are not polling, we MUST be in SLI2 mode */ 8175 if (flag != MBX_POLL) { 8176 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8177 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8178 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8179 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8180 /* Mbox command <mbxCommand> cannot issue */ 8181 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8182 "(%d):2531 Mailbox command x%x " 8183 "cannot issue Data: x%x x%x\n", 8184 pmbox->vport ? pmbox->vport->vpi : 0, 8185 pmbox->u.mb.mbxCommand, 8186 psli->sli_flag, flag); 8187 goto out_not_finished; 8188 } 8189 /* timeout active mbox command */ 8190 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8191 1000); 8192 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8193 } 8194 8195 /* Mailbox cmd <cmd> issue */ 8196 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8197 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8198 "x%x\n", 8199 pmbox->vport ? pmbox->vport->vpi : 0, 8200 mbx->mbxCommand, 8201 phba->pport ? phba->pport->port_state : 0xff, 8202 psli->sli_flag, flag); 8203 8204 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8205 if (pmbox->vport) { 8206 lpfc_debugfs_disc_trc(pmbox->vport, 8207 LPFC_DISC_TRC_MBOX_VPORT, 8208 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8209 (uint32_t)mbx->mbxCommand, 8210 mbx->un.varWords[0], mbx->un.varWords[1]); 8211 } 8212 else { 8213 lpfc_debugfs_disc_trc(phba->pport, 8214 LPFC_DISC_TRC_MBOX, 8215 "MBOX Send: cmd:x%x mb:x%x x%x", 8216 (uint32_t)mbx->mbxCommand, 8217 mbx->un.varWords[0], mbx->un.varWords[1]); 8218 } 8219 } 8220 8221 psli->slistat.mbox_cmd++; 8222 evtctr = psli->slistat.mbox_event; 8223 8224 /* next set own bit for the adapter and copy over command word */ 8225 mbx->mbxOwner = OWN_CHIP; 8226 8227 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8228 /* Populate mbox extension offset word. 
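 * The offset word in the command is set to the byte offset of the
 * extension area from the start of the host mailbox region.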
*/ 8229 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8230 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8231 = (uint8_t *)phba->mbox_ext 8232 - (uint8_t *)phba->mbox; 8233 } 8234 8235 /* Copy the mailbox extension data */ 8236 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { 8237 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, 8238 (uint8_t *)phba->mbox_ext, 8239 pmbox->in_ext_byte_len); 8240 } 8241 /* Copy command data to host SLIM area */ 8242 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8243 } else { 8244 /* Populate mbox extension offset word. */ 8245 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8246 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8247 = MAILBOX_HBA_EXT_OFFSET; 8248 8249 /* Copy the mailbox extension data */ 8250 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) 8251 lpfc_memcpy_to_slim(phba->MBslimaddr + 8252 MAILBOX_HBA_EXT_OFFSET, 8253 pmbox->ctx_buf, pmbox->in_ext_byte_len); 8254 8255 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8256 /* copy command data into host mbox for cmpl */ 8257 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8258 MAILBOX_CMD_SIZE); 8259 8260 /* First copy mbox command data to HBA SLIM, skip past first 8261 word */ 8262 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8263 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8264 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8265 8266 /* Next copy over first word, with mbxOwner set */ 8267 ldata = *((uint32_t *)mbx); 8268 to_slim = phba->MBslimaddr; 8269 writel(ldata, to_slim); 8270 readl(to_slim); /* flush */ 8271 8272 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8273 /* switch over to host mailbox */ 8274 psli->sli_flag |= LPFC_SLI_ACTIVE; 8275 } 8276 8277 wmb(); 8278 8279 switch (flag) { 8280 case MBX_NOWAIT: 8281 /* Set up reference to mailbox command */ 8282 psli->mbox_active = pmbox; 8283 /* Interrupt board to do it */ 8284 writel(CA_MBATT, phba->CAregaddr); 8285 readl(phba->CAregaddr); /* flush */ 8286 /* Don't wait for it to finish, just return */ 8287 break; 8288 8289 case MBX_POLL: 8290 /* Set up null reference to mailbox command */ 8291 psli->mbox_active = NULL; 8292 /* Interrupt board to do it */ 8293 writel(CA_MBATT, phba->CAregaddr); 8294 readl(phba->CAregaddr); /* flush */ 8295 8296 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8297 /* First read mbox status word */ 8298 word0 = *((uint32_t *)phba->mbox); 8299 word0 = le32_to_cpu(word0); 8300 } else { 8301 /* First read mbox status word */ 8302 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8303 spin_unlock_irqrestore(&phba->hbalock, 8304 drvr_flag); 8305 goto out_not_finished; 8306 } 8307 } 8308 8309 /* Read the HBA Host Attention Register */ 8310 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8311 spin_unlock_irqrestore(&phba->hbalock, 8312 drvr_flag); 8313 goto out_not_finished; 8314 } 8315 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8316 1000) + jiffies; 8317 i = 0; 8318 /* Wait for command to complete */ 8319 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8320 (!(ha_copy & HA_MBATT) && 8321 (phba->link_state > LPFC_WARM_START))) { 8322 if (time_after(jiffies, timeout)) { 8323 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8324 spin_unlock_irqrestore(&phba->hbalock, 8325 drvr_flag); 8326 goto out_not_finished; 8327 } 8328 8329 /* Check if we took a mbox interrupt while we were 8330 polling */ 8331 if (((word0 & OWN_CHIP) != OWN_CHIP) 8332 && (evtctr != psli->slistat.mbox_event)) 8333 break; 8334 8335 if (i++ > 10) { 8336 spin_unlock_irqrestore(&phba->hbalock, 8337 drvr_flag); 8338 msleep(1); 8339 spin_lock_irqsave(&phba->hbalock, 
drvr_flag); 8340 } 8341 8342 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8343 /* First copy command data */ 8344 word0 = *((uint32_t *)phba->mbox); 8345 word0 = le32_to_cpu(word0); 8346 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8347 MAILBOX_t *slimmb; 8348 uint32_t slimword0; 8349 /* Check real SLIM for any errors */ 8350 slimword0 = readl(phba->MBslimaddr); 8351 slimmb = (MAILBOX_t *) & slimword0; 8352 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8353 && slimmb->mbxStatus) { 8354 psli->sli_flag &= 8355 ~LPFC_SLI_ACTIVE; 8356 word0 = slimword0; 8357 } 8358 } 8359 } else { 8360 /* First copy command data */ 8361 word0 = readl(phba->MBslimaddr); 8362 } 8363 /* Read the HBA Host Attention Register */ 8364 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8365 spin_unlock_irqrestore(&phba->hbalock, 8366 drvr_flag); 8367 goto out_not_finished; 8368 } 8369 } 8370 8371 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8372 /* copy results back to user */ 8373 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8374 MAILBOX_CMD_SIZE); 8375 /* Copy the mailbox extension data */ 8376 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8377 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8378 pmbox->ctx_buf, 8379 pmbox->out_ext_byte_len); 8380 } 8381 } else { 8382 /* First copy command data */ 8383 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8384 MAILBOX_CMD_SIZE); 8385 /* Copy the mailbox extension data */ 8386 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8387 lpfc_memcpy_from_slim( 8388 pmbox->ctx_buf, 8389 phba->MBslimaddr + 8390 MAILBOX_HBA_EXT_OFFSET, 8391 pmbox->out_ext_byte_len); 8392 } 8393 } 8394 8395 writel(HA_MBATT, phba->HAregaddr); 8396 readl(phba->HAregaddr); /* flush */ 8397 8398 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8399 status = mbx->mbxStatus; 8400 } 8401 8402 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8403 return status; 8404 8405 out_not_finished: 8406 if (processing_queue) { 8407 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8408 lpfc_mbox_cmpl_put(phba, pmbox); 8409 } 8410 return MBX_NOT_FINISHED; 8411 } 8412 8413 /** 8414 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8415 * @phba: Pointer to HBA context object. 8416 * 8417 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8418 * the driver internal pending mailbox queue. It will then try to wait out the 8419 * possible outstanding mailbox command before return. 8420 * 8421 * Returns: 8422 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8423 * the outstanding mailbox command timed out. 8424 **/ 8425 static int 8426 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8427 { 8428 struct lpfc_sli *psli = &phba->sli; 8429 int rc = 0; 8430 unsigned long timeout = 0; 8431 8432 /* Mark the asynchronous mailbox command posting as blocked */ 8433 spin_lock_irq(&phba->hbalock); 8434 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8435 /* Determine how long we might wait for the active mailbox 8436 * command to be gracefully completed by firmware. 
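 * The wait budget is derived from lpfc_mbox_tmo_val() for the command
 * that is currently active; when no command is active there is nothing
 * to wait out.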
8437 */ 8438 if (phba->sli.mbox_active) 8439 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 8440 phba->sli.mbox_active) * 8441 1000) + jiffies; 8442 spin_unlock_irq(&phba->hbalock); 8443 8444 /* Make sure the mailbox is really active */ 8445 if (timeout) 8446 lpfc_sli4_process_missed_mbox_completions(phba); 8447 8448 /* Wait for the outstanding mailbox command to complete */ 8449 while (phba->sli.mbox_active) { 8450 /* Check active mailbox complete status every 2ms */ 8451 msleep(2); 8452 if (time_after(jiffies, timeout)) { 8453 /* Timeout, mark the outstanding cmd as not complete */ 8454 rc = 1; 8455 break; 8456 } 8457 } 8458 8459 /* Cannot cleanly block async mailbox command, fail it */ 8460 if (rc) { 8461 spin_lock_irq(&phba->hbalock); 8462 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8463 spin_unlock_irq(&phba->hbalock); 8464 } 8465 return rc; 8466 } 8467 8468 /** 8469 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 8470 * @phba: Pointer to HBA context object. 8471 * 8472 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 8473 * commands from the driver internal pending mailbox queue. It makes sure 8474 * that there is no outstanding mailbox command before resuming posting 8475 * asynchronous mailbox commands. If, for any reason, there is an outstanding 8476 * mailbox command, it will try to wait it out before resuming asynchronous 8477 * mailbox command posting. 8478 **/ 8479 static void 8480 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 8481 { 8482 struct lpfc_sli *psli = &phba->sli; 8483 8484 spin_lock_irq(&phba->hbalock); 8485 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8486 /* Asynchronous mailbox posting is not blocked, do nothing */ 8487 spin_unlock_irq(&phba->hbalock); 8488 return; 8489 } 8490 8491 /* The outstanding synchronous mailbox command is guaranteed to be done, 8492 * either successfully or by timeout; after a timeout the outstanding 8493 * command is always removed, so just unblock posting of async 8494 * mailbox commands and resume 8495 */ 8496 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8497 spin_unlock_irq(&phba->hbalock); 8498 8499 /* wake up worker thread to post asynchronous mailbox command */ 8500 lpfc_worker_wake_up(phba); 8501 } 8502 8503 /** 8504 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 8505 * @phba: Pointer to HBA context object. 8506 * @mboxq: Pointer to mailbox object. 8507 * 8508 * The function waits for the bootstrap mailbox register ready bit from 8509 * the port for twice the regular mailbox command timeout value. 8510 * 8511 * 0 - no timeout on waiting for bootstrap mailbox register ready. 8512 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 8513 **/ 8514 static int 8515 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8516 { 8517 uint32_t db_ready; 8518 unsigned long timeout; 8519 struct lpfc_register bmbx_reg; 8520 8521 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 8522 * 1000) + jiffies; 8523 8524 do { 8525 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 8526 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 8527 if (!db_ready) 8528 mdelay(2); 8529 8530 if (time_after(jiffies, timeout)) 8531 return MBXERR_ERROR; 8532 } while (!db_ready); 8533 8534 return 0; 8535 } 8536 8537 /** 8538 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 8539 * @phba: Pointer to HBA context object. 8540 * @mboxq: Pointer to mailbox object. 8541 * 8542 * The function posts a mailbox to the port.
The mailbox is expected 8543 * to be completely filled in and ready for the port to operate on it. 8544 * This routine executes a synchronous completion operation on the 8545 * mailbox by polling for its completion. 8546 * 8547 * The caller must not be holding any locks when calling this routine. 8548 * 8549 * Returns: 8550 * MBX_SUCCESS - mailbox posted successfully 8551 * Any of the MBX error values. 8552 **/ 8553 static int 8554 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8555 { 8556 int rc = MBX_SUCCESS; 8557 unsigned long iflag; 8558 uint32_t mcqe_status; 8559 uint32_t mbx_cmnd; 8560 struct lpfc_sli *psli = &phba->sli; 8561 struct lpfc_mqe *mb = &mboxq->u.mqe; 8562 struct lpfc_bmbx_create *mbox_rgn; 8563 struct dma_address *dma_address; 8564 8565 /* 8566 * Only one mailbox can be active to the bootstrap mailbox region 8567 * at a time and there is no queueing provided. 8568 */ 8569 spin_lock_irqsave(&phba->hbalock, iflag); 8570 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8571 spin_unlock_irqrestore(&phba->hbalock, iflag); 8572 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8573 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8574 "cannot issue Data: x%x x%x\n", 8575 mboxq->vport ? mboxq->vport->vpi : 0, 8576 mboxq->u.mb.mbxCommand, 8577 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8578 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8579 psli->sli_flag, MBX_POLL); 8580 return MBXERR_ERROR; 8581 } 8582 /* The server grabs the token and owns it until release */ 8583 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8584 phba->sli.mbox_active = mboxq; 8585 spin_unlock_irqrestore(&phba->hbalock, iflag); 8586 8587 /* wait for bootstrap mbox register for readiness */ 8588 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8589 if (rc) 8590 goto exit; 8591 /* 8592 * Initialize the bootstrap memory region to avoid stale data areas 8593 * in the mailbox post. Then copy the caller's mailbox contents to 8594 * the bmbx mailbox region. 8595 */ 8596 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8597 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8598 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8599 sizeof(struct lpfc_mqe)); 8600 8601 /* Post the high mailbox dma address to the port and wait for ready. */ 8602 dma_address = &phba->sli4_hba.bmbx.dma_address; 8603 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 8604 8605 /* wait for bootstrap mbox register for hi-address write done */ 8606 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8607 if (rc) 8608 goto exit; 8609 8610 /* Post the low mailbox dma address to the port. */ 8611 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 8612 8613 /* wait for bootstrap mbox register for low address write done */ 8614 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8615 if (rc) 8616 goto exit; 8617 8618 /* 8619 * Read the CQ to ensure the mailbox has completed. 8620 * If so, update the mailbox status so that the upper layers 8621 * can complete the request normally. 8622 */ 8623 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8624 sizeof(struct lpfc_mqe)); 8625 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8626 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8627 sizeof(struct lpfc_mcqe)); 8628 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8629 /* 8630 * When the CQE status indicates a failure and the mailbox status 8631 * indicates success then copy the CQE status into the mailbox status 8632 * (and prefix it with x4000).
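 * The LPFC_MBX_ERROR_RANGE prefix marks the returned value as a
 * CQE-derived status rather than a native mailbox status.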
8633 */ 8634 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8635 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8636 bf_set(lpfc_mqe_status, mb, 8637 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8638 rc = MBXERR_ERROR; 8639 } else 8640 lpfc_sli4_swap_str(phba, mboxq); 8641 8642 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8643 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8644 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8645 " x%x x%x CQ: x%x x%x x%x x%x\n", 8646 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8647 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8648 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8649 bf_get(lpfc_mqe_status, mb), 8650 mb->un.mb_words[0], mb->un.mb_words[1], 8651 mb->un.mb_words[2], mb->un.mb_words[3], 8652 mb->un.mb_words[4], mb->un.mb_words[5], 8653 mb->un.mb_words[6], mb->un.mb_words[7], 8654 mb->un.mb_words[8], mb->un.mb_words[9], 8655 mb->un.mb_words[10], mb->un.mb_words[11], 8656 mb->un.mb_words[12], mboxq->mcqe.word0, 8657 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8658 mboxq->mcqe.trailer); 8659 exit: 8660 /* We are holding the token, no needed for lock when release */ 8661 spin_lock_irqsave(&phba->hbalock, iflag); 8662 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8663 phba->sli.mbox_active = NULL; 8664 spin_unlock_irqrestore(&phba->hbalock, iflag); 8665 return rc; 8666 } 8667 8668 /** 8669 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8670 * @phba: Pointer to HBA context object. 8671 * @pmbox: Pointer to mailbox object. 8672 * @flag: Flag indicating how the mailbox need to be processed. 8673 * 8674 * This function is called by discovery code and HBA management code to submit 8675 * a mailbox command to firmware with SLI-4 interface spec. 8676 * 8677 * Return codes the caller owns the mailbox command after the return of the 8678 * function. 8679 **/ 8680 static int 8681 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8682 uint32_t flag) 8683 { 8684 struct lpfc_sli *psli = &phba->sli; 8685 unsigned long iflags; 8686 int rc; 8687 8688 /* dump from issue mailbox command if setup */ 8689 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8690 8691 rc = lpfc_mbox_dev_check(phba); 8692 if (unlikely(rc)) { 8693 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8694 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8695 "cannot issue Data: x%x x%x\n", 8696 mboxq->vport ? mboxq->vport->vpi : 0, 8697 mboxq->u.mb.mbxCommand, 8698 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8699 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8700 psli->sli_flag, flag); 8701 goto out_not_finished; 8702 } 8703 8704 /* Detect polling mode and jump to a handler */ 8705 if (!phba->sli4_hba.intr_enable) { 8706 if (flag == MBX_POLL) 8707 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8708 else 8709 rc = -EIO; 8710 if (rc != MBX_SUCCESS) 8711 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8712 "(%d):2541 Mailbox command x%x " 8713 "(x%x/x%x) failure: " 8714 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8715 "Data: x%x x%x\n,", 8716 mboxq->vport ? 
mboxq->vport->vpi : 0, 8717 mboxq->u.mb.mbxCommand, 8718 lpfc_sli_config_mbox_subsys_get(phba, 8719 mboxq), 8720 lpfc_sli_config_mbox_opcode_get(phba, 8721 mboxq), 8722 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8723 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8724 bf_get(lpfc_mcqe_ext_status, 8725 &mboxq->mcqe), 8726 psli->sli_flag, flag); 8727 return rc; 8728 } else if (flag == MBX_POLL) { 8729 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8730 "(%d):2542 Try to issue mailbox command " 8731 "x%x (x%x/x%x) synchronously ahead of async " 8732 "mailbox command queue: x%x x%x\n", 8733 mboxq->vport ? mboxq->vport->vpi : 0, 8734 mboxq->u.mb.mbxCommand, 8735 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8736 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8737 psli->sli_flag, flag); 8738 /* Try to block the asynchronous mailbox posting */ 8739 rc = lpfc_sli4_async_mbox_block(phba); 8740 if (!rc) { 8741 /* Successfully blocked, now issue sync mbox cmd */ 8742 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8743 if (rc != MBX_SUCCESS) 8744 lpfc_printf_log(phba, KERN_WARNING, 8745 LOG_MBOX | LOG_SLI, 8746 "(%d):2597 Sync Mailbox command " 8747 "x%x (x%x/x%x) failure: " 8748 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8749 "Data: x%x x%x\n,", 8750 mboxq->vport ? mboxq->vport->vpi : 0, 8751 mboxq->u.mb.mbxCommand, 8752 lpfc_sli_config_mbox_subsys_get(phba, 8753 mboxq), 8754 lpfc_sli_config_mbox_opcode_get(phba, 8755 mboxq), 8756 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8757 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8758 bf_get(lpfc_mcqe_ext_status, 8759 &mboxq->mcqe), 8760 psli->sli_flag, flag); 8761 /* Unblock the async mailbox posting afterward */ 8762 lpfc_sli4_async_mbox_unblock(phba); 8763 } 8764 return rc; 8765 } 8766 8767 /* Now, interrupt mode asynchrous mailbox command */ 8768 rc = lpfc_mbox_cmd_check(phba, mboxq); 8769 if (rc) { 8770 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8771 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8772 "cannot issue Data: x%x x%x\n", 8773 mboxq->vport ? mboxq->vport->vpi : 0, 8774 mboxq->u.mb.mbxCommand, 8775 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8776 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8777 psli->sli_flag, flag); 8778 goto out_not_finished; 8779 } 8780 8781 /* Put the mailbox command to the driver internal FIFO */ 8782 psli->slistat.mbox_busy++; 8783 spin_lock_irqsave(&phba->hbalock, iflags); 8784 lpfc_mbox_put(phba, mboxq); 8785 spin_unlock_irqrestore(&phba->hbalock, iflags); 8786 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8787 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8788 "x%x (x%x/x%x) x%x x%x x%x\n", 8789 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8790 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8791 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8792 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8793 phba->pport->port_state, 8794 psli->sli_flag, MBX_NOWAIT); 8795 /* Wake up worker thread to transport mailbox command from head */ 8796 lpfc_worker_wake_up(phba); 8797 8798 return MBX_BUSY; 8799 8800 out_not_finished: 8801 return MBX_NOT_FINISHED; 8802 } 8803 8804 /** 8805 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8806 * @phba: Pointer to HBA context object. 8807 * 8808 * This function is called by worker thread to send a mailbox command to 8809 * SLI4 HBA firmware. 
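 * Returns: MBX_SUCCESS when a command is posted to the port or no command
 * is waiting, MBX_NOT_FINISHED otherwise.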
8810 * 8811 **/ 8812 int 8813 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8814 { 8815 struct lpfc_sli *psli = &phba->sli; 8816 LPFC_MBOXQ_t *mboxq; 8817 int rc = MBX_SUCCESS; 8818 unsigned long iflags; 8819 struct lpfc_mqe *mqe; 8820 uint32_t mbx_cmnd; 8821 8822 /* Check interrupt mode before post async mailbox command */ 8823 if (unlikely(!phba->sli4_hba.intr_enable)) 8824 return MBX_NOT_FINISHED; 8825 8826 /* Check for mailbox command service token */ 8827 spin_lock_irqsave(&phba->hbalock, iflags); 8828 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8829 spin_unlock_irqrestore(&phba->hbalock, iflags); 8830 return MBX_NOT_FINISHED; 8831 } 8832 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8833 spin_unlock_irqrestore(&phba->hbalock, iflags); 8834 return MBX_NOT_FINISHED; 8835 } 8836 if (unlikely(phba->sli.mbox_active)) { 8837 spin_unlock_irqrestore(&phba->hbalock, iflags); 8838 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8839 "0384 There is pending active mailbox cmd\n"); 8840 return MBX_NOT_FINISHED; 8841 } 8842 /* Take the mailbox command service token */ 8843 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8844 8845 /* Get the next mailbox command from head of queue */ 8846 mboxq = lpfc_mbox_get(phba); 8847 8848 /* If no more mailbox command waiting for post, we're done */ 8849 if (!mboxq) { 8850 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8851 spin_unlock_irqrestore(&phba->hbalock, iflags); 8852 return MBX_SUCCESS; 8853 } 8854 phba->sli.mbox_active = mboxq; 8855 spin_unlock_irqrestore(&phba->hbalock, iflags); 8856 8857 /* Check device readiness for posting mailbox command */ 8858 rc = lpfc_mbox_dev_check(phba); 8859 if (unlikely(rc)) 8860 /* Driver clean routine will clean up pending mailbox */ 8861 goto out_not_finished; 8862 8863 /* Prepare the mbox command to be posted */ 8864 mqe = &mboxq->u.mqe; 8865 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8866 8867 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8868 mod_timer(&psli->mbox_tmo, (jiffies + 8869 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8870 8871 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8872 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8873 "x%x x%x\n", 8874 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8875 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8876 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8877 phba->pport->port_state, psli->sli_flag); 8878 8879 if (mbx_cmnd != MBX_HEARTBEAT) { 8880 if (mboxq->vport) { 8881 lpfc_debugfs_disc_trc(mboxq->vport, 8882 LPFC_DISC_TRC_MBOX_VPORT, 8883 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8884 mbx_cmnd, mqe->un.mb_words[0], 8885 mqe->un.mb_words[1]); 8886 } else { 8887 lpfc_debugfs_disc_trc(phba->pport, 8888 LPFC_DISC_TRC_MBOX, 8889 "MBOX Send: cmd:x%x mb:x%x x%x", 8890 mbx_cmnd, mqe->un.mb_words[0], 8891 mqe->un.mb_words[1]); 8892 } 8893 } 8894 psli->slistat.mbox_cmd++; 8895 8896 /* Post the mailbox command to the port */ 8897 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8898 if (rc != MBX_SUCCESS) { 8899 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8900 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8901 "cannot issue Data: x%x x%x\n", 8902 mboxq->vport ? 
mboxq->vport->vpi : 0, 8903 mboxq->u.mb.mbxCommand, 8904 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8905 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8906 psli->sli_flag, MBX_NOWAIT); 8907 goto out_not_finished; 8908 } 8909 8910 return rc; 8911 8912 out_not_finished: 8913 spin_lock_irqsave(&phba->hbalock, iflags); 8914 if (phba->sli.mbox_active) { 8915 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8916 __lpfc_mbox_cmpl_put(phba, mboxq); 8917 /* Release the token */ 8918 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8919 phba->sli.mbox_active = NULL; 8920 } 8921 spin_unlock_irqrestore(&phba->hbalock, iflags); 8922 8923 return MBX_NOT_FINISHED; 8924 } 8925 8926 /** 8927 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8928 * @phba: Pointer to HBA context object. 8929 * @pmbox: Pointer to mailbox object. 8930 * @flag: Flag indicating how the mailbox need to be processed. 8931 * 8932 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8933 * the API jump table function pointer from the lpfc_hba struct. 8934 * 8935 * Return codes the caller owns the mailbox command after the return of the 8936 * function. 8937 **/ 8938 int 8939 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8940 { 8941 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8942 } 8943 8944 /** 8945 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8946 * @phba: The hba struct for which this call is being executed. 8947 * @dev_grp: The HBA PCI-Device group number. 8948 * 8949 * This routine sets up the mbox interface API function jump table in @phba 8950 * struct. 8951 * Returns: 0 - success, -ENODEV - failure. 8952 **/ 8953 int 8954 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8955 { 8956 8957 switch (dev_grp) { 8958 case LPFC_PCI_DEV_LP: 8959 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8960 phba->lpfc_sli_handle_slow_ring_event = 8961 lpfc_sli_handle_slow_ring_event_s3; 8962 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8963 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8964 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8965 break; 8966 case LPFC_PCI_DEV_OC: 8967 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8968 phba->lpfc_sli_handle_slow_ring_event = 8969 lpfc_sli_handle_slow_ring_event_s4; 8970 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8971 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8972 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8973 break; 8974 default: 8975 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8976 "1420 Invalid HBA PCI-device group: 0x%x\n", 8977 dev_grp); 8978 return -ENODEV; 8979 break; 8980 } 8981 return 0; 8982 } 8983 8984 /** 8985 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8986 * @phba: Pointer to HBA context object. 8987 * @pring: Pointer to driver SLI ring object. 8988 * @piocb: Pointer to address of newly added command iocb. 8989 * 8990 * This function is called with hbalock held to add a command 8991 * iocb to the txq when SLI layer cannot submit the command iocb 8992 * to the ring. 8993 **/ 8994 void 8995 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8996 struct lpfc_iocbq *piocb) 8997 { 8998 lockdep_assert_held(&phba->hbalock); 8999 /* Insert the caller's iocb in the txq tail for later processing. */ 9000 list_add_tail(&piocb->list, &pring->txq); 9001 } 9002 9003 /** 9004 * lpfc_sli_next_iocb - Get the next iocb in the txq 9005 * @phba: Pointer to HBA context object. 
9006 * @pring: Pointer to driver SLI ring object. 9007 * @piocb: Pointer to address of newly added command iocb. 9008 * 9009 * This function is called with hbalock held before a new 9010 * iocb is submitted to the firmware. This function checks 9011 * txq to flush the iocbs in txq to Firmware before 9012 * submitting new iocbs to the Firmware. 9013 * If there are iocbs in the txq which need to be submitted 9014 * to firmware, lpfc_sli_next_iocb returns the first element 9015 * of the txq after dequeuing it from txq. 9016 * If there is no iocb in the txq then the function will return 9017 * *piocb and *piocb is set to NULL. Caller needs to check 9018 * *piocb to find if there are more commands in the txq. 9019 **/ 9020 static struct lpfc_iocbq * 9021 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9022 struct lpfc_iocbq **piocb) 9023 { 9024 struct lpfc_iocbq * nextiocb; 9025 9026 lockdep_assert_held(&phba->hbalock); 9027 9028 nextiocb = lpfc_sli_ringtx_get(phba, pring); 9029 if (!nextiocb) { 9030 nextiocb = *piocb; 9031 *piocb = NULL; 9032 } 9033 9034 return nextiocb; 9035 } 9036 9037 /** 9038 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 9039 * @phba: Pointer to HBA context object. 9040 * @ring_number: SLI ring number to issue iocb on. 9041 * @piocb: Pointer to command iocb. 9042 * @flag: Flag indicating if this command can be put into txq. 9043 * 9044 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 9045 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 9046 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 9047 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 9048 * this function allows only iocbs for posting buffers. This function finds 9049 * next available slot in the command ring and posts the command to the 9050 * available slot and writes the port attention register to request HBA start 9051 * processing new iocb. If there is no slot available in the ring and 9052 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 9053 * the function returns IOCB_BUSY. 9054 * 9055 * This function is called with hbalock held. The function will return success 9056 * after it successfully submit the iocb to firmware or after adding to the 9057 * txq. 9058 **/ 9059 static int 9060 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 9061 struct lpfc_iocbq *piocb, uint32_t flag) 9062 { 9063 struct lpfc_iocbq *nextiocb; 9064 IOCB_t *iocb; 9065 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 9066 9067 lockdep_assert_held(&phba->hbalock); 9068 9069 if (piocb->iocb_cmpl && (!piocb->vport) && 9070 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 9071 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 9072 lpfc_printf_log(phba, KERN_ERR, 9073 LOG_SLI | LOG_VPORT, 9074 "1807 IOCB x%x failed. No vport\n", 9075 piocb->iocb.ulpCommand); 9076 dump_stack(); 9077 return IOCB_ERROR; 9078 } 9079 9080 9081 /* If the PCI channel is in offline state, do not post iocbs. */ 9082 if (unlikely(pci_channel_offline(phba->pcidev))) 9083 return IOCB_ERROR; 9084 9085 /* If HBA has a deferred error attention, fail the iocb. 
*/ 9086 if (unlikely(phba->hba_flag & DEFER_ERATT)) 9087 return IOCB_ERROR; 9088 9089 /* 9090 * We should never get an IOCB if we are in a < LINK_DOWN state 9091 */ 9092 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 9093 return IOCB_ERROR; 9094 9095 /* 9096 * Check to see if we are blocking IOCB processing because of a 9097 * outstanding event. 9098 */ 9099 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 9100 goto iocb_busy; 9101 9102 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 9103 /* 9104 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 9105 * can be issued if the link is not up. 9106 */ 9107 switch (piocb->iocb.ulpCommand) { 9108 case CMD_GEN_REQUEST64_CR: 9109 case CMD_GEN_REQUEST64_CX: 9110 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 9111 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 9112 FC_RCTL_DD_UNSOL_CMD) || 9113 (piocb->iocb.un.genreq64.w5.hcsw.Type != 9114 MENLO_TRANSPORT_TYPE)) 9115 9116 goto iocb_busy; 9117 break; 9118 case CMD_QUE_RING_BUF_CN: 9119 case CMD_QUE_RING_BUF64_CN: 9120 /* 9121 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 9122 * completion, iocb_cmpl MUST be 0. 9123 */ 9124 if (piocb->iocb_cmpl) 9125 piocb->iocb_cmpl = NULL; 9126 /*FALLTHROUGH*/ 9127 case CMD_CREATE_XRI_CR: 9128 case CMD_CLOSE_XRI_CN: 9129 case CMD_CLOSE_XRI_CX: 9130 break; 9131 default: 9132 goto iocb_busy; 9133 } 9134 9135 /* 9136 * For FCP commands, we must be in a state where we can process link 9137 * attention events. 9138 */ 9139 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 9140 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 9141 goto iocb_busy; 9142 } 9143 9144 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 9145 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 9146 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 9147 9148 if (iocb) 9149 lpfc_sli_update_ring(phba, pring); 9150 else 9151 lpfc_sli_update_full_ring(phba, pring); 9152 9153 if (!piocb) 9154 return IOCB_SUCCESS; 9155 9156 goto out_busy; 9157 9158 iocb_busy: 9159 pring->stats.iocb_cmd_delay++; 9160 9161 out_busy: 9162 9163 if (!(flag & SLI_IOCB_RET_IOCB)) { 9164 __lpfc_sli_ringtx_put(phba, pring, piocb); 9165 return IOCB_SUCCESS; 9166 } 9167 9168 return IOCB_BUSY; 9169 } 9170 9171 /** 9172 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 9173 * @phba: Pointer to HBA context object. 9174 * @piocb: Pointer to command iocb. 9175 * @sglq: Pointer to the scatter gather queue object. 9176 * 9177 * This routine converts the bpl or bde that is in the IOCB 9178 * to a sgl list for the sli4 hardware. The physical address 9179 * of the bpl/bde is converted back to a virtual address. 9180 * If the IOCB contains a BPL then the list of BDE's is 9181 * converted to sli4_sge's. If the IOCB contains a single 9182 * BDE then it is converted to a single sli_sge. 9183 * The IOCB is still in cpu endianess so the contents of 9184 * the bpl can be used without byte swapping. 9185 * 9186 * Returns valid XRI = Success, NO_XRI = Failure. 
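 *
 * A short sketch of how the SLI4 issue path consumes this conversion, which
 * mirrors the use in __lpfc_sli_issue_iocb_s4 later in this file:
 *
 *	piocb->sli4_lxritag = sglq->sli4_lxritag;
 *	piocb->sli4_xritag = sglq->sli4_xritag;
 *	if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
 *		return IOCB_ERROR;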
9187 **/ 9188 static uint16_t 9189 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 9190 struct lpfc_sglq *sglq) 9191 { 9192 uint16_t xritag = NO_XRI; 9193 struct ulp_bde64 *bpl = NULL; 9194 struct ulp_bde64 bde; 9195 struct sli4_sge *sgl = NULL; 9196 struct lpfc_dmabuf *dmabuf; 9197 IOCB_t *icmd; 9198 int numBdes = 0; 9199 int i = 0; 9200 uint32_t offset = 0; /* accumulated offset in the sg request list */ 9201 int inbound = 0; /* number of sg reply entries inbound from firmware */ 9202 9203 if (!piocbq || !sglq) 9204 return xritag; 9205 9206 sgl = (struct sli4_sge *)sglq->sgl; 9207 icmd = &piocbq->iocb; 9208 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 9209 return sglq->sli4_xritag; 9210 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9211 numBdes = icmd->un.genreq64.bdl.bdeSize / 9212 sizeof(struct ulp_bde64); 9213 /* The addrHigh and addrLow fields within the IOCB 9214 * have not been byteswapped yet so there is no 9215 * need to swap them back. 9216 */ 9217 if (piocbq->context3) 9218 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 9219 else 9220 return xritag; 9221 9222 bpl = (struct ulp_bde64 *)dmabuf->virt; 9223 if (!bpl) 9224 return xritag; 9225 9226 for (i = 0; i < numBdes; i++) { 9227 /* Should already be byte swapped. */ 9228 sgl->addr_hi = bpl->addrHigh; 9229 sgl->addr_lo = bpl->addrLow; 9230 9231 sgl->word2 = le32_to_cpu(sgl->word2); 9232 if ((i+1) == numBdes) 9233 bf_set(lpfc_sli4_sge_last, sgl, 1); 9234 else 9235 bf_set(lpfc_sli4_sge_last, sgl, 0); 9236 /* swap the size field back to the cpu so we 9237 * can assign it to the sgl. 9238 */ 9239 bde.tus.w = le32_to_cpu(bpl->tus.w); 9240 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 9241 /* The offsets in the sgl need to be accumulated 9242 * separately for the request and reply lists. 9243 * The request is always first, the reply follows. 9244 */ 9245 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 9246 /* add up the reply sg entries */ 9247 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 9248 inbound++; 9249 /* first inbound? reset the offset */ 9250 if (inbound == 1) 9251 offset = 0; 9252 bf_set(lpfc_sli4_sge_offset, sgl, offset); 9253 bf_set(lpfc_sli4_sge_type, sgl, 9254 LPFC_SGE_TYPE_DATA); 9255 offset += bde.tus.f.bdeSize; 9256 } 9257 sgl->word2 = cpu_to_le32(sgl->word2); 9258 bpl++; 9259 sgl++; 9260 } 9261 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 9262 /* The addrHigh and addrLow fields of the BDE have not 9263 * been byteswapped yet so they need to be swapped 9264 * before putting them in the sgl. 9265 */ 9266 sgl->addr_hi = 9267 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 9268 sgl->addr_lo = 9269 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 9270 sgl->word2 = le32_to_cpu(sgl->word2); 9271 bf_set(lpfc_sli4_sge_last, sgl, 1); 9272 sgl->word2 = cpu_to_le32(sgl->word2); 9273 sgl->sge_len = 9274 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 9275 } 9276 return sglq->sli4_xritag; 9277 } 9278 9279 /** 9280 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 9281 * @phba: Pointer to HBA context object. 9282 * @piocb: Pointer to command iocb. 9283 * @wqe: Pointer to the work queue entry. 9284 * 9285 * This routine converts the iocb command to its Work Queue Entry 9286 * equivalent. The wqe pointer should not have any fields set when 9287 * this routine is called because it will memcpy over them. 9288 * This routine does not set the CQ_ID or the WQEC bits in the 9289 * wqe. 9290 * 9291 * Returns: 0 = Success, IOCB_ERROR = Failure. 
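 *
 * Illustrative caller sequence (a sketch of what the SLI4 issue path below
 * does once the sgl has been set up):
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;
 *	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);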
9292 **/ 9293 static int 9294 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9295 union lpfc_wqe128 *wqe) 9296 { 9297 uint32_t xmit_len = 0, total_len = 0; 9298 uint8_t ct = 0; 9299 uint32_t fip; 9300 uint32_t abort_tag; 9301 uint8_t command_type = ELS_COMMAND_NON_FIP; 9302 uint8_t cmnd; 9303 uint16_t xritag; 9304 uint16_t abrt_iotag; 9305 struct lpfc_iocbq *abrtiocbq; 9306 struct ulp_bde64 *bpl = NULL; 9307 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9308 int numBdes, i; 9309 struct ulp_bde64 bde; 9310 struct lpfc_nodelist *ndlp; 9311 uint32_t *pcmd; 9312 uint32_t if_type; 9313 9314 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9315 /* The fcp commands will set command type */ 9316 if (iocbq->iocb_flag & LPFC_IO_FCP) 9317 command_type = FCP_COMMAND; 9318 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9319 command_type = ELS_COMMAND_FIP; 9320 else 9321 command_type = ELS_COMMAND_NON_FIP; 9322 9323 if (phba->fcp_embed_io) 9324 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9325 /* Some of the fields are in the right position already */ 9326 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9327 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 9328 /* The ct field has moved so reset */ 9329 wqe->generic.wqe_com.word7 = 0; 9330 wqe->generic.wqe_com.word10 = 0; 9331 } 9332 9333 abort_tag = (uint32_t) iocbq->iotag; 9334 xritag = iocbq->sli4_xritag; 9335 /* words0-2 bpl convert bde */ 9336 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9337 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9338 sizeof(struct ulp_bde64); 9339 bpl = (struct ulp_bde64 *) 9340 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9341 if (!bpl) 9342 return IOCB_ERROR; 9343 9344 /* Should already be byte swapped. */ 9345 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9346 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9347 /* swap the size field back to the cpu so we 9348 * can assign it to the sgl. 
9349 */ 9350 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9351 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9352 total_len = 0; 9353 for (i = 0; i < numBdes; i++) { 9354 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9355 total_len += bde.tus.f.bdeSize; 9356 } 9357 } else 9358 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9359 9360 iocbq->iocb.ulpIoTag = iocbq->iotag; 9361 cmnd = iocbq->iocb.ulpCommand; 9362 9363 switch (iocbq->iocb.ulpCommand) { 9364 case CMD_ELS_REQUEST64_CR: 9365 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9366 ndlp = iocbq->context_un.ndlp; 9367 else 9368 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9369 if (!iocbq->iocb.ulpLe) { 9370 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9371 "2007 Only Limited Edition cmd Format" 9372 " supported 0x%x\n", 9373 iocbq->iocb.ulpCommand); 9374 return IOCB_ERROR; 9375 } 9376 9377 wqe->els_req.payload_len = xmit_len; 9378 /* Els_reguest64 has a TMO */ 9379 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9380 iocbq->iocb.ulpTimeout); 9381 /* Need a VF for word 4 set the vf bit*/ 9382 bf_set(els_req64_vf, &wqe->els_req, 0); 9383 /* And a VFID for word 12 */ 9384 bf_set(els_req64_vfid, &wqe->els_req, 0); 9385 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9386 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9387 iocbq->iocb.ulpContext); 9388 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9389 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9390 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9391 if (command_type == ELS_COMMAND_FIP) 9392 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9393 >> LPFC_FIP_ELS_ID_SHIFT); 9394 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9395 iocbq->context2)->virt); 9396 if_type = bf_get(lpfc_sli_intf_if_type, 9397 &phba->sli4_hba.sli_intf); 9398 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9399 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9400 *pcmd == ELS_CMD_SCR || 9401 *pcmd == ELS_CMD_FDISC || 9402 *pcmd == ELS_CMD_LOGO || 9403 *pcmd == ELS_CMD_PLOGI)) { 9404 bf_set(els_req64_sp, &wqe->els_req, 1); 9405 bf_set(els_req64_sid, &wqe->els_req, 9406 iocbq->vport->fc_myDID); 9407 if ((*pcmd == ELS_CMD_FLOGI) && 9408 !(phba->fc_topology == 9409 LPFC_TOPOLOGY_LOOP)) 9410 bf_set(els_req64_sid, &wqe->els_req, 0); 9411 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9412 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9413 phba->vpi_ids[iocbq->vport->vpi]); 9414 } else if (pcmd && iocbq->context1) { 9415 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9416 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9417 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9418 } 9419 } 9420 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9421 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9422 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9423 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9424 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9425 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9426 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9427 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9428 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9429 break; 9430 case CMD_XMIT_SEQUENCE64_CX: 9431 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9432 iocbq->iocb.un.ulpWord[3]); 9433 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9434 iocbq->iocb.unsli3.rcvsli3.ox_id); 9435 /* The entire sequence is transmitted for this IOCB */ 9436 xmit_len = total_len; 9437 cmnd = CMD_XMIT_SEQUENCE64_CR; 9438 if (phba->link_flag & LS_LOOPBACK_MODE) 9439 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9440 /* fall through */ 9441 case CMD_XMIT_SEQUENCE64_CR: 
9442 /* word3 iocb=io_tag32 wqe=reserved */ 9443 wqe->xmit_sequence.rsvd3 = 0; 9444 /* word4 relative_offset memcpy */ 9445 /* word5 r_ctl/df_ctl memcpy */ 9446 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9447 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9448 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9449 LPFC_WQE_IOD_WRITE); 9450 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9451 LPFC_WQE_LENLOC_WORD12); 9452 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9453 wqe->xmit_sequence.xmit_len = xmit_len; 9454 command_type = OTHER_COMMAND; 9455 break; 9456 case CMD_XMIT_BCAST64_CN: 9457 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9458 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9459 /* word4 iocb=rsvd wqe=rsvd */ 9460 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9461 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9462 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9463 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9464 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9465 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9466 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9467 LPFC_WQE_LENLOC_WORD3); 9468 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9469 break; 9470 case CMD_FCP_IWRITE64_CR: 9471 command_type = FCP_COMMAND_DATA_OUT; 9472 /* word3 iocb=iotag wqe=payload_offset_len */ 9473 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9474 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9475 xmit_len + sizeof(struct fcp_rsp)); 9476 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9477 0); 9478 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9479 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9480 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9481 iocbq->iocb.ulpFCP2Rcvy); 9482 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9483 /* Always open the exchange */ 9484 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9485 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9486 LPFC_WQE_LENLOC_WORD4); 9487 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9488 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9489 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9490 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9491 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9492 if (iocbq->priority) { 9493 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9494 (iocbq->priority << 1)); 9495 } else { 9496 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9497 (phba->cfg_XLanePriority << 1)); 9498 } 9499 } 9500 /* Note, word 10 is already initialized to 0 */ 9501 9502 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9503 if (phba->cfg_enable_pbde) 9504 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9505 else 9506 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9507 9508 if (phba->fcp_embed_io) { 9509 struct lpfc_io_buf *lpfc_cmd; 9510 struct sli4_sge *sgl; 9511 struct fcp_cmnd *fcp_cmnd; 9512 uint32_t *ptr; 9513 9514 /* 128 byte wqe support here */ 9515 9516 lpfc_cmd = iocbq->context1; 9517 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9518 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9519 9520 /* Word 0-2 - FCP_CMND */ 9521 wqe->generic.bde.tus.f.bdeFlags = 9522 BUFF_TYPE_BDE_IMMED; 9523 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9524 wqe->generic.bde.addrHigh = 0; 9525 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9526 9527 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9528 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9529 9530 /* Word 22-29 FCP CMND Payload */ 9531 ptr = &wqe->words[22]; 9532 memcpy(ptr, fcp_cmnd, 
sizeof(struct fcp_cmnd)); 9533 } 9534 break; 9535 case CMD_FCP_IREAD64_CR: 9536 /* word3 iocb=iotag wqe=payload_offset_len */ 9537 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9538 bf_set(payload_offset_len, &wqe->fcp_iread, 9539 xmit_len + sizeof(struct fcp_rsp)); 9540 bf_set(cmd_buff_len, &wqe->fcp_iread, 9541 0); 9542 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9543 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9544 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9545 iocbq->iocb.ulpFCP2Rcvy); 9546 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9547 /* Always open the exchange */ 9548 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9549 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9550 LPFC_WQE_LENLOC_WORD4); 9551 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9552 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9553 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9554 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9555 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9556 if (iocbq->priority) { 9557 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9558 (iocbq->priority << 1)); 9559 } else { 9560 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9561 (phba->cfg_XLanePriority << 1)); 9562 } 9563 } 9564 /* Note, word 10 is already initialized to 0 */ 9565 9566 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9567 if (phba->cfg_enable_pbde) 9568 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9569 else 9570 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9571 9572 if (phba->fcp_embed_io) { 9573 struct lpfc_io_buf *lpfc_cmd; 9574 struct sli4_sge *sgl; 9575 struct fcp_cmnd *fcp_cmnd; 9576 uint32_t *ptr; 9577 9578 /* 128 byte wqe support here */ 9579 9580 lpfc_cmd = iocbq->context1; 9581 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9582 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9583 9584 /* Word 0-2 - FCP_CMND */ 9585 wqe->generic.bde.tus.f.bdeFlags = 9586 BUFF_TYPE_BDE_IMMED; 9587 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9588 wqe->generic.bde.addrHigh = 0; 9589 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9590 9591 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9592 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9593 9594 /* Word 22-29 FCP CMND Payload */ 9595 ptr = &wqe->words[22]; 9596 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9597 } 9598 break; 9599 case CMD_FCP_ICMND64_CR: 9600 /* word3 iocb=iotag wqe=payload_offset_len */ 9601 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9602 bf_set(payload_offset_len, &wqe->fcp_icmd, 9603 xmit_len + sizeof(struct fcp_rsp)); 9604 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9605 0); 9606 /* word3 iocb=IO_TAG wqe=reserved */ 9607 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9608 /* Always open the exchange */ 9609 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9610 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9611 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9612 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9613 LPFC_WQE_LENLOC_NONE); 9614 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9615 iocbq->iocb.ulpFCP2Rcvy); 9616 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9617 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9618 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9619 if (iocbq->priority) { 9620 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9621 (iocbq->priority << 1)); 9622 } else { 9623 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9624 (phba->cfg_XLanePriority << 1)); 9625 } 9626 } 9627 /* Note, word 10 is already initialized to 0 */ 9628 9629 if (phba->fcp_embed_io) { 9630 struct lpfc_io_buf *lpfc_cmd; 9631 struct 
sli4_sge *sgl; 9632 struct fcp_cmnd *fcp_cmnd; 9633 uint32_t *ptr; 9634 9635 /* 128 byte wqe support here */ 9636 9637 lpfc_cmd = iocbq->context1; 9638 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9639 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9640 9641 /* Word 0-2 - FCP_CMND */ 9642 wqe->generic.bde.tus.f.bdeFlags = 9643 BUFF_TYPE_BDE_IMMED; 9644 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9645 wqe->generic.bde.addrHigh = 0; 9646 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9647 9648 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9649 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9650 9651 /* Word 22-29 FCP CMND Payload */ 9652 ptr = &wqe->words[22]; 9653 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9654 } 9655 break; 9656 case CMD_GEN_REQUEST64_CR: 9657 /* For this command calculate the xmit length of the 9658 * request bde. 9659 */ 9660 xmit_len = 0; 9661 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9662 sizeof(struct ulp_bde64); 9663 for (i = 0; i < numBdes; i++) { 9664 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9665 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9666 break; 9667 xmit_len += bde.tus.f.bdeSize; 9668 } 9669 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9670 wqe->gen_req.request_payload_len = xmit_len; 9671 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9672 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9673 /* word6 context tag copied in memcpy */ 9674 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9675 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9676 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9677 "2015 Invalid CT %x command 0x%x\n", 9678 ct, iocbq->iocb.ulpCommand); 9679 return IOCB_ERROR; 9680 } 9681 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9682 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9683 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9684 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9685 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9686 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9687 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9688 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9689 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9690 command_type = OTHER_COMMAND; 9691 break; 9692 case CMD_XMIT_ELS_RSP64_CX: 9693 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9694 /* words0-2 BDE memcpy */ 9695 /* word3 iocb=iotag32 wqe=response_payload_len */ 9696 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9697 /* word4 */ 9698 wqe->xmit_els_rsp.word4 = 0; 9699 /* word5 iocb=rsvd wge=did */ 9700 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9701 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9702 9703 if_type = bf_get(lpfc_sli_intf_if_type, 9704 &phba->sli4_hba.sli_intf); 9705 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9706 if (iocbq->vport->fc_flag & FC_PT2PT) { 9707 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9708 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9709 iocbq->vport->fc_myDID); 9710 if (iocbq->vport->fc_myDID == Fabric_DID) { 9711 bf_set(wqe_els_did, 9712 &wqe->xmit_els_rsp.wqe_dest, 0); 9713 } 9714 } 9715 } 9716 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9717 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9718 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9719 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9720 iocbq->iocb.unsli3.rcvsli3.ox_id); 9721 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9722 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9723 phba->vpi_ids[iocbq->vport->vpi]); 9724 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 
1); 9725 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9726 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9727 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9728 LPFC_WQE_LENLOC_WORD3); 9729 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9730 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9731 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9732 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9733 iocbq->context2)->virt); 9734 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9735 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9736 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9737 iocbq->vport->fc_myDID); 9738 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9739 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9740 phba->vpi_ids[phba->pport->vpi]); 9741 } 9742 command_type = OTHER_COMMAND; 9743 break; 9744 case CMD_CLOSE_XRI_CN: 9745 case CMD_ABORT_XRI_CN: 9746 case CMD_ABORT_XRI_CX: 9747 /* words 0-2 are reserved; the memcpy should have left them 0 */ 9748 /* port will send abts */ 9749 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9750 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9751 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9752 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9753 } else 9754 fip = 0; 9755 9756 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9757 /* 9758 * The link is down, or the command was ELS_FIP, 9759 * so the fw does not need to send abts 9760 * on the wire. 9761 */ 9762 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9763 else 9764 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9765 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9766 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9767 wqe->abort_cmd.rsrvd5 = 0; 9768 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9769 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9770 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9771 /* 9772 * The abort handler will send us CMD_ABORT_XRI_CN or 9773 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9774 */ 9775 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9776 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9777 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9778 LPFC_WQE_LENLOC_NONE); 9779 cmnd = CMD_ABORT_XRI_CX; 9780 command_type = OTHER_COMMAND; 9781 xritag = 0; 9782 break; 9783 case CMD_XMIT_BLS_RSP64_CX: 9784 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9785 /* As BLS ABTS RSP WQE is very different from other WQEs, 9786 * we re-construct this WQE here based on information in 9787 * iocbq from scratch. 9788 */ 9789 memset(wqe, 0, sizeof(union lpfc_wqe)); 9790 /* OX_ID is the same regardless of who sent the ABTS to the CT exchange */ 9791 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9792 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9793 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9794 LPFC_ABTS_UNSOL_INT) { 9795 /* ABTS sent by initiator to CT exchange, the 9796 * RX_ID field will be filled with the newly 9797 * allocated responder XRI. 9798 */ 9799 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9800 iocbq->sli4_xritag); 9801 } else { 9802 /* ABTS sent by responder to CT exchange, the 9803 * RX_ID field will be filled with the responder 9804 * RX_ID from ABTS.
9805 */ 9806 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9807 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9808 } 9809 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9810 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9811 9812 /* Use CT=VPI */ 9813 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9814 ndlp->nlp_DID); 9815 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9816 iocbq->iocb.ulpContext); 9817 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9818 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9819 phba->vpi_ids[phba->pport->vpi]); 9820 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9821 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9822 LPFC_WQE_LENLOC_NONE); 9823 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9824 command_type = OTHER_COMMAND; 9825 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9826 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9827 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9828 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9829 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9830 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9831 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9832 } 9833 9834 break; 9835 case CMD_SEND_FRAME: 9836 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9837 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9838 return 0; 9839 case CMD_XRI_ABORTED_CX: 9840 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9841 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9842 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9843 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9844 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9845 default: 9846 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9847 "2014 Invalid command 0x%x\n", 9848 iocbq->iocb.ulpCommand); 9849 return IOCB_ERROR; 9850 break; 9851 } 9852 9853 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9854 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9855 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9856 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9857 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9858 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9859 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9860 LPFC_IO_DIF_INSERT); 9861 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9862 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9863 wqe->generic.wqe_com.abort_tag = abort_tag; 9864 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9865 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9866 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9867 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9868 return 0; 9869 } 9870 9871 /** 9872 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9873 * @phba: Pointer to HBA context object. 9874 * @ring_number: SLI ring number to issue iocb on. 9875 * @piocb: Pointer to command iocb. 9876 * @flag: Flag indicating if this command can be put into txq. 9877 * 9878 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9879 * an iocb command to an HBA with SLI-4 interface spec. 9880 * 9881 * This function is called with hbalock held. The function will return success 9882 * after it successfully submit the iocb to firmware or after adding to the 9883 * txq. 
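 *
 * Note that for SLI4 the lock actually asserted is the per-ring ring_lock
 * rather than the hbalock; a sketch of the locked caller (this is what
 * lpfc_sli_issue_iocb below does):
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);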
9884 **/ 9885 static int 9886 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 9887 struct lpfc_iocbq *piocb, uint32_t flag) 9888 { 9889 struct lpfc_sglq *sglq; 9890 union lpfc_wqe128 wqe; 9891 struct lpfc_queue *wq; 9892 struct lpfc_sli_ring *pring; 9893 9894 /* Get the WQ */ 9895 if ((piocb->iocb_flag & LPFC_IO_FCP) || 9896 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9897 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq; 9898 } else { 9899 wq = phba->sli4_hba.els_wq; 9900 } 9901 9902 /* Get corresponding ring */ 9903 pring = wq->pring; 9904 9905 /* 9906 * The WQE can be either 64 or 128 bytes. 9907 */ 9908 9909 lockdep_assert_held(&pring->ring_lock); 9910 9911 if (piocb->sli4_xritag == NO_XRI) { 9912 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 9913 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 9914 sglq = NULL; 9915 else { 9916 if (!list_empty(&pring->txq)) { 9917 if (!(flag & SLI_IOCB_RET_IOCB)) { 9918 __lpfc_sli_ringtx_put(phba, 9919 pring, piocb); 9920 return IOCB_SUCCESS; 9921 } else { 9922 return IOCB_BUSY; 9923 } 9924 } else { 9925 sglq = __lpfc_sli_get_els_sglq(phba, piocb); 9926 if (!sglq) { 9927 if (!(flag & SLI_IOCB_RET_IOCB)) { 9928 __lpfc_sli_ringtx_put(phba, 9929 pring, 9930 piocb); 9931 return IOCB_SUCCESS; 9932 } else 9933 return IOCB_BUSY; 9934 } 9935 } 9936 } 9937 } else if (piocb->iocb_flag & LPFC_IO_FCP) 9938 /* These IOs already have an XRI and a mapped sgl. */ 9939 sglq = NULL; 9940 else { 9941 /* 9942 * This is a continuation of a command (CX), so this 9943 * sglq is on the active list. 9944 */ 9945 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 9946 if (!sglq) 9947 return IOCB_ERROR; 9948 } 9949 9950 if (sglq) { 9951 piocb->sli4_lxritag = sglq->sli4_lxritag; 9952 piocb->sli4_xritag = sglq->sli4_xritag; 9953 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 9954 return IOCB_ERROR; 9955 } 9956 9957 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 9958 return IOCB_ERROR; 9959 9960 if (lpfc_sli4_wq_put(wq, &wqe)) 9961 return IOCB_ERROR; 9962 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 9963 9964 return 0; 9965 } 9966 9967 /** 9968 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 9969 * 9970 * This routine wraps the actual lockless IOCB issuing routine through the 9971 * function pointer stored in the lpfc_hba struct. 9972 * 9973 * Return codes: 9974 * IOCB_ERROR - Error 9975 * IOCB_SUCCESS - Success 9976 * IOCB_BUSY - Busy 9977 **/ 9978 int 9979 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9980 struct lpfc_iocbq *piocb, uint32_t flag) 9981 { 9982 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9983 } 9984 9985 /** 9986 * lpfc_sli_api_table_setup - Set up sli api function jump table 9987 * @phba: The hba struct for which this call is being executed. 9988 * @dev_grp: The HBA PCI-Device group number. 9989 * 9990 * This routine sets up the SLI interface API function jump table in @phba 9991 * struct. 9992 * Returns: 0 - success, -ENODEV - failure.
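 *
 * A short sketch of how the jump table is consumed once it has been set up
 * (the wrappers in this file simply indirect through the stored pointers;
 * the early-return style here is illustrative):
 *
 *	if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
 *		return -ENODEV;
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);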
9993 **/ 9994 int 9995 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 9996 { 9997 9998 switch (dev_grp) { 9999 case LPFC_PCI_DEV_LP: 10000 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 10001 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 10002 break; 10003 case LPFC_PCI_DEV_OC: 10004 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 10005 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 10006 break; 10007 default: 10008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10009 "1419 Invalid HBA PCI-device group: 0x%x\n", 10010 dev_grp); 10011 return -ENODEV; 10012 break; 10013 } 10014 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 10015 return 0; 10016 } 10017 10018 /** 10019 * lpfc_sli4_calc_ring - Calculates which ring to use 10020 * @phba: Pointer to HBA context object. 10021 * @piocb: Pointer to command iocb. 10022 * 10023 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on 10024 * hba_wqidx, so we need to calculate the corresponding ring. 10025 * Since ABORTs must go on the same WQ as the command they are 10026 * aborting, we use the command's hba_wqidx. 10027 */ 10028 struct lpfc_sli_ring * 10029 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 10030 { 10031 struct lpfc_io_buf *lpfc_cmd; 10032 10033 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 10034 if (unlikely(!phba->sli4_hba.hdwq)) 10035 return NULL; 10036 /* 10037 * For an abort iocb, hba_wqidx should already 10038 * be set up based on which work queue we used. 10039 */ 10040 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 10041 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; 10042 piocb->hba_wqidx = lpfc_cmd->hdwq_no; 10043 } 10044 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; 10045 } else { 10046 if (unlikely(!phba->sli4_hba.els_wq)) 10047 return NULL; 10048 piocb->hba_wqidx = 0; 10049 return phba->sli4_hba.els_wq->pring; 10050 } 10051 } 10052 10053 /** 10054 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 10055 * @phba: Pointer to HBA context object. 10056 * @pring: Pointer to driver SLI ring object. 10057 * @piocb: Pointer to command iocb. 10058 * @flag: Flag indicating if this command can be put into txq. 10059 * 10060 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb. It takes 10061 * the appropriate lock (the ring_lock for SLI4, the hbalock for SLI2/SLI3), 10062 * calls __lpfc_sli_issue_iocb, and returns whatever that routine 10063 * returns. This wrapper is used by 10064 * callers which do not already hold the lock. 10065 **/ 10066 int 10067 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 10068 struct lpfc_iocbq *piocb, uint32_t flag) 10069 { 10070 struct lpfc_sli_ring *pring; 10071 unsigned long iflags; 10072 int rc; 10073 10074 if (phba->sli_rev == LPFC_SLI_REV4) { 10075 pring = lpfc_sli4_calc_ring(phba, piocb); 10076 if (unlikely(pring == NULL)) 10077 return IOCB_ERROR; 10078 10079 spin_lock_irqsave(&pring->ring_lock, iflags); 10080 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10081 spin_unlock_irqrestore(&pring->ring_lock, iflags); 10082 } else { 10083 /* For now, SLI2/3 will still use hbalock */ 10084 spin_lock_irqsave(&phba->hbalock, iflags); 10085 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10086 spin_unlock_irqrestore(&phba->hbalock, iflags); 10087 } 10088 return rc; 10089 } 10090 10091 /** 10092 * lpfc_extra_ring_setup - Extra ring setup function 10093 * @phba: Pointer to HBA context object.
10094 * 10095 * This function is called while driver attaches with the 10096 * HBA to setup the extra ring. The extra ring is used 10097 * only when driver needs to support target mode functionality 10098 * or IP over FC functionalities. 10099 * 10100 * This function is called with no lock held. SLI3 only. 10101 **/ 10102 static int 10103 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10104 { 10105 struct lpfc_sli *psli; 10106 struct lpfc_sli_ring *pring; 10107 10108 psli = &phba->sli; 10109 10110 /* Adjust cmd/rsp ring iocb entries more evenly */ 10111 10112 /* Take some away from the FCP ring */ 10113 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10114 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10115 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10116 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10117 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10118 10119 /* and give them to the extra ring */ 10120 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10121 10122 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10123 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10124 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10125 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10126 10127 /* Setup default profile for this ring */ 10128 pring->iotag_max = 4096; 10129 pring->num_mask = 1; 10130 pring->prt[0].profile = 0; /* Mask 0 */ 10131 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10132 pring->prt[0].type = phba->cfg_multi_ring_type; 10133 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10134 return 0; 10135 } 10136 10137 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 10138 * @phba: Pointer to HBA context object. 10139 * @iocbq: Pointer to iocb object. 10140 * 10141 * The async_event handler calls this routine when it receives 10142 * an ASYNC_STATUS_CN event from the port. The port generates 10143 * this event when an Abort Sequence request to an rport fails 10144 * twice in succession. The abort could be originated by the 10145 * driver or by the port. The ABTS could have been for an ELS 10146 * or FCP IO. The port only generates this event when an ABTS 10147 * fails to complete after one retry. 10148 */ 10149 static void 10150 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 10151 struct lpfc_iocbq *iocbq) 10152 { 10153 struct lpfc_nodelist *ndlp = NULL; 10154 uint16_t rpi = 0, vpi = 0; 10155 struct lpfc_vport *vport = NULL; 10156 10157 /* The rpi in the ulpContext is vport-sensitive. */ 10158 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 10159 rpi = iocbq->iocb.ulpContext; 10160 10161 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10162 "3092 Port generated ABTS async event " 10163 "on vpi %d rpi %d status 0x%x\n", 10164 vpi, rpi, iocbq->iocb.ulpStatus); 10165 10166 vport = lpfc_find_vport_by_vpid(phba, vpi); 10167 if (!vport) 10168 goto err_exit; 10169 ndlp = lpfc_findnode_rpi(vport, rpi); 10170 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 10171 goto err_exit; 10172 10173 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 10174 lpfc_sli_abts_recover_port(vport, ndlp); 10175 return; 10176 10177 err_exit: 10178 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10179 "3095 Event Context not found, no " 10180 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 10181 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 10182 vpi, rpi); 10183 } 10184 10185 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 10186 * @phba: pointer to HBA context object. 
10187 * @ndlp: nodelist pointer for the impacted rport. 10188 * @axri: pointer to the wcqe containing the failed exchange. 10189 * 10190 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 10191 * port. The port generates this event when an abort exchange request to an 10192 * rport fails twice in succession with no reply. The abort could be originated 10193 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 10194 */ 10195 void 10196 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 10197 struct lpfc_nodelist *ndlp, 10198 struct sli4_wcqe_xri_aborted *axri) 10199 { 10200 struct lpfc_vport *vport; 10201 uint32_t ext_status = 0; 10202 10203 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 10204 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10205 "3115 Node Context not found, driver " 10206 "ignoring abts err event\n"); 10207 return; 10208 } 10209 10210 vport = ndlp->vport; 10211 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10212 "3116 Port generated FCP XRI ABORT event on " 10213 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 10214 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 10215 bf_get(lpfc_wcqe_xa_xri, axri), 10216 bf_get(lpfc_wcqe_xa_status, axri), 10217 axri->parameter); 10218 10219 /* 10220 * Catch the ABTS protocol failure case. Older OCe FW releases returned 10221 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 10222 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 10223 */ 10224 ext_status = axri->parameter & IOERR_PARAM_MASK; 10225 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 10226 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 10227 lpfc_sli_abts_recover_port(vport, ndlp); 10228 } 10229 10230 /** 10231 * lpfc_sli_async_event_handler - ASYNC iocb handler function 10232 * @phba: Pointer to HBA context object. 10233 * @pring: Pointer to driver SLI ring object. 10234 * @iocbq: Pointer to iocb object. 10235 * 10236 * This function is called by the slow ring event handler 10237 * function when there is an ASYNC event iocb in the ring. 10238 * This function is called with no lock held. 10239 * Currently this function handles only temperature related 10240 * ASYNC events. The function decodes the temperature sensor 10241 * event message and posts events for the management applications. 10242 **/ 10243 static void 10244 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 10245 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 10246 { 10247 IOCB_t *icmd; 10248 uint16_t evt_code; 10249 struct temp_event temp_event_data; 10250 struct Scsi_Host *shost; 10251 uint32_t *iocb_w; 10252 10253 icmd = &iocbq->iocb; 10254 evt_code = icmd->un.asyncstat.evt_code; 10255 10256 switch (evt_code) { 10257 case ASYNC_TEMP_WARN: 10258 case ASYNC_TEMP_SAFE: 10259 temp_event_data.data = (uint32_t) icmd->ulpContext; 10260 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10261 if (evt_code == ASYNC_TEMP_WARN) { 10262 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10263 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10264 "0347 Adapter is very hot, please take " 10265 "corrective action. temperature : %d Celsius\n", 10266 (uint32_t) icmd->ulpContext); 10267 } else { 10268 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10269 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10270 "0340 Adapter temperature is OK now. 
" 10271 "temperature : %d Celsius\n", 10272 (uint32_t) icmd->ulpContext); 10273 } 10274 10275 /* Send temperature change event to applications */ 10276 shost = lpfc_shost_from_vport(phba->pport); 10277 fc_host_post_vendor_event(shost, fc_get_event_number(), 10278 sizeof(temp_event_data), (char *) &temp_event_data, 10279 LPFC_NL_VENDOR_ID); 10280 break; 10281 case ASYNC_STATUS_CN: 10282 lpfc_sli_abts_err_handler(phba, iocbq); 10283 break; 10284 default: 10285 iocb_w = (uint32_t *) icmd; 10286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10287 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10288 " evt_code 0x%x\n" 10289 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 10290 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 10291 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 10292 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 10293 pring->ringno, icmd->un.asyncstat.evt_code, 10294 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 10295 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 10296 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 10297 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 10298 10299 break; 10300 } 10301 } 10302 10303 10304 /** 10305 * lpfc_sli4_setup - SLI ring setup function 10306 * @phba: Pointer to HBA context object. 10307 * 10308 * lpfc_sli_setup sets up rings of the SLI interface with 10309 * number of iocbs per ring and iotags. This function is 10310 * called while driver attach to the HBA and before the 10311 * interrupts are enabled. So there is no need for locking. 10312 * 10313 * This function always returns 0. 10314 **/ 10315 int 10316 lpfc_sli4_setup(struct lpfc_hba *phba) 10317 { 10318 struct lpfc_sli_ring *pring; 10319 10320 pring = phba->sli4_hba.els_wq->pring; 10321 pring->num_mask = LPFC_MAX_RING_MASK; 10322 pring->prt[0].profile = 0; /* Mask 0 */ 10323 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10324 pring->prt[0].type = FC_TYPE_ELS; 10325 pring->prt[0].lpfc_sli_rcv_unsol_event = 10326 lpfc_els_unsol_event; 10327 pring->prt[1].profile = 0; /* Mask 1 */ 10328 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10329 pring->prt[1].type = FC_TYPE_ELS; 10330 pring->prt[1].lpfc_sli_rcv_unsol_event = 10331 lpfc_els_unsol_event; 10332 pring->prt[2].profile = 0; /* Mask 2 */ 10333 /* NameServer Inquiry */ 10334 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10335 /* NameServer */ 10336 pring->prt[2].type = FC_TYPE_CT; 10337 pring->prt[2].lpfc_sli_rcv_unsol_event = 10338 lpfc_ct_unsol_event; 10339 pring->prt[3].profile = 0; /* Mask 3 */ 10340 /* NameServer response */ 10341 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10342 /* NameServer */ 10343 pring->prt[3].type = FC_TYPE_CT; 10344 pring->prt[3].lpfc_sli_rcv_unsol_event = 10345 lpfc_ct_unsol_event; 10346 return 0; 10347 } 10348 10349 /** 10350 * lpfc_sli_setup - SLI ring setup function 10351 * @phba: Pointer to HBA context object. 10352 * 10353 * lpfc_sli_setup sets up rings of the SLI interface with 10354 * number of iocbs per ring and iotags. This function is 10355 * called while driver attach to the HBA and before the 10356 * interrupts are enabled. So there is no need for locking. 10357 * 10358 * This function always returns 0. SLI3 only. 
10359 **/ 10360 int 10361 lpfc_sli_setup(struct lpfc_hba *phba) 10362 { 10363 int i, totiocbsize = 0; 10364 struct lpfc_sli *psli = &phba->sli; 10365 struct lpfc_sli_ring *pring; 10366 10367 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10368 psli->sli_flag = 0; 10369 10370 psli->iocbq_lookup = NULL; 10371 psli->iocbq_lookup_len = 0; 10372 psli->last_iotag = 0; 10373 10374 for (i = 0; i < psli->num_rings; i++) { 10375 pring = &psli->sli3_ring[i]; 10376 switch (i) { 10377 case LPFC_FCP_RING: /* ring 0 - FCP */ 10378 /* numCiocb and numRiocb are used in config_port */ 10379 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10380 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10381 pring->sli.sli3.numCiocb += 10382 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10383 pring->sli.sli3.numRiocb += 10384 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10385 pring->sli.sli3.numCiocb += 10386 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10387 pring->sli.sli3.numRiocb += 10388 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10389 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10390 SLI3_IOCB_CMD_SIZE : 10391 SLI2_IOCB_CMD_SIZE; 10392 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10393 SLI3_IOCB_RSP_SIZE : 10394 SLI2_IOCB_RSP_SIZE; 10395 pring->iotag_ctr = 0; 10396 pring->iotag_max = 10397 (phba->cfg_hba_queue_depth * 2); 10398 pring->fast_iotag = pring->iotag_max; 10399 pring->num_mask = 0; 10400 break; 10401 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10402 /* numCiocb and numRiocb are used in config_port */ 10403 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10404 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10405 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10406 SLI3_IOCB_CMD_SIZE : 10407 SLI2_IOCB_CMD_SIZE; 10408 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10409 SLI3_IOCB_RSP_SIZE : 10410 SLI2_IOCB_RSP_SIZE; 10411 pring->iotag_max = phba->cfg_hba_queue_depth; 10412 pring->num_mask = 0; 10413 break; 10414 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10415 /* numCiocb and numRiocb are used in config_port */ 10416 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10417 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10418 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10419 SLI3_IOCB_CMD_SIZE : 10420 SLI2_IOCB_CMD_SIZE; 10421 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
10422 SLI3_IOCB_RSP_SIZE :
10423 SLI2_IOCB_RSP_SIZE;
10424 pring->fast_iotag = 0;
10425 pring->iotag_ctr = 0;
10426 pring->iotag_max = 4096;
10427 pring->lpfc_sli_rcv_async_status =
10428 lpfc_sli_async_event_handler;
10429 pring->num_mask = LPFC_MAX_RING_MASK;
10430 pring->prt[0].profile = 0; /* Mask 0 */
10431 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10432 pring->prt[0].type = FC_TYPE_ELS;
10433 pring->prt[0].lpfc_sli_rcv_unsol_event =
10434 lpfc_els_unsol_event;
10435 pring->prt[1].profile = 0; /* Mask 1 */
10436 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10437 pring->prt[1].type = FC_TYPE_ELS;
10438 pring->prt[1].lpfc_sli_rcv_unsol_event =
10439 lpfc_els_unsol_event;
10440 pring->prt[2].profile = 0; /* Mask 2 */
10441 /* NameServer Inquiry */
10442 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10443 /* NameServer */
10444 pring->prt[2].type = FC_TYPE_CT;
10445 pring->prt[2].lpfc_sli_rcv_unsol_event =
10446 lpfc_ct_unsol_event;
10447 pring->prt[3].profile = 0; /* Mask 3 */
10448 /* NameServer response */
10449 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10450 /* NameServer */
10451 pring->prt[3].type = FC_TYPE_CT;
10452 pring->prt[3].lpfc_sli_rcv_unsol_event =
10453 lpfc_ct_unsol_event;
10454 break;
10455 }
10456 totiocbsize += (pring->sli.sli3.numCiocb *
10457 pring->sli.sli3.sizeCiocb) +
10458 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
10459 }
10460 if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
10461 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10462 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
10463 "SLI2 SLIM Data: x%x x%lx\n",
10464 phba->brd_no, totiocbsize,
10465 (unsigned long) MAX_SLIM_IOCB_SIZE);
10466 }
10467 if (phba->cfg_multi_ring_support == 2)
10468 lpfc_extra_ring_setup(phba);
10469
10470 return 0;
10471 }
10472
10473 /**
10474 * lpfc_sli4_queue_init - Queue initialization function
10475 * @phba: Pointer to HBA context object.
10476 *
10477 * lpfc_sli4_queue_init sets up the driver mailbox command lists and
10478 * initializes the txq, txcmplq and iocb_continueq lists and the ring
10479 * lock of every SLI4 work queue ring. This function is called during
10480 * the initialization of the SLI interface of an HBA.
10481 * This function is called with no lock held and does not return a
10482 * value.
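 *
 * For illustration only, an SLI4 bring-up path might pair this routine
 * with lpfc_sli4_setup() roughly as follows (a hedged sketch; the
 * surrounding probe/attach code is assumed and not shown here):
 *
 *	lpfc_sli4_setup(phba);
 *	lpfc_sli4_queue_init(phba);
 *
 * Both calls are made before interrupts are enabled; lpfc_sli4_queue_init
 * takes phba->hbalock internally while it initializes the lists.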
10483 **/
10484 void
10485 lpfc_sli4_queue_init(struct lpfc_hba *phba)
10486 {
10487 struct lpfc_sli *psli;
10488 struct lpfc_sli_ring *pring;
10489 int i;
10490
10491 psli = &phba->sli;
10492 spin_lock_irq(&phba->hbalock);
10493 INIT_LIST_HEAD(&psli->mboxq);
10494 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10495 /* Initialize list headers for txq and txcmplq as double linked lists */
10496 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10497 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
10498 pring->flag = 0;
10499 pring->ringno = LPFC_FCP_RING;
10500 pring->txcmplq_cnt = 0;
10501 INIT_LIST_HEAD(&pring->txq);
10502 INIT_LIST_HEAD(&pring->txcmplq);
10503 INIT_LIST_HEAD(&pring->iocb_continueq);
10504 spin_lock_init(&pring->ring_lock);
10505 }
10506 pring = phba->sli4_hba.els_wq->pring;
10507 pring->flag = 0;
10508 pring->ringno = LPFC_ELS_RING;
10509 pring->txcmplq_cnt = 0;
10510 INIT_LIST_HEAD(&pring->txq);
10511 INIT_LIST_HEAD(&pring->txcmplq);
10512 INIT_LIST_HEAD(&pring->iocb_continueq);
10513 spin_lock_init(&pring->ring_lock);
10514
10515 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10516 for (i = 0; i < phba->cfg_hdw_queue; i++) {
10517 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
10518 pring->flag = 0;
10519 pring->ringno = LPFC_FCP_RING;
10520 pring->txcmplq_cnt = 0;
10521 INIT_LIST_HEAD(&pring->txq);
10522 INIT_LIST_HEAD(&pring->txcmplq);
10523 INIT_LIST_HEAD(&pring->iocb_continueq);
10524 spin_lock_init(&pring->ring_lock);
10525 }
10526 pring = phba->sli4_hba.nvmels_wq->pring;
10527 pring->flag = 0;
10528 pring->ringno = LPFC_ELS_RING;
10529 pring->txcmplq_cnt = 0;
10530 INIT_LIST_HEAD(&pring->txq);
10531 INIT_LIST_HEAD(&pring->txcmplq);
10532 INIT_LIST_HEAD(&pring->iocb_continueq);
10533 spin_lock_init(&pring->ring_lock);
10534 }
10535
10536 spin_unlock_irq(&phba->hbalock);
10537 }
10538
10539 /**
10540 * lpfc_sli_queue_init - Queue initialization function
10541 * @phba: Pointer to HBA context object.
10542 *
10543 * lpfc_sli_queue_init sets up the driver mailbox command lists and the
10544 * iocb queues for each SLI3 ring. This function also initializes the
10545 * ring indices of each ring. It is called during the initialization of
10546 * the SLI interface of an HBA.
10547 * This function is called with no lock held and does not return a
10548 * value.
10549 **/
10550 void
10551 lpfc_sli_queue_init(struct lpfc_hba *phba)
10552 {
10553 struct lpfc_sli *psli;
10554 struct lpfc_sli_ring *pring;
10555 int i;
10556
10557 psli = &phba->sli;
10558 spin_lock_irq(&phba->hbalock);
10559 INIT_LIST_HEAD(&psli->mboxq);
10560 INIT_LIST_HEAD(&psli->mboxq_cmpl);
10561 /* Initialize list headers for txq and txcmplq as double linked lists */
10562 for (i = 0; i < psli->num_rings; i++) {
10563 pring = &psli->sli3_ring[i];
10564 pring->ringno = i;
10565 pring->sli.sli3.next_cmdidx = 0;
10566 pring->sli.sli3.local_getidx = 0;
10567 pring->sli.sli3.cmdidx = 0;
10568 INIT_LIST_HEAD(&pring->iocb_continueq);
10569 INIT_LIST_HEAD(&pring->iocb_continue_saveq);
10570 INIT_LIST_HEAD(&pring->postbufq);
10571 pring->flag = 0;
10572 INIT_LIST_HEAD(&pring->txq);
10573 INIT_LIST_HEAD(&pring->txcmplq);
10574 spin_lock_init(&pring->ring_lock);
10575 }
10576 spin_unlock_irq(&phba->hbalock);
10577 }
10578
10579 /**
10580 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
10581 * @phba: Pointer to HBA context object.
10582 *
10583 * This routine flushes the mailbox command subsystem.
It will unconditionally 10584 * flush all the mailbox commands in the three possible stages in the mailbox 10585 * command sub-system: pending mailbox command queue; the outstanding mailbox 10586 * command; and completed mailbox command queue. It is caller's responsibility 10587 * to make sure that the driver is in the proper state to flush the mailbox 10588 * command sub-system. Namely, the posting of mailbox commands into the 10589 * pending mailbox command queue from the various clients must be stopped; 10590 * either the HBA is in a state that it will never works on the outstanding 10591 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10592 * mailbox command has been completed. 10593 **/ 10594 static void 10595 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10596 { 10597 LIST_HEAD(completions); 10598 struct lpfc_sli *psli = &phba->sli; 10599 LPFC_MBOXQ_t *pmb; 10600 unsigned long iflag; 10601 10602 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10603 local_bh_disable(); 10604 10605 /* Flush all the mailbox commands in the mbox system */ 10606 spin_lock_irqsave(&phba->hbalock, iflag); 10607 10608 /* The pending mailbox command queue */ 10609 list_splice_init(&phba->sli.mboxq, &completions); 10610 /* The outstanding active mailbox command */ 10611 if (psli->mbox_active) { 10612 list_add_tail(&psli->mbox_active->list, &completions); 10613 psli->mbox_active = NULL; 10614 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10615 } 10616 /* The completed mailbox command queue */ 10617 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10618 spin_unlock_irqrestore(&phba->hbalock, iflag); 10619 10620 /* Enable softirqs again, done with phba->hbalock */ 10621 local_bh_enable(); 10622 10623 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10624 while (!list_empty(&completions)) { 10625 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10626 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10627 if (pmb->mbox_cmpl) 10628 pmb->mbox_cmpl(phba, pmb); 10629 } 10630 } 10631 10632 /** 10633 * lpfc_sli_host_down - Vport cleanup function 10634 * @vport: Pointer to virtual port object. 10635 * 10636 * lpfc_sli_host_down is called to clean up the resources 10637 * associated with a vport before destroying virtual 10638 * port data structures. 10639 * This function does following operations: 10640 * - Free discovery resources associated with this virtual 10641 * port. 10642 * - Free iocbs associated with this virtual port in 10643 * the txq. 10644 * - Send abort for all iocb commands associated with this 10645 * vport in txcmplq. 10646 * 10647 * This function is called with no lock held and always returns 1. 10648 **/ 10649 int 10650 lpfc_sli_host_down(struct lpfc_vport *vport) 10651 { 10652 LIST_HEAD(completions); 10653 struct lpfc_hba *phba = vport->phba; 10654 struct lpfc_sli *psli = &phba->sli; 10655 struct lpfc_queue *qp = NULL; 10656 struct lpfc_sli_ring *pring; 10657 struct lpfc_iocbq *iocb, *next_iocb; 10658 int i; 10659 unsigned long flags = 0; 10660 uint16_t prev_pring_flag; 10661 10662 lpfc_cleanup_discovery_resources(vport); 10663 10664 spin_lock_irqsave(&phba->hbalock, flags); 10665 10666 /* 10667 * Error everything on the txq since these iocbs 10668 * have not been given to the FW yet. 
10669 * Also issue ABTS for everything on the txcmplq 10670 */ 10671 if (phba->sli_rev != LPFC_SLI_REV4) { 10672 for (i = 0; i < psli->num_rings; i++) { 10673 pring = &psli->sli3_ring[i]; 10674 prev_pring_flag = pring->flag; 10675 /* Only slow rings */ 10676 if (pring->ringno == LPFC_ELS_RING) { 10677 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10678 /* Set the lpfc data pending flag */ 10679 set_bit(LPFC_DATA_READY, &phba->data_flags); 10680 } 10681 list_for_each_entry_safe(iocb, next_iocb, 10682 &pring->txq, list) { 10683 if (iocb->vport != vport) 10684 continue; 10685 list_move_tail(&iocb->list, &completions); 10686 } 10687 list_for_each_entry_safe(iocb, next_iocb, 10688 &pring->txcmplq, list) { 10689 if (iocb->vport != vport) 10690 continue; 10691 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10692 } 10693 pring->flag = prev_pring_flag; 10694 } 10695 } else { 10696 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10697 pring = qp->pring; 10698 if (!pring) 10699 continue; 10700 if (pring == phba->sli4_hba.els_wq->pring) { 10701 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10702 /* Set the lpfc data pending flag */ 10703 set_bit(LPFC_DATA_READY, &phba->data_flags); 10704 } 10705 prev_pring_flag = pring->flag; 10706 spin_lock_irq(&pring->ring_lock); 10707 list_for_each_entry_safe(iocb, next_iocb, 10708 &pring->txq, list) { 10709 if (iocb->vport != vport) 10710 continue; 10711 list_move_tail(&iocb->list, &completions); 10712 } 10713 spin_unlock_irq(&pring->ring_lock); 10714 list_for_each_entry_safe(iocb, next_iocb, 10715 &pring->txcmplq, list) { 10716 if (iocb->vport != vport) 10717 continue; 10718 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10719 } 10720 pring->flag = prev_pring_flag; 10721 } 10722 } 10723 spin_unlock_irqrestore(&phba->hbalock, flags); 10724 10725 /* Cancel all the IOCBs from the completions list */ 10726 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10727 IOERR_SLI_DOWN); 10728 return 1; 10729 } 10730 10731 /** 10732 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10733 * @phba: Pointer to HBA context object. 10734 * 10735 * This function cleans up all iocb, buffers, mailbox commands 10736 * while shutting down the HBA. This function is called with no 10737 * lock held and always returns 1. 10738 * This function does the following to cleanup driver resources: 10739 * - Free discovery resources for each virtual port 10740 * - Cleanup any pending fabric iocbs 10741 * - Iterate through the iocb txq and free each entry 10742 * in the list. 10743 * - Free up any buffer posted to the HBA 10744 * - Free mailbox commands in the mailbox queue. 10745 **/ 10746 int 10747 lpfc_sli_hba_down(struct lpfc_hba *phba) 10748 { 10749 LIST_HEAD(completions); 10750 struct lpfc_sli *psli = &phba->sli; 10751 struct lpfc_queue *qp = NULL; 10752 struct lpfc_sli_ring *pring; 10753 struct lpfc_dmabuf *buf_ptr; 10754 unsigned long flags = 0; 10755 int i; 10756 10757 /* Shutdown the mailbox command sub-system */ 10758 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10759 10760 lpfc_hba_down_prep(phba); 10761 10762 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10763 local_bh_disable(); 10764 10765 lpfc_fabric_abort_hba(phba); 10766 10767 spin_lock_irqsave(&phba->hbalock, flags); 10768 10769 /* 10770 * Error everything on the txq since these iocbs 10771 * have not been given to the FW yet. 
10772 */ 10773 if (phba->sli_rev != LPFC_SLI_REV4) { 10774 for (i = 0; i < psli->num_rings; i++) { 10775 pring = &psli->sli3_ring[i]; 10776 /* Only slow rings */ 10777 if (pring->ringno == LPFC_ELS_RING) { 10778 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10779 /* Set the lpfc data pending flag */ 10780 set_bit(LPFC_DATA_READY, &phba->data_flags); 10781 } 10782 list_splice_init(&pring->txq, &completions); 10783 } 10784 } else { 10785 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10786 pring = qp->pring; 10787 if (!pring) 10788 continue; 10789 spin_lock_irq(&pring->ring_lock); 10790 list_splice_init(&pring->txq, &completions); 10791 spin_unlock_irq(&pring->ring_lock); 10792 if (pring == phba->sli4_hba.els_wq->pring) { 10793 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10794 /* Set the lpfc data pending flag */ 10795 set_bit(LPFC_DATA_READY, &phba->data_flags); 10796 } 10797 } 10798 } 10799 spin_unlock_irqrestore(&phba->hbalock, flags); 10800 10801 /* Cancel all the IOCBs from the completions list */ 10802 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10803 IOERR_SLI_DOWN); 10804 10805 spin_lock_irqsave(&phba->hbalock, flags); 10806 list_splice_init(&phba->elsbuf, &completions); 10807 phba->elsbuf_cnt = 0; 10808 phba->elsbuf_prev_cnt = 0; 10809 spin_unlock_irqrestore(&phba->hbalock, flags); 10810 10811 while (!list_empty(&completions)) { 10812 list_remove_head(&completions, buf_ptr, 10813 struct lpfc_dmabuf, list); 10814 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10815 kfree(buf_ptr); 10816 } 10817 10818 /* Enable softirqs again, done with phba->hbalock */ 10819 local_bh_enable(); 10820 10821 /* Return any active mbox cmds */ 10822 del_timer_sync(&psli->mbox_tmo); 10823 10824 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10825 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10826 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10827 10828 return 1; 10829 } 10830 10831 /** 10832 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10833 * @srcp: Source memory pointer. 10834 * @destp: Destination memory pointer. 10835 * @cnt: Number of words required to be copied. 10836 * 10837 * This function is used for copying data between driver memory 10838 * and the SLI memory. This function also changes the endianness 10839 * of each word if native endianness is different from SLI 10840 * endianness. This function can be called with or without 10841 * lock. 10842 **/ 10843 void 10844 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10845 { 10846 uint32_t *src = srcp; 10847 uint32_t *dest = destp; 10848 uint32_t ldata; 10849 int i; 10850 10851 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10852 ldata = *src; 10853 ldata = le32_to_cpu(ldata); 10854 *dest = ldata; 10855 src++; 10856 dest++; 10857 } 10858 } 10859 10860 10861 /** 10862 * lpfc_sli_bemem_bcopy - SLI memory copy function 10863 * @srcp: Source memory pointer. 10864 * @destp: Destination memory pointer. 10865 * @cnt: Number of words required to be copied. 10866 * 10867 * This function is used for copying data between a data structure 10868 * with big endian representation to local endianness. 10869 * This function can be called with or without lock. 
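 *
 * A minimal usage sketch (illustrative only; the buffer names are
 * assumptions): converting a 16-byte big-endian structure read from the
 * port into CPU byte order.
 *
 *	uint32_t be_words[4];	(big-endian data from the port)
 *	uint32_t cpu_words[4];
 *
 *	lpfc_sli_bemem_bcopy(be_words, cpu_words, sizeof(be_words));
 *
 * Note that the copy loop below advances in sizeof(uint32_t) strides until
 * @cnt is reached, so in practice @cnt is a byte count (typically a
 * sizeof() expression) rather than a count of 32-bit words.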
10870 **/
10871 void
10872 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10873 {
10874 uint32_t *src = srcp;
10875 uint32_t *dest = destp;
10876 uint32_t ldata;
10877 int i;
10878
10879 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10880 ldata = *src;
10881 ldata = be32_to_cpu(ldata);
10882 *dest = ldata;
10883 src++;
10884 dest++;
10885 }
10886 }
10887
10888 /**
10889 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10890 * @phba: Pointer to HBA context object.
10891 * @pring: Pointer to driver SLI ring object.
10892 * @mp: Pointer to driver buffer object.
10893 *
10894 * This function is called with no lock held.
10895 * It always returns zero after adding the buffer to the postbufq
10896 * buffer list.
10897 **/
10898 int
10899 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10900 struct lpfc_dmabuf *mp)
10901 {
10902 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10903 later */
10904 spin_lock_irq(&phba->hbalock);
10905 list_add_tail(&mp->list, &pring->postbufq);
10906 pring->postbufq_cnt++;
10907 spin_unlock_irq(&phba->hbalock);
10908 return 0;
10909 }
10910
10911 /**
10912 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10913 * @phba: Pointer to HBA context object.
10914 *
10915 * When HBQ is enabled, buffers are searched based on tags. This function
10916 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
10917 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10918 * does not conflict with tags of buffers posted for unsolicited events.
10919 * The function returns the allocated tag. The function is called with
10920 * no locks held.
10921 **/
10922 uint32_t
10923 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10924 {
10925 spin_lock_irq(&phba->hbalock);
10926 phba->buffer_tag_count++;
10927 /*
10928 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
10929 * a tag assigned by HBQ.
10930 */
10931 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10932 spin_unlock_irq(&phba->hbalock);
10933 return phba->buffer_tag_count;
10934 }
10935
10936 /**
10937 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10938 * @phba: Pointer to HBA context object.
10939 * @pring: Pointer to driver SLI ring object.
10940 * @tag: Buffer tag.
10941 *
10942 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10943 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
10944 * iocb is posted to the response ring with the tag of the buffer.
10945 * This function searches the pring->postbufq list using the tag
10946 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10947 * iocb. If the buffer is found, the lpfc_dmabuf object of the
10948 * buffer is returned to the caller; otherwise NULL is returned.
10949 * This function is called with no lock held.
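 *
 * Hedged usage sketch (the tag would normally come from the
 * CMD_IOCB_RET_XRI64_CX response iocb; error handling is elided):
 *
 *	struct lpfc_dmabuf *mp;
 *
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *	if (mp) {
 *		(consume mp->virt and mp->phys here)
 *		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 *		kfree(mp);
 *	}
 *
 * Releasing with lpfc_mbuf_free() plus kfree() mirrors how other posted
 * buffers are freed elsewhere in this file (see lpfc_sli_hba_down()).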
10950 **/ 10951 struct lpfc_dmabuf * 10952 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10953 uint32_t tag) 10954 { 10955 struct lpfc_dmabuf *mp, *next_mp; 10956 struct list_head *slp = &pring->postbufq; 10957 10958 /* Search postbufq, from the beginning, looking for a match on tag */ 10959 spin_lock_irq(&phba->hbalock); 10960 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10961 if (mp->buffer_tag == tag) { 10962 list_del_init(&mp->list); 10963 pring->postbufq_cnt--; 10964 spin_unlock_irq(&phba->hbalock); 10965 return mp; 10966 } 10967 } 10968 10969 spin_unlock_irq(&phba->hbalock); 10970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10971 "0402 Cannot find virtual addr for buffer tag on " 10972 "ring %d Data x%lx x%p x%p x%x\n", 10973 pring->ringno, (unsigned long) tag, 10974 slp->next, slp->prev, pring->postbufq_cnt); 10975 10976 return NULL; 10977 } 10978 10979 /** 10980 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10981 * @phba: Pointer to HBA context object. 10982 * @pring: Pointer to driver SLI ring object. 10983 * @phys: DMA address of the buffer. 10984 * 10985 * This function searches the buffer list using the dma_address 10986 * of unsolicited event to find the driver's lpfc_dmabuf object 10987 * corresponding to the dma_address. The function returns the 10988 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10989 * This function is called by the ct and els unsolicited event 10990 * handlers to get the buffer associated with the unsolicited 10991 * event. 10992 * 10993 * This function is called with no lock held. 10994 **/ 10995 struct lpfc_dmabuf * 10996 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10997 dma_addr_t phys) 10998 { 10999 struct lpfc_dmabuf *mp, *next_mp; 11000 struct list_head *slp = &pring->postbufq; 11001 11002 /* Search postbufq, from the beginning, looking for a match on phys */ 11003 spin_lock_irq(&phba->hbalock); 11004 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 11005 if (mp->phys == phys) { 11006 list_del_init(&mp->list); 11007 pring->postbufq_cnt--; 11008 spin_unlock_irq(&phba->hbalock); 11009 return mp; 11010 } 11011 } 11012 11013 spin_unlock_irq(&phba->hbalock); 11014 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11015 "0410 Cannot find virtual addr for mapped buf on " 11016 "ring %d Data x%llx x%p x%p x%x\n", 11017 pring->ringno, (unsigned long long)phys, 11018 slp->next, slp->prev, pring->postbufq_cnt); 11019 return NULL; 11020 } 11021 11022 /** 11023 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 11024 * @phba: Pointer to HBA context object. 11025 * @cmdiocb: Pointer to driver command iocb object. 11026 * @rspiocb: Pointer to driver response iocb object. 11027 * 11028 * This function is the completion handler for the abort iocbs for 11029 * ELS commands. This function is called from the ELS ring event 11030 * handler with no lock held. This function frees memory resources 11031 * associated with the abort iocb. 11032 **/ 11033 static void 11034 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11035 struct lpfc_iocbq *rspiocb) 11036 { 11037 IOCB_t *irsp = &rspiocb->iocb; 11038 uint16_t abort_iotag, abort_context; 11039 struct lpfc_iocbq *abort_iocb = NULL; 11040 11041 if (irsp->ulpStatus) { 11042 11043 /* 11044 * Assume that the port already completed and returned, or 11045 * will return the iocb. Just Log the message. 
11046 */ 11047 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 11048 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 11049 11050 spin_lock_irq(&phba->hbalock); 11051 if (phba->sli_rev < LPFC_SLI_REV4) { 11052 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 11053 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11054 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 11055 spin_unlock_irq(&phba->hbalock); 11056 goto release_iocb; 11057 } 11058 if (abort_iotag != 0 && 11059 abort_iotag <= phba->sli.last_iotag) 11060 abort_iocb = 11061 phba->sli.iocbq_lookup[abort_iotag]; 11062 } else 11063 /* For sli4 the abort_tag is the XRI, 11064 * so the abort routine puts the iotag of the iocb 11065 * being aborted in the context field of the abort 11066 * IOCB. 11067 */ 11068 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11069 11070 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11071 "0327 Cannot abort els iocb %p " 11072 "with tag %x context %x, abort status %x, " 11073 "abort code %x\n", 11074 abort_iocb, abort_iotag, abort_context, 11075 irsp->ulpStatus, irsp->un.ulpWord[4]); 11076 11077 spin_unlock_irq(&phba->hbalock); 11078 } 11079 release_iocb: 11080 lpfc_sli_release_iocbq(phba, cmdiocb); 11081 return; 11082 } 11083 11084 /** 11085 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11086 * @phba: Pointer to HBA context object. 11087 * @cmdiocb: Pointer to driver command iocb object. 11088 * @rspiocb: Pointer to driver response iocb object. 11089 * 11090 * The function is called from SLI ring event handler with no 11091 * lock held. This function is the completion handler for ELS commands 11092 * which are aborted. The function frees memory resources used for 11093 * the aborted ELS commands. 11094 **/ 11095 static void 11096 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11097 struct lpfc_iocbq *rspiocb) 11098 { 11099 IOCB_t *irsp = &rspiocb->iocb; 11100 11101 /* ELS cmd tag <ulpIoTag> completes */ 11102 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11103 "0139 Ignoring ELS cmd tag x%x completion Data: " 11104 "x%x x%x x%x\n", 11105 irsp->ulpIoTag, irsp->ulpStatus, 11106 irsp->un.ulpWord[4], irsp->ulpTimeout); 11107 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11108 lpfc_ct_free_iocb(phba, cmdiocb); 11109 else 11110 lpfc_els_free_iocb(phba, cmdiocb); 11111 return; 11112 } 11113 11114 /** 11115 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11116 * @phba: Pointer to HBA context object. 11117 * @pring: Pointer to driver SLI ring object. 11118 * @cmdiocb: Pointer to driver command iocb object. 11119 * 11120 * This function issues an abort iocb for the provided command iocb down to 11121 * the port. Other than the case the outstanding command iocb is an abort 11122 * request, this function issues abort out unconditionally. This function is 11123 * called with hbalock held. The function returns 0 when it fails due to 11124 * memory allocation failure or when the command iocb is an abort request. 11125 **/ 11126 static int 11127 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11128 struct lpfc_iocbq *cmdiocb) 11129 { 11130 struct lpfc_vport *vport = cmdiocb->vport; 11131 struct lpfc_iocbq *abtsiocbp; 11132 IOCB_t *icmd = NULL; 11133 IOCB_t *iabt = NULL; 11134 int retval; 11135 unsigned long iflags; 11136 struct lpfc_nodelist *ndlp; 11137 11138 lockdep_assert_held(&phba->hbalock); 11139 11140 /* 11141 * There are certain command types we don't want to abort. 
And we 11142 * don't want to abort commands that are already in the process of 11143 * being aborted. 11144 */ 11145 icmd = &cmdiocb->iocb; 11146 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11147 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11148 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11149 return 0; 11150 11151 /* issue ABTS for this IOCB based on iotag */ 11152 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11153 if (abtsiocbp == NULL) 11154 return 0; 11155 11156 /* This signals the response to set the correct status 11157 * before calling the completion handler 11158 */ 11159 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11160 11161 iabt = &abtsiocbp->iocb; 11162 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 11163 iabt->un.acxri.abortContextTag = icmd->ulpContext; 11164 if (phba->sli_rev == LPFC_SLI_REV4) { 11165 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 11166 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 11167 } else { 11168 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 11169 if (pring->ringno == LPFC_ELS_RING) { 11170 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); 11171 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; 11172 } 11173 } 11174 iabt->ulpLe = 1; 11175 iabt->ulpClass = icmd->ulpClass; 11176 11177 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11178 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 11179 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 11180 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 11181 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 11182 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 11183 11184 if (phba->link_state >= LPFC_LINK_UP) 11185 iabt->ulpCommand = CMD_ABORT_XRI_CN; 11186 else 11187 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 11188 11189 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 11190 abtsiocbp->vport = vport; 11191 11192 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 11193 "0339 Abort xri x%x, original iotag x%x, " 11194 "abort cmd iotag x%x\n", 11195 iabt->un.acxri.abortIoTag, 11196 iabt->un.acxri.abortContextTag, 11197 abtsiocbp->iotag); 11198 11199 if (phba->sli_rev == LPFC_SLI_REV4) { 11200 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 11201 if (unlikely(pring == NULL)) 11202 return 0; 11203 /* Note: both hbalock and ring_lock need to be set here */ 11204 spin_lock_irqsave(&pring->ring_lock, iflags); 11205 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11206 abtsiocbp, 0); 11207 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11208 } else { 11209 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11210 abtsiocbp, 0); 11211 } 11212 11213 if (retval) 11214 __lpfc_sli_release_iocbq(phba, abtsiocbp); 11215 11216 /* 11217 * Caller to this routine should check for IOCB_ERROR 11218 * and handle it properly. This routine no longer removes 11219 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11220 */ 11221 return retval; 11222 } 11223 11224 /** 11225 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 11226 * @phba: Pointer to HBA context object. 11227 * @pring: Pointer to driver SLI ring object. 11228 * @cmdiocb: Pointer to driver command iocb object. 11229 * 11230 * This function issues an abort iocb for the provided command iocb. In case 11231 * of unloading, the abort iocb will not be issued to commands on the ELS 11232 * ring. Instead, the callback function shall be changed to those commands 11233 * so that nothing happens when them finishes. This function is called with 11234 * hbalock held. The function returns 0 when the command iocb is an abort 11235 * request. 
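 *
 * A hedged usage sketch, mirroring the txcmplq walks earlier in this file
 * (for example lpfc_sli_host_down()); the local variable names are
 * assumptions:
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 *
 * The return value only indicates whether the abort was issued; completion
 * of the aborted iocb is still reported through its own completion handler.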
11236 **/ 11237 int 11238 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11239 struct lpfc_iocbq *cmdiocb) 11240 { 11241 struct lpfc_vport *vport = cmdiocb->vport; 11242 int retval = IOCB_ERROR; 11243 IOCB_t *icmd = NULL; 11244 11245 lockdep_assert_held(&phba->hbalock); 11246 11247 /* 11248 * There are certain command types we don't want to abort. And we 11249 * don't want to abort commands that are already in the process of 11250 * being aborted. 11251 */ 11252 icmd = &cmdiocb->iocb; 11253 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11254 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11255 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11256 return 0; 11257 11258 if (!pring) { 11259 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11260 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11261 else 11262 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11263 goto abort_iotag_exit; 11264 } 11265 11266 /* 11267 * If we're unloading, don't abort iocb on the ELS ring, but change 11268 * the callback so that nothing happens when it finishes. 11269 */ 11270 if ((vport->load_flag & FC_UNLOADING) && 11271 (pring->ringno == LPFC_ELS_RING)) { 11272 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11273 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11274 else 11275 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11276 goto abort_iotag_exit; 11277 } 11278 11279 /* Now, we try to issue the abort to the cmdiocb out */ 11280 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11281 11282 abort_iotag_exit: 11283 /* 11284 * Caller to this routine should check for IOCB_ERROR 11285 * and handle it properly. This routine no longer removes 11286 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11287 */ 11288 return retval; 11289 } 11290 11291 /** 11292 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11293 * @phba: pointer to lpfc HBA data structure. 11294 * 11295 * This routine will abort all pending and outstanding iocbs to an HBA. 11296 **/ 11297 void 11298 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11299 { 11300 struct lpfc_sli *psli = &phba->sli; 11301 struct lpfc_sli_ring *pring; 11302 struct lpfc_queue *qp = NULL; 11303 int i; 11304 11305 if (phba->sli_rev != LPFC_SLI_REV4) { 11306 for (i = 0; i < psli->num_rings; i++) { 11307 pring = &psli->sli3_ring[i]; 11308 lpfc_sli_abort_iocb_ring(phba, pring); 11309 } 11310 return; 11311 } 11312 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11313 pring = qp->pring; 11314 if (!pring) 11315 continue; 11316 lpfc_sli_abort_iocb_ring(phba, pring); 11317 } 11318 } 11319 11320 /** 11321 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11322 * @iocbq: Pointer to driver iocb object. 11323 * @vport: Pointer to driver virtual port object. 11324 * @tgt_id: SCSI ID of the target. 11325 * @lun_id: LUN ID of the scsi device. 11326 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11327 * 11328 * This function acts as an iocb filter for functions which abort or count 11329 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11330 * 0 if the filtering criteria is met for the given iocb and will return 11331 * 1 if the filtering criteria is not met. 11332 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11333 * given iocb is for the SCSI device specified by vport, tgt_id and 11334 * lun_id parameter. 11335 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11336 * given iocb is for the SCSI target specified by vport and tgt_id 11337 * parameters. 
11338 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11339 * given iocb is for the SCSI host associated with the given vport. 11340 * This function is called with no locks held. 11341 **/ 11342 static int 11343 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11344 uint16_t tgt_id, uint64_t lun_id, 11345 lpfc_ctx_cmd ctx_cmd) 11346 { 11347 struct lpfc_io_buf *lpfc_cmd; 11348 int rc = 1; 11349 11350 if (iocbq->vport != vport) 11351 return rc; 11352 11353 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11354 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11355 return rc; 11356 11357 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11358 11359 if (lpfc_cmd->pCmd == NULL) 11360 return rc; 11361 11362 switch (ctx_cmd) { 11363 case LPFC_CTX_LUN: 11364 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11365 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11366 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11367 rc = 0; 11368 break; 11369 case LPFC_CTX_TGT: 11370 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11371 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11372 rc = 0; 11373 break; 11374 case LPFC_CTX_HOST: 11375 rc = 0; 11376 break; 11377 default: 11378 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11379 __func__, ctx_cmd); 11380 break; 11381 } 11382 11383 return rc; 11384 } 11385 11386 /** 11387 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11388 * @vport: Pointer to virtual port. 11389 * @tgt_id: SCSI ID of the target. 11390 * @lun_id: LUN ID of the scsi device. 11391 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11392 * 11393 * This function returns number of FCP commands pending for the vport. 11394 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11395 * commands pending on the vport associated with SCSI device specified 11396 * by tgt_id and lun_id parameters. 11397 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11398 * commands pending on the vport associated with SCSI target specified 11399 * by tgt_id parameter. 11400 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11401 * commands pending on the vport. 11402 * This function returns the number of iocbs which satisfy the filter. 11403 * This function is called without any lock held. 11404 **/ 11405 int 11406 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11407 lpfc_ctx_cmd ctx_cmd) 11408 { 11409 struct lpfc_hba *phba = vport->phba; 11410 struct lpfc_iocbq *iocbq; 11411 int sum, i; 11412 11413 spin_lock_irq(&phba->hbalock); 11414 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11415 iocbq = phba->sli.iocbq_lookup[i]; 11416 11417 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11418 ctx_cmd) == 0) 11419 sum++; 11420 } 11421 spin_unlock_irq(&phba->hbalock); 11422 11423 return sum; 11424 } 11425 11426 /** 11427 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11428 * @phba: Pointer to HBA context object 11429 * @cmdiocb: Pointer to command iocb object. 11430 * @rspiocb: Pointer to response iocb object. 11431 * 11432 * This function is called when an aborted FCP iocb completes. This 11433 * function is called by the ring event handler with no lock held. 11434 * This function frees the iocb. 
11435 **/ 11436 void 11437 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11438 struct lpfc_iocbq *rspiocb) 11439 { 11440 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11441 "3096 ABORT_XRI_CN completing on rpi x%x " 11442 "original iotag x%x, abort cmd iotag x%x " 11443 "status 0x%x, reason 0x%x\n", 11444 cmdiocb->iocb.un.acxri.abortContextTag, 11445 cmdiocb->iocb.un.acxri.abortIoTag, 11446 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11447 rspiocb->iocb.un.ulpWord[4]); 11448 lpfc_sli_release_iocbq(phba, cmdiocb); 11449 return; 11450 } 11451 11452 /** 11453 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11454 * @vport: Pointer to virtual port. 11455 * @pring: Pointer to driver SLI ring object. 11456 * @tgt_id: SCSI ID of the target. 11457 * @lun_id: LUN ID of the scsi device. 11458 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11459 * 11460 * This function sends an abort command for every SCSI command 11461 * associated with the given virtual port pending on the ring 11462 * filtered by lpfc_sli_validate_fcp_iocb function. 11463 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11464 * FCP iocbs associated with lun specified by tgt_id and lun_id 11465 * parameters 11466 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11467 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11468 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11469 * FCP iocbs associated with virtual port. 11470 * This function returns number of iocbs it failed to abort. 11471 * This function is called with no locks held. 11472 **/ 11473 int 11474 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11475 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11476 { 11477 struct lpfc_hba *phba = vport->phba; 11478 struct lpfc_iocbq *iocbq; 11479 struct lpfc_iocbq *abtsiocb; 11480 struct lpfc_sli_ring *pring_s4; 11481 IOCB_t *cmd = NULL; 11482 int errcnt = 0, ret_val = 0; 11483 int i; 11484 11485 /* all I/Os are in process of being flushed */ 11486 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) 11487 return errcnt; 11488 11489 for (i = 1; i <= phba->sli.last_iotag; i++) { 11490 iocbq = phba->sli.iocbq_lookup[i]; 11491 11492 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11493 abort_cmd) != 0) 11494 continue; 11495 11496 /* 11497 * If the iocbq is already being aborted, don't take a second 11498 * action, but do count it. 11499 */ 11500 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11501 continue; 11502 11503 /* issue ABTS for this IOCB based on iotag */ 11504 abtsiocb = lpfc_sli_get_iocbq(phba); 11505 if (abtsiocb == NULL) { 11506 errcnt++; 11507 continue; 11508 } 11509 11510 /* indicate the IO is being aborted by the driver. 
*/ 11511 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11512 11513 cmd = &iocbq->iocb; 11514 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11515 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11516 if (phba->sli_rev == LPFC_SLI_REV4) 11517 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11518 else 11519 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11520 abtsiocb->iocb.ulpLe = 1; 11521 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11522 abtsiocb->vport = vport; 11523 11524 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11525 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11526 if (iocbq->iocb_flag & LPFC_IO_FCP) 11527 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11528 if (iocbq->iocb_flag & LPFC_IO_FOF) 11529 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11530 11531 if (lpfc_is_link_up(phba)) 11532 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11533 else 11534 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11535 11536 /* Setup callback routine and issue the command. */ 11537 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11538 if (phba->sli_rev == LPFC_SLI_REV4) { 11539 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11540 if (!pring_s4) 11541 continue; 11542 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11543 abtsiocb, 0); 11544 } else 11545 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11546 abtsiocb, 0); 11547 if (ret_val == IOCB_ERROR) { 11548 lpfc_sli_release_iocbq(phba, abtsiocb); 11549 errcnt++; 11550 continue; 11551 } 11552 } 11553 11554 return errcnt; 11555 } 11556 11557 /** 11558 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11559 * @vport: Pointer to virtual port. 11560 * @pring: Pointer to driver SLI ring object. 11561 * @tgt_id: SCSI ID of the target. 11562 * @lun_id: LUN ID of the scsi device. 11563 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11564 * 11565 * This function sends an abort command for every SCSI command 11566 * associated with the given virtual port pending on the ring 11567 * filtered by lpfc_sli_validate_fcp_iocb function. 11568 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11569 * FCP iocbs associated with lun specified by tgt_id and lun_id 11570 * parameters 11571 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11572 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11573 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11574 * FCP iocbs associated with virtual port. 11575 * This function returns number of iocbs it aborted . 11576 * This function is called with no locks held right after a taskmgmt 11577 * command is sent. 
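 *
 * Hedged sketch of a caller right after a LUN reset task management command
 * has been sent (the log message number and the tgt_id/lun_id values are
 * illustrative placeholders, not taken from the driver):
 *
 *	int cnt;
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *				      LPFC_CTX_LUN);
 *	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
 *			 "xxxx Issued ABTS for %d outstanding I/Os\n", cnt);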
11578 **/ 11579 int 11580 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11581 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11582 { 11583 struct lpfc_hba *phba = vport->phba; 11584 struct lpfc_io_buf *lpfc_cmd; 11585 struct lpfc_iocbq *abtsiocbq; 11586 struct lpfc_nodelist *ndlp; 11587 struct lpfc_iocbq *iocbq; 11588 IOCB_t *icmd; 11589 int sum, i, ret_val; 11590 unsigned long iflags; 11591 struct lpfc_sli_ring *pring_s4 = NULL; 11592 11593 spin_lock_irqsave(&phba->hbalock, iflags); 11594 11595 /* all I/Os are in process of being flushed */ 11596 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11597 spin_unlock_irqrestore(&phba->hbalock, iflags); 11598 return 0; 11599 } 11600 sum = 0; 11601 11602 for (i = 1; i <= phba->sli.last_iotag; i++) { 11603 iocbq = phba->sli.iocbq_lookup[i]; 11604 11605 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11606 cmd) != 0) 11607 continue; 11608 11609 /* Guard against IO completion being called at same time */ 11610 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11611 spin_lock(&lpfc_cmd->buf_lock); 11612 11613 if (!lpfc_cmd->pCmd) { 11614 spin_unlock(&lpfc_cmd->buf_lock); 11615 continue; 11616 } 11617 11618 if (phba->sli_rev == LPFC_SLI_REV4) { 11619 pring_s4 = 11620 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; 11621 if (!pring_s4) { 11622 spin_unlock(&lpfc_cmd->buf_lock); 11623 continue; 11624 } 11625 /* Note: both hbalock and ring_lock must be set here */ 11626 spin_lock(&pring_s4->ring_lock); 11627 } 11628 11629 /* 11630 * If the iocbq is already being aborted, don't take a second 11631 * action, but do count it. 11632 */ 11633 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 11634 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 11635 if (phba->sli_rev == LPFC_SLI_REV4) 11636 spin_unlock(&pring_s4->ring_lock); 11637 spin_unlock(&lpfc_cmd->buf_lock); 11638 continue; 11639 } 11640 11641 /* issue ABTS for this IOCB based on iotag */ 11642 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11643 if (!abtsiocbq) { 11644 if (phba->sli_rev == LPFC_SLI_REV4) 11645 spin_unlock(&pring_s4->ring_lock); 11646 spin_unlock(&lpfc_cmd->buf_lock); 11647 continue; 11648 } 11649 11650 icmd = &iocbq->iocb; 11651 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11652 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11653 if (phba->sli_rev == LPFC_SLI_REV4) 11654 abtsiocbq->iocb.un.acxri.abortIoTag = 11655 iocbq->sli4_xritag; 11656 else 11657 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11658 abtsiocbq->iocb.ulpLe = 1; 11659 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11660 abtsiocbq->vport = vport; 11661 11662 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11663 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11664 if (iocbq->iocb_flag & LPFC_IO_FCP) 11665 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11666 if (iocbq->iocb_flag & LPFC_IO_FOF) 11667 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11668 11669 ndlp = lpfc_cmd->rdata->pnode; 11670 11671 if (lpfc_is_link_up(phba) && 11672 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11673 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11674 else 11675 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11676 11677 /* Setup callback routine and issue the command. */ 11678 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11679 11680 /* 11681 * Indicate the IO is being aborted by the driver and set 11682 * the caller's flag into the aborted IO. 
11683 */ 11684 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11685 11686 if (phba->sli_rev == LPFC_SLI_REV4) { 11687 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11688 abtsiocbq, 0); 11689 spin_unlock(&pring_s4->ring_lock); 11690 } else { 11691 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11692 abtsiocbq, 0); 11693 } 11694 11695 spin_unlock(&lpfc_cmd->buf_lock); 11696 11697 if (ret_val == IOCB_ERROR) 11698 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11699 else 11700 sum++; 11701 } 11702 spin_unlock_irqrestore(&phba->hbalock, iflags); 11703 return sum; 11704 } 11705 11706 /** 11707 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11708 * @phba: Pointer to HBA context object. 11709 * @cmdiocbq: Pointer to command iocb. 11710 * @rspiocbq: Pointer to response iocb. 11711 * 11712 * This function is the completion handler for iocbs issued using 11713 * lpfc_sli_issue_iocb_wait function. This function is called by the 11714 * ring event handler function without any lock held. This function 11715 * can be called from both worker thread context and interrupt 11716 * context. This function also can be called from other thread which 11717 * cleans up the SLI layer objects. 11718 * This function copy the contents of the response iocb to the 11719 * response iocb memory object provided by the caller of 11720 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11721 * sleeps for the iocb completion. 11722 **/ 11723 static void 11724 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11725 struct lpfc_iocbq *cmdiocbq, 11726 struct lpfc_iocbq *rspiocbq) 11727 { 11728 wait_queue_head_t *pdone_q; 11729 unsigned long iflags; 11730 struct lpfc_io_buf *lpfc_cmd; 11731 11732 spin_lock_irqsave(&phba->hbalock, iflags); 11733 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11734 11735 /* 11736 * A time out has occurred for the iocb. If a time out 11737 * completion handler has been supplied, call it. Otherwise, 11738 * just free the iocbq. 11739 */ 11740 11741 spin_unlock_irqrestore(&phba->hbalock, iflags); 11742 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11743 cmdiocbq->wait_iocb_cmpl = NULL; 11744 if (cmdiocbq->iocb_cmpl) 11745 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11746 else 11747 lpfc_sli_release_iocbq(phba, cmdiocbq); 11748 return; 11749 } 11750 11751 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11752 if (cmdiocbq->context2 && rspiocbq) 11753 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11754 &rspiocbq->iocb, sizeof(IOCB_t)); 11755 11756 /* Set the exchange busy flag for task management commands */ 11757 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11758 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11759 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 11760 cur_iocbq); 11761 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11762 } 11763 11764 pdone_q = cmdiocbq->context_un.wait_queue; 11765 if (pdone_q) 11766 wake_up(pdone_q); 11767 spin_unlock_irqrestore(&phba->hbalock, iflags); 11768 return; 11769 } 11770 11771 /** 11772 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11773 * @phba: Pointer to HBA context object.. 11774 * @piocbq: Pointer to command iocb. 11775 * @flag: Flag to test. 11776 * 11777 * This routine grabs the hbalock and then test the iocb_flag to 11778 * see if the passed in flag is set. 11779 * Returns: 11780 * 1 if flag is set. 11781 * 0 if flag is not set. 
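 *
 * This helper exists so that the flag test can be used as a wait condition
 * while still being read under hbalock, as lpfc_sli_issue_iocb_wait() below
 * does:
 *
 *	timeleft = wait_event_timeout(done_q,
 *			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			timeout_req);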
11782 **/
11783 static int
11784 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11785 struct lpfc_iocbq *piocbq, uint32_t flag)
11786 {
11787 unsigned long iflags;
11788 int ret;
11789
11790 spin_lock_irqsave(&phba->hbalock, iflags);
11791 ret = piocbq->iocb_flag & flag;
11792 spin_unlock_irqrestore(&phba->hbalock, iflags);
11793 return ret;
11794
11795 }
11796
11797 /**
11798 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11799 * @phba: Pointer to HBA context object.
11800 * @ring_number: SLI ring number to issue the iocb on.
11801 * @piocb: Pointer to command iocb.
11802 * @prspiocbq: Pointer to response iocb.
11803 * @timeout: Timeout in number of seconds.
11804 *
11805 * This function issues the iocb to firmware and waits for the
11806 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11807 * to handle iocbs which time out. If the field is NULL, the
11808 * function shall free the iocbq structure. If more clean up is
11809 * needed, the caller is expected to provide a completion function
11810 * that will provide the needed clean up. If the iocb command is
11811 * not completed within timeout seconds, the function will either
11812 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11813 * completion function set in the iocb_cmpl field and then return
11814 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11815 * resources if this function returns IOCB_TIMEDOUT.
11816 * The function waits for the iocb completion using a
11817 * non-interruptible wait.
11818 * This function will sleep while waiting for iocb completion.
11819 * So, this function should not be called from any context which
11820 * does not allow sleeping. Due to the same reason, this function
11821 * cannot be called with interrupt disabled.
11822 * This function assumes that the iocb completions occur while
11823 * this function sleeps. So, this function cannot be called from
11824 * the thread which processes iocb completions for this ring.
11825 * This function clears the iocb_flag of the iocb object before
11826 * issuing the iocb and the iocb completion handler sets this
11827 * flag and wakes this thread when the iocb completes.
11828 * The contents of the response iocb will be copied to prspiocbq
11829 * by the completion handler when the command completes.
11830 * This function returns IOCB_SUCCESS on success.
11831 * This function is called with no lock held.
11832 **/
11833 int
11834 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11835 uint32_t ring_number,
11836 struct lpfc_iocbq *piocb,
11837 struct lpfc_iocbq *prspiocbq,
11838 uint32_t timeout)
11839 {
11840 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11841 long timeleft, timeout_req = 0;
11842 int retval = IOCB_SUCCESS;
11843 uint32_t creg_val;
11844 struct lpfc_iocbq *iocb;
11845 int txq_cnt = 0;
11846 int txcmplq_cnt = 0;
11847 struct lpfc_sli_ring *pring;
11848 unsigned long iflags;
11849 bool iocb_completed = true;
11850
11851 if (phba->sli_rev >= LPFC_SLI_REV4)
11852 pring = lpfc_sli4_calc_ring(phba, piocb);
11853 else
11854 pring = &phba->sli.sli3_ring[ring_number];
11855 /*
11856 * If the caller has provided a response iocbq buffer, then context2
11857 * must be NULL or it is an error.
11858 */ 11859 if (prspiocbq) { 11860 if (piocb->context2) 11861 return IOCB_ERROR; 11862 piocb->context2 = prspiocbq; 11863 } 11864 11865 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11866 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11867 piocb->context_un.wait_queue = &done_q; 11868 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11869 11870 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11871 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11872 return IOCB_ERROR; 11873 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11874 writel(creg_val, phba->HCregaddr); 11875 readl(phba->HCregaddr); /* flush */ 11876 } 11877 11878 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11879 SLI_IOCB_RET_IOCB); 11880 if (retval == IOCB_SUCCESS) { 11881 timeout_req = msecs_to_jiffies(timeout * 1000); 11882 timeleft = wait_event_timeout(done_q, 11883 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11884 timeout_req); 11885 spin_lock_irqsave(&phba->hbalock, iflags); 11886 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11887 11888 /* 11889 * IOCB timed out. Inform the wake iocb wait 11890 * completion function and set local status 11891 */ 11892 11893 iocb_completed = false; 11894 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11895 } 11896 spin_unlock_irqrestore(&phba->hbalock, iflags); 11897 if (iocb_completed) { 11898 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11899 "0331 IOCB wake signaled\n"); 11900 /* Note: we are not indicating if the IOCB has a success 11901 * status or not - that's for the caller to check. 11902 * IOCB_SUCCESS means just that the command was sent and 11903 * completed. Not that it completed successfully. 11904 * */ 11905 } else if (timeleft == 0) { 11906 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11907 "0338 IOCB wait timeout error - no " 11908 "wake response Data x%x\n", timeout); 11909 retval = IOCB_TIMEDOUT; 11910 } else { 11911 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11912 "0330 IOCB wake NOT set, " 11913 "Data x%x x%lx\n", 11914 timeout, (timeleft / jiffies)); 11915 retval = IOCB_TIMEDOUT; 11916 } 11917 } else if (retval == IOCB_BUSY) { 11918 if (phba->cfg_log_verbose & LOG_SLI) { 11919 list_for_each_entry(iocb, &pring->txq, list) { 11920 txq_cnt++; 11921 } 11922 list_for_each_entry(iocb, &pring->txcmplq, list) { 11923 txcmplq_cnt++; 11924 } 11925 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11926 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11927 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11928 } 11929 return retval; 11930 } else { 11931 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11932 "0332 IOCB wait issue failed, Data x%x\n", 11933 retval); 11934 retval = IOCB_ERROR; 11935 } 11936 11937 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11938 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11939 return IOCB_ERROR; 11940 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11941 writel(creg_val, phba->HCregaddr); 11942 readl(phba->HCregaddr); /* flush */ 11943 } 11944 11945 if (prspiocbq) 11946 piocb->context2 = NULL; 11947 11948 piocb->context_un.wait_queue = NULL; 11949 piocb->iocb_cmpl = NULL; 11950 return retval; 11951 } 11952 11953 /** 11954 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11955 * @phba: Pointer to HBA context object. 11956 * @pmboxq: Pointer to driver mailbox object. 11957 * @timeout: Timeout in number of seconds. 11958 * 11959 * This function issues the mailbox to firmware and waits for the 11960 * mailbox command to complete. If the mailbox command is not 11961 * completed within timeout seconds, it returns MBX_TIMEOUT. 
11962 * The function waits for the mailbox completion using an 11963 * interruptible wait. If the thread is woken up due to a 11964 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11965 * should not free the mailbox resources, if this function returns 11966 * MBX_TIMEOUT. 11967 * This function will sleep while waiting for mailbox completion. 11968 * So, this function should not be called from any context which 11969 * does not allow sleeping. Due to the same reason, this function 11970 * cannot be called with interrupt disabled. 11971 * This function assumes that the mailbox completion occurs while 11972 * this function sleep. So, this function cannot be called from 11973 * the worker thread which processes mailbox completion. 11974 * This function is called in the context of HBA management 11975 * applications. 11976 * This function returns MBX_SUCCESS when successful. 11977 * This function is called with no lock held. 11978 **/ 11979 int 11980 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 11981 uint32_t timeout) 11982 { 11983 struct completion mbox_done; 11984 int retval; 11985 unsigned long flag; 11986 11987 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 11988 /* setup wake call as IOCB callback */ 11989 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 11990 11991 /* setup context3 field to pass wait_queue pointer to wake function */ 11992 init_completion(&mbox_done); 11993 pmboxq->context3 = &mbox_done; 11994 /* now issue the command */ 11995 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 11996 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 11997 wait_for_completion_timeout(&mbox_done, 11998 msecs_to_jiffies(timeout * 1000)); 11999 12000 spin_lock_irqsave(&phba->hbalock, flag); 12001 pmboxq->context3 = NULL; 12002 /* 12003 * if LPFC_MBX_WAKE flag is set the mailbox is completed 12004 * else do not free the resources. 12005 */ 12006 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 12007 retval = MBX_SUCCESS; 12008 } else { 12009 retval = MBX_TIMEOUT; 12010 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12011 } 12012 spin_unlock_irqrestore(&phba->hbalock, flag); 12013 } 12014 return retval; 12015 } 12016 12017 /** 12018 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 12019 * @phba: Pointer to HBA context. 12020 * 12021 * This function is called to shutdown the driver's mailbox sub-system. 12022 * It first marks the mailbox sub-system is in a block state to prevent 12023 * the asynchronous mailbox command from issued off the pending mailbox 12024 * command queue. If the mailbox command sub-system shutdown is due to 12025 * HBA error conditions such as EEH or ERATT, this routine shall invoke 12026 * the mailbox sub-system flush routine to forcefully bring down the 12027 * mailbox sub-system. Otherwise, if it is due to normal condition (such 12028 * as with offline or HBA function reset), this routine will wait for the 12029 * outstanding mailbox command to complete before invoking the mailbox 12030 * sub-system flush routine to gracefully bring down mailbox sub-system. 
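 *
 * Hedged examples of the two shutdown modes (both constants are used by
 * callers elsewhere in the driver, e.g. lpfc_sli_hba_down() above uses
 * LPFC_MBX_WAIT):
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);	(graceful path)
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);	(error/EEH path)
 *
 * With LPFC_MBX_WAIT the routine polls up to the mailbox timeout for the
 * active command before flushing; with LPFC_MBX_NO_WAIT it sleeps 100ms
 * and flushes immediately.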
12031 **/ 12032 void 12033 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 12034 { 12035 struct lpfc_sli *psli = &phba->sli; 12036 unsigned long timeout; 12037 12038 if (mbx_action == LPFC_MBX_NO_WAIT) { 12039 /* delay 100ms for port state */ 12040 msleep(100); 12041 lpfc_sli_mbox_sys_flush(phba); 12042 return; 12043 } 12044 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 12045 12046 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12047 local_bh_disable(); 12048 12049 spin_lock_irq(&phba->hbalock); 12050 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12051 12052 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 12053 /* Determine how long we might wait for the active mailbox 12054 * command to be gracefully completed by firmware. 12055 */ 12056 if (phba->sli.mbox_active) 12057 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 12058 phba->sli.mbox_active) * 12059 1000) + jiffies; 12060 spin_unlock_irq(&phba->hbalock); 12061 12062 /* Enable softirqs again, done with phba->hbalock */ 12063 local_bh_enable(); 12064 12065 while (phba->sli.mbox_active) { 12066 /* Check active mailbox complete status every 2ms */ 12067 msleep(2); 12068 if (time_after(jiffies, timeout)) 12069 /* Timeout, let the mailbox flush routine to 12070 * forcefully release active mailbox command 12071 */ 12072 break; 12073 } 12074 } else { 12075 spin_unlock_irq(&phba->hbalock); 12076 12077 /* Enable softirqs again, done with phba->hbalock */ 12078 local_bh_enable(); 12079 } 12080 12081 lpfc_sli_mbox_sys_flush(phba); 12082 } 12083 12084 /** 12085 * lpfc_sli_eratt_read - read sli-3 error attention events 12086 * @phba: Pointer to HBA context. 12087 * 12088 * This function is called to read the SLI3 device error attention registers 12089 * for possible error attention events. The caller must hold the hostlock 12090 * with spin_lock_irq(). 12091 * 12092 * This function returns 1 when there is Error Attention in the Host Attention 12093 * Register and returns 0 otherwise. 12094 **/ 12095 static int 12096 lpfc_sli_eratt_read(struct lpfc_hba *phba) 12097 { 12098 uint32_t ha_copy; 12099 12100 /* Read chip Host Attention (HA) register */ 12101 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12102 goto unplug_err; 12103 12104 if (ha_copy & HA_ERATT) { 12105 /* Read host status register to retrieve error event */ 12106 if (lpfc_sli_read_hs(phba)) 12107 goto unplug_err; 12108 12109 /* Check if there is a deferred error condition is active */ 12110 if ((HS_FFER1 & phba->work_hs) && 12111 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12112 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 12113 phba->hba_flag |= DEFER_ERATT; 12114 /* Clear all interrupt enable conditions */ 12115 writel(0, phba->HCregaddr); 12116 readl(phba->HCregaddr); 12117 } 12118 12119 /* Set the driver HA work bitmap */ 12120 phba->work_ha |= HA_ERATT; 12121 /* Indicate polling handles this ERATT */ 12122 phba->hba_flag |= HBA_ERATT_HANDLED; 12123 return 1; 12124 } 12125 return 0; 12126 12127 unplug_err: 12128 /* Set the driver HS work bitmap */ 12129 phba->work_hs |= UNPLUG_ERR; 12130 /* Set the driver HA work bitmap */ 12131 phba->work_ha |= HA_ERATT; 12132 /* Indicate polling handles this ERATT */ 12133 phba->hba_flag |= HBA_ERATT_HANDLED; 12134 return 1; 12135 } 12136 12137 /** 12138 * lpfc_sli4_eratt_read - read sli-4 error attention events 12139 * @phba: Pointer to HBA context. 12140 * 12141 * This function is called to read the SLI4 device error attention registers 12142 * for possible error attention events. 
The caller must hold the hostlock 12143 * with spin_lock_irq(). 12144 * 12145 * This function returns 1 when there is Error Attention in the Host Attention 12146 * Register and returns 0 otherwise. 12147 **/ 12148 static int 12149 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12150 { 12151 uint32_t uerr_sta_hi, uerr_sta_lo; 12152 uint32_t if_type, portsmphr; 12153 struct lpfc_register portstat_reg; 12154 12155 /* 12156 * For now, use the SLI4 device internal unrecoverable error 12157 * registers for error attention. This can be changed later. 12158 */ 12159 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12160 switch (if_type) { 12161 case LPFC_SLI_INTF_IF_TYPE_0: 12162 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12163 &uerr_sta_lo) || 12164 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12165 &uerr_sta_hi)) { 12166 phba->work_hs |= UNPLUG_ERR; 12167 phba->work_ha |= HA_ERATT; 12168 phba->hba_flag |= HBA_ERATT_HANDLED; 12169 return 1; 12170 } 12171 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12172 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12174 "1423 HBA Unrecoverable error: " 12175 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12176 "ue_mask_lo_reg=0x%x, " 12177 "ue_mask_hi_reg=0x%x\n", 12178 uerr_sta_lo, uerr_sta_hi, 12179 phba->sli4_hba.ue_mask_lo, 12180 phba->sli4_hba.ue_mask_hi); 12181 phba->work_status[0] = uerr_sta_lo; 12182 phba->work_status[1] = uerr_sta_hi; 12183 phba->work_ha |= HA_ERATT; 12184 phba->hba_flag |= HBA_ERATT_HANDLED; 12185 return 1; 12186 } 12187 break; 12188 case LPFC_SLI_INTF_IF_TYPE_2: 12189 case LPFC_SLI_INTF_IF_TYPE_6: 12190 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12191 &portstat_reg.word0) || 12192 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12193 &portsmphr)){ 12194 phba->work_hs |= UNPLUG_ERR; 12195 phba->work_ha |= HA_ERATT; 12196 phba->hba_flag |= HBA_ERATT_HANDLED; 12197 return 1; 12198 } 12199 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12200 phba->work_status[0] = 12201 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12202 phba->work_status[1] = 12203 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12204 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12205 "2885 Port Status Event: " 12206 "port status reg 0x%x, " 12207 "port smphr reg 0x%x, " 12208 "error 1=0x%x, error 2=0x%x\n", 12209 portstat_reg.word0, 12210 portsmphr, 12211 phba->work_status[0], 12212 phba->work_status[1]); 12213 phba->work_ha |= HA_ERATT; 12214 phba->hba_flag |= HBA_ERATT_HANDLED; 12215 return 1; 12216 } 12217 break; 12218 case LPFC_SLI_INTF_IF_TYPE_1: 12219 default: 12220 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12221 "2886 HBA Error Attention on unsupported " 12222 "if type %d.", if_type); 12223 return 1; 12224 } 12225 12226 return 0; 12227 } 12228 12229 /** 12230 * lpfc_sli_check_eratt - check error attention events 12231 * @phba: Pointer to HBA context. 12232 * 12233 * This function is called from timer soft interrupt context to check HBA's 12234 * error attention register bit for error attention events. 12235 * 12236 * This function returns 1 when there is Error Attention in the Host Attention 12237 * Register and returns 0 otherwise. 12238 **/ 12239 int 12240 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12241 { 12242 uint32_t ha_copy; 12243 12244 /* If somebody is waiting to handle an eratt, don't process it 12245 * here. The brdkill function will do this. 
12246 */ 12247 if (phba->link_flag & LS_IGNORE_ERATT) 12248 return 0; 12249 12250 /* Check if interrupt handler handles this ERATT */ 12251 spin_lock_irq(&phba->hbalock); 12252 if (phba->hba_flag & HBA_ERATT_HANDLED) { 12253 /* Interrupt handler has handled ERATT */ 12254 spin_unlock_irq(&phba->hbalock); 12255 return 0; 12256 } 12257 12258 /* 12259 * If there is deferred error attention, do not check for error 12260 * attention 12261 */ 12262 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12263 spin_unlock_irq(&phba->hbalock); 12264 return 0; 12265 } 12266 12267 /* If PCI channel is offline, don't process it */ 12268 if (unlikely(pci_channel_offline(phba->pcidev))) { 12269 spin_unlock_irq(&phba->hbalock); 12270 return 0; 12271 } 12272 12273 switch (phba->sli_rev) { 12274 case LPFC_SLI_REV2: 12275 case LPFC_SLI_REV3: 12276 /* Read chip Host Attention (HA) register */ 12277 ha_copy = lpfc_sli_eratt_read(phba); 12278 break; 12279 case LPFC_SLI_REV4: 12280 /* Read device Uncoverable Error (UERR) registers */ 12281 ha_copy = lpfc_sli4_eratt_read(phba); 12282 break; 12283 default: 12284 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12285 "0299 Invalid SLI revision (%d)\n", 12286 phba->sli_rev); 12287 ha_copy = 0; 12288 break; 12289 } 12290 spin_unlock_irq(&phba->hbalock); 12291 12292 return ha_copy; 12293 } 12294 12295 /** 12296 * lpfc_intr_state_check - Check device state for interrupt handling 12297 * @phba: Pointer to HBA context. 12298 * 12299 * This inline routine checks whether a device or its PCI slot is in a state 12300 * that the interrupt should be handled. 12301 * 12302 * This function returns 0 if the device or the PCI slot is in a state that 12303 * interrupt should be handled, otherwise -EIO. 12304 */ 12305 static inline int 12306 lpfc_intr_state_check(struct lpfc_hba *phba) 12307 { 12308 /* If the pci channel is offline, ignore all the interrupts */ 12309 if (unlikely(pci_channel_offline(phba->pcidev))) 12310 return -EIO; 12311 12312 /* Update device level interrupt statistics */ 12313 phba->sli.slistat.sli_intr++; 12314 12315 /* Ignore all interrupts during initialization. */ 12316 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 12317 return -EIO; 12318 12319 return 0; 12320 } 12321 12322 /** 12323 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 12324 * @irq: Interrupt number. 12325 * @dev_id: The device context pointer. 12326 * 12327 * This function is directly called from the PCI layer as an interrupt 12328 * service routine when device with SLI-3 interface spec is enabled with 12329 * MSI-X multi-message interrupt mode and there are slow-path events in 12330 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 12331 * interrupt mode, this function is called as part of the device-level 12332 * interrupt handler. When the PCI slot is in error recovery or the HBA 12333 * is undergoing initialization, the interrupt handler will not process 12334 * the interrupt. The link attention and ELS ring attention events are 12335 * handled by the worker thread. The interrupt handler signals the worker 12336 * thread and returns for these events. This function is called without 12337 * any lock held. It gets the hbalock to access and update SLI data 12338 * structures. 12339 * 12340 * This function returns IRQ_HANDLED when interrupt is handled else it 12341 * returns IRQ_NONE. 
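 *
 * Registration sketch (illustrative only; the driver's real MSI-X setup in
 * its init path is more involved, and the vector index and name string used
 * here are assumptions):
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 lpfc_sli_sp_intr_handler, 0, "lpfc:sp", phba);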
12342 **/ 12343 irqreturn_t 12344 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12345 { 12346 struct lpfc_hba *phba; 12347 uint32_t ha_copy, hc_copy; 12348 uint32_t work_ha_copy; 12349 unsigned long status; 12350 unsigned long iflag; 12351 uint32_t control; 12352 12353 MAILBOX_t *mbox, *pmbox; 12354 struct lpfc_vport *vport; 12355 struct lpfc_nodelist *ndlp; 12356 struct lpfc_dmabuf *mp; 12357 LPFC_MBOXQ_t *pmb; 12358 int rc; 12359 12360 /* 12361 * Get the driver's phba structure from the dev_id and 12362 * assume the HBA is not interrupting. 12363 */ 12364 phba = (struct lpfc_hba *)dev_id; 12365 12366 if (unlikely(!phba)) 12367 return IRQ_NONE; 12368 12369 /* 12370 * Stuff needs to be attented to when this function is invoked as an 12371 * individual interrupt handler in MSI-X multi-message interrupt mode 12372 */ 12373 if (phba->intr_type == MSIX) { 12374 /* Check device state for handling interrupt */ 12375 if (lpfc_intr_state_check(phba)) 12376 return IRQ_NONE; 12377 /* Need to read HA REG for slow-path events */ 12378 spin_lock_irqsave(&phba->hbalock, iflag); 12379 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12380 goto unplug_error; 12381 /* If somebody is waiting to handle an eratt don't process it 12382 * here. The brdkill function will do this. 12383 */ 12384 if (phba->link_flag & LS_IGNORE_ERATT) 12385 ha_copy &= ~HA_ERATT; 12386 /* Check the need for handling ERATT in interrupt handler */ 12387 if (ha_copy & HA_ERATT) { 12388 if (phba->hba_flag & HBA_ERATT_HANDLED) 12389 /* ERATT polling has handled ERATT */ 12390 ha_copy &= ~HA_ERATT; 12391 else 12392 /* Indicate interrupt handler handles ERATT */ 12393 phba->hba_flag |= HBA_ERATT_HANDLED; 12394 } 12395 12396 /* 12397 * If there is deferred error attention, do not check for any 12398 * interrupt. 12399 */ 12400 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12401 spin_unlock_irqrestore(&phba->hbalock, iflag); 12402 return IRQ_NONE; 12403 } 12404 12405 /* Clear up only attention source related to slow-path */ 12406 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12407 goto unplug_error; 12408 12409 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12410 HC_LAINT_ENA | HC_ERINT_ENA), 12411 phba->HCregaddr); 12412 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12413 phba->HAregaddr); 12414 writel(hc_copy, phba->HCregaddr); 12415 readl(phba->HAregaddr); /* flush */ 12416 spin_unlock_irqrestore(&phba->hbalock, iflag); 12417 } else 12418 ha_copy = phba->ha_copy; 12419 12420 work_ha_copy = ha_copy & phba->work_ha_mask; 12421 12422 if (work_ha_copy) { 12423 if (work_ha_copy & HA_LATT) { 12424 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12425 /* 12426 * Turn off Link Attention interrupts 12427 * until CLEAR_LA done 12428 */ 12429 spin_lock_irqsave(&phba->hbalock, iflag); 12430 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12431 if (lpfc_readl(phba->HCregaddr, &control)) 12432 goto unplug_error; 12433 control &= ~HC_LAINT_ENA; 12434 writel(control, phba->HCregaddr); 12435 readl(phba->HCregaddr); /* flush */ 12436 spin_unlock_irqrestore(&phba->hbalock, iflag); 12437 } 12438 else 12439 work_ha_copy &= ~HA_LATT; 12440 } 12441 12442 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12443 /* 12444 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12445 * the only slow ring. 
12446 */ 12447 status = (work_ha_copy & 12448 (HA_RXMASK << (4*LPFC_ELS_RING))); 12449 status >>= (4*LPFC_ELS_RING); 12450 if (status & HA_RXMASK) { 12451 spin_lock_irqsave(&phba->hbalock, iflag); 12452 if (lpfc_readl(phba->HCregaddr, &control)) 12453 goto unplug_error; 12454 12455 lpfc_debugfs_slow_ring_trc(phba, 12456 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12457 control, status, 12458 (uint32_t)phba->sli.slistat.sli_intr); 12459 12460 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12461 lpfc_debugfs_slow_ring_trc(phba, 12462 "ISR Disable ring:" 12463 "pwork:x%x hawork:x%x wait:x%x", 12464 phba->work_ha, work_ha_copy, 12465 (uint32_t)((unsigned long) 12466 &phba->work_waitq)); 12467 12468 control &= 12469 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12470 writel(control, phba->HCregaddr); 12471 readl(phba->HCregaddr); /* flush */ 12472 } 12473 else { 12474 lpfc_debugfs_slow_ring_trc(phba, 12475 "ISR slow ring: pwork:" 12476 "x%x hawork:x%x wait:x%x", 12477 phba->work_ha, work_ha_copy, 12478 (uint32_t)((unsigned long) 12479 &phba->work_waitq)); 12480 } 12481 spin_unlock_irqrestore(&phba->hbalock, iflag); 12482 } 12483 } 12484 spin_lock_irqsave(&phba->hbalock, iflag); 12485 if (work_ha_copy & HA_ERATT) { 12486 if (lpfc_sli_read_hs(phba)) 12487 goto unplug_error; 12488 /* 12489 * Check if there is a deferred error condition 12490 * is active 12491 */ 12492 if ((HS_FFER1 & phba->work_hs) && 12493 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12494 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12495 phba->work_hs)) { 12496 phba->hba_flag |= DEFER_ERATT; 12497 /* Clear all interrupt enable conditions */ 12498 writel(0, phba->HCregaddr); 12499 readl(phba->HCregaddr); 12500 } 12501 } 12502 12503 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12504 pmb = phba->sli.mbox_active; 12505 pmbox = &pmb->u.mb; 12506 mbox = phba->mbox; 12507 vport = pmb->vport; 12508 12509 /* First check out the status word */ 12510 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12511 if (pmbox->mbxOwner != OWN_HOST) { 12512 spin_unlock_irqrestore(&phba->hbalock, iflag); 12513 /* 12514 * Stray Mailbox Interrupt, mbxCommand <cmd> 12515 * mbxStatus <status> 12516 */ 12517 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12518 LOG_SLI, 12519 "(%d):0304 Stray Mailbox " 12520 "Interrupt mbxCommand x%x " 12521 "mbxStatus x%x\n", 12522 (vport ? vport->vpi : 0), 12523 pmbox->mbxCommand, 12524 pmbox->mbxStatus); 12525 /* clear mailbox attention bit */ 12526 work_ha_copy &= ~HA_MBATT; 12527 } else { 12528 phba->sli.mbox_active = NULL; 12529 spin_unlock_irqrestore(&phba->hbalock, iflag); 12530 phba->last_completion_time = jiffies; 12531 del_timer(&phba->sli.mbox_tmo); 12532 if (pmb->mbox_cmpl) { 12533 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12534 MAILBOX_CMD_SIZE); 12535 if (pmb->out_ext_byte_len && 12536 pmb->ctx_buf) 12537 lpfc_sli_pcimem_bcopy( 12538 phba->mbox_ext, 12539 pmb->ctx_buf, 12540 pmb->out_ext_byte_len); 12541 } 12542 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12543 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12544 12545 lpfc_debugfs_disc_trc(vport, 12546 LPFC_DISC_TRC_MBOX_VPORT, 12547 "MBOX dflt rpi: : " 12548 "status:x%x rpi:x%x", 12549 (uint32_t)pmbox->mbxStatus, 12550 pmbox->un.varWords[0], 0); 12551 12552 if (!pmbox->mbxStatus) { 12553 mp = (struct lpfc_dmabuf *) 12554 (pmb->ctx_buf); 12555 ndlp = (struct lpfc_nodelist *) 12556 pmb->ctx_ndlp; 12557 12558 /* Reg_LOGIN of dflt RPI was 12559 * successful. new lets get 12560 * rid of the RPI using the 12561 * same mbox buffer. 
12562 */ 12563 lpfc_unreg_login(phba, 12564 vport->vpi, 12565 pmbox->un.varWords[0], 12566 pmb); 12567 pmb->mbox_cmpl = 12568 lpfc_mbx_cmpl_dflt_rpi; 12569 pmb->ctx_buf = mp; 12570 pmb->ctx_ndlp = ndlp; 12571 pmb->vport = vport; 12572 rc = lpfc_sli_issue_mbox(phba, 12573 pmb, 12574 MBX_NOWAIT); 12575 if (rc != MBX_BUSY) 12576 lpfc_printf_log(phba, 12577 KERN_ERR, 12578 LOG_MBOX | LOG_SLI, 12579 "0350 rc should have" 12580 "been MBX_BUSY\n"); 12581 if (rc != MBX_NOT_FINISHED) 12582 goto send_current_mbox; 12583 } 12584 } 12585 spin_lock_irqsave( 12586 &phba->pport->work_port_lock, 12587 iflag); 12588 phba->pport->work_port_events &= 12589 ~WORKER_MBOX_TMO; 12590 spin_unlock_irqrestore( 12591 &phba->pport->work_port_lock, 12592 iflag); 12593 lpfc_mbox_cmpl_put(phba, pmb); 12594 } 12595 } else 12596 spin_unlock_irqrestore(&phba->hbalock, iflag); 12597 12598 if ((work_ha_copy & HA_MBATT) && 12599 (phba->sli.mbox_active == NULL)) { 12600 send_current_mbox: 12601 /* Process next mailbox command if there is one */ 12602 do { 12603 rc = lpfc_sli_issue_mbox(phba, NULL, 12604 MBX_NOWAIT); 12605 } while (rc == MBX_NOT_FINISHED); 12606 if (rc != MBX_SUCCESS) 12607 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12608 LOG_SLI, "0349 rc should be " 12609 "MBX_SUCCESS\n"); 12610 } 12611 12612 spin_lock_irqsave(&phba->hbalock, iflag); 12613 phba->work_ha |= work_ha_copy; 12614 spin_unlock_irqrestore(&phba->hbalock, iflag); 12615 lpfc_worker_wake_up(phba); 12616 } 12617 return IRQ_HANDLED; 12618 unplug_error: 12619 spin_unlock_irqrestore(&phba->hbalock, iflag); 12620 return IRQ_HANDLED; 12621 12622 } /* lpfc_sli_sp_intr_handler */ 12623 12624 /** 12625 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12626 * @irq: Interrupt number. 12627 * @dev_id: The device context pointer. 12628 * 12629 * This function is directly called from the PCI layer as an interrupt 12630 * service routine when device with SLI-3 interface spec is enabled with 12631 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12632 * ring event in the HBA. However, when the device is enabled with either 12633 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12634 * device-level interrupt handler. When the PCI slot is in error recovery 12635 * or the HBA is undergoing initialization, the interrupt handler will not 12636 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12637 * the intrrupt context. This function is called without any lock held. 12638 * It gets the hbalock to access and update SLI data structures. 12639 * 12640 * This function returns IRQ_HANDLED when interrupt is handled else it 12641 * returns IRQ_NONE. 12642 **/ 12643 irqreturn_t 12644 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12645 { 12646 struct lpfc_hba *phba; 12647 uint32_t ha_copy; 12648 unsigned long status; 12649 unsigned long iflag; 12650 struct lpfc_sli_ring *pring; 12651 12652 /* Get the driver's phba structure from the dev_id and 12653 * assume the HBA is not interrupting. 
12654 */ 12655 phba = (struct lpfc_hba *) dev_id; 12656 12657 if (unlikely(!phba)) 12658 return IRQ_NONE; 12659 12660 /* 12661 * Stuff needs to be attented to when this function is invoked as an 12662 * individual interrupt handler in MSI-X multi-message interrupt mode 12663 */ 12664 if (phba->intr_type == MSIX) { 12665 /* Check device state for handling interrupt */ 12666 if (lpfc_intr_state_check(phba)) 12667 return IRQ_NONE; 12668 /* Need to read HA REG for FCP ring and other ring events */ 12669 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12670 return IRQ_HANDLED; 12671 /* Clear up only attention source related to fast-path */ 12672 spin_lock_irqsave(&phba->hbalock, iflag); 12673 /* 12674 * If there is deferred error attention, do not check for 12675 * any interrupt. 12676 */ 12677 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12678 spin_unlock_irqrestore(&phba->hbalock, iflag); 12679 return IRQ_NONE; 12680 } 12681 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12682 phba->HAregaddr); 12683 readl(phba->HAregaddr); /* flush */ 12684 spin_unlock_irqrestore(&phba->hbalock, iflag); 12685 } else 12686 ha_copy = phba->ha_copy; 12687 12688 /* 12689 * Process all events on FCP ring. Take the optimized path for FCP IO. 12690 */ 12691 ha_copy &= ~(phba->work_ha_mask); 12692 12693 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12694 status >>= (4*LPFC_FCP_RING); 12695 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12696 if (status & HA_RXMASK) 12697 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12698 12699 if (phba->cfg_multi_ring_support == 2) { 12700 /* 12701 * Process all events on extra ring. Take the optimized path 12702 * for extra ring IO. 12703 */ 12704 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12705 status >>= (4*LPFC_EXTRA_RING); 12706 if (status & HA_RXMASK) { 12707 lpfc_sli_handle_fast_ring_event(phba, 12708 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12709 status); 12710 } 12711 } 12712 return IRQ_HANDLED; 12713 } /* lpfc_sli_fp_intr_handler */ 12714 12715 /** 12716 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12717 * @irq: Interrupt number. 12718 * @dev_id: The device context pointer. 12719 * 12720 * This function is the HBA device-level interrupt handler to device with 12721 * SLI-3 interface spec, called from the PCI layer when either MSI or 12722 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12723 * requires driver attention. This function invokes the slow-path interrupt 12724 * attention handling function and fast-path interrupt attention handling 12725 * function in turn to process the relevant HBA attention events. This 12726 * function is called without any lock held. It gets the hbalock to access 12727 * and update SLI data structures. 12728 * 12729 * This function returns IRQ_HANDLED when interrupt is handled, else it 12730 * returns IRQ_NONE. 12731 **/ 12732 irqreturn_t 12733 lpfc_sli_intr_handler(int irq, void *dev_id) 12734 { 12735 struct lpfc_hba *phba; 12736 irqreturn_t sp_irq_rc, fp_irq_rc; 12737 unsigned long status1, status2; 12738 uint32_t hc_copy; 12739 12740 /* 12741 * Get the driver's phba structure from the dev_id and 12742 * assume the HBA is not interrupting. 
12743 */ 12744 phba = (struct lpfc_hba *) dev_id; 12745 12746 if (unlikely(!phba)) 12747 return IRQ_NONE; 12748 12749 /* Check device state for handling interrupt */ 12750 if (lpfc_intr_state_check(phba)) 12751 return IRQ_NONE; 12752 12753 spin_lock(&phba->hbalock); 12754 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12755 spin_unlock(&phba->hbalock); 12756 return IRQ_HANDLED; 12757 } 12758 12759 if (unlikely(!phba->ha_copy)) { 12760 spin_unlock(&phba->hbalock); 12761 return IRQ_NONE; 12762 } else if (phba->ha_copy & HA_ERATT) { 12763 if (phba->hba_flag & HBA_ERATT_HANDLED) 12764 /* ERATT polling has handled ERATT */ 12765 phba->ha_copy &= ~HA_ERATT; 12766 else 12767 /* Indicate interrupt handler handles ERATT */ 12768 phba->hba_flag |= HBA_ERATT_HANDLED; 12769 } 12770 12771 /* 12772 * If there is deferred error attention, do not check for any interrupt. 12773 */ 12774 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12775 spin_unlock(&phba->hbalock); 12776 return IRQ_NONE; 12777 } 12778 12779 /* Clear attention sources except link and error attentions */ 12780 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12781 spin_unlock(&phba->hbalock); 12782 return IRQ_HANDLED; 12783 } 12784 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12785 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12786 phba->HCregaddr); 12787 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12788 writel(hc_copy, phba->HCregaddr); 12789 readl(phba->HAregaddr); /* flush */ 12790 spin_unlock(&phba->hbalock); 12791 12792 /* 12793 * Invokes slow-path host attention interrupt handling as appropriate. 12794 */ 12795 12796 /* status of events with mailbox and link attention */ 12797 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12798 12799 /* status of events with ELS ring */ 12800 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12801 status2 >>= (4*LPFC_ELS_RING); 12802 12803 if (status1 || (status2 & HA_RXMASK)) 12804 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12805 else 12806 sp_irq_rc = IRQ_NONE; 12807 12808 /* 12809 * Invoke fast-path host attention interrupt handling as appropriate. 12810 */ 12811 12812 /* status of events with FCP ring */ 12813 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12814 status1 >>= (4*LPFC_FCP_RING); 12815 12816 /* status of events with extra ring */ 12817 if (phba->cfg_multi_ring_support == 2) { 12818 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12819 status2 >>= (4*LPFC_EXTRA_RING); 12820 } else 12821 status2 = 0; 12822 12823 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12824 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12825 else 12826 fp_irq_rc = IRQ_NONE; 12827 12828 /* Return device-level interrupt handling status */ 12829 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12830 } /* lpfc_sli_intr_handler */ 12831 12832 /** 12833 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12834 * @phba: pointer to lpfc hba data structure. 12835 * 12836 * This routine is invoked by the worker thread to process all the pending 12837 * SLI4 els abort xri events. 
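 *
 * Dispatch sketch (illustrative; the event flag is assumed to be checked
 * from the driver's worker thread, e.g. lpfc_work_done()):
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);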
12838 **/ 12839 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12840 { 12841 struct lpfc_cq_event *cq_event; 12842 12843 /* First, declare the els xri abort event has been handled */ 12844 spin_lock_irq(&phba->hbalock); 12845 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12846 spin_unlock_irq(&phba->hbalock); 12847 /* Now, handle all the els xri abort events */ 12848 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12849 /* Get the first event from the head of the event queue */ 12850 spin_lock_irq(&phba->hbalock); 12851 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12852 cq_event, struct lpfc_cq_event, list); 12853 spin_unlock_irq(&phba->hbalock); 12854 /* Notify aborted XRI for ELS work queue */ 12855 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12856 /* Free the event processed back to the free pool */ 12857 lpfc_sli4_cq_event_release(phba, cq_event); 12858 } 12859 } 12860 12861 /** 12862 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12863 * @phba: pointer to lpfc hba data structure 12864 * @pIocbIn: pointer to the rspiocbq 12865 * @pIocbOut: pointer to the cmdiocbq 12866 * @wcqe: pointer to the complete wcqe 12867 * 12868 * This routine transfers the fields of a command iocbq to a response iocbq 12869 * by copying all the IOCB fields from command iocbq and transferring the 12870 * completion status information from the complete wcqe. 12871 **/ 12872 static void 12873 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12874 struct lpfc_iocbq *pIocbIn, 12875 struct lpfc_iocbq *pIocbOut, 12876 struct lpfc_wcqe_complete *wcqe) 12877 { 12878 int numBdes, i; 12879 unsigned long iflags; 12880 uint32_t status, max_response; 12881 struct lpfc_dmabuf *dmabuf; 12882 struct ulp_bde64 *bpl, bde; 12883 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12884 12885 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12886 sizeof(struct lpfc_iocbq) - offset); 12887 /* Map WCQE parameters into irspiocb parameters */ 12888 status = bf_get(lpfc_wcqe_c_status, wcqe); 12889 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12890 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12891 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12892 pIocbIn->iocb.un.fcpi.fcpi_parm = 12893 pIocbOut->iocb.un.fcpi.fcpi_parm - 12894 wcqe->total_data_placed; 12895 else 12896 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12897 else { 12898 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12899 switch (pIocbOut->iocb.ulpCommand) { 12900 case CMD_ELS_REQUEST64_CR: 12901 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12902 bpl = (struct ulp_bde64 *)dmabuf->virt; 12903 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12904 max_response = bde.tus.f.bdeSize; 12905 break; 12906 case CMD_GEN_REQUEST64_CR: 12907 max_response = 0; 12908 if (!pIocbOut->context3) 12909 break; 12910 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12911 sizeof(struct ulp_bde64); 12912 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12913 bpl = (struct ulp_bde64 *)dmabuf->virt; 12914 for (i = 0; i < numBdes; i++) { 12915 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12916 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12917 max_response += bde.tus.f.bdeSize; 12918 } 12919 break; 12920 default: 12921 max_response = wcqe->total_data_placed; 12922 break; 12923 } 12924 if (max_response < wcqe->total_data_placed) 12925 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12926 else 12927 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12928 wcqe->total_data_placed; 12929 } 
12930 
12931 	/* Convert BG errors for completion status */
12932 	if (status == CQE_STATUS_DI_ERROR) {
12933 		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12934 
12935 		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12936 			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12937 		else
12938 			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12939 
12940 		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12941 		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12942 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12943 				BGS_GUARD_ERR_MASK;
12944 		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12945 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12946 				BGS_APPTAG_ERR_MASK;
12947 		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12948 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12949 				BGS_REFTAG_ERR_MASK;
12950 
12951 		/* Check to see if there was any good data before the error */
12952 		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12953 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12954 				BGS_HI_WATER_MARK_PRESENT_MASK;
12955 			pIocbIn->iocb.unsli3.sli3_bg.bghm =
12956 				wcqe->total_data_placed;
12957 		}
12958 
12959 		/*
12960 		 * Set ALL the error bits to indicate we don't know what
12961 		 * type of error it is.
12962 		 */
12963 		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12964 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12965 				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12966 				BGS_GUARD_ERR_MASK);
12967 	}
12968 
12969 	/* Pick up HBA exchange busy condition */
12970 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12971 		spin_lock_irqsave(&phba->hbalock, iflags);
12972 		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12973 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12974 	}
12975 }
12976 
12977 /**
12978  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12979  * @phba: Pointer to HBA context object.
12980  * @irspiocbq: Pointer to the response IOCBQ that carries the ELS WCQE.
12981  *
12982  * This routine handles an ELS work-queue completion event and constructs
12983  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12984  * discovery engine to handle.
12985  *
12986  * Return: Pointer to the response IOCBQ on success, NULL otherwise.
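 *
 * Caller-side sketch (illustrative; the WCQE is assumed to have been copied
 * into irspiocbq->cq_event.cqe.wcqe_cmpl before this call). A NULL return
 * means no matching ELS command IOCB was found and the iocbq has already
 * been released:
 *
 *	irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, irspiocbq);
 *	if (!irspiocbq)
 *		return;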
12987 **/ 12988 static struct lpfc_iocbq * 12989 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 12990 struct lpfc_iocbq *irspiocbq) 12991 { 12992 struct lpfc_sli_ring *pring; 12993 struct lpfc_iocbq *cmdiocbq; 12994 struct lpfc_wcqe_complete *wcqe; 12995 unsigned long iflags; 12996 12997 pring = lpfc_phba_elsring(phba); 12998 if (unlikely(!pring)) 12999 return NULL; 13000 13001 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 13002 pring->stats.iocb_event++; 13003 /* Look up the ELS command IOCB and create pseudo response IOCB */ 13004 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13005 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13006 if (unlikely(!cmdiocbq)) { 13007 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13008 "0386 ELS complete with no corresponding " 13009 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 13010 wcqe->word0, wcqe->total_data_placed, 13011 wcqe->parameter, wcqe->word3); 13012 lpfc_sli_release_iocbq(phba, irspiocbq); 13013 return NULL; 13014 } 13015 13016 spin_lock_irqsave(&pring->ring_lock, iflags); 13017 /* Put the iocb back on the txcmplq */ 13018 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 13019 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13020 13021 /* Fake the irspiocbq and copy necessary response information */ 13022 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 13023 13024 return irspiocbq; 13025 } 13026 13027 inline struct lpfc_cq_event * 13028 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 13029 { 13030 struct lpfc_cq_event *cq_event; 13031 13032 /* Allocate a new internal CQ_EVENT entry */ 13033 cq_event = lpfc_sli4_cq_event_alloc(phba); 13034 if (!cq_event) { 13035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13036 "0602 Failed to alloc CQ_EVENT entry\n"); 13037 return NULL; 13038 } 13039 13040 /* Move the CQE into the event */ 13041 memcpy(&cq_event->cqe, entry, size); 13042 return cq_event; 13043 } 13044 13045 /** 13046 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 13047 * @phba: Pointer to HBA context object. 13048 * @cqe: Pointer to mailbox completion queue entry. 13049 * 13050 * This routine process a mailbox completion queue entry with asynchrous 13051 * event. 13052 * 13053 * Return: true if work posted to worker thread, otherwise false. 13054 **/ 13055 static bool 13056 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13057 { 13058 struct lpfc_cq_event *cq_event; 13059 unsigned long iflags; 13060 13061 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13062 "0392 Async Event: word0:x%x, word1:x%x, " 13063 "word2:x%x, word3:x%x\n", mcqe->word0, 13064 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 13065 13066 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 13067 if (!cq_event) 13068 return false; 13069 spin_lock_irqsave(&phba->hbalock, iflags); 13070 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 13071 /* Set the async event flag */ 13072 phba->hba_flag |= ASYNC_EVENT; 13073 spin_unlock_irqrestore(&phba->hbalock, iflags); 13074 13075 return true; 13076 } 13077 13078 /** 13079 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 13080 * @phba: Pointer to HBA context object. 13081 * @cqe: Pointer to mailbox completion queue entry. 13082 * 13083 * This routine process a mailbox completion queue entry with mailbox 13084 * completion event. 13085 * 13086 * Return: true if work posted to worker thread, otherwise false. 
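 *
 * Call sketch (mirrors the call made from lpfc_sli4_sp_handle_mcqe() below;
 * the worker wake-up is shown inline here only for illustration, in this
 * driver it is performed further up the CQ processing path):
 *
 *	workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
 *	if (workposted)
 *		lpfc_worker_wake_up(phba);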
13087 **/ 13088 static bool 13089 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13090 { 13091 uint32_t mcqe_status; 13092 MAILBOX_t *mbox, *pmbox; 13093 struct lpfc_mqe *mqe; 13094 struct lpfc_vport *vport; 13095 struct lpfc_nodelist *ndlp; 13096 struct lpfc_dmabuf *mp; 13097 unsigned long iflags; 13098 LPFC_MBOXQ_t *pmb; 13099 bool workposted = false; 13100 int rc; 13101 13102 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 13103 if (!bf_get(lpfc_trailer_completed, mcqe)) 13104 goto out_no_mqe_complete; 13105 13106 /* Get the reference to the active mbox command */ 13107 spin_lock_irqsave(&phba->hbalock, iflags); 13108 pmb = phba->sli.mbox_active; 13109 if (unlikely(!pmb)) { 13110 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13111 "1832 No pending MBOX command to handle\n"); 13112 spin_unlock_irqrestore(&phba->hbalock, iflags); 13113 goto out_no_mqe_complete; 13114 } 13115 spin_unlock_irqrestore(&phba->hbalock, iflags); 13116 mqe = &pmb->u.mqe; 13117 pmbox = (MAILBOX_t *)&pmb->u.mqe; 13118 mbox = phba->mbox; 13119 vport = pmb->vport; 13120 13121 /* Reset heartbeat timer */ 13122 phba->last_completion_time = jiffies; 13123 del_timer(&phba->sli.mbox_tmo); 13124 13125 /* Move mbox data to caller's mailbox region, do endian swapping */ 13126 if (pmb->mbox_cmpl && mbox) 13127 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 13128 13129 /* 13130 * For mcqe errors, conditionally move a modified error code to 13131 * the mbox so that the error will not be missed. 13132 */ 13133 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 13134 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 13135 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 13136 bf_set(lpfc_mqe_status, mqe, 13137 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 13138 } 13139 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 13140 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 13141 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 13142 "MBOX dflt rpi: status:x%x rpi:x%x", 13143 mcqe_status, 13144 pmbox->un.varWords[0], 0); 13145 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 13146 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 13147 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 13148 /* Reg_LOGIN of dflt RPI was successful. Now lets get 13149 * RID of the PPI using the same mbox buffer. 
13150 */ 13151 lpfc_unreg_login(phba, vport->vpi, 13152 pmbox->un.varWords[0], pmb); 13153 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13154 pmb->ctx_buf = mp; 13155 pmb->ctx_ndlp = ndlp; 13156 pmb->vport = vport; 13157 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13158 if (rc != MBX_BUSY) 13159 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13160 LOG_SLI, "0385 rc should " 13161 "have been MBX_BUSY\n"); 13162 if (rc != MBX_NOT_FINISHED) 13163 goto send_current_mbox; 13164 } 13165 } 13166 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13167 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13168 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13169 13170 /* There is mailbox completion work to do */ 13171 spin_lock_irqsave(&phba->hbalock, iflags); 13172 __lpfc_mbox_cmpl_put(phba, pmb); 13173 phba->work_ha |= HA_MBATT; 13174 spin_unlock_irqrestore(&phba->hbalock, iflags); 13175 workposted = true; 13176 13177 send_current_mbox: 13178 spin_lock_irqsave(&phba->hbalock, iflags); 13179 /* Release the mailbox command posting token */ 13180 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13181 /* Setting active mailbox pointer need to be in sync to flag clear */ 13182 phba->sli.mbox_active = NULL; 13183 spin_unlock_irqrestore(&phba->hbalock, iflags); 13184 /* Wake up worker thread to post the next pending mailbox command */ 13185 lpfc_worker_wake_up(phba); 13186 out_no_mqe_complete: 13187 if (bf_get(lpfc_trailer_consumed, mcqe)) 13188 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13189 return workposted; 13190 } 13191 13192 /** 13193 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13194 * @phba: Pointer to HBA context object. 13195 * @cqe: Pointer to mailbox completion queue entry. 13196 * 13197 * This routine process a mailbox completion queue entry, it invokes the 13198 * proper mailbox complete handling or asynchrous event handling routine 13199 * according to the MCQE's async bit. 13200 * 13201 * Return: true if work posted to worker thread, otherwise false. 13202 **/ 13203 static bool 13204 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13205 struct lpfc_cqe *cqe) 13206 { 13207 struct lpfc_mcqe mcqe; 13208 bool workposted; 13209 13210 cq->CQ_mbox++; 13211 13212 /* Copy the mailbox MCQE and convert endian order as needed */ 13213 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13214 13215 /* Invoke the proper event handling routine */ 13216 if (!bf_get(lpfc_trailer_async, &mcqe)) 13217 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13218 else 13219 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13220 return workposted; 13221 } 13222 13223 /** 13224 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13225 * @phba: Pointer to HBA context object. 13226 * @cq: Pointer to associated CQ 13227 * @wcqe: Pointer to work-queue completion queue entry. 13228 * 13229 * This routine handles an ELS work-queue completion event. 13230 * 13231 * Return: true if work posted to worker thread, otherwise false. 
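 *
 * Note: no ELS processing is done at this level. The WCQE is copied into a
 * spare iocbq and handed off to the worker thread, roughly (summary of the
 * body below):
 *
 *	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
 *	list_add_tail(&irspiocbq->cq_event.list,
 *		      &phba->sli4_hba.sp_queue_event);
 *	phba->hba_flag |= HBA_SP_QUEUE_EVT;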
13232 **/ 13233 static bool 13234 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13235 struct lpfc_wcqe_complete *wcqe) 13236 { 13237 struct lpfc_iocbq *irspiocbq; 13238 unsigned long iflags; 13239 struct lpfc_sli_ring *pring = cq->pring; 13240 int txq_cnt = 0; 13241 int txcmplq_cnt = 0; 13242 int fcp_txcmplq_cnt = 0; 13243 13244 /* Check for response status */ 13245 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13246 /* Log the error status */ 13247 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13248 "0357 ELS CQE error: status=x%x: " 13249 "CQE: %08x %08x %08x %08x\n", 13250 bf_get(lpfc_wcqe_c_status, wcqe), 13251 wcqe->word0, wcqe->total_data_placed, 13252 wcqe->parameter, wcqe->word3); 13253 } 13254 13255 /* Get an irspiocbq for later ELS response processing use */ 13256 irspiocbq = lpfc_sli_get_iocbq(phba); 13257 if (!irspiocbq) { 13258 if (!list_empty(&pring->txq)) 13259 txq_cnt++; 13260 if (!list_empty(&pring->txcmplq)) 13261 txcmplq_cnt++; 13262 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13263 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13264 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 13265 txq_cnt, phba->iocb_cnt, 13266 fcp_txcmplq_cnt, 13267 txcmplq_cnt); 13268 return false; 13269 } 13270 13271 /* Save off the slow-path queue event for work thread to process */ 13272 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13273 spin_lock_irqsave(&phba->hbalock, iflags); 13274 list_add_tail(&irspiocbq->cq_event.list, 13275 &phba->sli4_hba.sp_queue_event); 13276 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13277 spin_unlock_irqrestore(&phba->hbalock, iflags); 13278 13279 return true; 13280 } 13281 13282 /** 13283 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13284 * @phba: Pointer to HBA context object. 13285 * @wcqe: Pointer to work-queue completion queue entry. 13286 * 13287 * This routine handles slow-path WQ entry consumed event by invoking the 13288 * proper WQ release routine to the slow-path WQ. 13289 **/ 13290 static void 13291 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13292 struct lpfc_wcqe_release *wcqe) 13293 { 13294 /* sanity check on queue memory */ 13295 if (unlikely(!phba->sli4_hba.els_wq)) 13296 return; 13297 /* Check for the slow-path ELS work queue */ 13298 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13299 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13300 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13301 else 13302 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13303 "2579 Slow-path wqe consume event carries " 13304 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13305 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13306 phba->sli4_hba.els_wq->queue_id); 13307 } 13308 13309 /** 13310 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13311 * @phba: Pointer to HBA context object. 13312 * @cq: Pointer to a WQ completion queue. 13313 * @wcqe: Pointer to work-queue completion queue entry. 13314 * 13315 * This routine handles an XRI abort event. 13316 * 13317 * Return: true if work posted to worker thread, otherwise false. 
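 *
 * Dispatch summary (drawn from the switch in the body below): FCP and NVME
 * XRI aborts are handled inline, while ELS and NVME-LS aborts are queued on
 * sli4_hba.sp_els_xri_aborted_work_queue with ELS_XRI_ABORT_EVENT set so
 * that the worker thread can process them later.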
13318 **/ 13319 static bool 13320 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13321 struct lpfc_queue *cq, 13322 struct sli4_wcqe_xri_aborted *wcqe) 13323 { 13324 bool workposted = false; 13325 struct lpfc_cq_event *cq_event; 13326 unsigned long iflags; 13327 13328 switch (cq->subtype) { 13329 case LPFC_FCP: 13330 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); 13331 workposted = false; 13332 break; 13333 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13334 case LPFC_ELS: 13335 cq_event = lpfc_cq_event_setup( 13336 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13337 if (!cq_event) 13338 return false; 13339 cq_event->hdwq = cq->hdwq; 13340 spin_lock_irqsave(&phba->hbalock, iflags); 13341 list_add_tail(&cq_event->list, 13342 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13343 /* Set the els xri abort event flag */ 13344 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13345 spin_unlock_irqrestore(&phba->hbalock, iflags); 13346 workposted = true; 13347 break; 13348 case LPFC_NVME: 13349 /* Notify aborted XRI for NVME work queue */ 13350 if (phba->nvmet_support) 13351 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13352 else 13353 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); 13354 13355 workposted = false; 13356 break; 13357 default: 13358 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13359 "0603 Invalid CQ subtype %d: " 13360 "%08x %08x %08x %08x\n", 13361 cq->subtype, wcqe->word0, wcqe->parameter, 13362 wcqe->word2, wcqe->word3); 13363 workposted = false; 13364 break; 13365 } 13366 return workposted; 13367 } 13368 13369 #define FC_RCTL_MDS_DIAGS 0xF4 13370 13371 /** 13372 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13373 * @phba: Pointer to HBA context object. 13374 * @rcqe: Pointer to receive-queue completion queue entry. 13375 * 13376 * This routine process a receive-queue completion queue entry. 13377 * 13378 * Return: true if work posted to worker thread, otherwise false. 
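 *
 * Note: this handler services only the slow-path header/data RQ pair
 * (sli4_hba.hdr_rq / sli4_hba.dat_rq); if the RQ id in the RCQE does not
 * match hdr_rq->queue_id the entry is ignored. Frames with an MDS
 * diagnostic or unsolicited-data R_CTL are handled inline as MDS loopback
 * frames via lpfc_sli4_handle_mds_loopback(); all other received frames are
 * queued to sp_queue_event for the worker thread.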
13379 **/ 13380 static bool 13381 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13382 { 13383 bool workposted = false; 13384 struct fc_frame_header *fc_hdr; 13385 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13386 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13387 struct lpfc_nvmet_tgtport *tgtp; 13388 struct hbq_dmabuf *dma_buf; 13389 uint32_t status, rq_id; 13390 unsigned long iflags; 13391 13392 /* sanity check on queue memory */ 13393 if (unlikely(!hrq) || unlikely(!drq)) 13394 return workposted; 13395 13396 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13397 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13398 else 13399 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13400 if (rq_id != hrq->queue_id) 13401 goto out; 13402 13403 status = bf_get(lpfc_rcqe_status, rcqe); 13404 switch (status) { 13405 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13406 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13407 "2537 Receive Frame Truncated!!\n"); 13408 /* fall through */ 13409 case FC_STATUS_RQ_SUCCESS: 13410 spin_lock_irqsave(&phba->hbalock, iflags); 13411 lpfc_sli4_rq_release(hrq, drq); 13412 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13413 if (!dma_buf) { 13414 hrq->RQ_no_buf_found++; 13415 spin_unlock_irqrestore(&phba->hbalock, iflags); 13416 goto out; 13417 } 13418 hrq->RQ_rcv_buf++; 13419 hrq->RQ_buf_posted--; 13420 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13421 13422 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13423 13424 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 13425 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 13426 spin_unlock_irqrestore(&phba->hbalock, iflags); 13427 /* Handle MDS Loopback frames */ 13428 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); 13429 break; 13430 } 13431 13432 /* save off the frame for the work thread to process */ 13433 list_add_tail(&dma_buf->cq_event.list, 13434 &phba->sli4_hba.sp_queue_event); 13435 /* Frame received */ 13436 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13437 spin_unlock_irqrestore(&phba->hbalock, iflags); 13438 workposted = true; 13439 break; 13440 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13441 if (phba->nvmet_support) { 13442 tgtp = phba->targetport->private; 13443 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13444 "6402 RQE Error x%x, posted %d err_cnt " 13445 "%d: %x %x %x\n", 13446 status, hrq->RQ_buf_posted, 13447 hrq->RQ_no_posted_buf, 13448 atomic_read(&tgtp->rcv_fcp_cmd_in), 13449 atomic_read(&tgtp->rcv_fcp_cmd_out), 13450 atomic_read(&tgtp->xmt_fcp_release)); 13451 } 13452 /* fallthrough */ 13453 13454 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13455 hrq->RQ_no_posted_buf++; 13456 /* Post more buffers if possible */ 13457 spin_lock_irqsave(&phba->hbalock, iflags); 13458 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13459 spin_unlock_irqrestore(&phba->hbalock, iflags); 13460 workposted = true; 13461 break; 13462 } 13463 out: 13464 return workposted; 13465 } 13466 13467 /** 13468 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13469 * @phba: Pointer to HBA context object. 13470 * @cq: Pointer to the completion queue. 13471 * @cqe: Pointer to a completion queue entry. 13472 * 13473 * This routine process a slow-path work-queue or receive queue completion queue 13474 * entry. 13475 * 13476 * Return: true if work posted to worker thread, otherwise false. 
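 *
 * Dispatch summary (from the switch in the body below):
 *
 *	CQE_CODE_COMPL_WQE	-> lpfc_sli4_sp_handle_els_wcqe()
 *	CQE_CODE_RELEASE_WQE	-> lpfc_sli4_sp_handle_rel_wcqe()
 *	CQE_CODE_XRI_ABORTED	-> lpfc_sli4_sp_handle_abort_xri_wcqe()
 *	CQE_CODE_RECEIVE(_V1)	-> lpfc_sli4_sp_handle_rcqe()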
13477 **/ 13478 static bool 13479 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13480 struct lpfc_cqe *cqe) 13481 { 13482 struct lpfc_cqe cqevt; 13483 bool workposted = false; 13484 13485 /* Copy the work queue CQE and convert endian order if needed */ 13486 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13487 13488 /* Check and process for different type of WCQE and dispatch */ 13489 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13490 case CQE_CODE_COMPL_WQE: 13491 /* Process the WQ/RQ complete event */ 13492 phba->last_completion_time = jiffies; 13493 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13494 (struct lpfc_wcqe_complete *)&cqevt); 13495 break; 13496 case CQE_CODE_RELEASE_WQE: 13497 /* Process the WQ release event */ 13498 lpfc_sli4_sp_handle_rel_wcqe(phba, 13499 (struct lpfc_wcqe_release *)&cqevt); 13500 break; 13501 case CQE_CODE_XRI_ABORTED: 13502 /* Process the WQ XRI abort event */ 13503 phba->last_completion_time = jiffies; 13504 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13505 (struct sli4_wcqe_xri_aborted *)&cqevt); 13506 break; 13507 case CQE_CODE_RECEIVE: 13508 case CQE_CODE_RECEIVE_V1: 13509 /* Process the RQ event */ 13510 phba->last_completion_time = jiffies; 13511 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13512 (struct lpfc_rcqe *)&cqevt); 13513 break; 13514 default: 13515 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13516 "0388 Not a valid WCQE code: x%x\n", 13517 bf_get(lpfc_cqe_code, &cqevt)); 13518 break; 13519 } 13520 return workposted; 13521 } 13522 13523 /** 13524 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13525 * @phba: Pointer to HBA context object. 13526 * @eqe: Pointer to fast-path event queue entry. 13527 * 13528 * This routine process a event queue entry from the slow-path event queue. 13529 * It will check the MajorCode and MinorCode to determine this is for a 13530 * completion event on a completion queue, if not, an error shall be logged 13531 * and just return. Otherwise, it will get to the corresponding completion 13532 * queue and process all the entries on that completion queue, rearm the 13533 * completion queue, and then return. 13534 * 13535 **/ 13536 static void 13537 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13538 struct lpfc_queue *speq) 13539 { 13540 struct lpfc_queue *cq = NULL, *childq; 13541 uint16_t cqid; 13542 13543 /* Get the reference to the corresponding CQ */ 13544 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13545 13546 list_for_each_entry(childq, &speq->child_list, list) { 13547 if (childq->queue_id == cqid) { 13548 cq = childq; 13549 break; 13550 } 13551 } 13552 if (unlikely(!cq)) { 13553 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13554 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13555 "0365 Slow-path CQ identifier " 13556 "(%d) does not exist\n", cqid); 13557 return; 13558 } 13559 13560 /* Save EQ associated with this CQ */ 13561 cq->assoc_qp = speq; 13562 13563 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) 13564 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13565 "0390 Cannot schedule soft IRQ " 13566 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13567 cqid, cq->queue_id, raw_smp_processor_id()); 13568 } 13569 13570 /** 13571 * __lpfc_sli4_process_cq - Process elements of a CQ 13572 * @phba: Pointer to HBA context object. 
13573 * @cq: Pointer to CQ to be processed 13574 * @handler: Routine to process each cqe 13575 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 13576 * 13577 * This routine processes completion queue entries in a CQ. While a valid 13578 * queue element is found, the handler is called. During processing checks 13579 * are made for periodic doorbell writes to let the hardware know of 13580 * element consumption. 13581 * 13582 * If the max limit on cqes to process is hit, or there are no more valid 13583 * entries, the loop stops. If we processed a sufficient number of elements, 13584 * meaning there is sufficient load, rather than rearming and generating 13585 * another interrupt, a cq rescheduling delay will be set. A delay of 0 13586 * indicates no rescheduling. 13587 * 13588 * Returns True if work scheduled, False otherwise. 13589 **/ 13590 static bool 13591 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 13592 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 13593 struct lpfc_cqe *), unsigned long *delay) 13594 { 13595 struct lpfc_cqe *cqe; 13596 bool workposted = false; 13597 int count = 0, consumed = 0; 13598 bool arm = true; 13599 13600 /* default - no reschedule */ 13601 *delay = 0; 13602 13603 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 13604 goto rearm_and_exit; 13605 13606 /* Process all the entries to the CQ */ 13607 cqe = lpfc_sli4_cq_get(cq); 13608 while (cqe) { 13609 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME) 13610 if (phba->ktime_on) 13611 cq->isr_timestamp = ktime_get_ns(); 13612 else 13613 cq->isr_timestamp = 0; 13614 #endif 13615 workposted |= handler(phba, cq, cqe); 13616 __lpfc_sli4_consume_cqe(phba, cq, cqe); 13617 13618 consumed++; 13619 if (!(++count % cq->max_proc_limit)) 13620 break; 13621 13622 if (!(count % cq->notify_interval)) { 13623 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13624 LPFC_QUEUE_NOARM); 13625 consumed = 0; 13626 } 13627 13628 cqe = lpfc_sli4_cq_get(cq); 13629 } 13630 if (count >= phba->cfg_cq_poll_threshold) { 13631 *delay = 1; 13632 arm = false; 13633 } 13634 13635 /* Track the max number of CQEs processed in 1 EQ */ 13636 if (count > cq->CQ_max_cqe) 13637 cq->CQ_max_cqe = count; 13638 13639 cq->assoc_qp->EQ_cqe_cnt += count; 13640 13641 /* Catch the no cq entry condition */ 13642 if (unlikely(count == 0)) 13643 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13644 "0369 No entry from completion queue " 13645 "qid=%d\n", cq->queue_id); 13646 13647 cq->queue_claimed = 0; 13648 13649 rearm_and_exit: 13650 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13651 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 13652 13653 return workposted; 13654 } 13655 13656 /** 13657 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13658 * @cq: pointer to CQ to process 13659 * 13660 * This routine calls the cq processing routine with a handler specific 13661 * to the type of queue bound to it. 13662 * 13663 * The CQ routine returns two values: the first is the calling status, 13664 * which indicates whether work was queued to the background discovery 13665 * thread. If true, the routine should wakeup the discovery thread; 13666 * the second is the delay parameter. If non-zero, rather than rearming 13667 * the CQ and yet another interrupt, the CQ handler should be queued so 13668 * that it is processed in a subsequent polling action. The value of 13669 * the delay indicates when to reschedule it. 
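 *
 * Caller pattern sketch (illustrative; it mirrors the MCQ case handled in
 * the body below):
 *
 *	workposted = __lpfc_sli4_process_cq(phba, cq,
 *					    lpfc_sli4_sp_handle_mcqe, &delay);
 *	if (delay)
 *		queue_delayed_work_on(cq->chann, phba->wq,
 *				      &cq->sched_spwork, delay);
 *	if (workposted)
 *		lpfc_worker_wake_up(phba);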
13670 **/ 13671 static void 13672 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 13673 { 13674 struct lpfc_hba *phba = cq->phba; 13675 unsigned long delay; 13676 bool workposted = false; 13677 13678 /* Process and rearm the CQ */ 13679 switch (cq->type) { 13680 case LPFC_MCQ: 13681 workposted |= __lpfc_sli4_process_cq(phba, cq, 13682 lpfc_sli4_sp_handle_mcqe, 13683 &delay); 13684 break; 13685 case LPFC_WCQ: 13686 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) 13687 workposted |= __lpfc_sli4_process_cq(phba, cq, 13688 lpfc_sli4_fp_handle_cqe, 13689 &delay); 13690 else 13691 workposted |= __lpfc_sli4_process_cq(phba, cq, 13692 lpfc_sli4_sp_handle_cqe, 13693 &delay); 13694 break; 13695 default: 13696 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13697 "0370 Invalid completion queue type (%d)\n", 13698 cq->type); 13699 return; 13700 } 13701 13702 if (delay) { 13703 if (!queue_delayed_work_on(cq->chann, phba->wq, 13704 &cq->sched_spwork, delay)) 13705 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13706 "0394 Cannot schedule soft IRQ " 13707 "for cqid=%d on CPU %d\n", 13708 cq->queue_id, cq->chann); 13709 } 13710 13711 /* wake up worker thread if there are works to be done */ 13712 if (workposted) 13713 lpfc_worker_wake_up(phba); 13714 } 13715 13716 /** 13717 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 13718 * interrupt 13719 * @work: pointer to work element 13720 * 13721 * translates from the work handler and calls the slow-path handler. 13722 **/ 13723 static void 13724 lpfc_sli4_sp_process_cq(struct work_struct *work) 13725 { 13726 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 13727 13728 __lpfc_sli4_sp_process_cq(cq); 13729 } 13730 13731 /** 13732 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 13733 * @work: pointer to work element 13734 * 13735 * translates from the work handler and calls the slow-path handler. 13736 **/ 13737 static void 13738 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 13739 { 13740 struct lpfc_queue *cq = container_of(to_delayed_work(work), 13741 struct lpfc_queue, sched_spwork); 13742 13743 __lpfc_sli4_sp_process_cq(cq); 13744 } 13745 13746 /** 13747 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13748 * @phba: Pointer to HBA context object. 13749 * @cq: Pointer to associated CQ 13750 * @wcqe: Pointer to work-queue completion queue entry. 13751 * 13752 * This routine process a fast-path work queue completion entry from fast-path 13753 * event queue for FCP command response completion. 13754 **/ 13755 static void 13756 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13757 struct lpfc_wcqe_complete *wcqe) 13758 { 13759 struct lpfc_sli_ring *pring = cq->pring; 13760 struct lpfc_iocbq *cmdiocbq; 13761 struct lpfc_iocbq irspiocbq; 13762 unsigned long iflags; 13763 13764 /* Check for response status */ 13765 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13766 /* If resource errors reported from HBA, reduce queue 13767 * depth of the SCSI device. 
13768 */ 13769 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13770 IOSTAT_LOCAL_REJECT)) && 13771 ((wcqe->parameter & IOERR_PARAM_MASK) == 13772 IOERR_NO_RESOURCES)) 13773 phba->lpfc_rampdown_queue_depth(phba); 13774 13775 /* Log the error status */ 13776 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13777 "0373 FCP CQE error: status=x%x: " 13778 "CQE: %08x %08x %08x %08x\n", 13779 bf_get(lpfc_wcqe_c_status, wcqe), 13780 wcqe->word0, wcqe->total_data_placed, 13781 wcqe->parameter, wcqe->word3); 13782 } 13783 13784 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13785 spin_lock_irqsave(&pring->ring_lock, iflags); 13786 pring->stats.iocb_event++; 13787 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13788 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13789 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13790 if (unlikely(!cmdiocbq)) { 13791 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13792 "0374 FCP complete with no corresponding " 13793 "cmdiocb: iotag (%d)\n", 13794 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13795 return; 13796 } 13797 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13798 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13799 #endif 13800 if (cmdiocbq->iocb_cmpl == NULL) { 13801 if (cmdiocbq->wqe_cmpl) { 13802 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13803 spin_lock_irqsave(&phba->hbalock, iflags); 13804 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13805 spin_unlock_irqrestore(&phba->hbalock, iflags); 13806 } 13807 13808 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13809 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13810 return; 13811 } 13812 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13813 "0375 FCP cmdiocb not callback function " 13814 "iotag: (%d)\n", 13815 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13816 return; 13817 } 13818 13819 /* Fake the irspiocb and copy necessary response information */ 13820 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13821 13822 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13823 spin_lock_irqsave(&phba->hbalock, iflags); 13824 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13825 spin_unlock_irqrestore(&phba->hbalock, iflags); 13826 } 13827 13828 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13829 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13830 } 13831 13832 /** 13833 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13834 * @phba: Pointer to HBA context object. 13835 * @cq: Pointer to completion queue. 13836 * @wcqe: Pointer to work-queue completion queue entry. 13837 * 13838 * This routine handles an fast-path WQ entry consumed event by invoking the 13839 * proper WQ release routine to the slow-path WQ. 
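 * The WQ id carried in the release CQE is matched against the child WQs
 * hanging off @cq, and the matching WQ has its consumed entries released.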
13840 **/ 13841 static void 13842 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13843 struct lpfc_wcqe_release *wcqe) 13844 { 13845 struct lpfc_queue *childwq; 13846 bool wqid_matched = false; 13847 uint16_t hba_wqid; 13848 13849 /* Check for fast-path FCP work queue release */ 13850 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13851 list_for_each_entry(childwq, &cq->child_list, list) { 13852 if (childwq->queue_id == hba_wqid) { 13853 lpfc_sli4_wq_release(childwq, 13854 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13855 if (childwq->q_flag & HBA_NVMET_WQFULL) 13856 lpfc_nvmet_wqfull_process(phba, childwq); 13857 wqid_matched = true; 13858 break; 13859 } 13860 } 13861 /* Report warning log message if no match found */ 13862 if (wqid_matched != true) 13863 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13864 "2580 Fast-path wqe consume event carries " 13865 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13866 } 13867 13868 /** 13869 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13870 * @phba: Pointer to HBA context object. 13871 * @rcqe: Pointer to receive-queue completion queue entry. 13872 * 13873 * This routine process a receive-queue completion queue entry. 13874 * 13875 * Return: true if work posted to worker thread, otherwise false. 13876 **/ 13877 static bool 13878 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13879 struct lpfc_rcqe *rcqe) 13880 { 13881 bool workposted = false; 13882 struct lpfc_queue *hrq; 13883 struct lpfc_queue *drq; 13884 struct rqb_dmabuf *dma_buf; 13885 struct fc_frame_header *fc_hdr; 13886 struct lpfc_nvmet_tgtport *tgtp; 13887 uint32_t status, rq_id; 13888 unsigned long iflags; 13889 uint32_t fctl, idx; 13890 13891 if ((phba->nvmet_support == 0) || 13892 (phba->sli4_hba.nvmet_cqset == NULL)) 13893 return workposted; 13894 13895 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13896 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13897 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13898 13899 /* sanity check on queue memory */ 13900 if (unlikely(!hrq) || unlikely(!drq)) 13901 return workposted; 13902 13903 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13904 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13905 else 13906 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13907 13908 if ((phba->nvmet_support == 0) || 13909 (rq_id != hrq->queue_id)) 13910 return workposted; 13911 13912 status = bf_get(lpfc_rcqe_status, rcqe); 13913 switch (status) { 13914 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13915 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13916 "6126 Receive Frame Truncated!!\n"); 13917 /* fall through */ 13918 case FC_STATUS_RQ_SUCCESS: 13919 spin_lock_irqsave(&phba->hbalock, iflags); 13920 lpfc_sli4_rq_release(hrq, drq); 13921 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13922 if (!dma_buf) { 13923 hrq->RQ_no_buf_found++; 13924 spin_unlock_irqrestore(&phba->hbalock, iflags); 13925 goto out; 13926 } 13927 spin_unlock_irqrestore(&phba->hbalock, iflags); 13928 hrq->RQ_rcv_buf++; 13929 hrq->RQ_buf_posted--; 13930 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13931 13932 /* Just some basic sanity checks on FCP Command frame */ 13933 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13934 fc_hdr->fh_f_ctl[1] << 8 | 13935 fc_hdr->fh_f_ctl[2]); 13936 if (((fctl & 13937 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13938 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13939 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13940 goto drop; 13941 13942 if (fc_hdr->fh_type == 
FC_TYPE_FCP) { 13943 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13944 lpfc_nvmet_unsol_fcp_event( 13945 phba, idx, dma_buf, 13946 cq->isr_timestamp); 13947 return false; 13948 } 13949 drop: 13950 lpfc_rq_buf_free(phba, &dma_buf->hbuf); 13951 break; 13952 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13953 if (phba->nvmet_support) { 13954 tgtp = phba->targetport->private; 13955 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13956 "6401 RQE Error x%x, posted %d err_cnt " 13957 "%d: %x %x %x\n", 13958 status, hrq->RQ_buf_posted, 13959 hrq->RQ_no_posted_buf, 13960 atomic_read(&tgtp->rcv_fcp_cmd_in), 13961 atomic_read(&tgtp->rcv_fcp_cmd_out), 13962 atomic_read(&tgtp->xmt_fcp_release)); 13963 } 13964 /* fallthrough */ 13965 13966 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13967 hrq->RQ_no_posted_buf++; 13968 /* Post more buffers if possible */ 13969 break; 13970 } 13971 out: 13972 return workposted; 13973 } 13974 13975 /** 13976 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13977 * @phba: adapter with cq 13978 * @cq: Pointer to the completion queue. 13979 * @eqe: Pointer to fast-path completion queue entry. 13980 * 13981 * This routine process a fast-path work queue completion entry from fast-path 13982 * event queue for FCP command response completion. 13983 * 13984 * Return: true if work posted to worker thread, otherwise false. 13985 **/ 13986 static bool 13987 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13988 struct lpfc_cqe *cqe) 13989 { 13990 struct lpfc_wcqe_release wcqe; 13991 bool workposted = false; 13992 13993 /* Copy the work queue CQE and convert endian order if needed */ 13994 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13995 13996 /* Check and process for different type of WCQE and dispatch */ 13997 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13998 case CQE_CODE_COMPL_WQE: 13999 case CQE_CODE_NVME_ERSP: 14000 cq->CQ_wq++; 14001 /* Process the WQ complete event */ 14002 phba->last_completion_time = jiffies; 14003 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 14004 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 14005 (struct lpfc_wcqe_complete *)&wcqe); 14006 if (cq->subtype == LPFC_NVME_LS) 14007 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 14008 (struct lpfc_wcqe_complete *)&wcqe); 14009 break; 14010 case CQE_CODE_RELEASE_WQE: 14011 cq->CQ_release_wqe++; 14012 /* Process the WQ release event */ 14013 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 14014 (struct lpfc_wcqe_release *)&wcqe); 14015 break; 14016 case CQE_CODE_XRI_ABORTED: 14017 cq->CQ_xri_aborted++; 14018 /* Process the WQ XRI abort event */ 14019 phba->last_completion_time = jiffies; 14020 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 14021 (struct sli4_wcqe_xri_aborted *)&wcqe); 14022 break; 14023 case CQE_CODE_RECEIVE_V1: 14024 case CQE_CODE_RECEIVE: 14025 phba->last_completion_time = jiffies; 14026 if (cq->subtype == LPFC_NVMET) { 14027 workposted = lpfc_sli4_nvmet_handle_rcqe( 14028 phba, cq, (struct lpfc_rcqe *)&wcqe); 14029 } 14030 break; 14031 default: 14032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14033 "0144 Not a valid CQE code: x%x\n", 14034 bf_get(lpfc_wcqe_c_code, &wcqe)); 14035 break; 14036 } 14037 return workposted; 14038 } 14039 14040 /** 14041 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 14042 * @phba: Pointer to HBA context object. 14043 * @eqe: Pointer to fast-path event queue entry. 14044 * 14045 * This routine process a event queue entry from the fast-path event queue. 
14046 * It will check the MajorCode and MinorCode to determine this is for a 14047 * completion event on a completion queue, if not, an error shall be logged 14048 * and just return. Otherwise, it will get to the corresponding completion 14049 * queue and process all the entries on the completion queue, rearm the 14050 * completion queue, and then return. 14051 **/ 14052 static void 14053 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 14054 struct lpfc_eqe *eqe) 14055 { 14056 struct lpfc_queue *cq = NULL; 14057 uint32_t qidx = eq->hdwq; 14058 uint16_t cqid, id; 14059 14060 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14061 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14062 "0366 Not a valid completion " 14063 "event: majorcode=x%x, minorcode=x%x\n", 14064 bf_get_le32(lpfc_eqe_major_code, eqe), 14065 bf_get_le32(lpfc_eqe_minor_code, eqe)); 14066 return; 14067 } 14068 14069 /* Get the reference to the corresponding CQ */ 14070 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14071 14072 /* Use the fast lookup method first */ 14073 if (cqid <= phba->sli4_hba.cq_max) { 14074 cq = phba->sli4_hba.cq_lookup[cqid]; 14075 if (cq) 14076 goto work_cq; 14077 } 14078 14079 /* Next check for NVMET completion */ 14080 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 14081 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 14082 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 14083 /* Process NVMET unsol rcv */ 14084 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 14085 goto process_cq; 14086 } 14087 } 14088 14089 if (phba->sli4_hba.nvmels_cq && 14090 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 14091 /* Process NVME unsol rcv */ 14092 cq = phba->sli4_hba.nvmels_cq; 14093 } 14094 14095 /* Otherwise this is a Slow path event */ 14096 if (cq == NULL) { 14097 lpfc_sli4_sp_handle_eqe(phba, eqe, 14098 phba->sli4_hba.hdwq[qidx].hba_eq); 14099 return; 14100 } 14101 14102 process_cq: 14103 if (unlikely(cqid != cq->queue_id)) { 14104 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14105 "0368 Miss-matched fast-path completion " 14106 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14107 cqid, cq->queue_id); 14108 return; 14109 } 14110 14111 work_cq: 14112 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) 14113 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14114 "0363 Cannot schedule soft IRQ " 14115 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14116 cqid, cq->queue_id, raw_smp_processor_id()); 14117 } 14118 14119 /** 14120 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 14121 * @cq: Pointer to CQ to be processed 14122 * 14123 * This routine calls the cq processing routine with the handler for 14124 * fast path CQEs. 14125 * 14126 * The CQ routine returns two values: the first is the calling status, 14127 * which indicates whether work was queued to the background discovery 14128 * thread. If true, the routine should wakeup the discovery thread; 14129 * the second is the delay parameter. If non-zero, rather than rearming 14130 * the CQ and yet another interrupt, the CQ handler should be queued so 14131 * that it is processed in a subsequent polling action. The value of 14132 * the delay indicates when to reschedule it. 
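 * When a delay is returned, the CQ's sched_irqwork delayed work is queued
 * on the CQ's assigned CPU instead of rearming the CQ immediately.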
14133 **/
14134 static void
14135 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14136 {
14137 struct lpfc_hba *phba = cq->phba;
14138 unsigned long delay;
14139 bool workposted = false;
14140 
14141 /* process and rearm the CQ */
14142 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14143 &delay);
14144 
14145 if (delay) {
14146 if (!queue_delayed_work_on(cq->chann, phba->wq,
14147 &cq->sched_irqwork, delay))
14148 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14149 "0367 Cannot schedule soft IRQ "
14150 "for cqid=%d on CPU %d\n",
14151 cq->queue_id, cq->chann);
14152 }
14153 
14154 /* wake up worker thread if there are works to be done */
14155 if (workposted)
14156 lpfc_worker_wake_up(phba);
14157 }
14158 
14159 /**
14160 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14161 * interrupt
14162 * @work: pointer to work element
14163 *
14164 * Translates from the work element and calls the fast-path handler.
14165 **/
14166 static void
14167 lpfc_sli4_hba_process_cq(struct work_struct *work)
14168 {
14169 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14170 
14171 __lpfc_sli4_hba_process_cq(cq);
14172 }
14173 
14174 /**
14175 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14176 * @work: pointer to work element
14177 *
14178 * Translates from the work element and calls the fast-path handler.
14179 **/
14180 static void
14181 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14182 {
14183 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14184 struct lpfc_queue, sched_irqwork);
14185 
14186 __lpfc_sli4_hba_process_cq(cq);
14187 }
14188 
14189 /**
14190 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14191 * @irq: Interrupt number.
14192 * @dev_id: The device context pointer.
14193 *
14194 * This function is directly called from the PCI layer as an interrupt
14195 * service routine when device with SLI-4 interface spec is enabled with
14196 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14197 * ring event in the HBA. However, when the device is enabled with either
14198 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14199 * device-level interrupt handler. When the PCI slot is in error recovery
14200 * or the HBA is undergoing initialization, the interrupt handler will not
14201 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14202 * the interrupt context. This function is called without any lock held.
14203 * It gets the hbalock to access and update SLI data structures. Note that,
14204 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
14205 * equal to that of FCP CQ index.
14206 *
14207 * The link attention and ELS ring attention events are handled
14208 * by the worker thread. The interrupt handler signals the worker thread
14209 * and returns for these events. This function is called without any lock
14210 * held. It gets the hbalock to access and update SLI data structures.
14211 *
14212 * This function returns IRQ_HANDLED when the interrupt is handled, else it
14213 * returns IRQ_NONE.
14214 **/ 14215 irqreturn_t 14216 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14217 { 14218 struct lpfc_hba *phba; 14219 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14220 struct lpfc_queue *fpeq; 14221 unsigned long iflag; 14222 int ecount = 0; 14223 int hba_eqidx; 14224 struct lpfc_eq_intr_info *eqi; 14225 uint32_t icnt; 14226 14227 /* Get the driver's phba structure from the dev_id */ 14228 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14229 phba = hba_eq_hdl->phba; 14230 hba_eqidx = hba_eq_hdl->idx; 14231 14232 if (unlikely(!phba)) 14233 return IRQ_NONE; 14234 if (unlikely(!phba->sli4_hba.hdwq)) 14235 return IRQ_NONE; 14236 14237 /* Get to the EQ struct associated with this vector */ 14238 fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq; 14239 if (unlikely(!fpeq)) 14240 return IRQ_NONE; 14241 14242 /* Check device state for handling interrupt */ 14243 if (unlikely(lpfc_intr_state_check(phba))) { 14244 /* Check again for link_state with lock held */ 14245 spin_lock_irqsave(&phba->hbalock, iflag); 14246 if (phba->link_state < LPFC_LINK_DOWN) 14247 /* Flush, clear interrupt, and rearm the EQ */ 14248 lpfc_sli4_eq_flush(phba, fpeq); 14249 spin_unlock_irqrestore(&phba->hbalock, iflag); 14250 return IRQ_NONE; 14251 } 14252 14253 eqi = phba->sli4_hba.eq_info; 14254 icnt = this_cpu_inc_return(eqi->icnt); 14255 fpeq->last_cpu = raw_smp_processor_id(); 14256 14257 if (icnt > LPFC_EQD_ISR_TRIGGER && 14258 phba->cfg_irq_chann == 1 && 14259 phba->cfg_auto_imax && 14260 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && 14261 phba->sli.sli_flag & LPFC_SLI_USE_EQDR) 14262 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); 14263 14264 /* process and rearm the EQ */ 14265 ecount = lpfc_sli4_process_eq(phba, fpeq); 14266 14267 if (unlikely(ecount == 0)) { 14268 fpeq->EQ_no_entry++; 14269 if (phba->intr_type == MSIX) 14270 /* MSI-X treated interrupt served as no EQ share INT */ 14271 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14272 "0358 MSI-X interrupt with no EQE\n"); 14273 else 14274 /* Non MSI-X treated on interrupt as EQ share INT */ 14275 return IRQ_NONE; 14276 } 14277 14278 return IRQ_HANDLED; 14279 } /* lpfc_sli4_fp_intr_handler */ 14280 14281 /** 14282 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14283 * @irq: Interrupt number. 14284 * @dev_id: The device context pointer. 14285 * 14286 * This function is the device-level interrupt handler to device with SLI-4 14287 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14288 * interrupt mode is enabled and there is an event in the HBA which requires 14289 * driver attention. This function invokes the slow-path interrupt attention 14290 * handling function and fast-path interrupt attention handling function in 14291 * turn to process the relevant HBA attention events. This function is called 14292 * without any lock held. It gets the hbalock to access and update SLI data 14293 * structures. 14294 * 14295 * This function returns IRQ_HANDLED when interrupt is handled, else it 14296 * returns IRQ_NONE. 14297 **/ 14298 irqreturn_t 14299 lpfc_sli4_intr_handler(int irq, void *dev_id) 14300 { 14301 struct lpfc_hba *phba; 14302 irqreturn_t hba_irq_rc; 14303 bool hba_handled = false; 14304 int qidx; 14305 14306 /* Get the driver's phba structure from the dev_id */ 14307 phba = (struct lpfc_hba *)dev_id; 14308 14309 if (unlikely(!phba)) 14310 return IRQ_NONE; 14311 14312 /* 14313 * Invoke fast-path host attention interrupt handling as appropriate. 
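 * Each configured IRQ channel gets its own call to
 * lpfc_sli4_hba_intr_handler(); IRQ_HANDLED is returned if any of them
 * reports that it handled work.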
14314 */ 14315 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 14316 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 14317 &phba->sli4_hba.hba_eq_hdl[qidx]); 14318 if (hba_irq_rc == IRQ_HANDLED) 14319 hba_handled |= true; 14320 } 14321 14322 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 14323 } /* lpfc_sli4_intr_handler */ 14324 14325 /** 14326 * lpfc_sli4_queue_free - free a queue structure and associated memory 14327 * @queue: The queue structure to free. 14328 * 14329 * This function frees a queue structure and the DMAable memory used for 14330 * the host resident queue. This function must be called after destroying the 14331 * queue on the HBA. 14332 **/ 14333 void 14334 lpfc_sli4_queue_free(struct lpfc_queue *queue) 14335 { 14336 struct lpfc_dmabuf *dmabuf; 14337 14338 if (!queue) 14339 return; 14340 14341 if (!list_empty(&queue->wq_list)) 14342 list_del(&queue->wq_list); 14343 14344 while (!list_empty(&queue->page_list)) { 14345 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 14346 list); 14347 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 14348 dmabuf->virt, dmabuf->phys); 14349 kfree(dmabuf); 14350 } 14351 if (queue->rqbp) { 14352 lpfc_free_rq_buffer(queue->phba, queue); 14353 kfree(queue->rqbp); 14354 } 14355 14356 if (!list_empty(&queue->cpu_list)) 14357 list_del(&queue->cpu_list); 14358 14359 kfree(queue); 14360 return; 14361 } 14362 14363 /** 14364 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 14365 * @phba: The HBA that this queue is being created on. 14366 * @page_size: The size of a queue page 14367 * @entry_size: The size of each queue entry for this queue. 14368 * @entry count: The number of entries that this queue will handle. 14369 * @cpu: The cpu that will primarily utilize this queue. 14370 * 14371 * This function allocates a queue structure and the DMAable memory used for 14372 * the host resident queue. This function must be called before creating the 14373 * queue on the HBA. 14374 **/ 14375 struct lpfc_queue * 14376 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 14377 uint32_t entry_size, uint32_t entry_count, int cpu) 14378 { 14379 struct lpfc_queue *queue; 14380 struct lpfc_dmabuf *dmabuf; 14381 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14382 uint16_t x, pgcnt; 14383 14384 if (!phba->sli4_hba.pc_sli4_params.supported) 14385 hw_page_size = page_size; 14386 14387 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size; 14388 14389 /* If needed, Adjust page count to match the max the adapter supports */ 14390 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt) 14391 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt; 14392 14393 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt), 14394 GFP_KERNEL, cpu_to_node(cpu)); 14395 if (!queue) 14396 return NULL; 14397 14398 INIT_LIST_HEAD(&queue->list); 14399 INIT_LIST_HEAD(&queue->wq_list); 14400 INIT_LIST_HEAD(&queue->wqfull_list); 14401 INIT_LIST_HEAD(&queue->page_list); 14402 INIT_LIST_HEAD(&queue->child_list); 14403 INIT_LIST_HEAD(&queue->cpu_list); 14404 14405 /* Set queue parameters now. If the system cannot provide memory 14406 * resources, the free routine needs to know what was allocated. 
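 * (q_pgs below points at the page-pointer array that was allocated
 * together with the queue structure itself.)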
14407 */
14408 queue->page_count = pgcnt;
14409 queue->q_pgs = (void **)&queue[1];
14410 queue->entry_cnt_per_pg = hw_page_size / entry_size;
14411 queue->entry_size = entry_size;
14412 queue->entry_count = entry_count;
14413 queue->page_size = hw_page_size;
14414 queue->phba = phba;
14415 
14416 for (x = 0; x < queue->page_count; x++) {
14417 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14418 dev_to_node(&phba->pcidev->dev));
14419 if (!dmabuf)
14420 goto out_fail;
14421 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14422 hw_page_size, &dmabuf->phys,
14423 GFP_KERNEL);
14424 if (!dmabuf->virt) {
14425 kfree(dmabuf);
14426 goto out_fail;
14427 }
14428 dmabuf->buffer_tag = x;
14429 list_add_tail(&dmabuf->list, &queue->page_list);
14430 /* use lpfc_sli4_qe to index a particular entry in this page */
14431 queue->q_pgs[x] = dmabuf->virt;
14432 }
14433 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14434 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14435 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14436 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14437 
14438 /* notify_interval will be set during q creation */
14439 
14440 return queue;
14441 out_fail:
14442 lpfc_sli4_queue_free(queue);
14443 return NULL;
14444 }
14445 
14446 /**
14447 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14448 * @phba: HBA structure whose PCI BAR mappings are to be used.
14449 * @pci_barset: PCI BAR set flag.
14450 *
14451 * This function shall perform iomap of the specified PCI BAR address to host
14452 * memory address if not already done so and return it. The returned host
14453 * memory address can be NULL.
14454 */
14455 static void __iomem *
14456 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14457 {
14458 if (!phba->pcidev)
14459 return NULL;
14460 
14461 switch (pci_barset) {
14462 case WQ_PCI_BAR_0_AND_1:
14463 return phba->pci_bar0_memmap_p;
14464 case WQ_PCI_BAR_2_AND_3:
14465 return phba->pci_bar2_memmap_p;
14466 case WQ_PCI_BAR_4_AND_5:
14467 return phba->pci_bar4_memmap_p;
14468 default:
14469 break;
14470 }
14471 return NULL;
14472 }
14473 
14474 /**
14475 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14476 * @phba: HBA structure that EQs are on.
14477 * @startq: The starting EQ index to modify
14478 * @numq: The number of EQs (consecutive indexes) to modify
14479 * @usdelay: amount of delay, in microseconds
14480 *
14481 * This function revises the EQ delay on 1 or more EQs. The EQ delay
14482 * is set either by writing to a register (if supported by the SLI Port)
14483 * or by mailbox command. The mailbox command allows several EQs to be
14484 * updated at once.
14485 *
14486 * The @phba struct is used to send a mailbox command to HBA. The @startq
14487 * is used to get the starting EQ index to change. The @numq value is
14488 * used to specify how many consecutive EQ indexes, starting at EQ index,
14489 * are to be changed. When the mailbox method is used, the command is
14490 * issued in polled mode, so this function waits for it to finish before returning.
14491 *
14492 * This function does not return a value. If the mailbox allocation or the
14493 * MODIFY_EQ_DELAY mailbox command fails, the failure is logged and the
14494 * affected EQs may be left with their previous delay multiplier rather
14495 * than the requested one.
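 *
 * Illustrative call (not taken from this file):
 * lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 100) would request
 * a 100 microsecond coalescing delay on every EQ, using the EQ_DELAY
 * register when the port supports it and the MODIFY_EQ_DELAY mailbox
 * command otherwise.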
14496 **/ 14497 void 14498 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14499 uint32_t numq, uint32_t usdelay) 14500 { 14501 struct lpfc_mbx_modify_eq_delay *eq_delay; 14502 LPFC_MBOXQ_t *mbox; 14503 struct lpfc_queue *eq; 14504 int cnt = 0, rc, length; 14505 uint32_t shdr_status, shdr_add_status; 14506 uint32_t dmult; 14507 int qidx; 14508 union lpfc_sli4_cfg_shdr *shdr; 14509 14510 if (startq >= phba->cfg_irq_chann) 14511 return; 14512 14513 if (usdelay > 0xFFFF) { 14514 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, 14515 "6429 usdelay %d too large. Scaled down to " 14516 "0xFFFF.\n", usdelay); 14517 usdelay = 0xFFFF; 14518 } 14519 14520 /* set values by EQ_DELAY register if supported */ 14521 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14522 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14523 eq = phba->sli4_hba.hdwq[qidx].hba_eq; 14524 if (!eq) 14525 continue; 14526 14527 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); 14528 14529 if (++cnt >= numq) 14530 break; 14531 } 14532 14533 return; 14534 } 14535 14536 /* Otherwise, set values by mailbox cmd */ 14537 14538 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14539 if (!mbox) { 14540 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, 14541 "6428 Failed allocating mailbox cmd buffer." 14542 " EQ delay was not set.\n"); 14543 return; 14544 } 14545 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14546 sizeof(struct lpfc_sli4_cfg_mhdr)); 14547 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14548 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14549 length, LPFC_SLI4_MBX_EMBED); 14550 eq_delay = &mbox->u.mqe.un.eq_delay; 14551 14552 /* Calculate delay multiper from maximum interrupt per second */ 14553 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; 14554 if (dmult) 14555 dmult--; 14556 if (dmult > LPFC_DMULT_MAX) 14557 dmult = LPFC_DMULT_MAX; 14558 14559 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14560 eq = phba->sli4_hba.hdwq[qidx].hba_eq; 14561 if (!eq) 14562 continue; 14563 eq->q_mode = usdelay; 14564 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14565 eq_delay->u.request.eq[cnt].phase = 0; 14566 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14567 14568 if (++cnt >= numq) 14569 break; 14570 } 14571 eq_delay->u.request.num_eq = cnt; 14572 14573 mbox->vport = phba->pport; 14574 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14575 mbox->ctx_buf = NULL; 14576 mbox->ctx_ndlp = NULL; 14577 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14578 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14579 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14580 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14581 if (shdr_status || shdr_add_status || rc) { 14582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14583 "2512 MODIFY_EQ_DELAY mailbox failed with " 14584 "status x%x add_status x%x, mbx status x%x\n", 14585 shdr_status, shdr_add_status, rc); 14586 } 14587 mempool_free(mbox, phba->mbox_mem_pool); 14588 return; 14589 } 14590 14591 /** 14592 * lpfc_eq_create - Create an Event Queue on the HBA 14593 * @phba: HBA structure that indicates port to create a queue on. 14594 * @eq: The queue structure to use to create the event queue. 14595 * @imax: The maximum interrupt per second limit. 14596 * 14597 * This function creates an event queue, as detailed in @eq, on a port, 14598 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 
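 * Supported entry counts are 256, 512, 1024, 2048 and 4096; an unsupported
 * count below 256 is rejected with -EINVAL, while any other unsupported
 * count is programmed as 256 (see the switch on eq->entry_count below).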
14599 * 14600 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14601 * is used to get the entry count and entry size that are necessary to 14602 * determine the number of pages to allocate and use for this queue. This 14603 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14604 * event queue. This function is asynchronous and will wait for the mailbox 14605 * command to finish before continuing. 14606 * 14607 * On success this function will return a zero. If unable to allocate enough 14608 * memory this function will return -ENOMEM. If the queue create mailbox command 14609 * fails this function will return -ENXIO. 14610 **/ 14611 int 14612 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14613 { 14614 struct lpfc_mbx_eq_create *eq_create; 14615 LPFC_MBOXQ_t *mbox; 14616 int rc, length, status = 0; 14617 struct lpfc_dmabuf *dmabuf; 14618 uint32_t shdr_status, shdr_add_status; 14619 union lpfc_sli4_cfg_shdr *shdr; 14620 uint16_t dmult; 14621 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14622 14623 /* sanity check on queue memory */ 14624 if (!eq) 14625 return -ENODEV; 14626 if (!phba->sli4_hba.pc_sli4_params.supported) 14627 hw_page_size = SLI4_PAGE_SIZE; 14628 14629 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14630 if (!mbox) 14631 return -ENOMEM; 14632 length = (sizeof(struct lpfc_mbx_eq_create) - 14633 sizeof(struct lpfc_sli4_cfg_mhdr)); 14634 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14635 LPFC_MBOX_OPCODE_EQ_CREATE, 14636 length, LPFC_SLI4_MBX_EMBED); 14637 eq_create = &mbox->u.mqe.un.eq_create; 14638 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14639 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14640 eq->page_count); 14641 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14642 LPFC_EQE_SIZE); 14643 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14644 14645 /* Use version 2 of CREATE_EQ if eqav is set */ 14646 if (phba->sli4_hba.pc_sli4_params.eqav) { 14647 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14648 LPFC_Q_CREATE_VERSION_2); 14649 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14650 phba->sli4_hba.pc_sli4_params.eqav); 14651 } 14652 14653 /* don't setup delay multiplier using EQ_CREATE */ 14654 dmult = 0; 14655 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14656 dmult); 14657 switch (eq->entry_count) { 14658 default: 14659 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14660 "0360 Unsupported EQ count. 
(%d)\n", 14661 eq->entry_count); 14662 if (eq->entry_count < 256) 14663 return -EINVAL; 14664 /* fall through - otherwise default to smallest count */ 14665 case 256: 14666 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14667 LPFC_EQ_CNT_256); 14668 break; 14669 case 512: 14670 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14671 LPFC_EQ_CNT_512); 14672 break; 14673 case 1024: 14674 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14675 LPFC_EQ_CNT_1024); 14676 break; 14677 case 2048: 14678 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14679 LPFC_EQ_CNT_2048); 14680 break; 14681 case 4096: 14682 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14683 LPFC_EQ_CNT_4096); 14684 break; 14685 } 14686 list_for_each_entry(dmabuf, &eq->page_list, list) { 14687 memset(dmabuf->virt, 0, hw_page_size); 14688 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14689 putPaddrLow(dmabuf->phys); 14690 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14691 putPaddrHigh(dmabuf->phys); 14692 } 14693 mbox->vport = phba->pport; 14694 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14695 mbox->ctx_buf = NULL; 14696 mbox->ctx_ndlp = NULL; 14697 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14698 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14699 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14700 if (shdr_status || shdr_add_status || rc) { 14701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14702 "2500 EQ_CREATE mailbox failed with " 14703 "status x%x add_status x%x, mbx status x%x\n", 14704 shdr_status, shdr_add_status, rc); 14705 status = -ENXIO; 14706 } 14707 eq->type = LPFC_EQ; 14708 eq->subtype = LPFC_NONE; 14709 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14710 if (eq->queue_id == 0xFFFF) 14711 status = -ENXIO; 14712 eq->host_index = 0; 14713 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 14714 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 14715 14716 mempool_free(mbox, phba->mbox_mem_pool); 14717 return status; 14718 } 14719 14720 /** 14721 * lpfc_cq_create - Create a Completion Queue on the HBA 14722 * @phba: HBA structure that indicates port to create a queue on. 14723 * @cq: The queue structure to use to create the completion queue. 14724 * @eq: The event queue to bind this completion queue to. 14725 * 14726 * This function creates a completion queue, as detailed in @wq, on a port, 14727 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14728 * 14729 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14730 * is used to get the entry count and entry size that are necessary to 14731 * determine the number of pages to allocate and use for this queue. The @eq 14732 * is used to indicate which event queue to bind this completion queue to. This 14733 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14734 * completion queue. This function is asynchronous and will wait for the mailbox 14735 * command to finish before continuing. 14736 * 14737 * On success this function will return a zero. If unable to allocate enough 14738 * memory this function will return -ENOMEM. If the queue create mailbox command 14739 * fails this function will return -ENXIO. 
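 *
 * Illustrative call (not taken from this file):
 * lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP) would create a work
 * completion queue for FCP traffic bound to @eq.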
14740 **/ 14741 int 14742 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14743 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14744 { 14745 struct lpfc_mbx_cq_create *cq_create; 14746 struct lpfc_dmabuf *dmabuf; 14747 LPFC_MBOXQ_t *mbox; 14748 int rc, length, status = 0; 14749 uint32_t shdr_status, shdr_add_status; 14750 union lpfc_sli4_cfg_shdr *shdr; 14751 14752 /* sanity check on queue memory */ 14753 if (!cq || !eq) 14754 return -ENODEV; 14755 14756 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14757 if (!mbox) 14758 return -ENOMEM; 14759 length = (sizeof(struct lpfc_mbx_cq_create) - 14760 sizeof(struct lpfc_sli4_cfg_mhdr)); 14761 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14762 LPFC_MBOX_OPCODE_CQ_CREATE, 14763 length, LPFC_SLI4_MBX_EMBED); 14764 cq_create = &mbox->u.mqe.un.cq_create; 14765 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14766 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14767 cq->page_count); 14768 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14769 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14770 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14771 phba->sli4_hba.pc_sli4_params.cqv); 14772 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14773 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14774 (cq->page_size / SLI4_PAGE_SIZE)); 14775 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14776 eq->queue_id); 14777 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14778 phba->sli4_hba.pc_sli4_params.cqav); 14779 } else { 14780 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14781 eq->queue_id); 14782 } 14783 switch (cq->entry_count) { 14784 case 2048: 14785 case 4096: 14786 if (phba->sli4_hba.pc_sli4_params.cqv == 14787 LPFC_Q_CREATE_VERSION_2) { 14788 cq_create->u.request.context.lpfc_cq_context_count = 14789 cq->entry_count; 14790 bf_set(lpfc_cq_context_count, 14791 &cq_create->u.request.context, 14792 LPFC_CQ_CNT_WORD7); 14793 break; 14794 } 14795 /* fall through */ 14796 default: 14797 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14798 "0361 Unsupported CQ count: " 14799 "entry cnt %d sz %d pg cnt %d\n", 14800 cq->entry_count, cq->entry_size, 14801 cq->page_count); 14802 if (cq->entry_count < 256) { 14803 status = -EINVAL; 14804 goto out; 14805 } 14806 /* fall through - otherwise default to smallest count */ 14807 case 256: 14808 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14809 LPFC_CQ_CNT_256); 14810 break; 14811 case 512: 14812 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14813 LPFC_CQ_CNT_512); 14814 break; 14815 case 1024: 14816 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14817 LPFC_CQ_CNT_1024); 14818 break; 14819 } 14820 list_for_each_entry(dmabuf, &cq->page_list, list) { 14821 memset(dmabuf->virt, 0, cq->page_size); 14822 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14823 putPaddrLow(dmabuf->phys); 14824 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14825 putPaddrHigh(dmabuf->phys); 14826 } 14827 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14828 14829 /* The IOCTL status is embedded in the mailbox subheader. 
*/
14830 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14831 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14832 if (shdr_status || shdr_add_status || rc) {
14833 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14834 "2501 CQ_CREATE mailbox failed with "
14835 "status x%x add_status x%x, mbx status x%x\n",
14836 shdr_status, shdr_add_status, rc);
14837 status = -ENXIO;
14838 goto out;
14839 }
14840 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14841 if (cq->queue_id == 0xFFFF) {
14842 status = -ENXIO;
14843 goto out;
14844 }
14845 /* link the cq onto the parent eq child list */
14846 list_add_tail(&cq->list, &eq->child_list);
14847 /* Set up completion queue's type and subtype */
14848 cq->type = type;
14849 cq->subtype = subtype;
14850 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14851 cq->assoc_qid = eq->queue_id;
14852 cq->assoc_qp = eq;
14853 cq->host_index = 0;
14854 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14855 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14856 
14857 if (cq->queue_id > phba->sli4_hba.cq_max)
14858 phba->sli4_hba.cq_max = cq->queue_id;
14859 out:
14860 mempool_free(mbox, phba->mbox_mem_pool);
14861 return status;
14862 }
14863 
14864 /**
14865 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14866 * @phba: HBA structure that indicates port to create a queue on.
14867 * @cqp: The queue structure array to use to create the completion queues.
14868 * @hdwq: The hardware queue array with the EQs to bind the completion queues to.
 * @type: The completion queue type.
 * @subtype: The functional subtype of the completion queues.
14869 *
14870 * This function creates a set of completion queues to support MRQ,
14871 * as detailed in @cqp, on a port described by @phba, by sending a
14872 * CREATE_CQ_SET mailbox command to the HBA.
14873 *
14874 * The @phba struct is used to send mailbox command to HBA. The @cqp array
14875 * is used to get the entry count and entry size that are necessary to
14876 * determine the number of pages to allocate and use for each queue. The @hdwq
14877 * array is used to indicate which event queue each completion queue is bound
14878 * to. This function will send the CREATE_CQ_SET mailbox command to the HBA to
14879 * setup the completion queues. The mailbox command is issued in polled mode,
14880 * so this function waits for it to finish before continuing.
14881 *
14882 * On success this function will return a zero. If unable to allocate enough
14883 * memory this function will return -ENOMEM. If the queue create mailbox command
14884 * fails this function will return -ENXIO.
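 *
 * The number of queues created is phba->cfg_nvmet_mrq; the CQ at index i in
 * @cqp is bound to the EQ of hdwq[i].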
14885 **/ 14886 int 14887 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14888 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 14889 uint32_t subtype) 14890 { 14891 struct lpfc_queue *cq; 14892 struct lpfc_queue *eq; 14893 struct lpfc_mbx_cq_create_set *cq_set; 14894 struct lpfc_dmabuf *dmabuf; 14895 LPFC_MBOXQ_t *mbox; 14896 int rc, length, alloclen, status = 0; 14897 int cnt, idx, numcq, page_idx = 0; 14898 uint32_t shdr_status, shdr_add_status; 14899 union lpfc_sli4_cfg_shdr *shdr; 14900 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14901 14902 /* sanity check on queue memory */ 14903 numcq = phba->cfg_nvmet_mrq; 14904 if (!cqp || !hdwq || !numcq) 14905 return -ENODEV; 14906 14907 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14908 if (!mbox) 14909 return -ENOMEM; 14910 14911 length = sizeof(struct lpfc_mbx_cq_create_set); 14912 length += ((numcq * cqp[0]->page_count) * 14913 sizeof(struct dma_address)); 14914 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14915 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14916 LPFC_SLI4_MBX_NEMBED); 14917 if (alloclen < length) { 14918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14919 "3098 Allocated DMA memory size (%d) is " 14920 "less than the requested DMA memory size " 14921 "(%d)\n", alloclen, length); 14922 status = -ENOMEM; 14923 goto out; 14924 } 14925 cq_set = mbox->sge_array->addr[0]; 14926 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14927 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14928 14929 for (idx = 0; idx < numcq; idx++) { 14930 cq = cqp[idx]; 14931 eq = hdwq[idx].hba_eq; 14932 if (!cq || !eq) { 14933 status = -ENOMEM; 14934 goto out; 14935 } 14936 if (!phba->sli4_hba.pc_sli4_params.supported) 14937 hw_page_size = cq->page_size; 14938 14939 switch (idx) { 14940 case 0: 14941 bf_set(lpfc_mbx_cq_create_set_page_size, 14942 &cq_set->u.request, 14943 (hw_page_size / SLI4_PAGE_SIZE)); 14944 bf_set(lpfc_mbx_cq_create_set_num_pages, 14945 &cq_set->u.request, cq->page_count); 14946 bf_set(lpfc_mbx_cq_create_set_evt, 14947 &cq_set->u.request, 1); 14948 bf_set(lpfc_mbx_cq_create_set_valid, 14949 &cq_set->u.request, 1); 14950 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14951 &cq_set->u.request, 0); 14952 bf_set(lpfc_mbx_cq_create_set_num_cq, 14953 &cq_set->u.request, numcq); 14954 bf_set(lpfc_mbx_cq_create_set_autovalid, 14955 &cq_set->u.request, 14956 phba->sli4_hba.pc_sli4_params.cqav); 14957 switch (cq->entry_count) { 14958 case 2048: 14959 case 4096: 14960 if (phba->sli4_hba.pc_sli4_params.cqv == 14961 LPFC_Q_CREATE_VERSION_2) { 14962 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14963 &cq_set->u.request, 14964 cq->entry_count); 14965 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14966 &cq_set->u.request, 14967 LPFC_CQ_CNT_WORD7); 14968 break; 14969 } 14970 /* fall through */ 14971 default: 14972 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14973 "3118 Bad CQ count. 
(%d)\n", 14974 cq->entry_count); 14975 if (cq->entry_count < 256) { 14976 status = -EINVAL; 14977 goto out; 14978 } 14979 /* fall through - otherwise default to smallest */ 14980 case 256: 14981 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14982 &cq_set->u.request, LPFC_CQ_CNT_256); 14983 break; 14984 case 512: 14985 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14986 &cq_set->u.request, LPFC_CQ_CNT_512); 14987 break; 14988 case 1024: 14989 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14990 &cq_set->u.request, LPFC_CQ_CNT_1024); 14991 break; 14992 } 14993 bf_set(lpfc_mbx_cq_create_set_eq_id0, 14994 &cq_set->u.request, eq->queue_id); 14995 break; 14996 case 1: 14997 bf_set(lpfc_mbx_cq_create_set_eq_id1, 14998 &cq_set->u.request, eq->queue_id); 14999 break; 15000 case 2: 15001 bf_set(lpfc_mbx_cq_create_set_eq_id2, 15002 &cq_set->u.request, eq->queue_id); 15003 break; 15004 case 3: 15005 bf_set(lpfc_mbx_cq_create_set_eq_id3, 15006 &cq_set->u.request, eq->queue_id); 15007 break; 15008 case 4: 15009 bf_set(lpfc_mbx_cq_create_set_eq_id4, 15010 &cq_set->u.request, eq->queue_id); 15011 break; 15012 case 5: 15013 bf_set(lpfc_mbx_cq_create_set_eq_id5, 15014 &cq_set->u.request, eq->queue_id); 15015 break; 15016 case 6: 15017 bf_set(lpfc_mbx_cq_create_set_eq_id6, 15018 &cq_set->u.request, eq->queue_id); 15019 break; 15020 case 7: 15021 bf_set(lpfc_mbx_cq_create_set_eq_id7, 15022 &cq_set->u.request, eq->queue_id); 15023 break; 15024 case 8: 15025 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15026 &cq_set->u.request, eq->queue_id); 15027 break; 15028 case 9: 15029 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15030 &cq_set->u.request, eq->queue_id); 15031 break; 15032 case 10: 15033 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15034 &cq_set->u.request, eq->queue_id); 15035 break; 15036 case 11: 15037 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15038 &cq_set->u.request, eq->queue_id); 15039 break; 15040 case 12: 15041 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15042 &cq_set->u.request, eq->queue_id); 15043 break; 15044 case 13: 15045 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15046 &cq_set->u.request, eq->queue_id); 15047 break; 15048 case 14: 15049 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15050 &cq_set->u.request, eq->queue_id); 15051 break; 15052 case 15: 15053 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15054 &cq_set->u.request, eq->queue_id); 15055 break; 15056 } 15057 15058 /* link the cq onto the parent eq child list */ 15059 list_add_tail(&cq->list, &eq->child_list); 15060 /* Set up completion queue's type and subtype */ 15061 cq->type = type; 15062 cq->subtype = subtype; 15063 cq->assoc_qid = eq->queue_id; 15064 cq->assoc_qp = eq; 15065 cq->host_index = 0; 15066 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 15067 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 15068 cq->entry_count); 15069 cq->chann = idx; 15070 15071 rc = 0; 15072 list_for_each_entry(dmabuf, &cq->page_list, list) { 15073 memset(dmabuf->virt, 0, hw_page_size); 15074 cnt = page_idx + dmabuf->buffer_tag; 15075 cq_set->u.request.page[cnt].addr_lo = 15076 putPaddrLow(dmabuf->phys); 15077 cq_set->u.request.page[cnt].addr_hi = 15078 putPaddrHigh(dmabuf->phys); 15079 rc++; 15080 } 15081 page_idx += rc; 15082 } 15083 15084 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15085 15086 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15087 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15088 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15089 if (shdr_status || shdr_add_status || rc) { 15090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15091 "3119 CQ_CREATE_SET mailbox failed with " 15092 "status x%x add_status x%x, mbx status x%x\n", 15093 shdr_status, shdr_add_status, rc); 15094 status = -ENXIO; 15095 goto out; 15096 } 15097 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 15098 if (rc == 0xFFFF) { 15099 status = -ENXIO; 15100 goto out; 15101 } 15102 15103 for (idx = 0; idx < numcq; idx++) { 15104 cq = cqp[idx]; 15105 cq->queue_id = rc + idx; 15106 if (cq->queue_id > phba->sli4_hba.cq_max) 15107 phba->sli4_hba.cq_max = cq->queue_id; 15108 } 15109 15110 out: 15111 lpfc_sli4_mbox_cmd_free(phba, mbox); 15112 return status; 15113 } 15114 15115 /** 15116 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 15117 * @phba: HBA structure that indicates port to create a queue on. 15118 * @mq: The queue structure to use to create the mailbox queue. 15119 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 15120 * @cq: The completion queue to associate with this cq. 15121 * 15122 * This function provides failback (fb) functionality when the 15123 * mq_create_ext fails on older FW generations. It's purpose is identical 15124 * to mq_create_ext otherwise. 15125 * 15126 * This routine cannot fail as all attributes were previously accessed and 15127 * initialized in mq_create_ext. 15128 **/ 15129 static void 15130 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 15131 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 15132 { 15133 struct lpfc_mbx_mq_create *mq_create; 15134 struct lpfc_dmabuf *dmabuf; 15135 int length; 15136 15137 length = (sizeof(struct lpfc_mbx_mq_create) - 15138 sizeof(struct lpfc_sli4_cfg_mhdr)); 15139 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15140 LPFC_MBOX_OPCODE_MQ_CREATE, 15141 length, LPFC_SLI4_MBX_EMBED); 15142 mq_create = &mbox->u.mqe.un.mq_create; 15143 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 15144 mq->page_count); 15145 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 15146 cq->queue_id); 15147 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 15148 switch (mq->entry_count) { 15149 case 16: 15150 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15151 LPFC_MQ_RING_SIZE_16); 15152 break; 15153 case 32: 15154 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15155 LPFC_MQ_RING_SIZE_32); 15156 break; 15157 case 64: 15158 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15159 LPFC_MQ_RING_SIZE_64); 15160 break; 15161 case 128: 15162 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15163 LPFC_MQ_RING_SIZE_128); 15164 break; 15165 } 15166 list_for_each_entry(dmabuf, &mq->page_list, list) { 15167 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15168 putPaddrLow(dmabuf->phys); 15169 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15170 putPaddrHigh(dmabuf->phys); 15171 } 15172 } 15173 15174 /** 15175 * lpfc_mq_create - Create a mailbox Queue on the HBA 15176 * @phba: HBA structure that indicates port to create a queue on. 15177 * @mq: The queue structure to use to create the mailbox queue. 15178 * @cq: The completion queue to associate with this cq. 15179 * @subtype: The queue's subtype. 
15180 * 15181 * This function creates a mailbox queue, as detailed in @mq, on a port, 15182 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 15183 * 15184 * The @phba struct is used to send mailbox command to HBA. The @cq struct 15185 * is used to get the entry count and entry size that are necessary to 15186 * determine the number of pages to allocate and use for this queue. This 15187 * function will send the MQ_CREATE mailbox command to the HBA to setup the 15188 * mailbox queue. This function is asynchronous and will wait for the mailbox 15189 * command to finish before continuing. 15190 * 15191 * On success this function will return a zero. If unable to allocate enough 15192 * memory this function will return -ENOMEM. If the queue create mailbox command 15193 * fails this function will return -ENXIO. 15194 **/ 15195 int32_t 15196 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 15197 struct lpfc_queue *cq, uint32_t subtype) 15198 { 15199 struct lpfc_mbx_mq_create *mq_create; 15200 struct lpfc_mbx_mq_create_ext *mq_create_ext; 15201 struct lpfc_dmabuf *dmabuf; 15202 LPFC_MBOXQ_t *mbox; 15203 int rc, length, status = 0; 15204 uint32_t shdr_status, shdr_add_status; 15205 union lpfc_sli4_cfg_shdr *shdr; 15206 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15207 15208 /* sanity check on queue memory */ 15209 if (!mq || !cq) 15210 return -ENODEV; 15211 if (!phba->sli4_hba.pc_sli4_params.supported) 15212 hw_page_size = SLI4_PAGE_SIZE; 15213 15214 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15215 if (!mbox) 15216 return -ENOMEM; 15217 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 15218 sizeof(struct lpfc_sli4_cfg_mhdr)); 15219 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15220 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15221 length, LPFC_SLI4_MBX_EMBED); 15222 15223 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15224 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15225 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15226 &mq_create_ext->u.request, mq->page_count); 15227 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15228 &mq_create_ext->u.request, 1); 15229 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15230 &mq_create_ext->u.request, 1); 15231 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15232 &mq_create_ext->u.request, 1); 15233 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15234 &mq_create_ext->u.request, 1); 15235 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15236 &mq_create_ext->u.request, 1); 15237 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15238 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15239 phba->sli4_hba.pc_sli4_params.mqv); 15240 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15241 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15242 cq->queue_id); 15243 else 15244 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15245 cq->queue_id); 15246 switch (mq->entry_count) { 15247 default: 15248 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15249 "0362 Unsupported MQ count. 
(%d)\n", 15250 mq->entry_count); 15251 if (mq->entry_count < 16) { 15252 status = -EINVAL; 15253 goto out; 15254 } 15255 /* fall through - otherwise default to smallest count */ 15256 case 16: 15257 bf_set(lpfc_mq_context_ring_size, 15258 &mq_create_ext->u.request.context, 15259 LPFC_MQ_RING_SIZE_16); 15260 break; 15261 case 32: 15262 bf_set(lpfc_mq_context_ring_size, 15263 &mq_create_ext->u.request.context, 15264 LPFC_MQ_RING_SIZE_32); 15265 break; 15266 case 64: 15267 bf_set(lpfc_mq_context_ring_size, 15268 &mq_create_ext->u.request.context, 15269 LPFC_MQ_RING_SIZE_64); 15270 break; 15271 case 128: 15272 bf_set(lpfc_mq_context_ring_size, 15273 &mq_create_ext->u.request.context, 15274 LPFC_MQ_RING_SIZE_128); 15275 break; 15276 } 15277 list_for_each_entry(dmabuf, &mq->page_list, list) { 15278 memset(dmabuf->virt, 0, hw_page_size); 15279 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15280 putPaddrLow(dmabuf->phys); 15281 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15282 putPaddrHigh(dmabuf->phys); 15283 } 15284 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15285 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15286 &mq_create_ext->u.response); 15287 if (rc != MBX_SUCCESS) { 15288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15289 "2795 MQ_CREATE_EXT failed with " 15290 "status x%x. Failback to MQ_CREATE.\n", 15291 rc); 15292 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15293 mq_create = &mbox->u.mqe.un.mq_create; 15294 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15295 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15296 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15297 &mq_create->u.response); 15298 } 15299 15300 /* The IOCTL status is embedded in the mailbox subheader. */ 15301 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15302 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15303 if (shdr_status || shdr_add_status || rc) { 15304 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15305 "2502 MQ_CREATE mailbox failed with " 15306 "status x%x add_status x%x, mbx status x%x\n", 15307 shdr_status, shdr_add_status, rc); 15308 status = -ENXIO; 15309 goto out; 15310 } 15311 if (mq->queue_id == 0xFFFF) { 15312 status = -ENXIO; 15313 goto out; 15314 } 15315 mq->type = LPFC_MQ; 15316 mq->assoc_qid = cq->queue_id; 15317 mq->subtype = subtype; 15318 mq->host_index = 0; 15319 mq->hba_index = 0; 15320 15321 /* link the mq onto the parent cq child list */ 15322 list_add_tail(&mq->list, &cq->child_list); 15323 out: 15324 mempool_free(mbox, phba->mbox_mem_pool); 15325 return status; 15326 } 15327 15328 /** 15329 * lpfc_wq_create - Create a Work Queue on the HBA 15330 * @phba: HBA structure that indicates port to create a queue on. 15331 * @wq: The queue structure to use to create the work queue. 15332 * @cq: The completion queue to bind this work queue to. 15333 * @subtype: The subtype of the work queue indicating its functionality. 15334 * 15335 * This function creates a work queue, as detailed in @wq, on a port, described 15336 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15337 * 15338 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15339 * is used to get the entry count and entry size that are necessary to 15340 * determine the number of pages to allocate and use for this queue. The @cq 15341 * is used to indicate which completion queue to bind this work queue to. This 15342 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15343 * work queue. 
This function is asynchronous and will wait for the mailbox 15344 * command to finish before continuing. 15345 * 15346 * On success this function will return a zero. If unable to allocate enough 15347 * memory this function will return -ENOMEM. If the queue create mailbox command 15348 * fails this function will return -ENXIO. 15349 **/ 15350 int 15351 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 15352 struct lpfc_queue *cq, uint32_t subtype) 15353 { 15354 struct lpfc_mbx_wq_create *wq_create; 15355 struct lpfc_dmabuf *dmabuf; 15356 LPFC_MBOXQ_t *mbox; 15357 int rc, length, status = 0; 15358 uint32_t shdr_status, shdr_add_status; 15359 union lpfc_sli4_cfg_shdr *shdr; 15360 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15361 struct dma_address *page; 15362 void __iomem *bar_memmap_p; 15363 uint32_t db_offset; 15364 uint16_t pci_barset; 15365 uint8_t dpp_barset; 15366 uint32_t dpp_offset; 15367 unsigned long pg_addr; 15368 uint8_t wq_create_version; 15369 15370 /* sanity check on queue memory */ 15371 if (!wq || !cq) 15372 return -ENODEV; 15373 if (!phba->sli4_hba.pc_sli4_params.supported) 15374 hw_page_size = wq->page_size; 15375 15376 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15377 if (!mbox) 15378 return -ENOMEM; 15379 length = (sizeof(struct lpfc_mbx_wq_create) - 15380 sizeof(struct lpfc_sli4_cfg_mhdr)); 15381 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15382 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 15383 length, LPFC_SLI4_MBX_EMBED); 15384 wq_create = &mbox->u.mqe.un.wq_create; 15385 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 15386 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 15387 wq->page_count); 15388 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 15389 cq->queue_id); 15390 15391 /* wqv is the earliest version supported, NOT the latest */ 15392 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15393 phba->sli4_hba.pc_sli4_params.wqv); 15394 15395 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 15396 (wq->page_size > SLI4_PAGE_SIZE)) 15397 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15398 else 15399 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15400 15401 15402 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 15403 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15404 else 15405 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15406 15407 switch (wq_create_version) { 15408 case LPFC_Q_CREATE_VERSION_1: 15409 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 15410 wq->entry_count); 15411 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15412 LPFC_Q_CREATE_VERSION_1); 15413 15414 switch (wq->entry_size) { 15415 default: 15416 case 64: 15417 bf_set(lpfc_mbx_wq_create_wqe_size, 15418 &wq_create->u.request_1, 15419 LPFC_WQ_WQE_SIZE_64); 15420 break; 15421 case 128: 15422 bf_set(lpfc_mbx_wq_create_wqe_size, 15423 &wq_create->u.request_1, 15424 LPFC_WQ_WQE_SIZE_128); 15425 break; 15426 } 15427 /* Request DPP by default */ 15428 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 15429 bf_set(lpfc_mbx_wq_create_page_size, 15430 &wq_create->u.request_1, 15431 (wq->page_size / SLI4_PAGE_SIZE)); 15432 page = wq_create->u.request_1.page; 15433 break; 15434 default: 15435 page = wq_create->u.request.page; 15436 break; 15437 } 15438 15439 list_for_each_entry(dmabuf, &wq->page_list, list) { 15440 memset(dmabuf->virt, 0, hw_page_size); 15441 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 15442 page[dmabuf->buffer_tag].addr_hi = 
putPaddrHigh(dmabuf->phys); 15443 } 15444 15445 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15446 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15447 15448 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15449 /* The IOCTL status is embedded in the mailbox subheader. */ 15450 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15451 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15452 if (shdr_status || shdr_add_status || rc) { 15453 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15454 "2503 WQ_CREATE mailbox failed with " 15455 "status x%x add_status x%x, mbx status x%x\n", 15456 shdr_status, shdr_add_status, rc); 15457 status = -ENXIO; 15458 goto out; 15459 } 15460 15461 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15462 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15463 &wq_create->u.response); 15464 else 15465 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15466 &wq_create->u.response_1); 15467 15468 if (wq->queue_id == 0xFFFF) { 15469 status = -ENXIO; 15470 goto out; 15471 } 15472 15473 wq->db_format = LPFC_DB_LIST_FORMAT; 15474 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15475 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15476 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15477 &wq_create->u.response); 15478 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15479 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15481 "3265 WQ[%d] doorbell format " 15482 "not supported: x%x\n", 15483 wq->queue_id, wq->db_format); 15484 status = -EINVAL; 15485 goto out; 15486 } 15487 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15488 &wq_create->u.response); 15489 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15490 pci_barset); 15491 if (!bar_memmap_p) { 15492 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15493 "3263 WQ[%d] failed to memmap " 15494 "pci barset:x%x\n", 15495 wq->queue_id, pci_barset); 15496 status = -ENOMEM; 15497 goto out; 15498 } 15499 db_offset = wq_create->u.response.doorbell_offset; 15500 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15501 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15503 "3252 WQ[%d] doorbell offset " 15504 "not supported: x%x\n", 15505 wq->queue_id, db_offset); 15506 status = -EINVAL; 15507 goto out; 15508 } 15509 wq->db_regaddr = bar_memmap_p + db_offset; 15510 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15511 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15512 "format:x%x\n", wq->queue_id, 15513 pci_barset, db_offset, wq->db_format); 15514 } else 15515 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15516 } else { 15517 /* Check if DPP was honored by the firmware */ 15518 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15519 &wq_create->u.response_1); 15520 if (wq->dpp_enable) { 15521 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15522 &wq_create->u.response_1); 15523 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15524 pci_barset); 15525 if (!bar_memmap_p) { 15526 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15527 "3267 WQ[%d] failed to memmap " 15528 "pci barset:x%x\n", 15529 wq->queue_id, pci_barset); 15530 status = -ENOMEM; 15531 goto out; 15532 } 15533 db_offset = wq_create->u.response_1.doorbell_offset; 15534 wq->db_regaddr = bar_memmap_p + db_offset; 15535 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15536 &wq_create->u.response_1); 15537 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15538 &wq_create->u.response_1); 15539 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15540 dpp_barset); 15541 if 
(!bar_memmap_p) { 15542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15543 "3268 WQ[%d] failed to memmap " 15544 "pci barset:x%x\n", 15545 wq->queue_id, dpp_barset); 15546 status = -ENOMEM; 15547 goto out; 15548 } 15549 dpp_offset = wq_create->u.response_1.dpp_offset; 15550 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15551 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15552 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15553 "dpp_id:x%x dpp_barset:x%x " 15554 "dpp_offset:x%x\n", 15555 wq->queue_id, pci_barset, db_offset, 15556 wq->dpp_id, dpp_barset, dpp_offset); 15557 15558 /* Enable combined writes for DPP aperture */ 15559 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15560 #ifdef CONFIG_X86 15561 rc = set_memory_wc(pg_addr, 1); 15562 if (rc) { 15563 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15564 "3272 Cannot setup Combined " 15565 "Write on WQ[%d] - disable DPP\n", 15566 wq->queue_id); 15567 phba->cfg_enable_dpp = 0; 15568 } 15569 #else 15570 phba->cfg_enable_dpp = 0; 15571 #endif 15572 } else 15573 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15574 } 15575 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15576 if (wq->pring == NULL) { 15577 status = -ENOMEM; 15578 goto out; 15579 } 15580 wq->type = LPFC_WQ; 15581 wq->assoc_qid = cq->queue_id; 15582 wq->subtype = subtype; 15583 wq->host_index = 0; 15584 wq->hba_index = 0; 15585 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; 15586 15587 /* link the wq onto the parent cq child list */ 15588 list_add_tail(&wq->list, &cq->child_list); 15589 out: 15590 mempool_free(mbox, phba->mbox_mem_pool); 15591 return status; 15592 } 15593 15594 /** 15595 * lpfc_rq_create - Create a Receive Queue on the HBA 15596 * @phba: HBA structure that indicates port to create a queue on. 15597 * @hrq: The queue structure to use to create the header receive queue. 15598 * @drq: The queue structure to use to create the data receive queue. 15599 * @cq: The completion queue to bind this work queue to. 15600 * 15601 * This function creates a receive buffer queue pair , as detailed in @hrq and 15602 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15603 * to the HBA. 15604 * 15605 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15606 * struct is used to get the entry count that is necessary to determine the 15607 * number of pages to use for this queue. The @cq is used to indicate which 15608 * completion queue to bind received buffers that are posted to these queues to. 15609 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15610 * receive queue pair. This function is asynchronous and will wait for the 15611 * mailbox command to finish before continuing. 15612 * 15613 * On success this function will return a zero. If unable to allocate enough 15614 * memory this function will return -ENOMEM. If the queue create mailbox command 15615 * fails this function will return -ENXIO. 
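 *
 * A minimal caller sketch (hypothetical queue pointers and error label;
 * LPFC_USOL is assumed to be the subtype used for the unsolicited ELS
 * receive queue pair):
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy_els_cq;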
15616 **/ 15617 int 15618 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15619 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15620 { 15621 struct lpfc_mbx_rq_create *rq_create; 15622 struct lpfc_dmabuf *dmabuf; 15623 LPFC_MBOXQ_t *mbox; 15624 int rc, length, status = 0; 15625 uint32_t shdr_status, shdr_add_status; 15626 union lpfc_sli4_cfg_shdr *shdr; 15627 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15628 void __iomem *bar_memmap_p; 15629 uint32_t db_offset; 15630 uint16_t pci_barset; 15631 15632 /* sanity check on queue memory */ 15633 if (!hrq || !drq || !cq) 15634 return -ENODEV; 15635 if (!phba->sli4_hba.pc_sli4_params.supported) 15636 hw_page_size = SLI4_PAGE_SIZE; 15637 15638 if (hrq->entry_count != drq->entry_count) 15639 return -EINVAL; 15640 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15641 if (!mbox) 15642 return -ENOMEM; 15643 length = (sizeof(struct lpfc_mbx_rq_create) - 15644 sizeof(struct lpfc_sli4_cfg_mhdr)); 15645 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15646 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15647 length, LPFC_SLI4_MBX_EMBED); 15648 rq_create = &mbox->u.mqe.un.rq_create; 15649 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15650 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15651 phba->sli4_hba.pc_sli4_params.rqv); 15652 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15653 bf_set(lpfc_rq_context_rqe_count_1, 15654 &rq_create->u.request.context, 15655 hrq->entry_count); 15656 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15657 bf_set(lpfc_rq_context_rqe_size, 15658 &rq_create->u.request.context, 15659 LPFC_RQE_SIZE_8); 15660 bf_set(lpfc_rq_context_page_size, 15661 &rq_create->u.request.context, 15662 LPFC_RQ_PAGE_SIZE_4096); 15663 } else { 15664 switch (hrq->entry_count) { 15665 default: 15666 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15667 "2535 Unsupported RQ count. (%d)\n", 15668 hrq->entry_count); 15669 if (hrq->entry_count < 512) { 15670 status = -EINVAL; 15671 goto out; 15672 } 15673 /* fall through - otherwise default to smallest count */ 15674 case 512: 15675 bf_set(lpfc_rq_context_rqe_count, 15676 &rq_create->u.request.context, 15677 LPFC_RQ_RING_SIZE_512); 15678 break; 15679 case 1024: 15680 bf_set(lpfc_rq_context_rqe_count, 15681 &rq_create->u.request.context, 15682 LPFC_RQ_RING_SIZE_1024); 15683 break; 15684 case 2048: 15685 bf_set(lpfc_rq_context_rqe_count, 15686 &rq_create->u.request.context, 15687 LPFC_RQ_RING_SIZE_2048); 15688 break; 15689 case 4096: 15690 bf_set(lpfc_rq_context_rqe_count, 15691 &rq_create->u.request.context, 15692 LPFC_RQ_RING_SIZE_4096); 15693 break; 15694 } 15695 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15696 LPFC_HDR_BUF_SIZE); 15697 } 15698 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15699 cq->queue_id); 15700 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15701 hrq->page_count); 15702 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15703 memset(dmabuf->virt, 0, hw_page_size); 15704 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15705 putPaddrLow(dmabuf->phys); 15706 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15707 putPaddrHigh(dmabuf->phys); 15708 } 15709 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15710 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15711 15712 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15713 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15714 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15715 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15716 if (shdr_status || shdr_add_status || rc) { 15717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15718 "2504 RQ_CREATE mailbox failed with " 15719 "status x%x add_status x%x, mbx status x%x\n", 15720 shdr_status, shdr_add_status, rc); 15721 status = -ENXIO; 15722 goto out; 15723 } 15724 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15725 if (hrq->queue_id == 0xFFFF) { 15726 status = -ENXIO; 15727 goto out; 15728 } 15729 15730 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15731 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15732 &rq_create->u.response); 15733 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15734 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15736 "3262 RQ [%d] doorbell format not " 15737 "supported: x%x\n", hrq->queue_id, 15738 hrq->db_format); 15739 status = -EINVAL; 15740 goto out; 15741 } 15742 15743 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15744 &rq_create->u.response); 15745 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15746 if (!bar_memmap_p) { 15747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15748 "3269 RQ[%d] failed to memmap pci " 15749 "barset:x%x\n", hrq->queue_id, 15750 pci_barset); 15751 status = -ENOMEM; 15752 goto out; 15753 } 15754 15755 db_offset = rq_create->u.response.doorbell_offset; 15756 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15757 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15758 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15759 "3270 RQ[%d] doorbell offset not " 15760 "supported: x%x\n", hrq->queue_id, 15761 db_offset); 15762 status = -EINVAL; 15763 goto out; 15764 } 15765 hrq->db_regaddr = bar_memmap_p + db_offset; 15766 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15767 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15768 "format:x%x\n", hrq->queue_id, pci_barset, 15769 db_offset, hrq->db_format); 15770 } else { 15771 hrq->db_format = LPFC_DB_RING_FORMAT; 15772 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15773 } 15774 hrq->type = LPFC_HRQ; 15775 hrq->assoc_qid = cq->queue_id; 15776 hrq->subtype = subtype; 15777 hrq->host_index = 0; 15778 hrq->hba_index = 0; 15779 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15780 15781 /* now create the data queue */ 15782 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15783 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15784 length, LPFC_SLI4_MBX_EMBED); 15785 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15786 phba->sli4_hba.pc_sli4_params.rqv); 15787 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15788 bf_set(lpfc_rq_context_rqe_count_1, 15789 &rq_create->u.request.context, hrq->entry_count); 15790 if (subtype == LPFC_NVMET) 15791 rq_create->u.request.context.buffer_size = 15792 LPFC_NVMET_DATA_BUF_SIZE; 15793 else 15794 rq_create->u.request.context.buffer_size = 15795 LPFC_DATA_BUF_SIZE; 15796 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15797 LPFC_RQE_SIZE_8); 15798 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15799 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15800 } else { 15801 switch (drq->entry_count) { 15802 default: 15803 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15804 "2536 Unsupported RQ count. 
(%d)\n", 15805 drq->entry_count); 15806 if (drq->entry_count < 512) { 15807 status = -EINVAL; 15808 goto out; 15809 } 15810 /* fall through - otherwise default to smallest count */ 15811 case 512: 15812 bf_set(lpfc_rq_context_rqe_count, 15813 &rq_create->u.request.context, 15814 LPFC_RQ_RING_SIZE_512); 15815 break; 15816 case 1024: 15817 bf_set(lpfc_rq_context_rqe_count, 15818 &rq_create->u.request.context, 15819 LPFC_RQ_RING_SIZE_1024); 15820 break; 15821 case 2048: 15822 bf_set(lpfc_rq_context_rqe_count, 15823 &rq_create->u.request.context, 15824 LPFC_RQ_RING_SIZE_2048); 15825 break; 15826 case 4096: 15827 bf_set(lpfc_rq_context_rqe_count, 15828 &rq_create->u.request.context, 15829 LPFC_RQ_RING_SIZE_4096); 15830 break; 15831 } 15832 if (subtype == LPFC_NVMET) 15833 bf_set(lpfc_rq_context_buf_size, 15834 &rq_create->u.request.context, 15835 LPFC_NVMET_DATA_BUF_SIZE); 15836 else 15837 bf_set(lpfc_rq_context_buf_size, 15838 &rq_create->u.request.context, 15839 LPFC_DATA_BUF_SIZE); 15840 } 15841 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15842 cq->queue_id); 15843 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15844 drq->page_count); 15845 list_for_each_entry(dmabuf, &drq->page_list, list) { 15846 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15847 putPaddrLow(dmabuf->phys); 15848 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15849 putPaddrHigh(dmabuf->phys); 15850 } 15851 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15852 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15853 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15854 /* The IOCTL status is embedded in the mailbox subheader. */ 15855 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15856 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15857 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15858 if (shdr_status || shdr_add_status || rc) { 15859 status = -ENXIO; 15860 goto out; 15861 } 15862 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15863 if (drq->queue_id == 0xFFFF) { 15864 status = -ENXIO; 15865 goto out; 15866 } 15867 drq->type = LPFC_DRQ; 15868 drq->assoc_qid = cq->queue_id; 15869 drq->subtype = subtype; 15870 drq->host_index = 0; 15871 drq->hba_index = 0; 15872 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15873 15874 /* link the header and data RQs onto the parent cq child list */ 15875 list_add_tail(&hrq->list, &cq->child_list); 15876 list_add_tail(&drq->list, &cq->child_list); 15877 15878 out: 15879 mempool_free(mbox, phba->mbox_mem_pool); 15880 return status; 15881 } 15882 15883 /** 15884 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15885 * @phba: HBA structure that indicates port to create a queue on. 15886 * @hrqp: The queue structure array to use to create the header receive queues. 15887 * @drqp: The queue structure array to use to create the data receive queues. 15888 * @cqp: The completion queue array to bind these receive queues to. 15889 * 15890 * This function creates a receive buffer queue pair , as detailed in @hrq and 15891 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15892 * to the HBA. 15893 * 15894 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15895 * struct is used to get the entry count that is necessary to determine the 15896 * number of pages to use for this queue. The @cq is used to indicate which 15897 * completion queue to bind received buffers that are posted to these queues to. 
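 * As an illustrative sketch only (hypothetical array names; each array is
 * assumed to hold phba->cfg_nvmet_mrq entries and the subtype is typically
 * LPFC_NVMET):
 *
 *	rc = lpfc_mrq_create(phba, hdr_rqs, dat_rqs, nvmet_cqs, LPFC_NVMET);
 *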
15898 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15899 * receive queue pair. This function is asynchronous and will wait for the 15900 * mailbox command to finish before continuing. 15901 * 15902 * On success this function will return a zero. If unable to allocate enough 15903 * memory this function will return -ENOMEM. If the queue create mailbox command 15904 * fails this function will return -ENXIO. 15905 **/ 15906 int 15907 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15908 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15909 uint32_t subtype) 15910 { 15911 struct lpfc_queue *hrq, *drq, *cq; 15912 struct lpfc_mbx_rq_create_v2 *rq_create; 15913 struct lpfc_dmabuf *dmabuf; 15914 LPFC_MBOXQ_t *mbox; 15915 int rc, length, alloclen, status = 0; 15916 int cnt, idx, numrq, page_idx = 0; 15917 uint32_t shdr_status, shdr_add_status; 15918 union lpfc_sli4_cfg_shdr *shdr; 15919 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15920 15921 numrq = phba->cfg_nvmet_mrq; 15922 /* sanity check on array memory */ 15923 if (!hrqp || !drqp || !cqp || !numrq) 15924 return -ENODEV; 15925 if (!phba->sli4_hba.pc_sli4_params.supported) 15926 hw_page_size = SLI4_PAGE_SIZE; 15927 15928 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15929 if (!mbox) 15930 return -ENOMEM; 15931 15932 length = sizeof(struct lpfc_mbx_rq_create_v2); 15933 length += ((2 * numrq * hrqp[0]->page_count) * 15934 sizeof(struct dma_address)); 15935 15936 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15937 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15938 LPFC_SLI4_MBX_NEMBED); 15939 if (alloclen < length) { 15940 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15941 "3099 Allocated DMA memory size (%d) is " 15942 "less than the requested DMA memory size " 15943 "(%d)\n", alloclen, length); 15944 status = -ENOMEM; 15945 goto out; 15946 } 15947 15948 15949 15950 rq_create = mbox->sge_array->addr[0]; 15951 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15952 15953 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15954 cnt = 0; 15955 15956 for (idx = 0; idx < numrq; idx++) { 15957 hrq = hrqp[idx]; 15958 drq = drqp[idx]; 15959 cq = cqp[idx]; 15960 15961 /* sanity check on queue memory */ 15962 if (!hrq || !drq || !cq) { 15963 status = -ENODEV; 15964 goto out; 15965 } 15966 15967 if (hrq->entry_count != drq->entry_count) { 15968 status = -EINVAL; 15969 goto out; 15970 } 15971 15972 if (idx == 0) { 15973 bf_set(lpfc_mbx_rq_create_num_pages, 15974 &rq_create->u.request, 15975 hrq->page_count); 15976 bf_set(lpfc_mbx_rq_create_rq_cnt, 15977 &rq_create->u.request, (numrq * 2)); 15978 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15979 1); 15980 bf_set(lpfc_rq_context_base_cq, 15981 &rq_create->u.request.context, 15982 cq->queue_id); 15983 bf_set(lpfc_rq_context_data_size, 15984 &rq_create->u.request.context, 15985 LPFC_NVMET_DATA_BUF_SIZE); 15986 bf_set(lpfc_rq_context_hdr_size, 15987 &rq_create->u.request.context, 15988 LPFC_HDR_BUF_SIZE); 15989 bf_set(lpfc_rq_context_rqe_count_1, 15990 &rq_create->u.request.context, 15991 hrq->entry_count); 15992 bf_set(lpfc_rq_context_rqe_size, 15993 &rq_create->u.request.context, 15994 LPFC_RQE_SIZE_8); 15995 bf_set(lpfc_rq_context_page_size, 15996 &rq_create->u.request.context, 15997 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15998 } 15999 rc = 0; 16000 list_for_each_entry(dmabuf, &hrq->page_list, list) { 16001 memset(dmabuf->virt, 0, hw_page_size); 16002 cnt = page_idx + dmabuf->buffer_tag; 
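			/*
			 * All header and data RQ pages share one flat
			 * dma_address array in this request: page_idx is the
			 * running count of pages already recorded for the
			 * queues processed so far, while buffer_tag indexes
			 * the page within the current queue's page_list.
			 */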
16003 rq_create->u.request.page[cnt].addr_lo = 16004 putPaddrLow(dmabuf->phys); 16005 rq_create->u.request.page[cnt].addr_hi = 16006 putPaddrHigh(dmabuf->phys); 16007 rc++; 16008 } 16009 page_idx += rc; 16010 16011 rc = 0; 16012 list_for_each_entry(dmabuf, &drq->page_list, list) { 16013 memset(dmabuf->virt, 0, hw_page_size); 16014 cnt = page_idx + dmabuf->buffer_tag; 16015 rq_create->u.request.page[cnt].addr_lo = 16016 putPaddrLow(dmabuf->phys); 16017 rq_create->u.request.page[cnt].addr_hi = 16018 putPaddrHigh(dmabuf->phys); 16019 rc++; 16020 } 16021 page_idx += rc; 16022 16023 hrq->db_format = LPFC_DB_RING_FORMAT; 16024 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16025 hrq->type = LPFC_HRQ; 16026 hrq->assoc_qid = cq->queue_id; 16027 hrq->subtype = subtype; 16028 hrq->host_index = 0; 16029 hrq->hba_index = 0; 16030 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16031 16032 drq->db_format = LPFC_DB_RING_FORMAT; 16033 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16034 drq->type = LPFC_DRQ; 16035 drq->assoc_qid = cq->queue_id; 16036 drq->subtype = subtype; 16037 drq->host_index = 0; 16038 drq->hba_index = 0; 16039 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16040 16041 list_add_tail(&hrq->list, &cq->child_list); 16042 list_add_tail(&drq->list, &cq->child_list); 16043 } 16044 16045 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16046 /* The IOCTL status is embedded in the mailbox subheader. */ 16047 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16048 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16049 if (shdr_status || shdr_add_status || rc) { 16050 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16051 "3120 RQ_CREATE mailbox failed with " 16052 "status x%x add_status x%x, mbx status x%x\n", 16053 shdr_status, shdr_add_status, rc); 16054 status = -ENXIO; 16055 goto out; 16056 } 16057 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16058 if (rc == 0xFFFF) { 16059 status = -ENXIO; 16060 goto out; 16061 } 16062 16063 /* Initialize all RQs with associated queue id */ 16064 for (idx = 0; idx < numrq; idx++) { 16065 hrq = hrqp[idx]; 16066 hrq->queue_id = rc + (2 * idx); 16067 drq = drqp[idx]; 16068 drq->queue_id = rc + (2 * idx) + 1; 16069 } 16070 16071 out: 16072 lpfc_sli4_mbox_cmd_free(phba, mbox); 16073 return status; 16074 } 16075 16076 /** 16077 * lpfc_eq_destroy - Destroy an event Queue on the HBA 16078 * @eq: The queue structure associated with the queue to destroy. 16079 * 16080 * This function destroys a queue, as detailed in @eq by sending an mailbox 16081 * command, specific to the type of queue, to the HBA. 16082 * 16083 * The @eq struct is used to get the queue ID of the queue to destroy. 16084 * 16085 * On success this function will return a zero. If the queue destroy mailbox 16086 * command fails this function will return -ENXIO. 
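 *
 * A teardown sketch (hypothetical; @eq is assumed to have been created
 * earlier and to no longer have any completion queues attached):
 *
 *	rc = lpfc_eq_destroy(phba, eq);
 *	if (rc)
 *		status = rc;	/* keep tearing down the remaining queues */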
16087 **/ 16088 int 16089 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 16090 { 16091 LPFC_MBOXQ_t *mbox; 16092 int rc, length, status = 0; 16093 uint32_t shdr_status, shdr_add_status; 16094 union lpfc_sli4_cfg_shdr *shdr; 16095 16096 /* sanity check on queue memory */ 16097 if (!eq) 16098 return -ENODEV; 16099 16100 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 16101 if (!mbox) 16102 return -ENOMEM; 16103 length = (sizeof(struct lpfc_mbx_eq_destroy) - 16104 sizeof(struct lpfc_sli4_cfg_mhdr)); 16105 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16106 LPFC_MBOX_OPCODE_EQ_DESTROY, 16107 length, LPFC_SLI4_MBX_EMBED); 16108 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 16109 eq->queue_id); 16110 mbox->vport = eq->phba->pport; 16111 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16112 16113 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 16114 /* The IOCTL status is embedded in the mailbox subheader. */ 16115 shdr = (union lpfc_sli4_cfg_shdr *) 16116 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 16117 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16118 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16119 if (shdr_status || shdr_add_status || rc) { 16120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16121 "2505 EQ_DESTROY mailbox failed with " 16122 "status x%x add_status x%x, mbx status x%x\n", 16123 shdr_status, shdr_add_status, rc); 16124 status = -ENXIO; 16125 } 16126 16127 /* Remove eq from any list */ 16128 list_del_init(&eq->list); 16129 mempool_free(mbox, eq->phba->mbox_mem_pool); 16130 return status; 16131 } 16132 16133 /** 16134 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 16135 * @cq: The queue structure associated with the queue to destroy. 16136 * 16137 * This function destroys a queue, as detailed in @cq by sending an mailbox 16138 * command, specific to the type of queue, to the HBA. 16139 * 16140 * The @cq struct is used to get the queue ID of the queue to destroy. 16141 * 16142 * On success this function will return a zero. If the queue destroy mailbox 16143 * command fails this function will return -ENXIO. 16144 **/ 16145 int 16146 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 16147 { 16148 LPFC_MBOXQ_t *mbox; 16149 int rc, length, status = 0; 16150 uint32_t shdr_status, shdr_add_status; 16151 union lpfc_sli4_cfg_shdr *shdr; 16152 16153 /* sanity check on queue memory */ 16154 if (!cq) 16155 return -ENODEV; 16156 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 16157 if (!mbox) 16158 return -ENOMEM; 16159 length = (sizeof(struct lpfc_mbx_cq_destroy) - 16160 sizeof(struct lpfc_sli4_cfg_mhdr)); 16161 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16162 LPFC_MBOX_OPCODE_CQ_DESTROY, 16163 length, LPFC_SLI4_MBX_EMBED); 16164 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 16165 cq->queue_id); 16166 mbox->vport = cq->phba->pport; 16167 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16168 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 16169 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16170 shdr = (union lpfc_sli4_cfg_shdr *) 16171 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 16172 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16173 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16174 if (shdr_status || shdr_add_status || rc) { 16175 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16176 "2506 CQ_DESTROY mailbox failed with " 16177 "status x%x add_status x%x, mbx status x%x\n", 16178 shdr_status, shdr_add_status, rc); 16179 status = -ENXIO; 16180 } 16181 /* Remove cq from any list */ 16182 list_del_init(&cq->list); 16183 mempool_free(mbox, cq->phba->mbox_mem_pool); 16184 return status; 16185 } 16186 16187 /** 16188 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 16189 * @qm: The queue structure associated with the queue to destroy. 16190 * 16191 * This function destroys a queue, as detailed in @mq by sending an mailbox 16192 * command, specific to the type of queue, to the HBA. 16193 * 16194 * The @mq struct is used to get the queue ID of the queue to destroy. 16195 * 16196 * On success this function will return a zero. If the queue destroy mailbox 16197 * command fails this function will return -ENXIO. 16198 **/ 16199 int 16200 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 16201 { 16202 LPFC_MBOXQ_t *mbox; 16203 int rc, length, status = 0; 16204 uint32_t shdr_status, shdr_add_status; 16205 union lpfc_sli4_cfg_shdr *shdr; 16206 16207 /* sanity check on queue memory */ 16208 if (!mq) 16209 return -ENODEV; 16210 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 16211 if (!mbox) 16212 return -ENOMEM; 16213 length = (sizeof(struct lpfc_mbx_mq_destroy) - 16214 sizeof(struct lpfc_sli4_cfg_mhdr)); 16215 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16216 LPFC_MBOX_OPCODE_MQ_DESTROY, 16217 length, LPFC_SLI4_MBX_EMBED); 16218 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16219 mq->queue_id); 16220 mbox->vport = mq->phba->pport; 16221 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16222 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16223 /* The IOCTL status is embedded in the mailbox subheader. */ 16224 shdr = (union lpfc_sli4_cfg_shdr *) 16225 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16228 if (shdr_status || shdr_add_status || rc) { 16229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16230 "2507 MQ_DESTROY mailbox failed with " 16231 "status x%x add_status x%x, mbx status x%x\n", 16232 shdr_status, shdr_add_status, rc); 16233 status = -ENXIO; 16234 } 16235 /* Remove mq from any list */ 16236 list_del_init(&mq->list); 16237 mempool_free(mbox, mq->phba->mbox_mem_pool); 16238 return status; 16239 } 16240 16241 /** 16242 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16243 * @wq: The queue structure associated with the queue to destroy. 16244 * 16245 * This function destroys a queue, as detailed in @wq by sending an mailbox 16246 * command, specific to the type of queue, to the HBA. 16247 * 16248 * The @wq struct is used to get the queue ID of the queue to destroy. 16249 * 16250 * On success this function will return a zero. If the queue destroy mailbox 16251 * command fails this function will return -ENXIO. 
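 *
 * Note that, in addition to issuing WQ_DESTROY, this routine frees the
 * wq->pring ring buffer allocated by lpfc_wq_create() and clears the
 * pointer, so callers must not touch wq->pring afterwards. A minimal
 * teardown sketch (hypothetical queue pointer):
 *
 *	rc = lpfc_wq_destroy(phba, els_wq);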
16252 **/ 16253 int 16254 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16255 { 16256 LPFC_MBOXQ_t *mbox; 16257 int rc, length, status = 0; 16258 uint32_t shdr_status, shdr_add_status; 16259 union lpfc_sli4_cfg_shdr *shdr; 16260 16261 /* sanity check on queue memory */ 16262 if (!wq) 16263 return -ENODEV; 16264 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16265 if (!mbox) 16266 return -ENOMEM; 16267 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16268 sizeof(struct lpfc_sli4_cfg_mhdr)); 16269 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16270 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16271 length, LPFC_SLI4_MBX_EMBED); 16272 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16273 wq->queue_id); 16274 mbox->vport = wq->phba->pport; 16275 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16276 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16277 shdr = (union lpfc_sli4_cfg_shdr *) 16278 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16279 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16281 if (shdr_status || shdr_add_status || rc) { 16282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16283 "2508 WQ_DESTROY mailbox failed with " 16284 "status x%x add_status x%x, mbx status x%x\n", 16285 shdr_status, shdr_add_status, rc); 16286 status = -ENXIO; 16287 } 16288 /* Remove wq from any list */ 16289 list_del_init(&wq->list); 16290 kfree(wq->pring); 16291 wq->pring = NULL; 16292 mempool_free(mbox, wq->phba->mbox_mem_pool); 16293 return status; 16294 } 16295 16296 /** 16297 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16298 * @rq: The queue structure associated with the queue to destroy. 16299 * 16300 * This function destroys a queue, as detailed in @rq by sending an mailbox 16301 * command, specific to the type of queue, to the HBA. 16302 * 16303 * The @rq struct is used to get the queue ID of the queue to destroy. 16304 * 16305 * On success this function will return a zero. If the queue destroy mailbox 16306 * command fails this function will return -ENXIO. 16307 **/ 16308 int 16309 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16310 struct lpfc_queue *drq) 16311 { 16312 LPFC_MBOXQ_t *mbox; 16313 int rc, length, status = 0; 16314 uint32_t shdr_status, shdr_add_status; 16315 union lpfc_sli4_cfg_shdr *shdr; 16316 16317 /* sanity check on queue memory */ 16318 if (!hrq || !drq) 16319 return -ENODEV; 16320 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16321 if (!mbox) 16322 return -ENOMEM; 16323 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16324 sizeof(struct lpfc_sli4_cfg_mhdr)); 16325 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16326 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16327 length, LPFC_SLI4_MBX_EMBED); 16328 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16329 hrq->queue_id); 16330 mbox->vport = hrq->phba->pport; 16331 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16332 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16333 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16334 shdr = (union lpfc_sli4_cfg_shdr *) 16335 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16336 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16337 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16338 if (shdr_status || shdr_add_status || rc) { 16339 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16340 "2509 RQ_DESTROY mailbox failed with " 16341 "status x%x add_status x%x, mbx status x%x\n", 16342 shdr_status, shdr_add_status, rc); 16343 if (rc != MBX_TIMEOUT) 16344 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16345 return -ENXIO; 16346 } 16347 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16348 drq->queue_id); 16349 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 16350 shdr = (union lpfc_sli4_cfg_shdr *) 16351 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16352 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16353 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16354 if (shdr_status || shdr_add_status || rc) { 16355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16356 "2510 RQ_DESTROY mailbox failed with " 16357 "status x%x add_status x%x, mbx status x%x\n", 16358 shdr_status, shdr_add_status, rc); 16359 status = -ENXIO; 16360 } 16361 list_del_init(&hrq->list); 16362 list_del_init(&drq->list); 16363 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16364 return status; 16365 } 16366 16367 /** 16368 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 16369 * @phba: The virtual port for which this call being executed. 16370 * @pdma_phys_addr0: Physical address of the 1st SGL page. 16371 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 16372 * @xritag: the xritag that ties this io to the SGL pages. 16373 * 16374 * This routine will post the sgl pages for the IO that has the xritag 16375 * that is in the iocbq structure. The xritag is assigned during iocbq 16376 * creation and persists for as long as the driver is loaded. 16377 * if the caller has fewer than 256 scatter gather segments to map then 16378 * pdma_phys_addr1 should be 0. 16379 * If the caller needs to map more than 256 scatter gather segment then 16380 * pdma_phys_addr1 should be a valid physical address. 16381 * physical address for SGLs must be 64 byte aligned. 16382 * If you are going to map 2 SGL's then the first one must have 256 entries 16383 * the second sgl can have between 1 and 256 entries. 
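 *
 * A single-page sketch (hypothetical sglq entry; the second physical
 * address is passed as zero because only one SGL page is being mapped):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);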
16384 * 16385 * Return codes: 16386 * 0 - Success 16387 * -ENXIO, -ENOMEM - Failure 16388 **/ 16389 int 16390 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 16391 dma_addr_t pdma_phys_addr0, 16392 dma_addr_t pdma_phys_addr1, 16393 uint16_t xritag) 16394 { 16395 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 16396 LPFC_MBOXQ_t *mbox; 16397 int rc; 16398 uint32_t shdr_status, shdr_add_status; 16399 uint32_t mbox_tmo; 16400 union lpfc_sli4_cfg_shdr *shdr; 16401 16402 if (xritag == NO_XRI) { 16403 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16404 "0364 Invalid param:\n"); 16405 return -EINVAL; 16406 } 16407 16408 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16409 if (!mbox) 16410 return -ENOMEM; 16411 16412 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16413 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16414 sizeof(struct lpfc_mbx_post_sgl_pages) - 16415 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16416 16417 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 16418 &mbox->u.mqe.un.post_sgl_pages; 16419 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 16420 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 16421 16422 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 16423 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 16424 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 16425 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 16426 16427 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 16428 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 16429 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 16430 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 16431 if (!phba->sli4_hba.intr_enable) 16432 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16433 else { 16434 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16435 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16436 } 16437 /* The IOCTL status is embedded in the mailbox subheader. */ 16438 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 16439 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16440 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16441 if (rc != MBX_TIMEOUT) 16442 mempool_free(mbox, phba->mbox_mem_pool); 16443 if (shdr_status || shdr_add_status || rc) { 16444 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16445 "2511 POST_SGL mailbox failed with " 16446 "status x%x add_status x%x, mbx status x%x\n", 16447 shdr_status, shdr_add_status, rc); 16448 } 16449 return 0; 16450 } 16451 16452 /** 16453 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 16454 * @phba: pointer to lpfc hba data structure. 16455 * 16456 * This routine is invoked to post rpi header templates to the 16457 * HBA consistent with the SLI-4 interface spec. This routine 16458 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 16459 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 16460 * 16461 * Returns 16462 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 16463 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 16464 **/ 16465 static uint16_t 16466 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 16467 { 16468 unsigned long xri; 16469 16470 /* 16471 * Fetch the next logical xri. Because this index is logical, 16472 * the driver starts at 0 each time. 
16473 */ 16474 spin_lock_irq(&phba->hbalock); 16475 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 16476 phba->sli4_hba.max_cfg_param.max_xri, 0); 16477 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 16478 spin_unlock_irq(&phba->hbalock); 16479 return NO_XRI; 16480 } else { 16481 set_bit(xri, phba->sli4_hba.xri_bmask); 16482 phba->sli4_hba.max_cfg_param.xri_used++; 16483 } 16484 spin_unlock_irq(&phba->hbalock); 16485 return xri; 16486 } 16487 16488 /** 16489 * lpfc_sli4_free_xri - Release an xri for reuse. 16490 * @phba: pointer to lpfc hba data structure. 16491 * 16492 * This routine is invoked to release an xri to the pool of 16493 * available rpis maintained by the driver. 16494 **/ 16495 static void 16496 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16497 { 16498 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 16499 phba->sli4_hba.max_cfg_param.xri_used--; 16500 } 16501 } 16502 16503 /** 16504 * lpfc_sli4_free_xri - Release an xri for reuse. 16505 * @phba: pointer to lpfc hba data structure. 16506 * 16507 * This routine is invoked to release an xri to the pool of 16508 * available rpis maintained by the driver. 16509 **/ 16510 void 16511 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16512 { 16513 spin_lock_irq(&phba->hbalock); 16514 __lpfc_sli4_free_xri(phba, xri); 16515 spin_unlock_irq(&phba->hbalock); 16516 } 16517 16518 /** 16519 * lpfc_sli4_next_xritag - Get an xritag for the io 16520 * @phba: Pointer to HBA context object. 16521 * 16522 * This function gets an xritag for the iocb. If there is no unused xritag 16523 * it will return 0xffff. 16524 * The function returns the allocated xritag if successful, else returns zero. 16525 * Zero is not a valid xritag. 16526 * The caller is not required to hold any lock. 16527 **/ 16528 uint16_t 16529 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 16530 { 16531 uint16_t xri_index; 16532 16533 xri_index = lpfc_sli4_alloc_xri(phba); 16534 if (xri_index == NO_XRI) 16535 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 16536 "2004 Failed to allocate XRI.last XRITAG is %d" 16537 " Max XRI is %d, Used XRI is %d\n", 16538 xri_index, 16539 phba->sli4_hba.max_cfg_param.max_xri, 16540 phba->sli4_hba.max_cfg_param.xri_used); 16541 return xri_index; 16542 } 16543 16544 /** 16545 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 16546 * @phba: pointer to lpfc hba data structure. 16547 * @post_sgl_list: pointer to els sgl entry list. 16548 * @count: number of els sgl entries on the list. 16549 * 16550 * This routine is invoked to post a block of driver's sgl pages to the 16551 * HBA using non-embedded mailbox command. No Lock is held. This routine 16552 * is only called when the driver is loading and after all IO has been 16553 * stopped. 
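 *
 * A caller sketch (hypothetical list head and count; the list entries are
 * struct lpfc_sglq buffers linked through their list member, as consumed by
 * the posting loop below):
 *
 *	rc = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, post_cnt);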
16554 **/ 16555 static int 16556 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16557 struct list_head *post_sgl_list, 16558 int post_cnt) 16559 { 16560 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16561 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16562 struct sgl_page_pairs *sgl_pg_pairs; 16563 void *viraddr; 16564 LPFC_MBOXQ_t *mbox; 16565 uint32_t reqlen, alloclen, pg_pairs; 16566 uint32_t mbox_tmo; 16567 uint16_t xritag_start = 0; 16568 int rc = 0; 16569 uint32_t shdr_status, shdr_add_status; 16570 union lpfc_sli4_cfg_shdr *shdr; 16571 16572 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16573 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16574 if (reqlen > SLI4_PAGE_SIZE) { 16575 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16576 "2559 Block sgl registration required DMA " 16577 "size (%d) great than a page\n", reqlen); 16578 return -ENOMEM; 16579 } 16580 16581 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16582 if (!mbox) 16583 return -ENOMEM; 16584 16585 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16586 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16587 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16588 LPFC_SLI4_MBX_NEMBED); 16589 16590 if (alloclen < reqlen) { 16591 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16592 "0285 Allocated DMA memory size (%d) is " 16593 "less than the requested DMA memory " 16594 "size (%d)\n", alloclen, reqlen); 16595 lpfc_sli4_mbox_cmd_free(phba, mbox); 16596 return -ENOMEM; 16597 } 16598 /* Set up the SGL pages in the non-embedded DMA pages */ 16599 viraddr = mbox->sge_array->addr[0]; 16600 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16601 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16602 16603 pg_pairs = 0; 16604 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16605 /* Set up the sge entry */ 16606 sgl_pg_pairs->sgl_pg0_addr_lo = 16607 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16608 sgl_pg_pairs->sgl_pg0_addr_hi = 16609 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16610 sgl_pg_pairs->sgl_pg1_addr_lo = 16611 cpu_to_le32(putPaddrLow(0)); 16612 sgl_pg_pairs->sgl_pg1_addr_hi = 16613 cpu_to_le32(putPaddrHigh(0)); 16614 16615 /* Keep the first xritag on the list */ 16616 if (pg_pairs == 0) 16617 xritag_start = sglq_entry->sli4_xritag; 16618 sgl_pg_pairs++; 16619 pg_pairs++; 16620 } 16621 16622 /* Complete initialization and perform endian conversion. */ 16623 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16624 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16625 sgl->word0 = cpu_to_le32(sgl->word0); 16626 16627 if (!phba->sli4_hba.intr_enable) 16628 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16629 else { 16630 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16631 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16632 } 16633 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16634 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16635 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16636 if (rc != MBX_TIMEOUT) 16637 lpfc_sli4_mbox_cmd_free(phba, mbox); 16638 if (shdr_status || shdr_add_status || rc) { 16639 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16640 "2513 POST_SGL_BLOCK mailbox command failed " 16641 "status x%x add_status x%x mbx status x%x\n", 16642 shdr_status, shdr_add_status, rc); 16643 rc = -ENXIO; 16644 } 16645 return rc; 16646 } 16647 16648 /** 16649 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 16650 * @phba: pointer to lpfc hba data structure. 
16651 * @nblist: pointer to nvme buffer list. 16652 * @count: number of scsi buffers on the list. 16653 * 16654 * This routine is invoked to post a block of @count scsi sgl pages from a 16655 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 16656 * No Lock is held. 16657 * 16658 **/ 16659 static int 16660 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 16661 int count) 16662 { 16663 struct lpfc_io_buf *lpfc_ncmd; 16664 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16665 struct sgl_page_pairs *sgl_pg_pairs; 16666 void *viraddr; 16667 LPFC_MBOXQ_t *mbox; 16668 uint32_t reqlen, alloclen, pg_pairs; 16669 uint32_t mbox_tmo; 16670 uint16_t xritag_start = 0; 16671 int rc = 0; 16672 uint32_t shdr_status, shdr_add_status; 16673 dma_addr_t pdma_phys_bpl1; 16674 union lpfc_sli4_cfg_shdr *shdr; 16675 16676 /* Calculate the requested length of the dma memory */ 16677 reqlen = count * sizeof(struct sgl_page_pairs) + 16678 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16679 if (reqlen > SLI4_PAGE_SIZE) { 16680 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16681 "6118 Block sgl registration required DMA " 16682 "size (%d) great than a page\n", reqlen); 16683 return -ENOMEM; 16684 } 16685 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16686 if (!mbox) { 16687 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16688 "6119 Failed to allocate mbox cmd memory\n"); 16689 return -ENOMEM; 16690 } 16691 16692 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16693 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16694 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16695 reqlen, LPFC_SLI4_MBX_NEMBED); 16696 16697 if (alloclen < reqlen) { 16698 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16699 "6120 Allocated DMA memory size (%d) is " 16700 "less than the requested DMA memory " 16701 "size (%d)\n", alloclen, reqlen); 16702 lpfc_sli4_mbox_cmd_free(phba, mbox); 16703 return -ENOMEM; 16704 } 16705 16706 /* Get the first SGE entry from the non-embedded DMA memory */ 16707 viraddr = mbox->sge_array->addr[0]; 16708 16709 /* Set up the SGL pages in the non-embedded DMA pages */ 16710 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16711 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16712 16713 pg_pairs = 0; 16714 list_for_each_entry(lpfc_ncmd, nblist, list) { 16715 /* Set up the sge entry */ 16716 sgl_pg_pairs->sgl_pg0_addr_lo = 16717 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 16718 sgl_pg_pairs->sgl_pg0_addr_hi = 16719 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 16720 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16721 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 16722 SGL_PAGE_SIZE; 16723 else 16724 pdma_phys_bpl1 = 0; 16725 sgl_pg_pairs->sgl_pg1_addr_lo = 16726 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16727 sgl_pg_pairs->sgl_pg1_addr_hi = 16728 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16729 /* Keep the first xritag on the list */ 16730 if (pg_pairs == 0) 16731 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 16732 sgl_pg_pairs++; 16733 pg_pairs++; 16734 } 16735 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16736 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16737 /* Perform endian conversion if necessary */ 16738 sgl->word0 = cpu_to_le32(sgl->word0); 16739 16740 if (!phba->sli4_hba.intr_enable) { 16741 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16742 } else { 16743 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16744 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16745 } 16746 shdr = (union 
lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 16747 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16748 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16749 if (rc != MBX_TIMEOUT) 16750 lpfc_sli4_mbox_cmd_free(phba, mbox); 16751 if (shdr_status || shdr_add_status || rc) { 16752 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16753 "6125 POST_SGL_BLOCK mailbox command failed " 16754 "status x%x add_status x%x mbx status x%x\n", 16755 shdr_status, shdr_add_status, rc); 16756 rc = -ENXIO; 16757 } 16758 return rc; 16759 } 16760 16761 /** 16762 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 16763 * @phba: pointer to lpfc hba data structure. 16764 * @post_nblist: pointer to the nvme buffer list. 16765 * 16766 * This routine walks a list of nvme buffers that was passed in. It attempts 16767 * to construct blocks of nvme buffer sgls which contains contiguous xris and 16768 * uses the non-embedded SGL block post mailbox commands to post to the port. 16769 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 16770 * embedded SGL post mailbox command for posting. The @post_nblist passed in 16771 * must be local list, thus no lock is needed when manipulate the list. 16772 * 16773 * Returns: 0 = failure, non-zero number of successfully posted buffers. 16774 **/ 16775 int 16776 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 16777 struct list_head *post_nblist, int sb_count) 16778 { 16779 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 16780 int status, sgl_size; 16781 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 16782 dma_addr_t pdma_phys_sgl1; 16783 int last_xritag = NO_XRI; 16784 int cur_xritag; 16785 LIST_HEAD(prep_nblist); 16786 LIST_HEAD(blck_nblist); 16787 LIST_HEAD(nvme_nblist); 16788 16789 /* sanity check */ 16790 if (sb_count <= 0) 16791 return -EINVAL; 16792 16793 sgl_size = phba->cfg_sg_dma_buf_size; 16794 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 16795 list_del_init(&lpfc_ncmd->list); 16796 block_cnt++; 16797 if ((last_xritag != NO_XRI) && 16798 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 16799 /* a hole in xri block, form a sgl posting block */ 16800 list_splice_init(&prep_nblist, &blck_nblist); 16801 post_cnt = block_cnt - 1; 16802 /* prepare list for next posting block */ 16803 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16804 block_cnt = 1; 16805 } else { 16806 /* prepare list for next posting block */ 16807 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16808 /* enough sgls for non-embed sgl mbox command */ 16809 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 16810 list_splice_init(&prep_nblist, &blck_nblist); 16811 post_cnt = block_cnt; 16812 block_cnt = 0; 16813 } 16814 } 16815 num_posting++; 16816 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16817 16818 /* end of repost sgl list condition for NVME buffers */ 16819 if (num_posting == sb_count) { 16820 if (post_cnt == 0) { 16821 /* last sgl posting block */ 16822 list_splice_init(&prep_nblist, &blck_nblist); 16823 post_cnt = block_cnt; 16824 } else if (block_cnt == 1) { 16825 /* last single sgl with non-contiguous xri */ 16826 if (sgl_size > SGL_PAGE_SIZE) 16827 pdma_phys_sgl1 = 16828 lpfc_ncmd->dma_phys_sgl + 16829 SGL_PAGE_SIZE; 16830 else 16831 pdma_phys_sgl1 = 0; 16832 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16833 status = lpfc_sli4_post_sgl( 16834 phba, lpfc_ncmd->dma_phys_sgl, 16835 pdma_phys_sgl1, cur_xritag); 16836 if (status) { 16837 /* Post error. Buffer unavailable. 
*/ 16838 lpfc_ncmd->flags |= 16839 LPFC_SBUF_NOT_POSTED; 16840 } else { 16841 /* Post success. Bffer available. */ 16842 lpfc_ncmd->flags &= 16843 ~LPFC_SBUF_NOT_POSTED; 16844 lpfc_ncmd->status = IOSTAT_SUCCESS; 16845 num_posted++; 16846 } 16847 /* success, put on NVME buffer sgl list */ 16848 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16849 } 16850 } 16851 16852 /* continue until a nembed page worth of sgls */ 16853 if (post_cnt == 0) 16854 continue; 16855 16856 /* post block of NVME buffer list sgls */ 16857 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 16858 post_cnt); 16859 16860 /* don't reset xirtag due to hole in xri block */ 16861 if (block_cnt == 0) 16862 last_xritag = NO_XRI; 16863 16864 /* reset NVME buffer post count for next round of posting */ 16865 post_cnt = 0; 16866 16867 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 16868 while (!list_empty(&blck_nblist)) { 16869 list_remove_head(&blck_nblist, lpfc_ncmd, 16870 struct lpfc_io_buf, list); 16871 if (status) { 16872 /* Post error. Mark buffer unavailable. */ 16873 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 16874 } else { 16875 /* Post success, Mark buffer available. */ 16876 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 16877 lpfc_ncmd->status = IOSTAT_SUCCESS; 16878 num_posted++; 16879 } 16880 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16881 } 16882 } 16883 /* Push NVME buffers with sgl posted to the available list */ 16884 lpfc_io_buf_replenish(phba, &nvme_nblist); 16885 16886 return num_posted; 16887 } 16888 16889 /** 16890 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16891 * @phba: pointer to lpfc_hba struct that the frame was received on 16892 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16893 * 16894 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16895 * valid type of frame that the LPFC driver will handle. This function will 16896 * return a zero if the frame is a valid frame or a non zero value when the 16897 * frame does not pass the check. 
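 *
 * Receive-path sketch (hypothetical buffer pointer; assumes the FC header
 * sits at the start of the header buffer of an hbq_dmabuf):
 *
 *	fc_hdr = (struct fc_frame_header *)hbq_buf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr))
 *		return;	/* invalid frame; already logged, must be dropped */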
16898 **/ 16899 static int 16900 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16901 { 16902 /* make rctl_names static to save stack space */ 16903 struct fc_vft_header *fc_vft_hdr; 16904 uint32_t *header = (uint32_t *) fc_hdr; 16905 16906 switch (fc_hdr->fh_r_ctl) { 16907 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16908 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16909 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16910 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16911 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16912 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16913 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16914 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16915 case FC_RCTL_ELS_REQ: /* extended link services request */ 16916 case FC_RCTL_ELS_REP: /* extended link services reply */ 16917 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16918 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16919 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16920 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16921 case FC_RCTL_BA_RMC: /* remove connection */ 16922 case FC_RCTL_BA_ACC: /* basic accept */ 16923 case FC_RCTL_BA_RJT: /* basic reject */ 16924 case FC_RCTL_BA_PRMT: 16925 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16926 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16927 case FC_RCTL_P_RJT: /* port reject */ 16928 case FC_RCTL_F_RJT: /* fabric reject */ 16929 case FC_RCTL_P_BSY: /* port busy */ 16930 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16931 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16932 case FC_RCTL_LCR: /* link credit reset */ 16933 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16934 case FC_RCTL_END: /* end */ 16935 break; 16936 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16937 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16938 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16939 return lpfc_fc_frame_check(phba, fc_hdr); 16940 default: 16941 goto drop; 16942 } 16943 16944 switch (fc_hdr->fh_type) { 16945 case FC_TYPE_BLS: 16946 case FC_TYPE_ELS: 16947 case FC_TYPE_FCP: 16948 case FC_TYPE_CT: 16949 case FC_TYPE_NVME: 16950 break; 16951 case FC_TYPE_IP: 16952 case FC_TYPE_ILS: 16953 default: 16954 goto drop; 16955 } 16956 16957 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16958 "2538 Received frame rctl:x%x, type:x%x, " 16959 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16960 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16961 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16962 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16963 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16964 be32_to_cpu(header[6])); 16965 return 0; 16966 drop: 16967 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16968 "2539 Dropped frame rctl:x%x type:x%x\n", 16969 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16970 return 1; 16971 } 16972 16973 /** 16974 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16975 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16976 * 16977 * This function processes the FC header to retrieve the VFI from the VF 16978 * header, if one exists. This function will return the VFI if one exists 16979 * or 0 if no VSAN Header exists. 
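 *
 * Illustrative use only (a sketch based on the vport match in
 * lpfc_fc_frame_to_vport() below): the returned VFI is compared against a
 * vport's vfi, together with the FCFI and DID, to locate the owning vport:
 *
 *	if (phba->fcf.fcfi == fcfi &&
 *	    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
 *	    vports[i]->fc_myDID == did)
 *		vport = vports[i];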
16980 **/ 16981 static uint32_t 16982 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16983 { 16984 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16985 16986 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16987 return 0; 16988 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16989 } 16990 16991 /** 16992 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16993 * @phba: Pointer to the HBA structure to search for the vport on 16994 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16995 * @fcfi: The FC Fabric ID that the frame came from 16996 * 16997 * This function searches the @phba for a vport that matches the content of the 16998 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16999 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 17000 * returns the matching vport pointer or NULL if unable to match frame to a 17001 * vport. 17002 **/ 17003 static struct lpfc_vport * 17004 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 17005 uint16_t fcfi, uint32_t did) 17006 { 17007 struct lpfc_vport **vports; 17008 struct lpfc_vport *vport = NULL; 17009 int i; 17010 17011 if (did == Fabric_DID) 17012 return phba->pport; 17013 if ((phba->pport->fc_flag & FC_PT2PT) && 17014 !(phba->link_state == LPFC_HBA_READY)) 17015 return phba->pport; 17016 17017 vports = lpfc_create_vport_work_array(phba); 17018 if (vports != NULL) { 17019 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 17020 if (phba->fcf.fcfi == fcfi && 17021 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 17022 vports[i]->fc_myDID == did) { 17023 vport = vports[i]; 17024 break; 17025 } 17026 } 17027 } 17028 lpfc_destroy_vport_work_array(phba, vports); 17029 return vport; 17030 } 17031 17032 /** 17033 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 17034 * @vport: The vport to work on. 17035 * 17036 * This function updates the receive sequence time stamp for this vport. The 17037 * receive sequence time stamp indicates the time that the last frame of the 17038 * the sequence that has been idle for the longest amount of time was received. 17039 * the driver uses this time stamp to indicate if any received sequences have 17040 * timed out. 17041 **/ 17042 static void 17043 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 17044 { 17045 struct lpfc_dmabuf *h_buf; 17046 struct hbq_dmabuf *dmabuf = NULL; 17047 17048 /* get the oldest sequence on the rcv list */ 17049 h_buf = list_get_first(&vport->rcv_buffer_list, 17050 struct lpfc_dmabuf, list); 17051 if (!h_buf) 17052 return; 17053 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17054 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 17055 } 17056 17057 /** 17058 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 17059 * @vport: The vport that the received sequences were sent to. 17060 * 17061 * This function cleans up all outstanding received sequences. This is called 17062 * by the driver when a link event or user action invalidates all the received 17063 * sequences. 
17064 **/ 17065 void 17066 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 17067 { 17068 struct lpfc_dmabuf *h_buf, *hnext; 17069 struct lpfc_dmabuf *d_buf, *dnext; 17070 struct hbq_dmabuf *dmabuf = NULL; 17071 17072 /* start with the oldest sequence on the rcv list */ 17073 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17074 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17075 list_del_init(&dmabuf->hbuf.list); 17076 list_for_each_entry_safe(d_buf, dnext, 17077 &dmabuf->dbuf.list, list) { 17078 list_del_init(&d_buf->list); 17079 lpfc_in_buf_free(vport->phba, d_buf); 17080 } 17081 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17082 } 17083 } 17084 17085 /** 17086 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 17087 * @vport: The vport that the received sequences were sent to. 17088 * 17089 * This function determines whether any received sequences have timed out by 17090 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 17091 * indicates that there is at least one timed out sequence this routine will 17092 * go through the received sequences one at a time from most inactive to most 17093 * active to determine which ones need to be cleaned up. Once it has determined 17094 * that a sequence needs to be cleaned up it will simply free up the resources 17095 * without sending an abort. 17096 **/ 17097 void 17098 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 17099 { 17100 struct lpfc_dmabuf *h_buf, *hnext; 17101 struct lpfc_dmabuf *d_buf, *dnext; 17102 struct hbq_dmabuf *dmabuf = NULL; 17103 unsigned long timeout; 17104 int abort_count = 0; 17105 17106 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17107 vport->rcv_buffer_time_stamp); 17108 if (list_empty(&vport->rcv_buffer_list) || 17109 time_before(jiffies, timeout)) 17110 return; 17111 /* start with the oldest sequence on the rcv list */ 17112 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17113 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17114 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17115 dmabuf->time_stamp); 17116 if (time_before(jiffies, timeout)) 17117 break; 17118 abort_count++; 17119 list_del_init(&dmabuf->hbuf.list); 17120 list_for_each_entry_safe(d_buf, dnext, 17121 &dmabuf->dbuf.list, list) { 17122 list_del_init(&d_buf->list); 17123 lpfc_in_buf_free(vport->phba, d_buf); 17124 } 17125 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17126 } 17127 if (abort_count) 17128 lpfc_update_rcv_time_stamp(vport); 17129 } 17130 17131 /** 17132 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 17133 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 17134 * 17135 * This function searches through the existing incomplete sequences that have 17136 * been sent to this @vport. If the frame matches one of the incomplete 17137 * sequences then the dbuf in the @dmabuf is added to the list of frames that 17138 * make up that sequence. If no sequence is found that matches this frame then 17139 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 17140 * This function returns a pointer to the first dmabuf in the sequence list that 17141 * the frame was linked to. 
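 *
 * Frames are treated as part of the same sequence when the SEQ_ID, OX_ID and
 * S_ID of the new header match a header already queued on the list. A sketch
 * of the matching test used in the body below:
 *
 *	if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
 *	    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
 *	    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
 *		continue;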
17142 **/ 17143 static struct hbq_dmabuf * 17144 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17145 { 17146 struct fc_frame_header *new_hdr; 17147 struct fc_frame_header *temp_hdr; 17148 struct lpfc_dmabuf *d_buf; 17149 struct lpfc_dmabuf *h_buf; 17150 struct hbq_dmabuf *seq_dmabuf = NULL; 17151 struct hbq_dmabuf *temp_dmabuf = NULL; 17152 uint8_t found = 0; 17153 17154 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17155 dmabuf->time_stamp = jiffies; 17156 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17157 17158 /* Use the hdr_buf to find the sequence that this frame belongs to */ 17159 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17160 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17161 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17162 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17163 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17164 continue; 17165 /* found a pending sequence that matches this frame */ 17166 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17167 break; 17168 } 17169 if (!seq_dmabuf) { 17170 /* 17171 * This indicates first frame received for this sequence. 17172 * Queue the buffer on the vport's rcv_buffer_list. 17173 */ 17174 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 17175 lpfc_update_rcv_time_stamp(vport); 17176 return dmabuf; 17177 } 17178 temp_hdr = seq_dmabuf->hbuf.virt; 17179 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 17180 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 17181 list_del_init(&seq_dmabuf->hbuf.list); 17182 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 17183 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 17184 lpfc_update_rcv_time_stamp(vport); 17185 return dmabuf; 17186 } 17187 /* move this sequence to the tail to indicate a young sequence */ 17188 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 17189 seq_dmabuf->time_stamp = jiffies; 17190 lpfc_update_rcv_time_stamp(vport); 17191 if (list_empty(&seq_dmabuf->dbuf.list)) { 17192 temp_hdr = dmabuf->hbuf.virt; 17193 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 17194 return seq_dmabuf; 17195 } 17196 /* find the correct place in the sequence to insert this frame */ 17197 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 17198 while (!found) { 17199 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17200 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 17201 /* 17202 * If the frame's sequence count is greater than the frame on 17203 * the list then insert the frame right after this frame 17204 */ 17205 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 17206 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 17207 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 17208 found = 1; 17209 break; 17210 } 17211 17212 if (&d_buf->list == &seq_dmabuf->dbuf.list) 17213 break; 17214 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 17215 } 17216 17217 if (found) 17218 return seq_dmabuf; 17219 return NULL; 17220 } 17221 17222 /** 17223 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 17224 * @vport: pointer to a vitural port 17225 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17226 * 17227 * This function tries to abort from the partially assembed sequence, described 17228 * by the information from basic abbort @dmabuf. It checks to see whether such 17229 * partially assembled sequence held by the driver. If so, it shall free up all 17230 * the frames from the partially assembled sequence. 
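 *
 * Caller sketch (illustrative only), taken from the BA_ABTS handling in
 * lpfc_sli4_handle_unsol_abort() below, where a failed partial-sequence
 * abort falls back to an upper layer protocol abort:
 *
 *	aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
 *	if (aborted == false)
 *		aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);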
17231 * 17232 * Return 17233 * true -- if there is matching partially assembled sequence present and all 17234 * the frames freed with the sequence; 17235 * false -- if there is no matching partially assembled sequence present so 17236 * nothing got aborted in the lower layer driver 17237 **/ 17238 static bool 17239 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 17240 struct hbq_dmabuf *dmabuf) 17241 { 17242 struct fc_frame_header *new_hdr; 17243 struct fc_frame_header *temp_hdr; 17244 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 17245 struct hbq_dmabuf *seq_dmabuf = NULL; 17246 17247 /* Use the hdr_buf to find the sequence that matches this frame */ 17248 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17249 INIT_LIST_HEAD(&dmabuf->hbuf.list); 17250 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17251 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17252 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17253 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17254 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17255 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17256 continue; 17257 /* found a pending sequence that matches this frame */ 17258 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17259 break; 17260 } 17261 17262 /* Free up all the frames from the partially assembled sequence */ 17263 if (seq_dmabuf) { 17264 list_for_each_entry_safe(d_buf, n_buf, 17265 &seq_dmabuf->dbuf.list, list) { 17266 list_del_init(&d_buf->list); 17267 lpfc_in_buf_free(vport->phba, d_buf); 17268 } 17269 return true; 17270 } 17271 return false; 17272 } 17273 17274 /** 17275 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 17276 * @vport: pointer to a vitural port 17277 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17278 * 17279 * This function tries to abort from the assembed sequence from upper level 17280 * protocol, described by the information from basic abbort @dmabuf. It 17281 * checks to see whether such pending context exists at upper level protocol. 17282 * If so, it shall clean up the pending context. 17283 * 17284 * Return 17285 * true -- if there is matching pending context of the sequence cleaned 17286 * at ulp; 17287 * false -- if there is no matching pending context of the sequence present 17288 * at ulp. 17289 **/ 17290 static bool 17291 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17292 { 17293 struct lpfc_hba *phba = vport->phba; 17294 int handled; 17295 17296 /* Accepting abort at ulp with SLI4 only */ 17297 if (phba->sli_rev < LPFC_SLI_REV4) 17298 return false; 17299 17300 /* Register all caring upper level protocols to attend abort */ 17301 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 17302 if (handled) 17303 return true; 17304 17305 return false; 17306 } 17307 17308 /** 17309 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 17310 * @phba: Pointer to HBA context object. 17311 * @cmd_iocbq: pointer to the command iocbq structure. 17312 * @rsp_iocbq: pointer to the response iocbq structure. 17313 * 17314 * This function handles the sequence abort response iocb command complete 17315 * event. It properly releases the memory allocated to the sequence abort 17316 * accept iocb. 
17317 **/ 17318 static void 17319 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 17320 struct lpfc_iocbq *cmd_iocbq, 17321 struct lpfc_iocbq *rsp_iocbq) 17322 { 17323 struct lpfc_nodelist *ndlp; 17324 17325 if (cmd_iocbq) { 17326 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 17327 lpfc_nlp_put(ndlp); 17328 lpfc_nlp_not_used(ndlp); 17329 lpfc_sli_release_iocbq(phba, cmd_iocbq); 17330 } 17331 17332 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17333 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17334 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17335 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17336 rsp_iocbq->iocb.ulpStatus, 17337 rsp_iocbq->iocb.un.ulpWord[4]); 17338 } 17339 17340 /** 17341 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17342 * @phba: Pointer to HBA context object. 17343 * @xri: xri id in transaction. 17344 * 17345 * This function validates the xri maps to the known range of XRIs allocated an 17346 * used by the driver. 17347 **/ 17348 uint16_t 17349 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17350 uint16_t xri) 17351 { 17352 uint16_t i; 17353 17354 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17355 if (xri == phba->sli4_hba.xri_ids[i]) 17356 return i; 17357 } 17358 return NO_XRI; 17359 } 17360 17361 /** 17362 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17363 * @phba: Pointer to HBA context object. 17364 * @fc_hdr: pointer to a FC frame header. 17365 * 17366 * This function sends a basic response to a previous unsol sequence abort 17367 * event after aborting the sequence handling. 17368 **/ 17369 void 17370 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17371 struct fc_frame_header *fc_hdr, bool aborted) 17372 { 17373 struct lpfc_hba *phba = vport->phba; 17374 struct lpfc_iocbq *ctiocb = NULL; 17375 struct lpfc_nodelist *ndlp; 17376 uint16_t oxid, rxid, xri, lxri; 17377 uint32_t sid, fctl; 17378 IOCB_t *icmd; 17379 int rc; 17380 17381 if (!lpfc_is_link_up(phba)) 17382 return; 17383 17384 sid = sli4_sid_from_fc_hdr(fc_hdr); 17385 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17386 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17387 17388 ndlp = lpfc_findnode_did(vport, sid); 17389 if (!ndlp) { 17390 ndlp = lpfc_nlp_init(vport, sid); 17391 if (!ndlp) { 17392 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17393 "1268 Failed to allocate ndlp for " 17394 "oxid:x%x SID:x%x\n", oxid, sid); 17395 return; 17396 } 17397 /* Put ndlp onto pport node list */ 17398 lpfc_enqueue_node(vport, ndlp); 17399 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17400 /* re-setup ndlp without removing from node list */ 17401 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17402 if (!ndlp) { 17403 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17404 "3275 Failed to active ndlp found " 17405 "for oxid:x%x SID:x%x\n", oxid, sid); 17406 return; 17407 } 17408 } 17409 17410 /* Allocate buffer for rsp iocb */ 17411 ctiocb = lpfc_sli_get_iocbq(phba); 17412 if (!ctiocb) 17413 return; 17414 17415 /* Extract the F_CTL field from FC_HDR */ 17416 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17417 17418 icmd = &ctiocb->iocb; 17419 icmd->un.xseq64.bdl.bdeSize = 0; 17420 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17421 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17422 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17423 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17424 17425 /* Fill in the rest of iocb fields */ 17426 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17427 icmd->ulpBdeCount = 0; 17428 icmd->ulpLe = 1; 17429 icmd->ulpClass = CLASS3; 17430 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17431 ctiocb->context1 = lpfc_nlp_get(ndlp); 17432 17433 ctiocb->iocb_cmpl = NULL; 17434 ctiocb->vport = phba->pport; 17435 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17436 ctiocb->sli4_lxritag = NO_XRI; 17437 ctiocb->sli4_xritag = NO_XRI; 17438 17439 if (fctl & FC_FC_EX_CTX) 17440 /* Exchange responder sent the abort so we 17441 * own the oxid. 17442 */ 17443 xri = oxid; 17444 else 17445 xri = rxid; 17446 lxri = lpfc_sli4_xri_inrange(phba, xri); 17447 if (lxri != NO_XRI) 17448 lpfc_set_rrq_active(phba, ndlp, lxri, 17449 (xri == oxid) ? rxid : oxid, 0); 17450 /* For BA_ABTS from exchange responder, if the logical xri with 17451 * the oxid maps to the FCP XRI range, the port no longer has 17452 * that exchange context, send a BLS_RJT. Override the IOCB for 17453 * a BA_RJT. 17454 */ 17455 if ((fctl & FC_FC_EX_CTX) && 17456 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17457 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17458 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17459 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17460 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17461 } 17462 17463 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17464 * the driver no longer has that exchange, send a BLS_RJT. Override 17465 * the IOCB for a BA_RJT. 17466 */ 17467 if (aborted == false) { 17468 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17469 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17470 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17471 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17472 } 17473 17474 if (fctl & FC_FC_EX_CTX) { 17475 /* ABTS sent by responder to CT exchange, construction 17476 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17477 * field and RX_ID from ABTS for RX_ID field. 17478 */ 17479 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17480 } else { 17481 /* ABTS sent by initiator to CT exchange, construction 17482 * of BA_ACC will need to allocate a new XRI as for the 17483 * XRI_TAG field. 17484 */ 17485 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17486 } 17487 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17488 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17489 17490 /* Xmit CT abts response on exchange <xid> */ 17491 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17492 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17493 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17494 17495 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17496 if (rc == IOCB_ERROR) { 17497 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17498 "2925 Failed to issue CT ABTS RSP x%x on " 17499 "xri x%x, Data x%x\n", 17500 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17501 phba->link_state); 17502 lpfc_nlp_put(ndlp); 17503 ctiocb->context1 = NULL; 17504 lpfc_sli_release_iocbq(phba, ctiocb); 17505 } 17506 } 17507 17508 /** 17509 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17510 * @vport: Pointer to the vport on which this sequence was received 17511 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17512 * 17513 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17514 * receive sequence is only partially assembed by the driver, it shall abort 17515 * the partially assembled frames for the sequence. 
Otherwise, if the 17516 * unsolicited receive sequence has been completely assembled and passed to 17517 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the 17518 * unsolicited sequence as aborted. After that, it will issue a basic 17519 * accept to accept the abort. 17520 **/ 17521 static void 17522 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 17523 struct hbq_dmabuf *dmabuf) 17524 { 17525 struct lpfc_hba *phba = vport->phba; 17526 struct fc_frame_header fc_hdr; 17527 uint32_t fctl; 17528 bool aborted; 17529 17530 /* Make a copy of fc_hdr before the dmabuf being released */ 17531 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 17532 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 17533 17534 if (fctl & FC_FC_EX_CTX) { 17535 /* ABTS by responder to exchange, no cleanup needed */ 17536 aborted = true; 17537 } else { 17538 /* ABTS by initiator to exchange, need to do cleanup */ 17539 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 17540 if (aborted == false) 17541 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 17542 } 17543 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17544 17545 if (phba->nvmet_support) { 17546 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); 17547 return; 17548 } 17549 17550 /* Respond with BA_ACC or BA_RJT accordingly */ 17551 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 17552 } 17553 17554 /** 17555 * lpfc_seq_complete - Indicates if a sequence is complete 17556 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17557 * 17558 * This function checks the sequence, starting with the frame described by 17559 * @dmabuf, to see if all the frames associated with this sequence are present. 17560 * The frames associated with this sequence are linked to the @dmabuf using the 17561 * dbuf list. This function looks for three major things: 1) that the first frame 17562 * has a sequence count of zero; 2) that there is a frame with the last frame of sequence 17563 * bit set; 3) that there are no holes in the sequence count. The function will 17564 * return 1 when the sequence is complete, otherwise it will return 0. 17565 **/ 17566 static int 17567 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 17568 { 17569 struct fc_frame_header *hdr; 17570 struct lpfc_dmabuf *d_buf; 17571 struct hbq_dmabuf *seq_dmabuf; 17572 uint32_t fctl; 17573 int seq_count = 0; 17574 17575 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17576 /* make sure the first frame of the sequence has a sequence count of zero */ 17577 if (hdr->fh_seq_cnt != seq_count) 17578 return 0; 17579 fctl = (hdr->fh_f_ctl[0] << 16 | 17580 hdr->fh_f_ctl[1] << 8 | 17581 hdr->fh_f_ctl[2]); 17582 /* If last frame of sequence we can return success. */ 17583 if (fctl & FC_FC_END_SEQ) 17584 return 1; 17585 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 17586 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17587 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17588 /* If there is a hole in the sequence count then fail. */ 17589 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 17590 return 0; 17591 fctl = (hdr->fh_f_ctl[0] << 16 | 17592 hdr->fh_f_ctl[1] << 8 | 17593 hdr->fh_f_ctl[2]); 17594 /* If last frame of sequence we can return success.
*/ 17595 if (fctl & FC_FC_END_SEQ) 17596 return 1; 17597 } 17598 return 0; 17599 } 17600 17601 /** 17602 * lpfc_prep_seq - Prep sequence for ULP processing 17603 * @vport: Pointer to the vport on which this sequence was received 17604 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17605 * 17606 * This function takes a sequence, described by a list of frames, and creates 17607 * a list of iocbq structures to describe the sequence. This iocbq list will be 17608 * issued to the generic unsolicited sequence handler. This routine 17609 * returns a pointer to the first iocbq in the list. If the function is unable 17610 * to allocate an iocbq then it will throw out the received frames that were not 17611 * able to be described and return a pointer to the first iocbq. If unable to 17612 * allocate any iocbqs (including the first) this function will return NULL. 17613 **/ 17614 static struct lpfc_iocbq * 17615 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 17616 { 17617 struct hbq_dmabuf *hbq_buf; 17618 struct lpfc_dmabuf *d_buf, *n_buf; 17619 struct lpfc_iocbq *first_iocbq, *iocbq; 17620 struct fc_frame_header *fc_hdr; 17621 uint32_t sid; 17622 uint32_t len, tot_len; 17623 struct ulp_bde64 *pbde; 17624 17625 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17626 /* remove from receive buffer list */ 17627 list_del_init(&seq_dmabuf->hbuf.list); 17628 lpfc_update_rcv_time_stamp(vport); 17629 /* get the Remote Port's SID */ 17630 sid = sli4_sid_from_fc_hdr(fc_hdr); 17631 tot_len = 0; 17632 /* Get an iocbq struct to fill in. */ 17633 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 17634 if (first_iocbq) { 17635 /* Initialize the first IOCB. */ 17636 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 17637 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 17638 first_iocbq->vport = vport; 17639 17640 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 17641 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 17642 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 17643 first_iocbq->iocb.un.rcvels.parmRo = 17644 sli4_did_from_fc_hdr(fc_hdr); 17645 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 17646 } else 17647 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 17648 first_iocbq->iocb.ulpContext = NO_XRI; 17649 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17650 be16_to_cpu(fc_hdr->fh_ox_id); 17651 /* iocbq is prepped for internal consumption. Physical vpi.
*/ 17652 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17653 vport->phba->vpi_ids[vport->vpi]; 17654 /* put the first buffer into the first IOCBq */ 17655 tot_len = bf_get(lpfc_rcqe_length, 17656 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17657 17658 first_iocbq->context2 = &seq_dmabuf->dbuf; 17659 first_iocbq->context3 = NULL; 17660 first_iocbq->iocb.ulpBdeCount = 1; 17661 if (tot_len > LPFC_DATA_BUF_SIZE) 17662 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17663 LPFC_DATA_BUF_SIZE; 17664 else 17665 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17666 17667 first_iocbq->iocb.un.rcvels.remoteID = sid; 17668 17669 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17670 } 17671 iocbq = first_iocbq; 17672 /* 17673 * Each IOCBq can have two Buffers assigned, so go through the list 17674 * of buffers for this sequence and save two buffers in each IOCBq 17675 */ 17676 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17677 if (!iocbq) { 17678 lpfc_in_buf_free(vport->phba, d_buf); 17679 continue; 17680 } 17681 if (!iocbq->context3) { 17682 iocbq->context3 = d_buf; 17683 iocbq->iocb.ulpBdeCount++; 17684 /* We need to get the size out of the right CQE */ 17685 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17686 len = bf_get(lpfc_rcqe_length, 17687 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17688 pbde = (struct ulp_bde64 *) 17689 &iocbq->iocb.unsli3.sli3Words[4]; 17690 if (len > LPFC_DATA_BUF_SIZE) 17691 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17692 else 17693 pbde->tus.f.bdeSize = len; 17694 17695 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17696 tot_len += len; 17697 } else { 17698 iocbq = lpfc_sli_get_iocbq(vport->phba); 17699 if (!iocbq) { 17700 if (first_iocbq) { 17701 first_iocbq->iocb.ulpStatus = 17702 IOSTAT_FCP_RSP_ERROR; 17703 first_iocbq->iocb.un.ulpWord[4] = 17704 IOERR_NO_RESOURCES; 17705 } 17706 lpfc_in_buf_free(vport->phba, d_buf); 17707 continue; 17708 } 17709 /* We need to get the size out of the right CQE */ 17710 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17711 len = bf_get(lpfc_rcqe_length, 17712 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17713 iocbq->context2 = d_buf; 17714 iocbq->context3 = NULL; 17715 iocbq->iocb.ulpBdeCount = 1; 17716 if (len > LPFC_DATA_BUF_SIZE) 17717 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17718 LPFC_DATA_BUF_SIZE; 17719 else 17720 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17721 17722 tot_len += len; 17723 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17724 17725 iocbq->iocb.un.rcvels.remoteID = sid; 17726 list_add_tail(&iocbq->list, &first_iocbq->list); 17727 } 17728 } 17729 return first_iocbq; 17730 } 17731 17732 static void 17733 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17734 struct hbq_dmabuf *seq_dmabuf) 17735 { 17736 struct fc_frame_header *fc_hdr; 17737 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17738 struct lpfc_hba *phba = vport->phba; 17739 17740 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17741 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17742 if (!iocbq) { 17743 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17744 "2707 Ring %d handler: Failed to allocate " 17745 "iocb Rctl x%x Type x%x received\n", 17746 LPFC_ELS_RING, 17747 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17748 return; 17749 } 17750 if (!lpfc_complete_unsol_iocb(phba, 17751 phba->sli4_hba.els_wq->pring, 17752 iocbq, fc_hdr->fh_r_ctl, 17753 fc_hdr->fh_type)) 17754 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17755 "2540 Ring %d handler: unexpected Rctl " 17756 "x%x Type x%x received\n", 17757 LPFC_ELS_RING, 17758 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17759 17760 /* Free iocb created in lpfc_prep_seq */ 17761 list_for_each_entry_safe(curr_iocb, next_iocb, 17762 &iocbq->list, list) { 17763 list_del_init(&curr_iocb->list); 17764 lpfc_sli_release_iocbq(phba, curr_iocb); 17765 } 17766 lpfc_sli_release_iocbq(phba, iocbq); 17767 } 17768 17769 static void 17770 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17771 struct lpfc_iocbq *rspiocb) 17772 { 17773 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17774 17775 if (pcmd && pcmd->virt) 17776 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17777 kfree(pcmd); 17778 lpfc_sli_release_iocbq(phba, cmdiocb); 17779 lpfc_drain_txq(phba); 17780 } 17781 17782 static void 17783 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17784 struct hbq_dmabuf *dmabuf) 17785 { 17786 struct fc_frame_header *fc_hdr; 17787 struct lpfc_hba *phba = vport->phba; 17788 struct lpfc_iocbq *iocbq = NULL; 17789 union lpfc_wqe *wqe; 17790 struct lpfc_dmabuf *pcmd = NULL; 17791 uint32_t frame_len; 17792 int rc; 17793 unsigned long iflags; 17794 17795 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17796 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17797 17798 /* Send the received frame back */ 17799 iocbq = lpfc_sli_get_iocbq(phba); 17800 if (!iocbq) { 17801 /* Queue cq event and wakeup worker thread to process it */ 17802 spin_lock_irqsave(&phba->hbalock, iflags); 17803 list_add_tail(&dmabuf->cq_event.list, 17804 &phba->sli4_hba.sp_queue_event); 17805 phba->hba_flag |= HBA_SP_QUEUE_EVT; 17806 spin_unlock_irqrestore(&phba->hbalock, iflags); 17807 lpfc_worker_wake_up(phba); 17808 return; 17809 } 17810 17811 /* Allocate buffer for command payload */ 17812 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17813 if (pcmd) 17814 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17815 &pcmd->phys); 17816 if (!pcmd || !pcmd->virt) 17817 goto exit; 17818 17819 INIT_LIST_HEAD(&pcmd->list); 17820 17821 /* copyin the payload */ 17822 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17823 17824 /* fill in BDE's for command */ 17825 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17826 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17827 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17828 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17829 17830 iocbq->context2 = pcmd; 17831 iocbq->vport = vport; 17832 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17833 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17834 17835 /* 17836 * Setup rest of the iocb as though it were a WQE 17837 * Build the SEND_FRAME WQE 17838 */ 17839 wqe = (union lpfc_wqe *)&iocbq->iocb; 17840 17841 wqe->send_frame.frame_len = frame_len; 17842 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17843 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17844 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17845 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17846 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17847 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17848 17849 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17850 iocbq->iocb.ulpLe = 1; 17851 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17852 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17853 if (rc == IOCB_ERROR) 17854 goto exit; 17855 17856 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17857 return; 17858 17859 exit: 17860 lpfc_printf_log(phba, 
KERN_WARNING, LOG_SLI, 17861 "2023 Unable to process MDS loopback frame\n"); 17862 if (pcmd && pcmd->virt) 17863 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17864 kfree(pcmd); 17865 if (iocbq) 17866 lpfc_sli_release_iocbq(phba, iocbq); 17867 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17868 } 17869 17870 /** 17871 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17872 * @phba: Pointer to HBA context object. 17873 * 17874 * This function is called with no lock held. This function processes all 17875 * the received buffers and gives it to upper layers when a received buffer 17876 * indicates that it is the final frame in the sequence. The interrupt 17877 * service routine processes received buffers at interrupt contexts. 17878 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17879 * appropriate receive function when the final frame in a sequence is received. 17880 **/ 17881 void 17882 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17883 struct hbq_dmabuf *dmabuf) 17884 { 17885 struct hbq_dmabuf *seq_dmabuf; 17886 struct fc_frame_header *fc_hdr; 17887 struct lpfc_vport *vport; 17888 uint32_t fcfi; 17889 uint32_t did; 17890 17891 /* Process each received buffer */ 17892 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17893 17894 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 17895 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 17896 vport = phba->pport; 17897 /* Handle MDS Loopback frames */ 17898 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17899 return; 17900 } 17901 17902 /* check to see if this a valid type of frame */ 17903 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17904 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17905 return; 17906 } 17907 17908 if ((bf_get(lpfc_cqe_code, 17909 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17910 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17911 &dmabuf->cq_event.cqe.rcqe_cmpl); 17912 else 17913 fcfi = bf_get(lpfc_rcqe_fcf_id, 17914 &dmabuf->cq_event.cqe.rcqe_cmpl); 17915 17916 /* d_id this frame is directed to */ 17917 did = sli4_did_from_fc_hdr(fc_hdr); 17918 17919 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17920 if (!vport) { 17921 /* throw out the frame */ 17922 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17923 return; 17924 } 17925 17926 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17927 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17928 (did != Fabric_DID)) { 17929 /* 17930 * Throw out the frame if we are not pt2pt. 17931 * The pt2pt protocol allows for discovery frames 17932 * to be received without a registered VPI. 17933 */ 17934 if (!(vport->fc_flag & FC_PT2PT) || 17935 (phba->link_state == LPFC_HBA_READY)) { 17936 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17937 return; 17938 } 17939 } 17940 17941 /* Handle the basic abort sequence (BA_ABTS) event */ 17942 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17943 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17944 return; 17945 } 17946 17947 /* Link this frame */ 17948 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17949 if (!seq_dmabuf) { 17950 /* unable to add frame to vport - throw it out */ 17951 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17952 return; 17953 } 17954 /* If not last frame in sequence continue processing frames. 
*/ 17955 if (!lpfc_seq_complete(seq_dmabuf)) 17956 return; 17957 17958 /* Send the complete sequence to the upper layer protocol */ 17959 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17960 } 17961 17962 /** 17963 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17964 * @phba: pointer to lpfc hba data structure. 17965 * 17966 * This routine is invoked to post rpi header templates to the 17967 * HBA consistent with the SLI-4 interface spec. This routine 17968 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17969 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17970 * 17971 * This routine does not require any locks. It's usage is expected 17972 * to be driver load or reset recovery when the driver is 17973 * sequential. 17974 * 17975 * Return codes 17976 * 0 - successful 17977 * -EIO - The mailbox failed to complete successfully. 17978 * When this error occurs, the driver is not guaranteed 17979 * to have any rpi regions posted to the device and 17980 * must either attempt to repost the regions or take a 17981 * fatal error. 17982 **/ 17983 int 17984 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17985 { 17986 struct lpfc_rpi_hdr *rpi_page; 17987 uint32_t rc = 0; 17988 uint16_t lrpi = 0; 17989 17990 /* SLI4 ports that support extents do not require RPI headers. */ 17991 if (!phba->sli4_hba.rpi_hdrs_in_use) 17992 goto exit; 17993 if (phba->sli4_hba.extents_in_use) 17994 return -EIO; 17995 17996 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17997 /* 17998 * Assign the rpi headers a physical rpi only if the driver 17999 * has not initialized those resources. A port reset only 18000 * needs the headers posted. 18001 */ 18002 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 18003 LPFC_RPI_RSRC_RDY) 18004 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18005 18006 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 18007 if (rc != MBX_SUCCESS) { 18008 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18009 "2008 Error %d posting all rpi " 18010 "headers\n", rc); 18011 rc = -EIO; 18012 break; 18013 } 18014 } 18015 18016 exit: 18017 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 18018 LPFC_RPI_RSRC_RDY); 18019 return rc; 18020 } 18021 18022 /** 18023 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 18024 * @phba: pointer to lpfc hba data structure. 18025 * @rpi_page: pointer to the rpi memory region. 18026 * 18027 * This routine is invoked to post a single rpi header to the 18028 * HBA consistent with the SLI-4 interface spec. This memory region 18029 * maps up to 64 rpi context regions. 18030 * 18031 * Return codes 18032 * 0 - successful 18033 * -ENOMEM - No available memory 18034 * -EIO - The mailbox failed to complete successfully. 18035 **/ 18036 int 18037 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 18038 { 18039 LPFC_MBOXQ_t *mboxq; 18040 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 18041 uint32_t rc = 0; 18042 uint32_t shdr_status, shdr_add_status; 18043 union lpfc_sli4_cfg_shdr *shdr; 18044 18045 /* SLI4 ports that support extents do not require RPI headers. */ 18046 if (!phba->sli4_hba.rpi_hdrs_in_use) 18047 return rc; 18048 if (phba->sli4_hba.extents_in_use) 18049 return -EIO; 18050 18051 /* The port is notified of the header region via a mailbox command. 
*/ 18052 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18053 if (!mboxq) { 18054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18055 "2001 Unable to allocate memory for issuing " 18056 "SLI_CONFIG_SPECIAL mailbox command\n"); 18057 return -ENOMEM; 18058 } 18059 18060 /* Post all rpi memory regions to the port. */ 18061 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 18062 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18063 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 18064 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 18065 sizeof(struct lpfc_sli4_cfg_mhdr), 18066 LPFC_SLI4_MBX_EMBED); 18067 18068 18069 /* Post the physical rpi to the port for this rpi header. */ 18070 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 18071 rpi_page->start_rpi); 18072 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 18073 hdr_tmpl, rpi_page->page_count); 18074 18075 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 18076 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 18077 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 18078 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 18079 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18080 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18081 if (rc != MBX_TIMEOUT) 18082 mempool_free(mboxq, phba->mbox_mem_pool); 18083 if (shdr_status || shdr_add_status || rc) { 18084 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18085 "2514 POST_RPI_HDR mailbox failed with " 18086 "status x%x add_status x%x, mbx status x%x\n", 18087 shdr_status, shdr_add_status, rc); 18088 rc = -ENXIO; 18089 } else { 18090 /* 18091 * The next_rpi stores the next logical module-64 rpi value used 18092 * to post physical rpis in subsequent rpi postings. 18093 */ 18094 spin_lock_irq(&phba->hbalock); 18095 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 18096 spin_unlock_irq(&phba->hbalock); 18097 } 18098 return rc; 18099 } 18100 18101 /** 18102 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 18103 * @phba: pointer to lpfc hba data structure. 18104 * 18105 * This routine is invoked to post rpi header templates to the 18106 * HBA consistent with the SLI-4 interface spec. This routine 18107 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 18108 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 18109 * 18110 * Returns 18111 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 18112 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 18113 **/ 18114 int 18115 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 18116 { 18117 unsigned long rpi; 18118 uint16_t max_rpi, rpi_limit; 18119 uint16_t rpi_remaining, lrpi = 0; 18120 struct lpfc_rpi_hdr *rpi_hdr; 18121 unsigned long iflag; 18122 18123 /* 18124 * Fetch the next logical rpi. Because this index is logical, 18125 * the driver starts at 0 each time. 18126 */ 18127 spin_lock_irqsave(&phba->hbalock, iflag); 18128 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 18129 rpi_limit = phba->sli4_hba.next_rpi; 18130 18131 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 18132 if (rpi >= rpi_limit) 18133 rpi = LPFC_RPI_ALLOC_ERROR; 18134 else { 18135 set_bit(rpi, phba->sli4_hba.rpi_bmask); 18136 phba->sli4_hba.max_cfg_param.rpi_used++; 18137 phba->sli4_hba.rpi_count++; 18138 } 18139 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 18140 "0001 rpi:%x max:%x lim:%x\n", 18141 (int) rpi, max_rpi, rpi_limit); 18142 18143 /* 18144 * Don't try to allocate more rpi header regions if the device limit 18145 * has been exhausted. 
18146 */ 18147 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 18148 (phba->sli4_hba.rpi_count >= max_rpi)) { 18149 spin_unlock_irqrestore(&phba->hbalock, iflag); 18150 return rpi; 18151 } 18152 18153 /* 18154 * RPI header postings are not required for SLI4 ports capable of 18155 * extents. 18156 */ 18157 if (!phba->sli4_hba.rpi_hdrs_in_use) { 18158 spin_unlock_irqrestore(&phba->hbalock, iflag); 18159 return rpi; 18160 } 18161 18162 /* 18163 * If the driver is running low on rpi resources, allocate another 18164 * page now. Note that the next_rpi value is used because 18165 * it represents how many are actually in use whereas max_rpi notes 18166 * how many are supported max by the device. 18167 */ 18168 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 18169 spin_unlock_irqrestore(&phba->hbalock, iflag); 18170 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 18171 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 18172 if (!rpi_hdr) { 18173 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18174 "2002 Error Could not grow rpi " 18175 "count\n"); 18176 } else { 18177 lrpi = rpi_hdr->start_rpi; 18178 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18179 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 18180 } 18181 } 18182 18183 return rpi; 18184 } 18185 18186 /** 18187 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18188 * @phba: pointer to lpfc hba data structure. 18189 * 18190 * This routine is invoked to release an rpi to the pool of 18191 * available rpis maintained by the driver. 18192 **/ 18193 static void 18194 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18195 { 18196 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 18197 phba->sli4_hba.rpi_count--; 18198 phba->sli4_hba.max_cfg_param.rpi_used--; 18199 } 18200 } 18201 18202 /** 18203 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18204 * @phba: pointer to lpfc hba data structure. 18205 * 18206 * This routine is invoked to release an rpi to the pool of 18207 * available rpis maintained by the driver. 18208 **/ 18209 void 18210 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18211 { 18212 spin_lock_irq(&phba->hbalock); 18213 __lpfc_sli4_free_rpi(phba, rpi); 18214 spin_unlock_irq(&phba->hbalock); 18215 } 18216 18217 /** 18218 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 18219 * @phba: pointer to lpfc hba data structure. 18220 * 18221 * This routine is invoked to remove the memory region that 18222 * provided rpi via a bitmask. 18223 **/ 18224 void 18225 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 18226 { 18227 kfree(phba->sli4_hba.rpi_bmask); 18228 kfree(phba->sli4_hba.rpi_ids); 18229 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 18230 } 18231 18232 /** 18233 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 18234 * @phba: pointer to lpfc hba data structure. 18235 * 18236 * This routine is invoked to remove the memory region that 18237 * provided rpi via a bitmask. 18238 **/ 18239 int 18240 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 18241 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 18242 { 18243 LPFC_MBOXQ_t *mboxq; 18244 struct lpfc_hba *phba = ndlp->phba; 18245 int rc; 18246 18247 /* The port is notified of the header region via a mailbox command. */ 18248 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18249 if (!mboxq) 18250 return -ENOMEM; 18251 18252 /* Post all rpi memory regions to the port. 
*/ 18253 lpfc_resume_rpi(mboxq, ndlp); 18254 if (cmpl) { 18255 mboxq->mbox_cmpl = cmpl; 18256 mboxq->ctx_buf = arg; 18257 mboxq->ctx_ndlp = ndlp; 18258 } else 18259 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 18260 mboxq->vport = ndlp->vport; 18261 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18262 if (rc == MBX_NOT_FINISHED) { 18263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18264 "2010 Resume RPI Mailbox failed " 18265 "status %d, mbxStatus x%x\n", rc, 18266 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18267 mempool_free(mboxq, phba->mbox_mem_pool); 18268 return -EIO; 18269 } 18270 return 0; 18271 } 18272 18273 /** 18274 * lpfc_sli4_init_vpi - Initialize a vpi with the port 18275 * @vport: Pointer to the vport for which the vpi is being initialized 18276 * 18277 * This routine is invoked to activate a vpi with the port. 18278 * 18279 * Returns: 18280 * 0 success 18281 * -Evalue otherwise 18282 **/ 18283 int 18284 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 18285 { 18286 LPFC_MBOXQ_t *mboxq; 18287 int rc = 0; 18288 int retval = MBX_SUCCESS; 18289 uint32_t mbox_tmo; 18290 struct lpfc_hba *phba = vport->phba; 18291 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18292 if (!mboxq) 18293 return -ENOMEM; 18294 lpfc_init_vpi(phba, mboxq, vport->vpi); 18295 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18296 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18297 if (rc != MBX_SUCCESS) { 18298 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18299 "2022 INIT VPI Mailbox failed " 18300 "status %d, mbxStatus x%x\n", rc, 18301 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18302 retval = -EIO; 18303 } 18304 if (rc != MBX_TIMEOUT) 18305 mempool_free(mboxq, vport->phba->mbox_mem_pool); 18306 18307 return retval; 18308 } 18309 18310 /** 18311 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 18312 * @phba: pointer to lpfc hba data structure. 18313 * @mboxq: Pointer to mailbox object. 18314 * 18315 * This routine is invoked to manually add a single FCF record. The caller 18316 * must pass a completely initialized FCF_Record. This routine takes 18317 * care of the nonembedded mailbox operations. 18318 **/ 18319 static void 18320 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 18321 { 18322 void *virt_addr; 18323 union lpfc_sli4_cfg_shdr *shdr; 18324 uint32_t shdr_status, shdr_add_status; 18325 18326 virt_addr = mboxq->sge_array->addr[0]; 18327 /* The IOCTL status is embedded in the mailbox subheader. */ 18328 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18329 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18330 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18331 18332 if ((shdr_status || shdr_add_status) && 18333 (shdr_status != STATUS_FCF_IN_USE)) 18334 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18335 "2558 ADD_FCF_RECORD mailbox failed with " 18336 "status x%x add_status x%x\n", 18337 shdr_status, shdr_add_status); 18338 18339 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18340 } 18341 18342 /** 18343 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18344 * @phba: pointer to lpfc hba data structure. 18345 * @fcf_record: pointer to the initialized fcf record to add. 18346 * 18347 * This routine is invoked to manually add a single FCF record. The caller 18348 * must pass a completely initialized FCF_Record. This routine takes 18349 * care of the nonembedded mailbox operations. 
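 *
 * A minimal usage sketch (illustrative only; fcf_index here is a
 * caller-chosen placeholder value):
 *
 *	struct fcf_record fcf_record;
 *	int rc;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);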
18350 **/ 18351 int 18352 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18353 { 18354 int rc = 0; 18355 LPFC_MBOXQ_t *mboxq; 18356 uint8_t *bytep; 18357 void *virt_addr; 18358 struct lpfc_mbx_sge sge; 18359 uint32_t alloc_len, req_len; 18360 uint32_t fcfindex; 18361 18362 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18363 if (!mboxq) { 18364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18365 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18366 return -ENOMEM; 18367 } 18368 18369 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18370 sizeof(uint32_t); 18371 18372 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18373 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18374 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18375 req_len, LPFC_SLI4_MBX_NEMBED); 18376 if (alloc_len < req_len) { 18377 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18378 "2523 Allocated DMA memory size (x%x) is " 18379 "less than the requested DMA memory " 18380 "size (x%x)\n", alloc_len, req_len); 18381 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18382 return -ENOMEM; 18383 } 18384 18385 /* 18386 * Get the first SGE entry from the non-embedded DMA memory. This 18387 * routine only uses a single SGE. 18388 */ 18389 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18390 virt_addr = mboxq->sge_array->addr[0]; 18391 /* 18392 * Configure the FCF record for FCFI 0. This is the driver's 18393 * hardcoded default and gets used in nonFIP mode. 18394 */ 18395 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18396 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18397 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18398 18399 /* 18400 * Copy the fcf_index and the FCF Record Data. The data starts after 18401 * the FCoE header plus word10. The data copy needs to be endian 18402 * correct. 18403 */ 18404 bytep += sizeof(uint32_t); 18405 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18406 mboxq->vport = phba->pport; 18407 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18408 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18409 if (rc == MBX_NOT_FINISHED) { 18410 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18411 "2515 ADD_FCF_RECORD mailbox failed with " 18412 "status 0x%x\n", rc); 18413 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18414 rc = -EIO; 18415 } else 18416 rc = 0; 18417 18418 return rc; 18419 } 18420 18421 /** 18422 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18423 * @phba: pointer to lpfc hba data structure. 18424 * @fcf_record: pointer to the fcf record to write the default data. 18425 * @fcf_index: FCF table entry index. 18426 * 18427 * This routine is invoked to build the driver's default FCF record. The 18428 * values used are hardcoded. This routine handles memory initialization. 
18429 * 18430 **/ 18431 void 18432 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 18433 struct fcf_record *fcf_record, 18434 uint16_t fcf_index) 18435 { 18436 memset(fcf_record, 0, sizeof(struct fcf_record)); 18437 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 18438 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 18439 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 18440 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 18441 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 18442 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 18443 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 18444 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 18445 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 18446 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 18447 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 18448 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 18449 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 18450 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 18451 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 18452 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 18453 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 18454 /* Set the VLAN bit map */ 18455 if (phba->valid_vlan) { 18456 fcf_record->vlan_bitmap[phba->vlan_id / 8] 18457 = 1 << (phba->vlan_id % 8); 18458 } 18459 } 18460 18461 /** 18462 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 18463 * @phba: pointer to lpfc hba data structure. 18464 * @fcf_index: FCF table entry offset. 18465 * 18466 * This routine is invoked to scan the entire FCF table by reading FCF 18467 * records and processing them one at a time starting from the @fcf_index 18468 * for initial FCF discovery or fast FCF failover rediscovery. 18469 * 18470 * Returns 0 if the mailbox command is submitted successfully, non-zero 18471 * otherwise.
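 *
 * A caller sketch (illustrative only): start a fresh scan from the first
 * FCF table entry and treat a non-zero return as a failure to start the
 * scan:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *	if (rc)
 *		return rc;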
18472 **/ 18473 int 18474 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18475 { 18476 int rc = 0, error; 18477 LPFC_MBOXQ_t *mboxq; 18478 18479 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18480 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18481 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18482 if (!mboxq) { 18483 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18484 "2000 Failed to allocate mbox for " 18485 "READ_FCF cmd\n"); 18486 error = -ENOMEM; 18487 goto fail_fcf_scan; 18488 } 18489 /* Construct the read FCF record mailbox command */ 18490 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18491 if (rc) { 18492 error = -EINVAL; 18493 goto fail_fcf_scan; 18494 } 18495 /* Issue the mailbox command asynchronously */ 18496 mboxq->vport = phba->pport; 18497 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18498 18499 spin_lock_irq(&phba->hbalock); 18500 phba->hba_flag |= FCF_TS_INPROG; 18501 spin_unlock_irq(&phba->hbalock); 18502 18503 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18504 if (rc == MBX_NOT_FINISHED) 18505 error = -EIO; 18506 else { 18507 /* Reset eligible FCF count for new scan */ 18508 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18509 phba->fcf.eligible_fcf_cnt = 0; 18510 error = 0; 18511 } 18512 fail_fcf_scan: 18513 if (error) { 18514 if (mboxq) 18515 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18516 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18517 spin_lock_irq(&phba->hbalock); 18518 phba->hba_flag &= ~FCF_TS_INPROG; 18519 spin_unlock_irq(&phba->hbalock); 18520 } 18521 return error; 18522 } 18523 18524 /** 18525 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18526 * @phba: pointer to lpfc hba data structure. 18527 * @fcf_index: FCF table entry offset. 18528 * 18529 * This routine is invoked to read an FCF record indicated by @fcf_index 18530 * and to use it for FLOGI roundrobin FCF failover. 18531 * 18532 * Return 0 if the mailbox command is submitted successfully, none 0 18533 * otherwise. 18534 **/ 18535 int 18536 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18537 { 18538 int rc = 0, error; 18539 LPFC_MBOXQ_t *mboxq; 18540 18541 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18542 if (!mboxq) { 18543 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18544 "2763 Failed to allocate mbox for " 18545 "READ_FCF cmd\n"); 18546 error = -ENOMEM; 18547 goto fail_fcf_read; 18548 } 18549 /* Construct the read FCF record mailbox command */ 18550 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18551 if (rc) { 18552 error = -EINVAL; 18553 goto fail_fcf_read; 18554 } 18555 /* Issue the mailbox command asynchronously */ 18556 mboxq->vport = phba->pport; 18557 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18558 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18559 if (rc == MBX_NOT_FINISHED) 18560 error = -EIO; 18561 else 18562 error = 0; 18563 18564 fail_fcf_read: 18565 if (error && mboxq) 18566 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18567 return error; 18568 } 18569 18570 /** 18571 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18572 * @phba: pointer to lpfc hba data structure. 18573 * @fcf_index: FCF table entry offset. 18574 * 18575 * This routine is invoked to read an FCF record indicated by @fcf_index to 18576 * determine whether it's eligible for FLOGI roundrobin failover list. 18577 * 18578 * Return 0 if the mailbox command is submitted successfully, none 0 18579 * otherwise. 
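 *
 * Note: apart from the completion handler installed (lpfc_mbx_cmpl_read_fcf_rec
 * here, versus lpfc_mbx_cmpl_fcf_scan_read_fcf_rec and
 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec above) and the scan-progress bookkeeping
 * done by the scan variant, the three READ_FCF submission paths above are
 * identical.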
18580 **/ 18581 int 18582 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18583 { 18584 int rc = 0, error; 18585 LPFC_MBOXQ_t *mboxq; 18586 18587 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18588 if (!mboxq) { 18589 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18590 "2758 Failed to allocate mbox for " 18591 "READ_FCF cmd\n"); 18592 error = -ENOMEM; 18593 goto fail_fcf_read; 18594 } 18595 /* Construct the read FCF record mailbox command */ 18596 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18597 if (rc) { 18598 error = -EINVAL; 18599 goto fail_fcf_read; 18600 } 18601 /* Issue the mailbox command asynchronously */ 18602 mboxq->vport = phba->pport; 18603 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 18604 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18605 if (rc == MBX_NOT_FINISHED) 18606 error = -EIO; 18607 else 18608 error = 0; 18609 18610 fail_fcf_read: 18611 if (error && mboxq) 18612 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18613 return error; 18614 } 18615 18616 /** 18617 * lpfc_check_next_fcf_pri_level 18618 * phba pointer to the lpfc_hba struct for this port. 18619 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 18620 * routine when the rr_bmask is empty. The FCF indecies are put into the 18621 * rr_bmask based on their priority level. Starting from the highest priority 18622 * to the lowest. The most likely FCF candidate will be in the highest 18623 * priority group. When this routine is called it searches the fcf_pri list for 18624 * next lowest priority group and repopulates the rr_bmask with only those 18625 * fcf_indexes. 18626 * returns: 18627 * 1=success 0=failure 18628 **/ 18629 static int 18630 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 18631 { 18632 uint16_t next_fcf_pri; 18633 uint16_t last_index; 18634 struct lpfc_fcf_pri *fcf_pri; 18635 int rc; 18636 int ret = 0; 18637 18638 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 18639 LPFC_SLI4_FCF_TBL_INDX_MAX); 18640 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18641 "3060 Last IDX %d\n", last_index); 18642 18643 /* Verify the priority list has 2 or more entries */ 18644 spin_lock_irq(&phba->hbalock); 18645 if (list_empty(&phba->fcf.fcf_pri_list) || 18646 list_is_singular(&phba->fcf.fcf_pri_list)) { 18647 spin_unlock_irq(&phba->hbalock); 18648 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18649 "3061 Last IDX %d\n", last_index); 18650 return 0; /* Empty rr list */ 18651 } 18652 spin_unlock_irq(&phba->hbalock); 18653 18654 next_fcf_pri = 0; 18655 /* 18656 * Clear the rr_bmask and set all of the bits that are at this 18657 * priority. 18658 */ 18659 memset(phba->fcf.fcf_rr_bmask, 0, 18660 sizeof(*phba->fcf.fcf_rr_bmask)); 18661 spin_lock_irq(&phba->hbalock); 18662 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18663 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 18664 continue; 18665 /* 18666 * the 1st priority that has not FLOGI failed 18667 * will be the highest. 18668 */ 18669 if (!next_fcf_pri) 18670 next_fcf_pri = fcf_pri->fcf_rec.priority; 18671 spin_unlock_irq(&phba->hbalock); 18672 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18673 rc = lpfc_sli4_fcf_rr_index_set(phba, 18674 fcf_pri->fcf_rec.fcf_index); 18675 if (rc) 18676 return 0; 18677 } 18678 spin_lock_irq(&phba->hbalock); 18679 } 18680 /* 18681 * if next_fcf_pri was not set above and the list is not empty then 18682 * we have failed flogis on all of them. So reset flogi failed 18683 * and start at the beginning. 
18684 */ 18685 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18686 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18687 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18688 /* 18689 * the 1st priority that has not FLOGI failed 18690 * will be the highest. 18691 */ 18692 if (!next_fcf_pri) 18693 next_fcf_pri = fcf_pri->fcf_rec.priority; 18694 spin_unlock_irq(&phba->hbalock); 18695 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18696 rc = lpfc_sli4_fcf_rr_index_set(phba, 18697 fcf_pri->fcf_rec.fcf_index); 18698 if (rc) 18699 return 0; 18700 } 18701 spin_lock_irq(&phba->hbalock); 18702 } 18703 } else 18704 ret = 1; 18705 spin_unlock_irq(&phba->hbalock); 18706 18707 return ret; 18708 } 18709 /** 18710 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18711 * @phba: pointer to lpfc hba data structure. 18712 * 18713 * This routine is to get the next eligible FCF record index in a round 18714 * robin fashion. If the next eligible FCF record index equals to the 18715 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18716 * shall be returned, otherwise, the next eligible FCF record's index 18717 * shall be returned. 18718 **/ 18719 uint16_t 18720 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18721 { 18722 uint16_t next_fcf_index; 18723 18724 initial_priority: 18725 /* Search start from next bit of currently registered FCF index */ 18726 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18727 18728 next_priority: 18729 /* Determine the next fcf index to check */ 18730 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18731 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18732 LPFC_SLI4_FCF_TBL_INDX_MAX, 18733 next_fcf_index); 18734 18735 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18736 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18737 /* 18738 * If we have wrapped then we need to clear the bits that 18739 * have been tested so that we can detect when we should 18740 * change the priority level. 18741 */ 18742 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18743 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18744 } 18745 18746 18747 /* Check roundrobin failover list empty condition */ 18748 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18749 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18750 /* 18751 * If next fcf index is not found check if there are lower 18752 * Priority level fcf's in the fcf_priority list. 18753 * Set up the rr_bmask with all of the avaiable fcf bits 18754 * at that level and continue the selection process. 18755 */ 18756 if (lpfc_check_next_fcf_pri_level(phba)) 18757 goto initial_priority; 18758 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18759 "2844 No roundrobin failover FCF available\n"); 18760 18761 return LPFC_FCOE_FCF_NEXT_NONE; 18762 } 18763 18764 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18765 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18766 LPFC_FCF_FLOGI_FAILED) { 18767 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18768 return LPFC_FCOE_FCF_NEXT_NONE; 18769 18770 goto next_priority; 18771 } 18772 18773 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18774 "2845 Get next roundrobin failover FCF (x%x)\n", 18775 next_fcf_index); 18776 18777 return next_fcf_index; 18778 } 18779 18780 /** 18781 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18782 * @phba: pointer to lpfc hba data structure. 
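 * @fcf_index: index of the FCF record to mark as eligible.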
18783 * 18784 * This routine sets the FCF record index in to the eligible bmask for 18785 * roundrobin failover search. It checks to make sure that the index 18786 * does not go beyond the range of the driver allocated bmask dimension 18787 * before setting the bit. 18788 * 18789 * Returns 0 if the index bit successfully set, otherwise, it returns 18790 * -EINVAL. 18791 **/ 18792 int 18793 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18794 { 18795 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18796 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18797 "2610 FCF (x%x) reached driver's book " 18798 "keeping dimension:x%x\n", 18799 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18800 return -EINVAL; 18801 } 18802 /* Set the eligible FCF record index bmask */ 18803 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18804 18805 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18806 "2790 Set FCF (x%x) to roundrobin FCF failover " 18807 "bmask\n", fcf_index); 18808 18809 return 0; 18810 } 18811 18812 /** 18813 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18814 * @phba: pointer to lpfc hba data structure. 18815 * 18816 * This routine clears the FCF record index from the eligible bmask for 18817 * roundrobin failover search. It checks to make sure that the index 18818 * does not go beyond the range of the driver allocated bmask dimension 18819 * before clearing the bit. 18820 **/ 18821 void 18822 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18823 { 18824 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18825 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18826 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18827 "2762 FCF (x%x) reached driver's book " 18828 "keeping dimension:x%x\n", 18829 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18830 return; 18831 } 18832 /* Clear the eligible FCF record index bmask */ 18833 spin_lock_irq(&phba->hbalock); 18834 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18835 list) { 18836 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18837 list_del_init(&fcf_pri->list); 18838 break; 18839 } 18840 } 18841 spin_unlock_irq(&phba->hbalock); 18842 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18843 18844 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18845 "2791 Clear FCF (x%x) from roundrobin failover " 18846 "bmask\n", fcf_index); 18847 } 18848 18849 /** 18850 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18851 * @phba: pointer to lpfc hba data structure. 18852 * 18853 * This routine is the completion routine for the rediscover FCF table mailbox 18854 * command. If the mailbox command returned failure, it will try to stop the 18855 * FCF rediscover wait timer. 
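 * On failure the recovery path depends on what triggered the rediscover: a
 * CVL triggered request (FCF_ACVL_DISC) falls back to
 * lpfc_retry_pport_discovery(), while a dead-FCF triggered request falls back
 * to lpfc_sli4_fcf_dead_failthrough(). On success the FCF rediscovery wait
 * timer is started before the table is rescanned.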
18856 **/ 18857 static void 18858 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18859 { 18860 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18861 uint32_t shdr_status, shdr_add_status; 18862 18863 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18864 18865 shdr_status = bf_get(lpfc_mbox_hdr_status, 18866 &redisc_fcf->header.cfg_shdr.response); 18867 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18868 &redisc_fcf->header.cfg_shdr.response); 18869 if (shdr_status || shdr_add_status) { 18870 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18871 "2746 Requesting for FCF rediscovery failed " 18872 "status x%x add_status x%x\n", 18873 shdr_status, shdr_add_status); 18874 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18875 spin_lock_irq(&phba->hbalock); 18876 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18877 spin_unlock_irq(&phba->hbalock); 18878 /* 18879 * CVL event triggered FCF rediscover request failed, 18880 * last resort to re-try current registered FCF entry. 18881 */ 18882 lpfc_retry_pport_discovery(phba); 18883 } else { 18884 spin_lock_irq(&phba->hbalock); 18885 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18886 spin_unlock_irq(&phba->hbalock); 18887 /* 18888 * DEAD FCF event triggered FCF rediscover request 18889 * failed, last resort to fail over as a link down 18890 * to FCF registration. 18891 */ 18892 lpfc_sli4_fcf_dead_failthrough(phba); 18893 } 18894 } else { 18895 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18896 "2775 Start FCF rediscover quiescent timer\n"); 18897 /* 18898 * Start FCF rediscovery wait timer for pending FCF 18899 * before rescan FCF record table. 18900 */ 18901 lpfc_fcf_redisc_wait_start_timer(phba); 18902 } 18903 18904 mempool_free(mbox, phba->mbox_mem_pool); 18905 } 18906 18907 /** 18908 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18909 * @phba: pointer to lpfc hba data structure. 18910 * 18911 * This routine is invoked to request for rediscovery of the entire FCF table 18912 * by the port. 18913 **/ 18914 int 18915 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18916 { 18917 LPFC_MBOXQ_t *mbox; 18918 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18919 int rc, length; 18920 18921 /* Cancel retry delay timers to all vports before FCF rediscover */ 18922 lpfc_cancel_all_vport_retry_delay_timer(phba); 18923 18924 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18925 if (!mbox) { 18926 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18927 "2745 Failed to allocate mbox for " 18928 "requesting FCF rediscover.\n"); 18929 return -ENOMEM; 18930 } 18931 18932 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18933 sizeof(struct lpfc_sli4_cfg_mhdr)); 18934 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18935 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18936 length, LPFC_SLI4_MBX_EMBED); 18937 18938 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18939 /* Set count to 0 for invalidating the entire FCF database */ 18940 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18941 18942 /* Issue the mailbox command asynchronously */ 18943 mbox->vport = phba->pport; 18944 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18945 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18946 18947 if (rc == MBX_NOT_FINISHED) { 18948 mempool_free(mbox, phba->mbox_mem_pool); 18949 return -EIO; 18950 } 18951 return 0; 18952 } 18953 18954 /** 18955 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18956 * @phba: pointer to lpfc hba data structure. 
18957 * 18958 * This function is the failover routine as a last resort to the FCF DEAD 18959 * event when driver failed to perform fast FCF failover. 18960 **/ 18961 void 18962 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18963 { 18964 uint32_t link_state; 18965 18966 /* 18967 * Last resort as FCF DEAD event failover will treat this as 18968 * a link down, but save the link state because we don't want 18969 * it to be changed to Link Down unless it is already down. 18970 */ 18971 link_state = phba->link_state; 18972 lpfc_linkdown(phba); 18973 phba->link_state = link_state; 18974 18975 /* Unregister FCF if no devices connected to it */ 18976 lpfc_unregister_unused_fcf(phba); 18977 } 18978 18979 /** 18980 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18981 * @phba: pointer to lpfc hba data structure. 18982 * @rgn23_data: pointer to configure region 23 data. 18983 * 18984 * This function gets SLI3 port configure region 23 data through memory dump 18985 * mailbox command. When it successfully retrieves data, the size of the data 18986 * will be returned, otherwise, 0 will be returned. 18987 **/ 18988 static uint32_t 18989 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18990 { 18991 LPFC_MBOXQ_t *pmb = NULL; 18992 MAILBOX_t *mb; 18993 uint32_t offset = 0; 18994 int rc; 18995 18996 if (!rgn23_data) 18997 return 0; 18998 18999 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19000 if (!pmb) { 19001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19002 "2600 failed to allocate mailbox memory\n"); 19003 return 0; 19004 } 19005 mb = &pmb->u.mb; 19006 19007 do { 19008 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 19009 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 19010 19011 if (rc != MBX_SUCCESS) { 19012 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19013 "2601 failed to read config " 19014 "region 23, rc 0x%x Status 0x%x\n", 19015 rc, mb->mbxStatus); 19016 mb->un.varDmp.word_cnt = 0; 19017 } 19018 /* 19019 * dump mem may return a zero when finished or we got a 19020 * mailbox error, either way we are done. 19021 */ 19022 if (mb->un.varDmp.word_cnt == 0) 19023 break; 19024 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19025 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19026 19027 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19028 rgn23_data + offset, 19029 mb->un.varDmp.word_cnt); 19030 offset += mb->un.varDmp.word_cnt; 19031 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19032 19033 mempool_free(pmb, phba->mbox_mem_pool); 19034 return offset; 19035 } 19036 19037 /** 19038 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 19039 * @phba: pointer to lpfc hba data structure. 19040 * @rgn23_data: pointer to configure region 23 data. 19041 * 19042 * This function gets SLI4 port configure region 23 data through memory dump 19043 * mailbox command. When it successfully retrieves data, the size of the data 19044 * will be returned, otherwise, 0 will be returned. 
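 *
 * Illustrative usage (a sketch that mirrors lpfc_sli_read_link_ste() below;
 * parse_region23() is a hypothetical caller-side parser, not a driver
 * function):
 *
 *	uint8_t *rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	uint32_t data_size;
 *
 *	if (rgn23_data) {
 *		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
 *		if (data_size)
 *			parse_region23(rgn23_data, data_size);
 *		kfree(rgn23_data);
 *	}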
19045 **/ 19046 static uint32_t 19047 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19048 { 19049 LPFC_MBOXQ_t *mboxq = NULL; 19050 struct lpfc_dmabuf *mp = NULL; 19051 struct lpfc_mqe *mqe; 19052 uint32_t data_length = 0; 19053 int rc; 19054 19055 if (!rgn23_data) 19056 return 0; 19057 19058 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19059 if (!mboxq) { 19060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19061 "3105 failed to allocate mailbox memory\n"); 19062 return 0; 19063 } 19064 19065 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19066 goto out; 19067 mqe = &mboxq->u.mqe; 19068 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 19069 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19070 if (rc) 19071 goto out; 19072 data_length = mqe->un.mb_words[5]; 19073 if (data_length == 0) 19074 goto out; 19075 if (data_length > DMP_RGN23_SIZE) { 19076 data_length = 0; 19077 goto out; 19078 } 19079 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 19080 out: 19081 mempool_free(mboxq, phba->mbox_mem_pool); 19082 if (mp) { 19083 lpfc_mbuf_free(phba, mp->virt, mp->phys); 19084 kfree(mp); 19085 } 19086 return data_length; 19087 } 19088 19089 /** 19090 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 19091 * @phba: pointer to lpfc hba data structure. 19092 * 19093 * This function read region 23 and parse TLV for port status to 19094 * decide if the user disaled the port. If the TLV indicates the 19095 * port is disabled, the hba_flag is set accordingly. 19096 **/ 19097 void 19098 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 19099 { 19100 uint8_t *rgn23_data = NULL; 19101 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 19102 uint32_t offset = 0; 19103 19104 /* Get adapter Region 23 data */ 19105 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 19106 if (!rgn23_data) 19107 goto out; 19108 19109 if (phba->sli_rev < LPFC_SLI_REV4) 19110 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 19111 else { 19112 if_type = bf_get(lpfc_sli_intf_if_type, 19113 &phba->sli4_hba.sli_intf); 19114 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 19115 goto out; 19116 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 19117 } 19118 19119 if (!data_size) 19120 goto out; 19121 19122 /* Check the region signature first */ 19123 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19125 "2619 Config region 23 has bad signature\n"); 19126 goto out; 19127 } 19128 offset += 4; 19129 19130 /* Check the data structure version */ 19131 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19132 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19133 "2620 Config region 23 has bad version\n"); 19134 goto out; 19135 } 19136 offset += 4; 19137 19138 /* Parse TLV entries in the region */ 19139 while (offset < data_size) { 19140 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 19141 break; 19142 /* 19143 * If the TLV is not driver specific TLV or driver id is 19144 * not linux driver id, skip the record. 19145 */ 19146 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 19147 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 19148 (rgn23_data[offset + 3] != 0)) { 19149 offset += rgn23_data[offset + 1] * 4 + 4; 19150 continue; 19151 } 19152 19153 /* Driver found a driver specific TLV in the config region */ 19154 sub_tlv_len = rgn23_data[offset + 1] * 4; 19155 offset += 4; 19156 tlv_offset = 0; 19157 19158 /* 19159 * Search for configured port state sub-TLV. 
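		 * A PORT_STE_TYPE record carries the configured port state in
		 * its third byte: zero there means the user disabled the port
		 * and LINK_DISABLED is set in hba_flag below.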
19160 */ 19161 while ((offset < data_size) && 19162 (tlv_offset < sub_tlv_len)) { 19163 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 19164 offset += 4; 19165 tlv_offset += 4; 19166 break; 19167 } 19168 if (rgn23_data[offset] != PORT_STE_TYPE) { 19169 offset += rgn23_data[offset + 1] * 4 + 4; 19170 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 19171 continue; 19172 } 19173 19174 /* This HBA contains PORT_STE configured */ 19175 if (!rgn23_data[offset + 2]) 19176 phba->hba_flag |= LINK_DISABLED; 19177 19178 goto out; 19179 } 19180 } 19181 19182 out: 19183 kfree(rgn23_data); 19184 return; 19185 } 19186 19187 /** 19188 * lpfc_wr_object - write an object to the firmware 19189 * @phba: HBA structure that indicates port to create a queue on. 19190 * @dmabuf_list: list of dmabufs to write to the port. 19191 * @size: the total byte value of the objects to write to the port. 19192 * @offset: the current offset to be used to start the transfer. 19193 * 19194 * This routine will create a wr_object mailbox command to send to the port. 19195 * the mailbox command will be constructed using the dma buffers described in 19196 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 19197 * BDEs that the imbedded mailbox can support. The @offset variable will be 19198 * used to indicate the starting offset of the transfer and will also return 19199 * the offset after the write object mailbox has completed. @size is used to 19200 * determine the end of the object and whether the eof bit should be set. 19201 * 19202 * Return 0 is successful and offset will contain the the new offset to use 19203 * for the next write. 19204 * Return negative value for error cases. 19205 **/ 19206 int 19207 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 19208 uint32_t size, uint32_t *offset) 19209 { 19210 struct lpfc_mbx_wr_object *wr_object; 19211 LPFC_MBOXQ_t *mbox; 19212 int rc = 0, i = 0; 19213 uint32_t shdr_status, shdr_add_status, shdr_change_status; 19214 uint32_t mbox_tmo; 19215 struct lpfc_dmabuf *dmabuf; 19216 uint32_t written = 0; 19217 bool check_change_status = false; 19218 19219 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19220 if (!mbox) 19221 return -ENOMEM; 19222 19223 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 19224 LPFC_MBOX_OPCODE_WRITE_OBJECT, 19225 sizeof(struct lpfc_mbx_wr_object) - 19226 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 19227 19228 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 19229 wr_object->u.request.write_offset = *offset; 19230 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 19231 wr_object->u.request.object_name[0] = 19232 cpu_to_le32(wr_object->u.request.object_name[0]); 19233 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 19234 list_for_each_entry(dmabuf, dmabuf_list, list) { 19235 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 19236 break; 19237 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 19238 wr_object->u.request.bde[i].addrHigh = 19239 putPaddrHigh(dmabuf->phys); 19240 if (written + SLI4_PAGE_SIZE >= size) { 19241 wr_object->u.request.bde[i].tus.f.bdeSize = 19242 (size - written); 19243 written += (size - written); 19244 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 19245 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 19246 check_change_status = true; 19247 } else { 19248 wr_object->u.request.bde[i].tus.f.bdeSize = 19249 SLI4_PAGE_SIZE; 19250 written += SLI4_PAGE_SIZE; 19251 } 19252 i++; 19253 } 19254 
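	/*
	 * Tell the port how many BDEs were filled in and how many bytes they
	 * describe in total for this WRITE_OBJECT request.
	 */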
wr_object->u.request.bde_count = i; 19255 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 19256 if (!phba->sli4_hba.intr_enable) 19257 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 19258 else { 19259 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 19260 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 19261 } 19262 /* The IOCTL status is embedded in the mailbox subheader. */ 19263 shdr_status = bf_get(lpfc_mbox_hdr_status, 19264 &wr_object->header.cfg_shdr.response); 19265 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 19266 &wr_object->header.cfg_shdr.response); 19267 if (check_change_status) { 19268 shdr_change_status = bf_get(lpfc_wr_object_change_status, 19269 &wr_object->u.response); 19270 switch (shdr_change_status) { 19271 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): 19272 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19273 "3198 Firmware write complete: System " 19274 "reboot required to instantiate\n"); 19275 break; 19276 case (LPFC_CHANGE_STATUS_FW_RESET): 19277 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19278 "3199 Firmware write complete: Firmware" 19279 " reset required to instantiate\n"); 19280 break; 19281 case (LPFC_CHANGE_STATUS_PORT_MIGRATION): 19282 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19283 "3200 Firmware write complete: Port " 19284 "Migration or PCI Reset required to " 19285 "instantiate\n"); 19286 break; 19287 case (LPFC_CHANGE_STATUS_PCI_RESET): 19288 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19289 "3201 Firmware write complete: PCI " 19290 "Reset required to instantiate\n"); 19291 break; 19292 default: 19293 break; 19294 } 19295 } 19296 if (rc != MBX_TIMEOUT) 19297 mempool_free(mbox, phba->mbox_mem_pool); 19298 if (shdr_status || shdr_add_status || rc) { 19299 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19300 "3025 Write Object mailbox failed with " 19301 "status x%x add_status x%x, mbx status x%x\n", 19302 shdr_status, shdr_add_status, rc); 19303 rc = -ENXIO; 19304 *offset = shdr_add_status; 19305 } else 19306 *offset += wr_object->u.response.actual_write_length; 19307 return rc; 19308 } 19309 19310 /** 19311 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 19312 * @vport: pointer to vport data structure. 19313 * 19314 * This function iterate through the mailboxq and clean up all REG_LOGIN 19315 * and REG_VPI mailbox commands associated with the vport. This function 19316 * is called when driver want to restart discovery of the vport due to 19317 * a Clear Virtual Link event. 
19318 **/ 19319 void 19320 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19321 { 19322 struct lpfc_hba *phba = vport->phba; 19323 LPFC_MBOXQ_t *mb, *nextmb; 19324 struct lpfc_dmabuf *mp; 19325 struct lpfc_nodelist *ndlp; 19326 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19327 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19328 LIST_HEAD(mbox_cmd_list); 19329 uint8_t restart_loop; 19330 19331 /* Clean up internally queued mailbox commands with the vport */ 19332 spin_lock_irq(&phba->hbalock); 19333 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19334 if (mb->vport != vport) 19335 continue; 19336 19337 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19338 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19339 continue; 19340 19341 list_del(&mb->list); 19342 list_add_tail(&mb->list, &mbox_cmd_list); 19343 } 19344 /* Clean up active mailbox command with the vport */ 19345 mb = phba->sli.mbox_active; 19346 if (mb && (mb->vport == vport)) { 19347 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19348 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19349 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19350 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19351 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19352 /* Put reference count for delayed processing */ 19353 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19354 /* Unregister the RPI when mailbox complete */ 19355 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19356 } 19357 } 19358 /* Cleanup any mailbox completions which are not yet processed */ 19359 do { 19360 restart_loop = 0; 19361 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19362 /* 19363 * If this mailox is already processed or it is 19364 * for another vport ignore it. 19365 */ 19366 if ((mb->vport != vport) || 19367 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19368 continue; 19369 19370 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19371 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19372 continue; 19373 19374 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19375 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19376 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19377 /* Unregister the RPI when mailbox complete */ 19378 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19379 restart_loop = 1; 19380 spin_unlock_irq(&phba->hbalock); 19381 spin_lock(shost->host_lock); 19382 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19383 spin_unlock(shost->host_lock); 19384 spin_lock_irq(&phba->hbalock); 19385 break; 19386 } 19387 } 19388 } while (restart_loop); 19389 19390 spin_unlock_irq(&phba->hbalock); 19391 19392 /* Release the cleaned-up mailbox commands */ 19393 while (!list_empty(&mbox_cmd_list)) { 19394 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19395 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19396 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 19397 if (mp) { 19398 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19399 kfree(mp); 19400 } 19401 mb->ctx_buf = NULL; 19402 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19403 mb->ctx_ndlp = NULL; 19404 if (ndlp) { 19405 spin_lock(shost->host_lock); 19406 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19407 spin_unlock(shost->host_lock); 19408 lpfc_nlp_put(ndlp); 19409 } 19410 } 19411 mempool_free(mb, phba->mbox_mem_pool); 19412 } 19413 19414 /* Release the ndlp with the cleaned-up active mailbox command */ 19415 if (act_mbx_ndlp) { 19416 spin_lock(shost->host_lock); 19417 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19418 spin_unlock(shost->host_lock); 19419 lpfc_nlp_put(act_mbx_ndlp); 19420 } 19421 } 19422 19423 /** 19424 * lpfc_drain_txq - Drain the txq 19425 * @phba: Pointer to 
HBA context object. 19426 * 19427 * This function attempt to submit IOCBs on the txq 19428 * to the adapter. For SLI4 adapters, the txq contains 19429 * ELS IOCBs that have been deferred because the there 19430 * are no SGLs. This congestion can occur with large 19431 * vport counts during node discovery. 19432 **/ 19433 19434 uint32_t 19435 lpfc_drain_txq(struct lpfc_hba *phba) 19436 { 19437 LIST_HEAD(completions); 19438 struct lpfc_sli_ring *pring; 19439 struct lpfc_iocbq *piocbq = NULL; 19440 unsigned long iflags = 0; 19441 char *fail_msg = NULL; 19442 struct lpfc_sglq *sglq; 19443 union lpfc_wqe128 wqe; 19444 uint32_t txq_cnt = 0; 19445 struct lpfc_queue *wq; 19446 19447 if (phba->link_flag & LS_MDS_LOOPBACK) { 19448 /* MDS WQE are posted only to first WQ*/ 19449 wq = phba->sli4_hba.hdwq[0].fcp_wq; 19450 if (unlikely(!wq)) 19451 return 0; 19452 pring = wq->pring; 19453 } else { 19454 wq = phba->sli4_hba.els_wq; 19455 if (unlikely(!wq)) 19456 return 0; 19457 pring = lpfc_phba_elsring(phba); 19458 } 19459 19460 if (unlikely(!pring) || list_empty(&pring->txq)) 19461 return 0; 19462 19463 spin_lock_irqsave(&pring->ring_lock, iflags); 19464 list_for_each_entry(piocbq, &pring->txq, list) { 19465 txq_cnt++; 19466 } 19467 19468 if (txq_cnt > pring->txq_max) 19469 pring->txq_max = txq_cnt; 19470 19471 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19472 19473 while (!list_empty(&pring->txq)) { 19474 spin_lock_irqsave(&pring->ring_lock, iflags); 19475 19476 piocbq = lpfc_sli_ringtx_get(phba, pring); 19477 if (!piocbq) { 19478 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19479 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19480 "2823 txq empty and txq_cnt is %d\n ", 19481 txq_cnt); 19482 break; 19483 } 19484 sglq = __lpfc_sli_get_els_sglq(phba, piocbq); 19485 if (!sglq) { 19486 __lpfc_sli_ringtx_put(phba, pring, piocbq); 19487 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19488 break; 19489 } 19490 txq_cnt--; 19491 19492 /* The xri and iocb resources secured, 19493 * attempt to issue request 19494 */ 19495 piocbq->sli4_lxritag = sglq->sli4_lxritag; 19496 piocbq->sli4_xritag = sglq->sli4_xritag; 19497 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 19498 fail_msg = "to convert bpl to sgl"; 19499 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19500 fail_msg = "to convert iocb to wqe"; 19501 else if (lpfc_sli4_wq_put(wq, &wqe)) 19502 fail_msg = " - Wq is full"; 19503 else 19504 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19505 19506 if (fail_msg) { 19507 /* Failed means we can't issue and need to cancel */ 19508 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19509 "2822 IOCB failed %s iotag 0x%x " 19510 "xri 0x%x\n", 19511 fail_msg, 19512 piocbq->iotag, piocbq->sli4_xritag); 19513 list_add_tail(&piocbq->list, &completions); 19514 } 19515 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19516 } 19517 19518 /* Cancel all the IOCBs that cannot be issued */ 19519 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 19520 IOERR_SLI_ABORTED); 19521 19522 return txq_cnt; 19523 } 19524 19525 /** 19526 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 19527 * @phba: Pointer to HBA context object. 19528 * @pwqe: Pointer to command WQE. 19529 * @sglq: Pointer to the scatter gather queue object. 19530 * 19531 * This routine converts the bpl or bde that is in the WQE 19532 * to a sgl list for the sli4 hardware. The physical address 19533 * of the bpl/bde is converted back to a virtual address. 19534 * If the WQE contains a BPL then the list of BDE's is 19535 * converted to sli4_sge's. 
If the WQE contains a single 19536 * BDE then it is converted to a single sli_sge. 19537 * The WQE is still in cpu endianness so the contents of 19538 * the bpl can be used without byte swapping. 19539 * 19540 * Returns valid XRI = Success, NO_XRI = Failure. 19541 */ 19542 static uint16_t 19543 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19544 struct lpfc_sglq *sglq) 19545 { 19546 uint16_t xritag = NO_XRI; 19547 struct ulp_bde64 *bpl = NULL; 19548 struct ulp_bde64 bde; 19549 struct sli4_sge *sgl = NULL; 19550 struct lpfc_dmabuf *dmabuf; 19551 union lpfc_wqe128 *wqe; 19552 int numBdes = 0; 19553 int i = 0; 19554 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19555 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19556 uint32_t cmd; 19557 19558 if (!pwqeq || !sglq) 19559 return xritag; 19560 19561 sgl = (struct sli4_sge *)sglq->sgl; 19562 wqe = &pwqeq->wqe; 19563 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19564 19565 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19566 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19567 return sglq->sli4_xritag; 19568 numBdes = pwqeq->rsvd2; 19569 if (numBdes) { 19570 /* The addrHigh and addrLow fields within the WQE 19571 * have not been byteswapped yet so there is no 19572 * need to swap them back. 19573 */ 19574 if (pwqeq->context3) 19575 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19576 else 19577 return xritag; 19578 19579 bpl = (struct ulp_bde64 *)dmabuf->virt; 19580 if (!bpl) 19581 return xritag; 19582 19583 for (i = 0; i < numBdes; i++) { 19584 /* Should already be byte swapped. */ 19585 sgl->addr_hi = bpl->addrHigh; 19586 sgl->addr_lo = bpl->addrLow; 19587 19588 sgl->word2 = le32_to_cpu(sgl->word2); 19589 if ((i+1) == numBdes) 19590 bf_set(lpfc_sli4_sge_last, sgl, 1); 19591 else 19592 bf_set(lpfc_sli4_sge_last, sgl, 0); 19593 /* swap the size field back to the cpu so we 19594 * can assign it to the sgl. 19595 */ 19596 bde.tus.w = le32_to_cpu(bpl->tus.w); 19597 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19598 /* The offsets in the sgl need to be accumulated 19599 * separately for the request and reply lists. 19600 * The request is always first, the reply follows. 19601 */ 19602 switch (cmd) { 19603 case CMD_GEN_REQUEST64_WQE: 19604 /* add up the reply sg entries */ 19605 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19606 inbound++; 19607 /* first inbound? reset the offset */ 19608 if (inbound == 1) 19609 offset = 0; 19610 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19611 bf_set(lpfc_sli4_sge_type, sgl, 19612 LPFC_SGE_TYPE_DATA); 19613 offset += bde.tus.f.bdeSize; 19614 break; 19615 case CMD_FCP_TRSP64_WQE: 19616 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19617 bf_set(lpfc_sli4_sge_type, sgl, 19618 LPFC_SGE_TYPE_DATA); 19619 break; 19620 case CMD_FCP_TSEND64_WQE: 19621 case CMD_FCP_TRECEIVE64_WQE: 19622 bf_set(lpfc_sli4_sge_type, sgl, 19623 bpl->tus.f.bdeFlags); 19624 if (i < 3) 19625 offset = 0; 19626 else 19627 offset += bde.tus.f.bdeSize; 19628 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19629 break; 19630 } 19631 sgl->word2 = cpu_to_le32(sgl->word2); 19632 bpl++; 19633 sgl++; 19634 } 19635 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19636 /* The addrHigh and addrLow fields of the BDE have not 19637 * been byteswapped yet so they need to be swapped 19638 * before putting them in the sgl. 
19639 */ 19640 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19641 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19642 sgl->word2 = le32_to_cpu(sgl->word2); 19643 bf_set(lpfc_sli4_sge_last, sgl, 1); 19644 sgl->word2 = cpu_to_le32(sgl->word2); 19645 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19646 } 19647 return sglq->sli4_xritag; 19648 } 19649 19650 /** 19651 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19652 * @phba: Pointer to HBA context object. 19653 * @ring_number: Base sli ring number 19654 * @pwqe: Pointer to command WQE. 19655 **/ 19656 int 19657 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19658 struct lpfc_iocbq *pwqe) 19659 { 19660 union lpfc_wqe128 *wqe = &pwqe->wqe; 19661 struct lpfc_nvmet_rcv_ctx *ctxp; 19662 struct lpfc_queue *wq; 19663 struct lpfc_sglq *sglq; 19664 struct lpfc_sli_ring *pring; 19665 unsigned long iflags; 19666 uint32_t ret = 0; 19667 19668 /* NVME_LS and NVME_LS ABTS requests. */ 19669 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19670 pring = phba->sli4_hba.nvmels_wq->pring; 19671 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19672 qp, wq_access); 19673 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19674 if (!sglq) { 19675 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19676 return WQE_BUSY; 19677 } 19678 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19679 pwqe->sli4_xritag = sglq->sli4_xritag; 19680 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19681 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19682 return WQE_ERROR; 19683 } 19684 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19685 pwqe->sli4_xritag); 19686 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19687 if (ret) { 19688 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19689 return ret; 19690 } 19691 19692 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19693 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19694 return 0; 19695 } 19696 19697 /* NVME_FCREQ and NVME_ABTS requests */ 19698 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19699 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19700 wq = qp->nvme_wq; 19701 pring = wq->pring; 19702 19703 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19704 19705 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19706 qp, wq_access); 19707 ret = lpfc_sli4_wq_put(wq, wqe); 19708 if (ret) { 19709 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19710 return ret; 19711 } 19712 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19713 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19714 return 0; 19715 } 19716 19717 /* NVMET requests */ 19718 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19719 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 19720 wq = qp->nvme_wq; 19721 pring = wq->pring; 19722 19723 ctxp = pwqe->context2; 19724 sglq = ctxp->ctxbuf->sglq; 19725 if (pwqe->sli4_xritag == NO_XRI) { 19726 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19727 pwqe->sli4_xritag = sglq->sli4_xritag; 19728 } 19729 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19730 pwqe->sli4_xritag); 19731 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19732 19733 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19734 qp, wq_access); 19735 ret = lpfc_sli4_wq_put(wq, wqe); 19736 if (ret) { 19737 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19738 return ret; 19739 } 19740 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19741 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19742 return 0; 19743 } 19744 return WQE_ERROR; 19745 } 19746 19747 #ifdef LPFC_MXP_STAT 19748 /** 19749 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 19750 * @phba: pointer to lpfc hba data structure. 19751 * @hwqid: belong to which HWQ. 19752 * 19753 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 19754 * 15 seconds after a test case is running. 19755 * 19756 * The user should call lpfc_debugfs_multixripools_write before running a test 19757 * case to clear stat_snapshot_taken. Then the user starts a test case. During 19758 * test case is running, stat_snapshot_taken is incremented by 1 every time when 19759 * this routine is called from heartbeat timer. When stat_snapshot_taken is 19760 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 19761 **/ 19762 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 19763 { 19764 struct lpfc_sli4_hdw_queue *qp; 19765 struct lpfc_multixri_pool *multixri_pool; 19766 struct lpfc_pvt_pool *pvt_pool; 19767 struct lpfc_pbl_pool *pbl_pool; 19768 u32 txcmplq_cnt; 19769 19770 qp = &phba->sli4_hba.hdwq[hwqid]; 19771 multixri_pool = qp->p_multixri_pool; 19772 if (!multixri_pool) 19773 return; 19774 19775 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 19776 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19777 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19778 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19779 if (qp->nvme_wq) 19780 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 19781 19782 multixri_pool->stat_pbl_count = pbl_pool->count; 19783 multixri_pool->stat_pvt_count = pvt_pool->count; 19784 multixri_pool->stat_busy_count = txcmplq_cnt; 19785 } 19786 19787 multixri_pool->stat_snapshot_taken++; 19788 } 19789 #endif 19790 19791 /** 19792 * lpfc_adjust_pvt_pool_count - Adjust private pool count 19793 * @phba: pointer to lpfc hba data structure. 19794 * @hwqid: belong to which HWQ. 19795 * 19796 * This routine moves some XRIs from private to public pool when private pool 19797 * is not busy. 19798 **/ 19799 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 19800 { 19801 struct lpfc_multixri_pool *multixri_pool; 19802 u32 io_req_count; 19803 u32 prev_io_req_count; 19804 19805 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 19806 if (!multixri_pool) 19807 return; 19808 io_req_count = multixri_pool->io_req_count; 19809 prev_io_req_count = multixri_pool->prev_io_req_count; 19810 19811 if (prev_io_req_count != io_req_count) { 19812 /* Private pool is busy */ 19813 multixri_pool->prev_io_req_count = io_req_count; 19814 } else { 19815 /* Private pool is not busy. 19816 * Move XRIs from private to public pool. 
19817 */ 19818 lpfc_move_xri_pvt_to_pbl(phba, hwqid); 19819 } 19820 } 19821 19822 /** 19823 * lpfc_adjust_high_watermark - Adjust high watermark 19824 * @phba: pointer to lpfc hba data structure. 19825 * @hwqid: belong to which HWQ. 19826 * 19827 * This routine sets the high watermark to the number of outstanding XRIs, 19828 * but makes sure the new value stays between xri_limit/2 and xri_limit. 19829 **/ 19830 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) 19831 { 19832 u32 new_watermark; 19833 u32 watermark_max; 19834 u32 watermark_min; 19835 u32 xri_limit; 19836 u32 txcmplq_cnt; 19837 u32 abts_io_bufs; 19838 struct lpfc_multixri_pool *multixri_pool; 19839 struct lpfc_sli4_hdw_queue *qp; 19840 19841 qp = &phba->sli4_hba.hdwq[hwqid]; 19842 multixri_pool = qp->p_multixri_pool; 19843 if (!multixri_pool) 19844 return; 19845 xri_limit = multixri_pool->xri_limit; 19846 19847 watermark_max = xri_limit; 19848 watermark_min = xri_limit / 2; 19849 19850 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19851 abts_io_bufs = qp->abts_scsi_io_bufs; 19852 if (qp->nvme_wq) { 19853 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 19854 abts_io_bufs += qp->abts_nvme_io_bufs; 19855 } 19856 19857 new_watermark = txcmplq_cnt + abts_io_bufs; 19858 new_watermark = min(watermark_max, new_watermark); 19859 new_watermark = max(watermark_min, new_watermark); 19860 multixri_pool->pvt_pool.high_watermark = new_watermark; 19861 19862 #ifdef LPFC_MXP_STAT 19863 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, 19864 new_watermark); 19865 #endif 19866 } 19867 19868 /** 19869 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool 19870 * @phba: pointer to lpfc hba data structure. 19871 * @hwqid: belong to which HWQ. 19872 * 19873 * This routine is called from the heartbeat timer when pvt_pool is idle. 19874 * All free XRIs are moved from the private to the public pool on hwqid in 19875 * two steps: the first step moves (all - low_watermark) XRIs, and the 19876 * second step moves the rest.
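 * For example (illustrative numbers only): with pvt_pool->count == 100 and
 * low_watermark == 20, the first step leaves 20 XRIs in pvt_pool and splices
 * the other 80 onto pbl_pool; had pvt_pool held 20 or fewer, the second step
 * would instead have moved every free XRI to pbl_pool.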
19877 **/ 19878 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 19879 { 19880 struct lpfc_pbl_pool *pbl_pool; 19881 struct lpfc_pvt_pool *pvt_pool; 19882 struct lpfc_sli4_hdw_queue *qp; 19883 struct lpfc_io_buf *lpfc_ncmd; 19884 struct lpfc_io_buf *lpfc_ncmd_next; 19885 unsigned long iflag; 19886 struct list_head tmp_list; 19887 u32 tmp_count; 19888 19889 qp = &phba->sli4_hba.hdwq[hwqid]; 19890 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19891 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19892 tmp_count = 0; 19893 19894 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 19895 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 19896 19897 if (pvt_pool->count > pvt_pool->low_watermark) { 19898 /* Step 1: move (all - low_watermark) from pvt_pool 19899 * to pbl_pool 19900 */ 19901 19902 /* Move low watermark of bufs from pvt_pool to tmp_list */ 19903 INIT_LIST_HEAD(&tmp_list); 19904 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 19905 &pvt_pool->list, list) { 19906 list_move_tail(&lpfc_ncmd->list, &tmp_list); 19907 tmp_count++; 19908 if (tmp_count >= pvt_pool->low_watermark) 19909 break; 19910 } 19911 19912 /* Move all bufs from pvt_pool to pbl_pool */ 19913 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19914 19915 /* Move all bufs from tmp_list to pvt_pool */ 19916 list_splice(&tmp_list, &pvt_pool->list); 19917 19918 pbl_pool->count += (pvt_pool->count - tmp_count); 19919 pvt_pool->count = tmp_count; 19920 } else { 19921 /* Step 2: move the rest from pvt_pool to pbl_pool */ 19922 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19923 pbl_pool->count += pvt_pool->count; 19924 pvt_pool->count = 0; 19925 } 19926 19927 spin_unlock(&pvt_pool->lock); 19928 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19929 } 19930 19931 /** 19932 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19933 * @phba: pointer to lpfc hba data structure 19934 * @pbl_pool: specified public free XRI pool 19935 * @pvt_pool: specified private free XRI pool 19936 * @count: number of XRIs to move 19937 * 19938 * This routine tries to move some free common bufs from the specified pbl_pool 19939 * to the specified pvt_pool. It might move less than count XRIs if there's not 19940 * enough in public pool. 
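 * The @pbl_pool lock is only taken with spin_trylock_irqsave(), so a pool
 * that is momentarily held by another CPU is treated as unavailable rather
 * than waited on.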
19941 * 19942 * Return: 19943 * true - if XRIs are successfully moved from the specified pbl_pool to the 19944 * specified pvt_pool 19945 * false - if the specified pbl_pool is empty or locked by someone else 19946 **/ 19947 static bool 19948 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19949 struct lpfc_pbl_pool *pbl_pool, 19950 struct lpfc_pvt_pool *pvt_pool, u32 count) 19951 { 19952 struct lpfc_io_buf *lpfc_ncmd; 19953 struct lpfc_io_buf *lpfc_ncmd_next; 19954 unsigned long iflag; 19955 int ret; 19956 19957 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 19958 if (ret) { 19959 if (pbl_pool->count) { 19960 /* Move a batch of XRIs from public to private pool */ 19961 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 19962 list_for_each_entry_safe(lpfc_ncmd, 19963 lpfc_ncmd_next, 19964 &pbl_pool->list, 19965 list) { 19966 list_move_tail(&lpfc_ncmd->list, 19967 &pvt_pool->list); 19968 pvt_pool->count++; 19969 pbl_pool->count--; 19970 count--; 19971 if (count == 0) 19972 break; 19973 } 19974 19975 spin_unlock(&pvt_pool->lock); 19976 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19977 return true; 19978 } 19979 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19980 } 19981 19982 return false; 19983 } 19984 19985 /** 19986 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19987 * @phba: pointer to lpfc hba data structure. 19988 * @hwqid: belong to which HWQ. 19989 * @count: number of XRIs to move 19990 * 19991 * This routine tries to find some free common bufs in one of public pools with 19992 * Round Robin method. The search always starts from local hwqid, then the next 19993 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 19994 * a batch of free common bufs are moved to private pool on hwqid. 19995 * It might move less than count XRIs if there's not enough in public pool. 
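 *
 * Callers in this file typically top up the private pool in XRI_BATCH sized
 * chunks, for example (see lpfc_keep_pvt_pool_above_lowwm() below):
 *
 *	if (pvt_pool->count < pvt_pool->low_watermark)
 *		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);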
19996 **/ 19997 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) 19998 { 19999 struct lpfc_multixri_pool *multixri_pool; 20000 struct lpfc_multixri_pool *next_multixri_pool; 20001 struct lpfc_pvt_pool *pvt_pool; 20002 struct lpfc_pbl_pool *pbl_pool; 20003 struct lpfc_sli4_hdw_queue *qp; 20004 u32 next_hwqid; 20005 u32 hwq_count; 20006 int ret; 20007 20008 qp = &phba->sli4_hba.hdwq[hwqid]; 20009 multixri_pool = qp->p_multixri_pool; 20010 pvt_pool = &multixri_pool->pvt_pool; 20011 pbl_pool = &multixri_pool->pbl_pool; 20012 20013 /* Check if local pbl_pool is available */ 20014 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); 20015 if (ret) { 20016 #ifdef LPFC_MXP_STAT 20017 multixri_pool->local_pbl_hit_count++; 20018 #endif 20019 return; 20020 } 20021 20022 hwq_count = phba->cfg_hdw_queue; 20023 20024 /* Get the next hwqid which was found last time */ 20025 next_hwqid = multixri_pool->rrb_next_hwqid; 20026 20027 do { 20028 /* Go to next hwq */ 20029 next_hwqid = (next_hwqid + 1) % hwq_count; 20030 20031 next_multixri_pool = 20032 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; 20033 pbl_pool = &next_multixri_pool->pbl_pool; 20034 20035 /* Check if the public free xri pool is available */ 20036 ret = _lpfc_move_xri_pbl_to_pvt( 20037 phba, qp, pbl_pool, pvt_pool, count); 20038 20039 /* Exit while-loop if success or all hwqid are checked */ 20040 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); 20041 20042 /* Starting point for the next time */ 20043 multixri_pool->rrb_next_hwqid = next_hwqid; 20044 20045 if (!ret) { 20046 /* stats: all public pools are empty*/ 20047 multixri_pool->pbl_empty_count++; 20048 } 20049 20050 #ifdef LPFC_MXP_STAT 20051 if (ret) { 20052 if (next_hwqid == hwqid) 20053 multixri_pool->local_pbl_hit_count++; 20054 else 20055 multixri_pool->other_pbl_hit_count++; 20056 } 20057 #endif 20058 } 20059 20060 /** 20061 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark 20062 * @phba: pointer to lpfc hba data structure. 20063 * @qp: belong to which HWQ. 20064 * 20065 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than 20066 * low watermark. 20067 **/ 20068 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) 20069 { 20070 struct lpfc_multixri_pool *multixri_pool; 20071 struct lpfc_pvt_pool *pvt_pool; 20072 20073 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 20074 pvt_pool = &multixri_pool->pvt_pool; 20075 20076 if (pvt_pool->count < pvt_pool->low_watermark) 20077 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 20078 } 20079 20080 /** 20081 * lpfc_release_io_buf - Return one IO buf back to free pool 20082 * @phba: pointer to lpfc hba data structure. 20083 * @lpfc_ncmd: IO buf to be returned. 20084 * @qp: belong to which HWQ. 20085 * 20086 * This routine returns one IO buf back to free pool. If this is an urgent IO, 20087 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, 20088 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and 20089 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to 20090 * lpfc_io_buf_list_put. 
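 *
 * When rebalancing is enabled the destination is chosen as follows (a summary
 * of the logic below):
 *	1. expedite IO                                     -> phba->epd_pool
 *	2. pvt_pool under its low watermark, or total owned
 *	   XRIs under xri_limit with pvt_pool under its
 *	   high watermark                                  -> pvt_pool
 *	3. otherwise                                       -> pbl_pool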
20091 **/ 20092 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 20093 struct lpfc_sli4_hdw_queue *qp) 20094 { 20095 unsigned long iflag; 20096 struct lpfc_pbl_pool *pbl_pool; 20097 struct lpfc_pvt_pool *pvt_pool; 20098 struct lpfc_epd_pool *epd_pool; 20099 u32 txcmplq_cnt; 20100 u32 xri_owned; 20101 u32 xri_limit; 20102 u32 abts_io_bufs; 20103 20104 /* MUST zero fields if buffer is reused by another protocol */ 20105 lpfc_ncmd->nvmeCmd = NULL; 20106 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; 20107 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; 20108 20109 if (phba->cfg_xri_rebalancing) { 20110 if (lpfc_ncmd->expedite) { 20111 /* Return to expedite pool */ 20112 epd_pool = &phba->epd_pool; 20113 spin_lock_irqsave(&epd_pool->lock, iflag); 20114 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 20115 epd_pool->count++; 20116 spin_unlock_irqrestore(&epd_pool->lock, iflag); 20117 return; 20118 } 20119 20120 /* Avoid invalid access if an IO sneaks in and is being rejected 20121 * just _after_ xri pools are destroyed in lpfc_offline. 20122 * Nothing much can be done at this point. 20123 */ 20124 if (!qp->p_multixri_pool) 20125 return; 20126 20127 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20128 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20129 20130 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 20131 abts_io_bufs = qp->abts_scsi_io_bufs; 20132 if (qp->nvme_wq) { 20133 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 20134 abts_io_bufs += qp->abts_nvme_io_bufs; 20135 } 20136 20137 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 20138 xri_limit = qp->p_multixri_pool->xri_limit; 20139 20140 #ifdef LPFC_MXP_STAT 20141 if (xri_owned <= xri_limit) 20142 qp->p_multixri_pool->below_limit_count++; 20143 else 20144 qp->p_multixri_pool->above_limit_count++; 20145 #endif 20146 20147 /* XRI goes to either public or private free xri pool 20148 * based on watermark and xri_limit 20149 */ 20150 if ((pvt_pool->count < pvt_pool->low_watermark) || 20151 (xri_owned < xri_limit && 20152 pvt_pool->count < pvt_pool->high_watermark)) { 20153 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 20154 qp, free_pvt_pool); 20155 list_add_tail(&lpfc_ncmd->list, 20156 &pvt_pool->list); 20157 pvt_pool->count++; 20158 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 20159 } else { 20160 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 20161 qp, free_pub_pool); 20162 list_add_tail(&lpfc_ncmd->list, 20163 &pbl_pool->list); 20164 pbl_pool->count++; 20165 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20166 } 20167 } else { 20168 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 20169 qp, free_xri); 20170 list_add_tail(&lpfc_ncmd->list, 20171 &qp->lpfc_io_buf_list_put); 20172 qp->put_io_bufs++; 20173 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 20174 iflag); 20175 } 20176 } 20177 20178 /** 20179 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 20180 * @phba: pointer to lpfc hba data structure. 20181 * @pvt_pool: pointer to private pool data structure. 20182 * @ndlp: pointer to lpfc nodelist data structure. 20183 * 20184 * This routine tries to get one free IO buf from private pool. 
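 * Bufs whose XRI is still blocked by an active RRQ for @ndlp (checked with
 * lpfc_test_rrq_active()) are skipped.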
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue data structure.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If the private free xri pool is empty, move some XRIs from the public
 *    to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if a free IO buf is available
 *   NULL - if no free IO buf is available
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

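/*
 * Illustrative sketch only, not used by the driver: the per-buffer skip test
 * applied while walking the get list in lpfc_io_buf() above. The helper name
 * lpfc_example_skip_io_buf is hypothetical. A buffer is passed over while its
 * XRI is still covered by an active RRQ for @ndlp, or while its SGL has not
 * been posted to the adapter.
 */
static inline bool lpfc_example_skip_io_buf(struct lpfc_hba *phba,
					    struct lpfc_nodelist *ndlp,
					    struct lpfc_io_buf *lpfc_cmd)
{
	return lpfc_test_rrq_active(phba, ndlp,
				    lpfc_cmd->cur_iocbq.sli4_lxritag) ||
	       (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED);
}
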
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * it removes an IO buffer from the head of the HWQ's io_buf_list and returns
 * it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}

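/*
 * Illustrative usage sketch only, not part of the driver: a hypothetical
 * caller (lpfc_example_io_cycle) obtaining a buffer for a given hardware
 * queue with lpfc_get_io_buf() and handing it back with lpfc_release_io_buf()
 * once the command completes or cannot be issued.
 */
static inline void lpfc_example_io_cycle(struct lpfc_hba *phba,
					 struct lpfc_nodelist *ndlp, u32 hwqid)
{
	struct lpfc_io_buf *lpfc_cmd;

	/* ndlp is only needed for RRQ testing on SCSI side; 0 = not urgent */
	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
	if (!lpfc_cmd)
		return;		/* no free XRI available right now */

	/* ... build and issue the WQE using lpfc_cmd here ... */

	/* On completion (or failure to issue), return the buffer to its pool */
	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
}
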