1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term * 5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * 6 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 7 * EMULEX and SLI are trademarks of Emulex. * 8 * www.broadcom.com * 9 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 10 * * 11 * This program is free software; you can redistribute it and/or * 12 * modify it under the terms of version 2 of the GNU General * 13 * Public License as published by the Free Software Foundation. * 14 * This program is distributed in the hope that it will be useful. * 15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 19 * TO BE LEGALLY INVALID. See the GNU General Public License for * 20 * more details, a copy of which can be found in the file COPYING * 21 * included with this package. * 22 *******************************************************************/ 23 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/interrupt.h> 27 #include <linux/delay.h> 28 #include <linux/slab.h> 29 #include <linux/lockdep.h> 30 31 #include <scsi/scsi.h> 32 #include <scsi/scsi_cmnd.h> 33 #include <scsi/scsi_device.h> 34 #include <scsi/scsi_host.h> 35 #include <scsi/scsi_transport_fc.h> 36 #include <scsi/fc/fc_fs.h> 37 #include <linux/aer.h> 38 #ifdef CONFIG_X86 39 #include <asm/set_memory.h> 40 #endif 41 42 #include <linux/nvme-fc-driver.h> 43 44 #include "lpfc_hw4.h" 45 #include "lpfc_hw.h" 46 #include "lpfc_sli.h" 47 #include "lpfc_sli4.h" 48 #include "lpfc_nl.h" 49 #include "lpfc_disc.h" 50 #include "lpfc.h" 51 #include "lpfc_scsi.h" 52 #include "lpfc_nvme.h" 53 #include "lpfc_nvmet.h" 54 #include "lpfc_crtn.h" 55 #include "lpfc_logmsg.h" 56 #include "lpfc_compat.h" 57 #include "lpfc_debugfs.h" 58 #include "lpfc_vport.h" 59 #include "lpfc_version.h" 60 61 /* There are only four IOCB completion types. */ 62 typedef enum _lpfc_iocb_type { 63 LPFC_UNKNOWN_IOCB, 64 LPFC_UNSOL_IOCB, 65 LPFC_SOL_IOCB, 66 LPFC_ABORT_IOCB 67 } lpfc_iocb_type; 68 69 70 /* Provide function prototypes local to this module. 
 */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy. Must be a multiple of sizeof(uint64_t).
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if @q is not valid, or -EBUSY if no entries are
 * available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
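 *
 * A minimal caller sketch (illustrative only; the wq and wqe locals are
 * assumptions for the example, not taken from this file):
 *
 *	lockdep_assert_held(&phba->hbalock);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	if (rc)
 *		return rc;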
136 **/ 137 static int 138 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) 139 { 140 union lpfc_wqe *temp_wqe; 141 struct lpfc_register doorbell; 142 uint32_t host_index; 143 uint32_t idx; 144 uint32_t i = 0; 145 uint8_t *tmp; 146 u32 if_type; 147 148 /* sanity check on queue memory */ 149 if (unlikely(!q)) 150 return -ENOMEM; 151 temp_wqe = lpfc_sli4_qe(q, q->host_index); 152 153 /* If the host has not yet processed the next entry then we are done */ 154 idx = ((q->host_index + 1) % q->entry_count); 155 if (idx == q->hba_index) { 156 q->WQ_overflow++; 157 return -EBUSY; 158 } 159 q->WQ_posted++; 160 /* set consumption flag every once in a while */ 161 if (!((q->host_index + 1) % q->notify_interval)) 162 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 163 else 164 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0); 165 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 166 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 167 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 168 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 169 /* write to DPP aperture taking advatage of Combined Writes */ 170 tmp = (uint8_t *)temp_wqe; 171 #ifdef __raw_writeq 172 for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) 173 __raw_writeq(*((uint64_t *)(tmp + i)), 174 q->dpp_regaddr + i); 175 #else 176 for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) 177 __raw_writel(*((uint32_t *)(tmp + i)), 178 q->dpp_regaddr + i); 179 #endif 180 } 181 /* ensure WQE bcopy and DPP flushed before doorbell write */ 182 wmb(); 183 184 /* Update the host index before invoking device */ 185 host_index = q->host_index; 186 187 q->host_index = idx; 188 189 /* Ring Doorbell */ 190 doorbell.word0 = 0; 191 if (q->db_format == LPFC_DB_LIST_FORMAT) { 192 if (q->dpp_enable && q->phba->cfg_enable_dpp) { 193 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1); 194 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1); 195 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell, 196 q->dpp_id); 197 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell, 198 q->queue_id); 199 } else { 200 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 201 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 202 203 /* Leave bits <23:16> clear for if_type 6 dpp */ 204 if_type = bf_get(lpfc_sli_intf_if_type, 205 &q->phba->sli4_hba.sli_intf); 206 if (if_type != LPFC_SLI_INTF_IF_TYPE_6) 207 bf_set(lpfc_wq_db_list_fm_index, &doorbell, 208 host_index); 209 } 210 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 211 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 212 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 213 } else { 214 return -EINVAL; 215 } 216 writel(doorbell.word0, q->db_regaddr); 217 218 return 0; 219 } 220 221 /** 222 * lpfc_sli4_wq_release - Updates internal hba index for WQ 223 * @q: The Work Queue to operate on. 224 * @index: The index to advance the hba index to. 225 * 226 * This routine will update the HBA index of a queue to reflect consumption of 227 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 228 * an entry the host calls this function to update the queue's internal 229 * pointers. This routine returns the number of entries that were consumed by 230 * the HBA. 
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
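 *
 * A typical consumer loops until this routine returns NULL, for example
 * (sketch only; the real loop, including the doorbell writes, is in
 * lpfc_sli4_process_eq() below):
 *
 *	eqe = lpfc_sli4_eq_get(eq);
 *	while (eqe) {
 *		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
 *		__lpfc_sli4_consume_eqe(phba, eq, eqe);
 *		eqe = lpfc_sli4_eq_get(eq);
 *	}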
322 **/ 323 static struct lpfc_eqe * 324 lpfc_sli4_eq_get(struct lpfc_queue *q) 325 { 326 struct lpfc_eqe *eqe; 327 328 /* sanity check on queue memory */ 329 if (unlikely(!q)) 330 return NULL; 331 eqe = lpfc_sli4_qe(q, q->host_index); 332 333 /* If the next EQE is not valid then we are done */ 334 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) 335 return NULL; 336 337 /* 338 * insert barrier for instruction interlock : data from the hardware 339 * must have the valid bit checked before it can be copied and acted 340 * upon. Speculative instructions were allowing a bcopy at the start 341 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately 342 * after our return, to copy data before the valid bit check above 343 * was done. As such, some of the copied data was stale. The barrier 344 * ensures the check is before any data is copied. 345 */ 346 mb(); 347 return eqe; 348 } 349 350 /** 351 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ 352 * @q: The Event Queue to disable interrupts 353 * 354 **/ 355 void 356 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) 357 { 358 struct lpfc_register doorbell; 359 360 doorbell.word0 = 0; 361 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 362 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 363 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 364 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 365 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 366 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 367 } 368 369 /** 370 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ 371 * @q: The Event Queue to disable interrupts 372 * 373 **/ 374 void 375 lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) 376 { 377 struct lpfc_register doorbell; 378 379 doorbell.word0 = 0; 380 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 381 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 382 } 383 384 /** 385 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state 386 * @phba: adapter with EQ 387 * @q: The Event Queue that the host has completed processing for. 388 * @count: Number of elements that have been consumed 389 * @arm: Indicates whether the host wants to arms this CQ. 390 * 391 * This routine will notify the HBA, by ringing the doorbell, that count 392 * number of EQEs have been processed. The @arm parameter indicates whether 393 * the queue should be rearmed when ringing the doorbell. 
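 *
 * Typical use after an EQ poll, normally reached through the
 * sli4_write_eq_db function pointer (sketch; the consumed count is an
 * assumption for the example):
 *
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);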
394 **/ 395 void 396 lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 397 uint32_t count, bool arm) 398 { 399 struct lpfc_register doorbell; 400 401 /* sanity check on queue memory */ 402 if (unlikely(!q || (count == 0 && !arm))) 403 return; 404 405 /* ring doorbell for number popped */ 406 doorbell.word0 = 0; 407 if (arm) { 408 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 409 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 410 } 411 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); 412 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 413 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 414 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 415 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 416 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 417 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 418 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 419 readl(q->phba->sli4_hba.EQDBregaddr); 420 } 421 422 /** 423 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state 424 * @phba: adapter with EQ 425 * @q: The Event Queue that the host has completed processing for. 426 * @count: Number of elements that have been consumed 427 * @arm: Indicates whether the host wants to arms this CQ. 428 * 429 * This routine will notify the HBA, by ringing the doorbell, that count 430 * number of EQEs have been processed. The @arm parameter indicates whether 431 * the queue should be rearmed when ringing the doorbell. 432 **/ 433 void 434 lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 435 uint32_t count, bool arm) 436 { 437 struct lpfc_register doorbell; 438 439 /* sanity check on queue memory */ 440 if (unlikely(!q || (count == 0 && !arm))) 441 return; 442 443 /* ring doorbell for number popped */ 444 doorbell.word0 = 0; 445 if (arm) 446 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1); 447 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count); 448 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); 449 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); 450 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 451 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 452 readl(q->phba->sli4_hba.EQDBregaddr); 453 } 454 455 static void 456 __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 457 struct lpfc_eqe *eqe) 458 { 459 if (!phba->sli4_hba.pc_sli4_params.eqav) 460 bf_set_le32(lpfc_eqe_valid, eqe, 0); 461 462 eq->host_index = ((eq->host_index + 1) % eq->entry_count); 463 464 /* if the index wrapped around, toggle the valid bit */ 465 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index) 466 eq->qe_valid = (eq->qe_valid) ? 
0 : 1; 467 } 468 469 static void 470 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 471 { 472 struct lpfc_eqe *eqe; 473 uint32_t count = 0; 474 475 /* walk all the EQ entries and drop on the floor */ 476 eqe = lpfc_sli4_eq_get(eq); 477 while (eqe) { 478 __lpfc_sli4_consume_eqe(phba, eq, eqe); 479 count++; 480 eqe = lpfc_sli4_eq_get(eq); 481 } 482 483 /* Clear and re-arm the EQ */ 484 phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM); 485 } 486 487 static int 488 lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) 489 { 490 struct lpfc_eqe *eqe; 491 int count = 0, consumed = 0; 492 493 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0) 494 goto rearm_and_exit; 495 496 eqe = lpfc_sli4_eq_get(eq); 497 while (eqe) { 498 lpfc_sli4_hba_handle_eqe(phba, eq, eqe); 499 __lpfc_sli4_consume_eqe(phba, eq, eqe); 500 501 consumed++; 502 if (!(++count % eq->max_proc_limit)) 503 break; 504 505 if (!(count % eq->notify_interval)) { 506 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, 507 LPFC_QUEUE_NOARM); 508 consumed = 0; 509 } 510 511 eqe = lpfc_sli4_eq_get(eq); 512 } 513 eq->EQ_processed += count; 514 515 /* Track the max number of EQEs processed in 1 intr */ 516 if (count > eq->EQ_max_eqe) 517 eq->EQ_max_eqe = count; 518 519 eq->queue_claimed = 0; 520 521 rearm_and_exit: 522 /* Always clear and re-arm the EQ */ 523 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM); 524 525 return count; 526 } 527 528 /** 529 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 530 * @q: The Completion Queue to get the first valid CQE from 531 * 532 * This routine will get the first valid Completion Queue Entry from @q, update 533 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 534 * the Queue (no more work to do), or the Queue is full of CQEs that have been 535 * processed, but not popped back to the HBA then this routine will return NULL. 536 **/ 537 static struct lpfc_cqe * 538 lpfc_sli4_cq_get(struct lpfc_queue *q) 539 { 540 struct lpfc_cqe *cqe; 541 542 /* sanity check on queue memory */ 543 if (unlikely(!q)) 544 return NULL; 545 cqe = lpfc_sli4_qe(q, q->host_index); 546 547 /* If the next CQE is not valid then we are done */ 548 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) 549 return NULL; 550 551 /* 552 * insert barrier for instruction interlock : data from the hardware 553 * must have the valid bit checked before it can be copied and acted 554 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative 555 * instructions allowing action on content before valid bit checked, 556 * add barrier here as well. May not be needed as "content" is a 557 * single 32-bit entity here (vs multi word structure for cq's). 558 */ 559 mb(); 560 return cqe; 561 } 562 563 static void 564 __lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 565 struct lpfc_cqe *cqe) 566 { 567 if (!phba->sli4_hba.pc_sli4_params.cqav) 568 bf_set_le32(lpfc_cqe_valid, cqe, 0); 569 570 cq->host_index = ((cq->host_index + 1) % cq->entry_count); 571 572 /* if the index wrapped around, toggle the valid bit */ 573 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index) 574 cq->qe_valid = (cq->qe_valid) ? 0 : 1; 575 } 576 577 /** 578 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state. 579 * @phba: the adapter with the CQ 580 * @q: The Completion Queue that the host has completed processing for. 
581 * @count: the number of elements that were consumed 582 * @arm: Indicates whether the host wants to arms this CQ. 583 * 584 * This routine will notify the HBA, by ringing the doorbell, that the 585 * CQEs have been processed. The @arm parameter specifies whether the 586 * queue should be rearmed when ringing the doorbell. 587 **/ 588 void 589 lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 590 uint32_t count, bool arm) 591 { 592 struct lpfc_register doorbell; 593 594 /* sanity check on queue memory */ 595 if (unlikely(!q || (count == 0 && !arm))) 596 return; 597 598 /* ring doorbell for number popped */ 599 doorbell.word0 = 0; 600 if (arm) 601 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 602 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count); 603 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); 604 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, 605 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); 606 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); 607 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); 608 } 609 610 /** 611 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state. 612 * @phba: the adapter with the CQ 613 * @q: The Completion Queue that the host has completed processing for. 614 * @count: the number of elements that were consumed 615 * @arm: Indicates whether the host wants to arms this CQ. 616 * 617 * This routine will notify the HBA, by ringing the doorbell, that the 618 * CQEs have been processed. The @arm parameter specifies whether the 619 * queue should be rearmed when ringing the doorbell. 620 **/ 621 void 622 lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, 623 uint32_t count, bool arm) 624 { 625 struct lpfc_register doorbell; 626 627 /* sanity check on queue memory */ 628 if (unlikely(!q || (count == 0 && !arm))) 629 return; 630 631 /* ring doorbell for number popped */ 632 doorbell.word0 = 0; 633 if (arm) 634 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1); 635 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count); 636 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); 637 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); 638 } 639 640 /** 641 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue 642 * @q: The Header Receive Queue to operate on. 643 * @wqe: The Receive Queue Entry to put on the Receive queue. 644 * 645 * This routine will copy the contents of @wqe to the next available entry on 646 * the @q. This function will then ring the Receive Queue Doorbell to signal the 647 * HBA to start processing the Receive Queue Entry. This function returns the 648 * index that the rqe was copied to if successful. If no entries are available 649 * on @q then this function will return -ENOMEM. 650 * The caller is expected to hold the hbalock when calling this routine. 
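 *
 * A caller sketch (illustrative; the hrq/drq queue pointers and the two
 * RQE locals are placeholders for the example):
 *
 *	lockdep_assert_held(&phba->hbalock);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		return rc;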
651 **/ 652 int 653 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 654 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 655 { 656 struct lpfc_rqe *temp_hrqe; 657 struct lpfc_rqe *temp_drqe; 658 struct lpfc_register doorbell; 659 int hq_put_index; 660 int dq_put_index; 661 662 /* sanity check on queue memory */ 663 if (unlikely(!hq) || unlikely(!dq)) 664 return -ENOMEM; 665 hq_put_index = hq->host_index; 666 dq_put_index = dq->host_index; 667 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index); 668 temp_drqe = lpfc_sli4_qe(dq, dq_put_index); 669 670 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 671 return -EINVAL; 672 if (hq_put_index != dq_put_index) 673 return -EINVAL; 674 /* If the host has not yet processed the next entry then we are done */ 675 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) 676 return -EBUSY; 677 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 678 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 679 680 /* Update the host index to point to the next slot */ 681 hq->host_index = ((hq_put_index + 1) % hq->entry_count); 682 dq->host_index = ((dq_put_index + 1) % dq->entry_count); 683 hq->RQ_buf_posted++; 684 685 /* Ring The Header Receive Queue Doorbell */ 686 if (!(hq->host_index % hq->notify_interval)) { 687 doorbell.word0 = 0; 688 if (hq->db_format == LPFC_DB_RING_FORMAT) { 689 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 690 hq->notify_interval); 691 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 692 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 693 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 694 hq->notify_interval); 695 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 696 hq->host_index); 697 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 698 } else { 699 return -EINVAL; 700 } 701 writel(doorbell.word0, hq->db_regaddr); 702 } 703 return hq_put_index; 704 } 705 706 /** 707 * lpfc_sli4_rq_release - Updates internal hba index for RQ 708 * @q: The Header Receive Queue to operate on. 709 * 710 * This routine will update the HBA index of a queue to reflect consumption of 711 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 712 * consumed an entry the host calls this function to update the queue's 713 * internal pointers. This routine returns the number of entries that were 714 * consumed by the HBA. 715 **/ 716 static uint32_t 717 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 718 { 719 /* sanity check on queue memory */ 720 if (unlikely(!hq) || unlikely(!dq)) 721 return 0; 722 723 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 724 return 0; 725 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 726 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 727 return 1; 728 } 729 730 /** 731 * lpfc_cmd_iocb - Get next command iocb entry in the ring 732 * @phba: Pointer to HBA context object. 733 * @pring: Pointer to driver SLI ring object. 734 * 735 * This function returns pointer to next command iocb entry 736 * in the command ring. The caller must hold hbalock to prevent 737 * other threads consume the next command iocb. 738 * SLI-2/SLI-3 provide different sized iocbs. 739 **/ 740 static inline IOCB_t * 741 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 742 { 743 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 744 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 745 } 746 747 /** 748 * lpfc_resp_iocb - Get next response iocb entry in the ring 749 * @phba: Pointer to HBA context object. 
750 * @pring: Pointer to driver SLI ring object. 751 * 752 * This function returns pointer to next response iocb entry 753 * in the response ring. The caller must hold hbalock to make sure 754 * that no other thread consume the next response iocb. 755 * SLI-2/SLI-3 provide different sized iocbs. 756 **/ 757 static inline IOCB_t * 758 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 759 { 760 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + 761 pring->sli.sli3.rspidx * phba->iocb_rsp_size); 762 } 763 764 /** 765 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 766 * @phba: Pointer to HBA context object. 767 * 768 * This function is called with hbalock held. This function 769 * allocates a new driver iocb object from the iocb pool. If the 770 * allocation is successful, it returns pointer to the newly 771 * allocated iocb object else it returns NULL. 772 **/ 773 struct lpfc_iocbq * 774 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 775 { 776 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 777 struct lpfc_iocbq * iocbq = NULL; 778 779 lockdep_assert_held(&phba->hbalock); 780 781 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 782 if (iocbq) 783 phba->iocb_cnt++; 784 if (phba->iocb_cnt > phba->iocb_max) 785 phba->iocb_max = phba->iocb_cnt; 786 return iocbq; 787 } 788 789 /** 790 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 791 * @phba: Pointer to HBA context object. 792 * @xritag: XRI value. 793 * 794 * This function clears the sglq pointer from the array of acive 795 * sglq's. The xritag that is passed in is used to index into the 796 * array. Before the xritag can be used it needs to be adjusted 797 * by subtracting the xribase. 798 * 799 * Returns sglq ponter = success, NULL = Failure. 800 **/ 801 struct lpfc_sglq * 802 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 803 { 804 struct lpfc_sglq *sglq; 805 806 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 807 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; 808 return sglq; 809 } 810 811 /** 812 * __lpfc_get_active_sglq - Get the active sglq for this XRI. 813 * @phba: Pointer to HBA context object. 814 * @xritag: XRI value. 815 * 816 * This function returns the sglq pointer from the array of acive 817 * sglq's. The xritag that is passed in is used to index into the 818 * array. Before the xritag can be used it needs to be adjusted 819 * by subtracting the xribase. 820 * 821 * Returns sglq ponter = success, NULL = Failure. 822 **/ 823 struct lpfc_sglq * 824 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 825 { 826 struct lpfc_sglq *sglq; 827 828 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 829 return sglq; 830 } 831 832 /** 833 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. 834 * @phba: Pointer to HBA context object. 835 * @xritag: xri used in this exchange. 836 * @rrq: The RRQ to be cleared. 837 * 838 **/ 839 void 840 lpfc_clr_rrq_active(struct lpfc_hba *phba, 841 uint16_t xritag, 842 struct lpfc_node_rrq *rrq) 843 { 844 struct lpfc_nodelist *ndlp = NULL; 845 846 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 847 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); 848 849 /* The target DID could have been swapped (cable swap) 850 * we should use the ndlp from the findnode if it is 851 * available. 
852 */ 853 if ((!ndlp) && rrq->ndlp) 854 ndlp = rrq->ndlp; 855 856 if (!ndlp) 857 goto out; 858 859 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 860 rrq->send_rrq = 0; 861 rrq->xritag = 0; 862 rrq->rrq_stop_time = 0; 863 } 864 out: 865 mempool_free(rrq, phba->rrq_pool); 866 } 867 868 /** 869 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 870 * @phba: Pointer to HBA context object. 871 * 872 * This function is called with hbalock held. This function 873 * Checks if stop_time (ratov from setting rrq active) has 874 * been reached, if it has and the send_rrq flag is set then 875 * it will call lpfc_send_rrq. If the send_rrq flag is not set 876 * then it will just call the routine to clear the rrq and 877 * free the rrq resource. 878 * The timer is set to the next rrq that is going to expire before 879 * leaving the routine. 880 * 881 **/ 882 void 883 lpfc_handle_rrq_active(struct lpfc_hba *phba) 884 { 885 struct lpfc_node_rrq *rrq; 886 struct lpfc_node_rrq *nextrrq; 887 unsigned long next_time; 888 unsigned long iflags; 889 LIST_HEAD(send_rrq); 890 891 spin_lock_irqsave(&phba->hbalock, iflags); 892 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 893 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 894 list_for_each_entry_safe(rrq, nextrrq, 895 &phba->active_rrq_list, list) { 896 if (time_after(jiffies, rrq->rrq_stop_time)) 897 list_move(&rrq->list, &send_rrq); 898 else if (time_before(rrq->rrq_stop_time, next_time)) 899 next_time = rrq->rrq_stop_time; 900 } 901 spin_unlock_irqrestore(&phba->hbalock, iflags); 902 if ((!list_empty(&phba->active_rrq_list)) && 903 (!(phba->pport->load_flag & FC_UNLOADING))) 904 mod_timer(&phba->rrq_tmr, next_time); 905 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 906 list_del(&rrq->list); 907 if (!rrq->send_rrq) { 908 /* this call will free the rrq */ 909 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 910 } else if (lpfc_send_rrq(phba, rrq)) { 911 /* if we send the rrq then the completion handler 912 * will clear the bit in the xribitmap. 913 */ 914 lpfc_clr_rrq_active(phba, rrq->xritag, 915 rrq); 916 } 917 } 918 } 919 920 /** 921 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 922 * @vport: Pointer to vport context object. 923 * @xri: The xri used in the exchange. 924 * @did: The targets DID for this exchange. 925 * 926 * returns NULL = rrq not found in the phba->active_rrq_list. 927 * rrq = rrq for this xri and target. 928 **/ 929 struct lpfc_node_rrq * 930 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 931 { 932 struct lpfc_hba *phba = vport->phba; 933 struct lpfc_node_rrq *rrq; 934 struct lpfc_node_rrq *nextrrq; 935 unsigned long iflags; 936 937 if (phba->sli_rev != LPFC_SLI_REV4) 938 return NULL; 939 spin_lock_irqsave(&phba->hbalock, iflags); 940 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 941 if (rrq->vport == vport && rrq->xritag == xri && 942 rrq->nlp_DID == did){ 943 list_del(&rrq->list); 944 spin_unlock_irqrestore(&phba->hbalock, iflags); 945 return rrq; 946 } 947 } 948 spin_unlock_irqrestore(&phba->hbalock, iflags); 949 return NULL; 950 } 951 952 /** 953 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 954 * @vport: Pointer to vport context object. 955 * @ndlp: Pointer to the lpfc_node_list structure. 956 * If ndlp is NULL Remove all active RRQs for this vport from the 957 * phba->active_rrq_list and clear the rrq. 958 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
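 *
 * Typical calls (sketch):
 *
 *	lpfc_cleanup_vports_rrqs(vport, NULL);	clears all RRQs for the vport
 *	lpfc_cleanup_vports_rrqs(vport, ndlp);	clears only this node's RRQs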
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *        < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
1050 */ 1051 if (NLP_CHK_FREE_REQ(ndlp)) 1052 goto out; 1053 1054 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 1055 goto out; 1056 1057 if (!ndlp->active_rrqs_xri_bitmap) 1058 goto out; 1059 1060 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 1061 goto out; 1062 1063 spin_unlock_irqrestore(&phba->hbalock, iflags); 1064 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 1065 if (!rrq) { 1066 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1067 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 1068 " DID:0x%x Send:%d\n", 1069 xritag, rxid, ndlp->nlp_DID, send_rrq); 1070 return -EINVAL; 1071 } 1072 if (phba->cfg_enable_rrq == 1) 1073 rrq->send_rrq = send_rrq; 1074 else 1075 rrq->send_rrq = 0; 1076 rrq->xritag = xritag; 1077 rrq->rrq_stop_time = jiffies + 1078 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 1079 rrq->ndlp = ndlp; 1080 rrq->nlp_DID = ndlp->nlp_DID; 1081 rrq->vport = ndlp->vport; 1082 rrq->rxid = rxid; 1083 spin_lock_irqsave(&phba->hbalock, iflags); 1084 empty = list_empty(&phba->active_rrq_list); 1085 list_add_tail(&rrq->list, &phba->active_rrq_list); 1086 phba->hba_flag |= HBA_RRQ_ACTIVE; 1087 if (empty) 1088 lpfc_worker_wake_up(phba); 1089 spin_unlock_irqrestore(&phba->hbalock, iflags); 1090 return 0; 1091 out: 1092 spin_unlock_irqrestore(&phba->hbalock, iflags); 1093 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 1094 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 1095 " DID:0x%x Send:%d\n", 1096 xritag, rxid, ndlp->nlp_DID, send_rrq); 1097 return -EINVAL; 1098 } 1099 1100 /** 1101 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool 1102 * @phba: Pointer to HBA context object. 1103 * @piocb: Pointer to the iocbq. 1104 * 1105 * This function is called with the ring lock held. This function 1106 * gets a new driver sglq object from the sglq list. If the 1107 * list is not empty then it is successful, it returns pointer to the newly 1108 * allocated sglq object else it returns NULL. 1109 **/ 1110 static struct lpfc_sglq * 1111 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1112 { 1113 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; 1114 struct lpfc_sglq *sglq = NULL; 1115 struct lpfc_sglq *start_sglq = NULL; 1116 struct lpfc_io_buf *lpfc_cmd; 1117 struct lpfc_nodelist *ndlp; 1118 int found = 0; 1119 1120 lockdep_assert_held(&phba->hbalock); 1121 1122 if (piocbq->iocb_flag & LPFC_IO_FCP) { 1123 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1; 1124 ndlp = lpfc_cmd->rdata->pnode; 1125 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 1126 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { 1127 ndlp = piocbq->context_un.ndlp; 1128 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { 1129 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) 1130 ndlp = NULL; 1131 else 1132 ndlp = piocbq->context_un.ndlp; 1133 } else { 1134 ndlp = piocbq->context1; 1135 } 1136 1137 spin_lock(&phba->sli4_hba.sgl_list_lock); 1138 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); 1139 start_sglq = sglq; 1140 while (!found) { 1141 if (!sglq) 1142 break; 1143 if (ndlp && ndlp->active_rrqs_xri_bitmap && 1144 test_bit(sglq->sli4_lxritag, 1145 ndlp->active_rrqs_xri_bitmap)) { 1146 /* This xri has an rrq outstanding for this DID. 1147 * put it back in the list and get another xri. 
	 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty, it removes the first sglq entry and returns a pointer to it;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
1235 **/ 1236 static void 1237 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1238 { 1239 struct lpfc_sglq *sglq; 1240 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1241 unsigned long iflag = 0; 1242 struct lpfc_sli_ring *pring; 1243 1244 lockdep_assert_held(&phba->hbalock); 1245 1246 if (iocbq->sli4_xritag == NO_XRI) 1247 sglq = NULL; 1248 else 1249 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1250 1251 1252 if (sglq) { 1253 if (iocbq->iocb_flag & LPFC_IO_NVMET) { 1254 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1255 iflag); 1256 sglq->state = SGL_FREED; 1257 sglq->ndlp = NULL; 1258 list_add_tail(&sglq->list, 1259 &phba->sli4_hba.lpfc_nvmet_sgl_list); 1260 spin_unlock_irqrestore( 1261 &phba->sli4_hba.sgl_list_lock, iflag); 1262 goto out; 1263 } 1264 1265 pring = phba->sli4_hba.els_wq->pring; 1266 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1267 (sglq->state != SGL_XRI_ABORTED)) { 1268 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1269 iflag); 1270 list_add(&sglq->list, 1271 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1272 spin_unlock_irqrestore( 1273 &phba->sli4_hba.sgl_list_lock, iflag); 1274 } else { 1275 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1276 iflag); 1277 sglq->state = SGL_FREED; 1278 sglq->ndlp = NULL; 1279 list_add_tail(&sglq->list, 1280 &phba->sli4_hba.lpfc_els_sgl_list); 1281 spin_unlock_irqrestore( 1282 &phba->sli4_hba.sgl_list_lock, iflag); 1283 1284 /* Check if TXQ queue needs to be serviced */ 1285 if (!list_empty(&pring->txq)) 1286 lpfc_worker_wake_up(phba); 1287 } 1288 } 1289 1290 out: 1291 /* 1292 * Clean all volatile data fields, preserve iotag and node struct. 1293 */ 1294 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1295 iocbq->sli4_lxritag = NO_XRI; 1296 iocbq->sli4_xritag = NO_XRI; 1297 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | 1298 LPFC_IO_NVME_LS); 1299 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1300 } 1301 1302 1303 /** 1304 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1305 * @phba: Pointer to HBA context object. 1306 * @iocbq: Pointer to driver iocb object. 1307 * 1308 * This function is called with hbalock held to release driver 1309 * iocb object to the iocb pool. The iotag in the iocb object 1310 * does not change for each use of the iocb object. This function 1311 * clears all other fields of the iocb object when it is freed. 1312 **/ 1313 static void 1314 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1315 { 1316 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1317 1318 lockdep_assert_held(&phba->hbalock); 1319 1320 /* 1321 * Clean all volatile data fields, preserve iotag and node struct. 1322 */ 1323 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1324 iocbq->sli4_xritag = NO_XRI; 1325 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1326 } 1327 1328 /** 1329 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1330 * @phba: Pointer to HBA context object. 1331 * @iocbq: Pointer to driver iocb object. 1332 * 1333 * This function is called with hbalock held to release driver 1334 * iocb object to the iocb pool. The iotag in the iocb object 1335 * does not change for each use of the iocb object. This function 1336 * clears all other fields of the iocb object when it is freed. 
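 *
 * Callers that do not already hold hbalock use lpfc_sli_release_iocbq()
 * below, which simply wraps this routine (sketch):
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	__lpfc_sli_release_iocbq(phba, iocbq);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);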
1337 **/ 1338 static void 1339 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1340 { 1341 lockdep_assert_held(&phba->hbalock); 1342 1343 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1344 phba->iocb_cnt--; 1345 } 1346 1347 /** 1348 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1349 * @phba: Pointer to HBA context object. 1350 * @iocbq: Pointer to driver iocb object. 1351 * 1352 * This function is called with no lock held to release the iocb to 1353 * iocb pool. 1354 **/ 1355 void 1356 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1357 { 1358 unsigned long iflags; 1359 1360 /* 1361 * Clean all volatile data fields, preserve iotag and node struct. 1362 */ 1363 spin_lock_irqsave(&phba->hbalock, iflags); 1364 __lpfc_sli_release_iocbq(phba, iocbq); 1365 spin_unlock_irqrestore(&phba->hbalock, iflags); 1366 } 1367 1368 /** 1369 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1370 * @phba: Pointer to HBA context object. 1371 * @iocblist: List of IOCBs. 1372 * @ulpstatus: ULP status in IOCB command field. 1373 * @ulpWord4: ULP word-4 in IOCB command field. 1374 * 1375 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1376 * on the list by invoking the complete callback function associated with the 1377 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1378 * fields. 1379 **/ 1380 void 1381 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1382 uint32_t ulpstatus, uint32_t ulpWord4) 1383 { 1384 struct lpfc_iocbq *piocb; 1385 1386 while (!list_empty(iocblist)) { 1387 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1388 if (!piocb->iocb_cmpl) 1389 lpfc_sli_release_iocbq(phba, piocb); 1390 else { 1391 piocb->iocb.ulpStatus = ulpstatus; 1392 piocb->iocb.un.ulpWord[4] = ulpWord4; 1393 (piocb->iocb_cmpl) (phba, piocb, piocb); 1394 } 1395 } 1396 return; 1397 } 1398 1399 /** 1400 * lpfc_sli_iocb_cmd_type - Get the iocb type 1401 * @iocb_cmnd: iocb command code. 1402 * 1403 * This function is called by ring event handler function to get the iocb type. 1404 * This function translates the iocb command to an iocb command type used to 1405 * decide the final disposition of each completed IOCB. 1406 * The function returns 1407 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1408 * LPFC_SOL_IOCB if it is a solicited iocb completion 1409 * LPFC_ABORT_IOCB if it is an abort iocb 1410 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1411 * 1412 * The caller is not required to hold any lock. 
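 *
 * A ring event handler might use it as follows (sketch; the irsp local
 * and the error handling are assumptions for the example):
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand);
 *	if (type == LPFC_UNKNOWN_IOCB)
 *		return;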
1413 **/ 1414 static lpfc_iocb_type 1415 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1416 { 1417 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1418 1419 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1420 return 0; 1421 1422 switch (iocb_cmnd) { 1423 case CMD_XMIT_SEQUENCE_CR: 1424 case CMD_XMIT_SEQUENCE_CX: 1425 case CMD_XMIT_BCAST_CN: 1426 case CMD_XMIT_BCAST_CX: 1427 case CMD_ELS_REQUEST_CR: 1428 case CMD_ELS_REQUEST_CX: 1429 case CMD_CREATE_XRI_CR: 1430 case CMD_CREATE_XRI_CX: 1431 case CMD_GET_RPI_CN: 1432 case CMD_XMIT_ELS_RSP_CX: 1433 case CMD_GET_RPI_CR: 1434 case CMD_FCP_IWRITE_CR: 1435 case CMD_FCP_IWRITE_CX: 1436 case CMD_FCP_IREAD_CR: 1437 case CMD_FCP_IREAD_CX: 1438 case CMD_FCP_ICMND_CR: 1439 case CMD_FCP_ICMND_CX: 1440 case CMD_FCP_TSEND_CX: 1441 case CMD_FCP_TRSP_CX: 1442 case CMD_FCP_TRECEIVE_CX: 1443 case CMD_FCP_AUTO_TRSP_CX: 1444 case CMD_ADAPTER_MSG: 1445 case CMD_ADAPTER_DUMP: 1446 case CMD_XMIT_SEQUENCE64_CR: 1447 case CMD_XMIT_SEQUENCE64_CX: 1448 case CMD_XMIT_BCAST64_CN: 1449 case CMD_XMIT_BCAST64_CX: 1450 case CMD_ELS_REQUEST64_CR: 1451 case CMD_ELS_REQUEST64_CX: 1452 case CMD_FCP_IWRITE64_CR: 1453 case CMD_FCP_IWRITE64_CX: 1454 case CMD_FCP_IREAD64_CR: 1455 case CMD_FCP_IREAD64_CX: 1456 case CMD_FCP_ICMND64_CR: 1457 case CMD_FCP_ICMND64_CX: 1458 case CMD_FCP_TSEND64_CX: 1459 case CMD_FCP_TRSP64_CX: 1460 case CMD_FCP_TRECEIVE64_CX: 1461 case CMD_GEN_REQUEST64_CR: 1462 case CMD_GEN_REQUEST64_CX: 1463 case CMD_XMIT_ELS_RSP64_CX: 1464 case DSSCMD_IWRITE64_CR: 1465 case DSSCMD_IWRITE64_CX: 1466 case DSSCMD_IREAD64_CR: 1467 case DSSCMD_IREAD64_CX: 1468 type = LPFC_SOL_IOCB; 1469 break; 1470 case CMD_ABORT_XRI_CN: 1471 case CMD_ABORT_XRI_CX: 1472 case CMD_CLOSE_XRI_CN: 1473 case CMD_CLOSE_XRI_CX: 1474 case CMD_XRI_ABORTED_CX: 1475 case CMD_ABORT_MXRI64_CN: 1476 case CMD_XMIT_BLS_RSP64_CX: 1477 type = LPFC_ABORT_IOCB; 1478 break; 1479 case CMD_RCV_SEQUENCE_CX: 1480 case CMD_RCV_ELS_REQ_CX: 1481 case CMD_RCV_SEQUENCE64_CX: 1482 case CMD_RCV_ELS_REQ64_CX: 1483 case CMD_ASYNC_STATUS: 1484 case CMD_IOCB_RCV_SEQ64_CX: 1485 case CMD_IOCB_RCV_ELS64_CX: 1486 case CMD_IOCB_RCV_CONT64_CX: 1487 case CMD_IOCB_RET_XRI64_CX: 1488 type = LPFC_UNSOL_IOCB; 1489 break; 1490 case CMD_IOCB_XMIT_MSEQ64_CR: 1491 case CMD_IOCB_XMIT_MSEQ64_CX: 1492 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1493 case CMD_IOCB_RCV_ELS_LIST64_CX: 1494 case CMD_IOCB_CLOSE_EXTENDED_CN: 1495 case CMD_IOCB_ABORT_EXTENDED_CN: 1496 case CMD_IOCB_RET_HBQE64_CN: 1497 case CMD_IOCB_FCP_IBIDIR64_CR: 1498 case CMD_IOCB_FCP_IBIDIR64_CX: 1499 case CMD_IOCB_FCP_ITASKMGT64_CX: 1500 case CMD_IOCB_LOGENTRY_CN: 1501 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1502 printk("%s - Unhandled SLI-3 Command x%x\n", 1503 __func__, iocb_cmnd); 1504 type = LPFC_UNKNOWN_IOCB; 1505 break; 1506 default: 1507 type = LPFC_UNKNOWN_IOCB; 1508 break; 1509 } 1510 1511 return type; 1512 } 1513 1514 /** 1515 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1516 * @phba: Pointer to HBA context object. 1517 * 1518 * This function is called from SLI initialization code 1519 * to configure every ring of the HBA's SLI interface. The 1520 * caller is not required to hold any lock. This function issues 1521 * a config_ring mailbox command for each ring. 1522 * This function returns zero if successful else returns a negative 1523 * error code. 
1524 **/ 1525 static int 1526 lpfc_sli_ring_map(struct lpfc_hba *phba) 1527 { 1528 struct lpfc_sli *psli = &phba->sli; 1529 LPFC_MBOXQ_t *pmb; 1530 MAILBOX_t *pmbox; 1531 int i, rc, ret = 0; 1532 1533 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1534 if (!pmb) 1535 return -ENOMEM; 1536 pmbox = &pmb->u.mb; 1537 phba->link_state = LPFC_INIT_MBX_CMDS; 1538 for (i = 0; i < psli->num_rings; i++) { 1539 lpfc_config_ring(phba, i, pmb); 1540 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1541 if (rc != MBX_SUCCESS) { 1542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1543 "0446 Adapter failed to init (%d), " 1544 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1545 "ring %d\n", 1546 rc, pmbox->mbxCommand, 1547 pmbox->mbxStatus, i); 1548 phba->link_state = LPFC_HBA_ERROR; 1549 ret = -ENXIO; 1550 break; 1551 } 1552 } 1553 mempool_free(pmb, phba->mbox_mem_pool); 1554 return ret; 1555 } 1556 1557 /** 1558 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1559 * @phba: Pointer to HBA context object. 1560 * @pring: Pointer to driver SLI ring object. 1561 * @piocb: Pointer to the driver iocb object. 1562 * 1563 * This function is called with hbalock held. The function adds the 1564 * new iocb to txcmplq of the given ring. This function always returns 1565 * 0. If this function is called for ELS ring, this function checks if 1566 * there is a vport associated with the ELS command. This function also 1567 * starts els_tmofunc timer if this is an ELS command. 1568 **/ 1569 static int 1570 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1571 struct lpfc_iocbq *piocb) 1572 { 1573 lockdep_assert_held(&phba->hbalock); 1574 1575 BUG_ON(!piocb); 1576 1577 list_add_tail(&piocb->list, &pring->txcmplq); 1578 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1579 pring->txcmplq_cnt++; 1580 1581 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1582 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1583 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1584 BUG_ON(!piocb->vport); 1585 if (!(piocb->vport->load_flag & FC_UNLOADING)) 1586 mod_timer(&piocb->vport->els_tmofunc, 1587 jiffies + 1588 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1589 } 1590 1591 return 0; 1592 } 1593 1594 /** 1595 * lpfc_sli_ringtx_get - Get first element of the txq 1596 * @phba: Pointer to HBA context object. 1597 * @pring: Pointer to driver SLI ring object. 1598 * 1599 * This function is called with hbalock held to get next 1600 * iocb in txq of the given ring. If there is any iocb in 1601 * the txq, the function returns first iocb in the list after 1602 * removing the iocb from the list, else it returns NULL. 1603 **/ 1604 struct lpfc_iocbq * 1605 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1606 { 1607 struct lpfc_iocbq *cmd_iocb; 1608 1609 lockdep_assert_held(&phba->hbalock); 1610 1611 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1612 return cmd_iocb; 1613 } 1614 1615 /** 1616 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1617 * @phba: Pointer to HBA context object. 1618 * @pring: Pointer to driver SLI ring object. 1619 * 1620 * This function is called with hbalock held and the caller must post the 1621 * iocb without releasing the lock. If the caller releases the lock, 1622 * iocb slot returned by the function is not guaranteed to be available. 1623 * The function returns pointer to the next available iocb slot if there 1624 * is available slot in the ring, else it returns NULL. 
1625 * If the get index of the ring is ahead of the put index, the function 1626 * will post an error attention event to the worker thread to take the 1627 * HBA to offline state. 1628 **/ 1629 static IOCB_t * 1630 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1631 { 1632 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1633 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1634 1635 lockdep_assert_held(&phba->hbalock); 1636 1637 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1638 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1639 pring->sli.sli3.next_cmdidx = 0; 1640 1641 if (unlikely(pring->sli.sli3.local_getidx == 1642 pring->sli.sli3.next_cmdidx)) { 1643 1644 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1645 1646 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1647 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1648 "0315 Ring %d issue: portCmdGet %d " 1649 "is bigger than cmd ring %d\n", 1650 pring->ringno, 1651 pring->sli.sli3.local_getidx, 1652 max_cmd_idx); 1653 1654 phba->link_state = LPFC_HBA_ERROR; 1655 /* 1656 * All error attention handlers are posted to 1657 * worker thread 1658 */ 1659 phba->work_ha |= HA_ERATT; 1660 phba->work_hs = HS_FFER3; 1661 1662 lpfc_worker_wake_up(phba); 1663 1664 return NULL; 1665 } 1666 1667 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1668 return NULL; 1669 } 1670 1671 return lpfc_cmd_iocb(phba, pring); 1672 } 1673 1674 /** 1675 * lpfc_sli_next_iotag - Get an iotag for the iocb 1676 * @phba: Pointer to HBA context object. 1677 * @iocbq: Pointer to driver iocb object. 1678 * 1679 * This function gets an iotag for the iocb. If there is no unused iotag and 1680 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1681 * array and assigns a new iotag. 1682 * The function returns the allocated iotag if successful, else returns zero. 1683 * Zero is not a valid iotag. 1684 * The caller is not required to hold any lock. 
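 *
 * Typical use when preparing a new iocbq (sketch; the failure handling is
 * an assumption for the example):
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		return -ENOMEM;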
1685 **/ 1686 uint16_t 1687 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1688 { 1689 struct lpfc_iocbq **new_arr; 1690 struct lpfc_iocbq **old_arr; 1691 size_t new_len; 1692 struct lpfc_sli *psli = &phba->sli; 1693 uint16_t iotag; 1694 1695 spin_lock_irq(&phba->hbalock); 1696 iotag = psli->last_iotag; 1697 if(++iotag < psli->iocbq_lookup_len) { 1698 psli->last_iotag = iotag; 1699 psli->iocbq_lookup[iotag] = iocbq; 1700 spin_unlock_irq(&phba->hbalock); 1701 iocbq->iotag = iotag; 1702 return iotag; 1703 } else if (psli->iocbq_lookup_len < (0xffff 1704 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1705 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1706 spin_unlock_irq(&phba->hbalock); 1707 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1708 GFP_KERNEL); 1709 if (new_arr) { 1710 spin_lock_irq(&phba->hbalock); 1711 old_arr = psli->iocbq_lookup; 1712 if (new_len <= psli->iocbq_lookup_len) { 1713 /* highly unprobable case */ 1714 kfree(new_arr); 1715 iotag = psli->last_iotag; 1716 if(++iotag < psli->iocbq_lookup_len) { 1717 psli->last_iotag = iotag; 1718 psli->iocbq_lookup[iotag] = iocbq; 1719 spin_unlock_irq(&phba->hbalock); 1720 iocbq->iotag = iotag; 1721 return iotag; 1722 } 1723 spin_unlock_irq(&phba->hbalock); 1724 return 0; 1725 } 1726 if (psli->iocbq_lookup) 1727 memcpy(new_arr, old_arr, 1728 ((psli->last_iotag + 1) * 1729 sizeof (struct lpfc_iocbq *))); 1730 psli->iocbq_lookup = new_arr; 1731 psli->iocbq_lookup_len = new_len; 1732 psli->last_iotag = iotag; 1733 psli->iocbq_lookup[iotag] = iocbq; 1734 spin_unlock_irq(&phba->hbalock); 1735 iocbq->iotag = iotag; 1736 kfree(old_arr); 1737 return iotag; 1738 } 1739 } else 1740 spin_unlock_irq(&phba->hbalock); 1741 1742 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1743 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1744 psli->last_iotag); 1745 1746 return 0; 1747 } 1748 1749 /** 1750 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1751 * @phba: Pointer to HBA context object. 1752 * @pring: Pointer to driver SLI ring object. 1753 * @iocb: Pointer to iocb slot in the ring. 1754 * @nextiocb: Pointer to driver iocb object which need to be 1755 * posted to firmware. 1756 * 1757 * This function is called with hbalock held to post a new iocb to 1758 * the firmware. This function copies the new iocb to ring iocb slot and 1759 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1760 * a completion call back for this iocb else the function will free the 1761 * iocb object. 1762 **/ 1763 static void 1764 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1765 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1766 { 1767 lockdep_assert_held(&phba->hbalock); 1768 /* 1769 * Set up an iotag 1770 */ 1771 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1772 1773 1774 if (pring->ringno == LPFC_ELS_RING) { 1775 lpfc_debugfs_slow_ring_trc(phba, 1776 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1777 *(((uint32_t *) &nextiocb->iocb) + 4), 1778 *(((uint32_t *) &nextiocb->iocb) + 6), 1779 *(((uint32_t *) &nextiocb->iocb) + 7)); 1780 } 1781 1782 /* 1783 * Issue iocb command to adapter 1784 */ 1785 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1786 wmb(); 1787 pring->stats.iocb_cmd++; 1788 1789 /* 1790 * If there is no completion routine to call, we can release the 1791 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1792 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
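 * (Conversely, when iocb_cmpl is set the iocb has to stay around until
 * its response comes back, which is what the lpfc_sli_ringtxcmpl_put()
 * call just below arranges.)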
1793 */ 1794 if (nextiocb->iocb_cmpl) 1795 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1796 else 1797 __lpfc_sli_release_iocbq(phba, nextiocb); 1798 1799 /* 1800 * Let the HBA know what IOCB slot will be the next one the 1801 * driver will put a command into. 1802 */ 1803 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1804 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1805 } 1806 1807 /** 1808 * lpfc_sli_update_full_ring - Update the chip attention register 1809 * @phba: Pointer to HBA context object. 1810 * @pring: Pointer to driver SLI ring object. 1811 * 1812 * The caller is not required to hold any lock for calling this function. 1813 * This function updates the chip attention bits for the ring to inform firmware 1814 * that there are pending work to be done for this ring and requests an 1815 * interrupt when there is space available in the ring. This function is 1816 * called when the driver is unable to post more iocbs to the ring due 1817 * to unavailability of space in the ring. 1818 **/ 1819 static void 1820 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1821 { 1822 int ringno = pring->ringno; 1823 1824 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1825 1826 wmb(); 1827 1828 /* 1829 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1830 * The HBA will tell us when an IOCB entry is available. 1831 */ 1832 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1833 readl(phba->CAregaddr); /* flush */ 1834 1835 pring->stats.iocb_cmd_full++; 1836 } 1837 1838 /** 1839 * lpfc_sli_update_ring - Update chip attention register 1840 * @phba: Pointer to HBA context object. 1841 * @pring: Pointer to driver SLI ring object. 1842 * 1843 * This function updates the chip attention register bit for the 1844 * given ring to inform HBA that there is more work to be done 1845 * in this ring. The caller is not required to hold any lock. 1846 **/ 1847 static void 1848 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1849 { 1850 int ringno = pring->ringno; 1851 1852 /* 1853 * Tell the HBA that there is work to do in this ring. 1854 */ 1855 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1856 wmb(); 1857 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1858 readl(phba->CAregaddr); /* flush */ 1859 } 1860 } 1861 1862 /** 1863 * lpfc_sli_resume_iocb - Process iocbs in the txq 1864 * @phba: Pointer to HBA context object. 1865 * @pring: Pointer to driver SLI ring object. 1866 * 1867 * This function is called with hbalock held to post pending iocbs 1868 * in the txq to the firmware. This function is called when driver 1869 * detects space available in the ring. 1870 **/ 1871 static void 1872 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1873 { 1874 IOCB_t *iocb; 1875 struct lpfc_iocbq *nextiocb; 1876 1877 lockdep_assert_held(&phba->hbalock); 1878 1879 /* 1880 * Check to see if: 1881 * (a) there is anything on the txq to send 1882 * (b) link is up 1883 * (c) link attention events can be processed (fcp ring only) 1884 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1885 */ 1886 1887 if (lpfc_is_link_up(phba) && 1888 (!list_empty(&pring->txq)) && 1889 (pring->ringno != LPFC_FCP_RING || 1890 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1891 1892 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1893 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1894 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1895 1896 if (iocb) 1897 lpfc_sli_update_ring(phba, pring); 1898 else 1899 lpfc_sli_update_full_ring(phba, pring); 1900 } 1901 1902 return; 1903 } 1904 1905 /** 1906 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1907 * @phba: Pointer to HBA context object. 1908 * @hbqno: HBQ number. 1909 * 1910 * This function is called with hbalock held to get the next 1911 * available slot for the given HBQ. If there is free slot 1912 * available for the HBQ it will return pointer to the next available 1913 * HBQ entry else it will return NULL. 1914 **/ 1915 static struct lpfc_hbq_entry * 1916 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1917 { 1918 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1919 1920 lockdep_assert_held(&phba->hbalock); 1921 1922 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1923 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1924 hbqp->next_hbqPutIdx = 0; 1925 1926 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1927 uint32_t raw_index = phba->hbq_get[hbqno]; 1928 uint32_t getidx = le32_to_cpu(raw_index); 1929 1930 hbqp->local_hbqGetIdx = getidx; 1931 1932 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1933 lpfc_printf_log(phba, KERN_ERR, 1934 LOG_SLI | LOG_VPORT, 1935 "1802 HBQ %d: local_hbqGetIdx " 1936 "%u is > than hbqp->entry_count %u\n", 1937 hbqno, hbqp->local_hbqGetIdx, 1938 hbqp->entry_count); 1939 1940 phba->link_state = LPFC_HBA_ERROR; 1941 return NULL; 1942 } 1943 1944 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1945 return NULL; 1946 } 1947 1948 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1949 hbqp->hbqPutIdx; 1950 } 1951 1952 /** 1953 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1954 * @phba: Pointer to HBA context object. 1955 * 1956 * This function is called with no lock held to free all the 1957 * hbq buffers while uninitializing the SLI interface. It also 1958 * frees the HBQ buffers returned by the firmware but not yet 1959 * processed by the upper layers. 1960 **/ 1961 void 1962 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1963 { 1964 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1965 struct hbq_dmabuf *hbq_buf; 1966 unsigned long flags; 1967 int i, hbq_count; 1968 1969 hbq_count = lpfc_sli_hbq_count(); 1970 /* Return all memory used by all HBQs */ 1971 spin_lock_irqsave(&phba->hbalock, flags); 1972 for (i = 0; i < hbq_count; ++i) { 1973 list_for_each_entry_safe(dmabuf, next_dmabuf, 1974 &phba->hbqs[i].hbq_buffer_list, list) { 1975 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1976 list_del(&hbq_buf->dbuf.list); 1977 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1978 } 1979 phba->hbqs[i].buffer_count = 0; 1980 } 1981 1982 /* Mark the HBQs not in use */ 1983 phba->hbq_in_use = 0; 1984 spin_unlock_irqrestore(&phba->hbalock, flags); 1985 } 1986 1987 /** 1988 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1989 * @phba: Pointer to HBA context object. 1990 * @hbqno: HBQ number. 1991 * @hbq_buf: Pointer to HBQ buffer. 1992 * 1993 * This function is called with the hbalock held to post a 1994 * hbq buffer to the firmware. If the function finds an empty 1995 * slot in the HBQ, it will post the buffer. 
The function will return 1996 * pointer to the hbq entry if it successfully post the buffer 1997 * else it will return NULL. 1998 **/ 1999 static int 2000 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 2001 struct hbq_dmabuf *hbq_buf) 2002 { 2003 lockdep_assert_held(&phba->hbalock); 2004 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 2005 } 2006 2007 /** 2008 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 2009 * @phba: Pointer to HBA context object. 2010 * @hbqno: HBQ number. 2011 * @hbq_buf: Pointer to HBQ buffer. 2012 * 2013 * This function is called with the hbalock held to post a hbq buffer to the 2014 * firmware. If the function finds an empty slot in the HBQ, it will post the 2015 * buffer and place it on the hbq_buffer_list. The function will return zero if 2016 * it successfully post the buffer else it will return an error. 2017 **/ 2018 static int 2019 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 2020 struct hbq_dmabuf *hbq_buf) 2021 { 2022 struct lpfc_hbq_entry *hbqe; 2023 dma_addr_t physaddr = hbq_buf->dbuf.phys; 2024 2025 lockdep_assert_held(&phba->hbalock); 2026 /* Get next HBQ entry slot to use */ 2027 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 2028 if (hbqe) { 2029 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2030 2031 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 2032 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 2033 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; 2034 hbqe->bde.tus.f.bdeFlags = 0; 2035 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 2036 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 2037 /* Sync SLIM */ 2038 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 2039 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 2040 /* flush */ 2041 readl(phba->hbq_put + hbqno); 2042 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 2043 return 0; 2044 } else 2045 return -ENOMEM; 2046 } 2047 2048 /** 2049 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 2050 * @phba: Pointer to HBA context object. 2051 * @hbqno: HBQ number. 2052 * @hbq_buf: Pointer to HBQ buffer. 2053 * 2054 * This function is called with the hbalock held to post an RQE to the SLI4 2055 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 2056 * the hbq_buffer_list and return zero, otherwise it will return an error. 2057 **/ 2058 static int 2059 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 2060 struct hbq_dmabuf *hbq_buf) 2061 { 2062 int rc; 2063 struct lpfc_rqe hrqe; 2064 struct lpfc_rqe drqe; 2065 struct lpfc_queue *hrq; 2066 struct lpfc_queue *drq; 2067 2068 if (hbqno != LPFC_ELS_HBQ) 2069 return 1; 2070 hrq = phba->sli4_hba.hdr_rq; 2071 drq = phba->sli4_hba.dat_rq; 2072 2073 lockdep_assert_held(&phba->hbalock); 2074 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 2075 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 2076 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 2077 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 2078 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 2079 if (rc < 0) 2080 return rc; 2081 hbq_buf->tag = (rc | (hbqno << 16)); 2082 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 2083 return 0; 2084 } 2085 2086 /* HBQ for ELS and CT traffic. 
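 * The initializer that follows sizes this HBQ at 256 entries, posts 40
 * buffers when the HBQ is first brought up and then tops it up 40 at a
 * time (entry_count, init_count and add_count respectively).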
*/ 2087 static struct lpfc_hbq_init lpfc_els_hbq = { 2088 .rn = 1, 2089 .entry_count = 256, 2090 .mask_count = 0, 2091 .profile = 0, 2092 .ring_mask = (1 << LPFC_ELS_RING), 2093 .buffer_count = 0, 2094 .init_count = 40, 2095 .add_count = 40, 2096 }; 2097 2098 /* Array of HBQs */ 2099 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2100 &lpfc_els_hbq, 2101 }; 2102 2103 /** 2104 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2105 * @phba: Pointer to HBA context object. 2106 * @hbqno: HBQ number. 2107 * @count: Number of HBQ buffers to be posted. 2108 * 2109 * This function is called with no lock held to post more hbq buffers to the 2110 * given HBQ. The function returns the number of HBQ buffers successfully 2111 * posted. 2112 **/ 2113 static int 2114 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2115 { 2116 uint32_t i, posted = 0; 2117 unsigned long flags; 2118 struct hbq_dmabuf *hbq_buffer; 2119 LIST_HEAD(hbq_buf_list); 2120 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2121 return 0; 2122 2123 if ((phba->hbqs[hbqno].buffer_count + count) > 2124 lpfc_hbq_defs[hbqno]->entry_count) 2125 count = lpfc_hbq_defs[hbqno]->entry_count - 2126 phba->hbqs[hbqno].buffer_count; 2127 if (!count) 2128 return 0; 2129 /* Allocate HBQ entries */ 2130 for (i = 0; i < count; i++) { 2131 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2132 if (!hbq_buffer) 2133 break; 2134 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2135 } 2136 /* Check whether HBQ is still in use */ 2137 spin_lock_irqsave(&phba->hbalock, flags); 2138 if (!phba->hbq_in_use) 2139 goto err; 2140 while (!list_empty(&hbq_buf_list)) { 2141 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2142 dbuf.list); 2143 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2144 (hbqno << 16)); 2145 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2146 phba->hbqs[hbqno].buffer_count++; 2147 posted++; 2148 } else 2149 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2150 } 2151 spin_unlock_irqrestore(&phba->hbalock, flags); 2152 return posted; 2153 err: 2154 spin_unlock_irqrestore(&phba->hbalock, flags); 2155 while (!list_empty(&hbq_buf_list)) { 2156 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2157 dbuf.list); 2158 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2159 } 2160 return 0; 2161 } 2162 2163 /** 2164 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2165 * @phba: Pointer to HBA context object. 2166 * @qno: HBQ number. 2167 * 2168 * This function posts more buffers to the HBQ. This function 2169 * is called with no lock held. The function returns the number of HBQ entries 2170 * successfully allocated. 2171 **/ 2172 int 2173 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2174 { 2175 if (phba->sli_rev == LPFC_SLI_REV4) 2176 return 0; 2177 else 2178 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2179 lpfc_hbq_defs[qno]->add_count); 2180 } 2181 2182 /** 2183 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2184 * @phba: Pointer to HBA context object. 2185 * @qno: HBQ queue number. 2186 * 2187 * This function is called from SLI initialization code path with 2188 * no lock held to post initial HBQ buffers to firmware. The 2189 * function returns the number of HBQ entries successfully allocated. 
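 * Note, taken from the code below: an SLI-4 port posts the full
 * entry_count worth of buffers up front, while an SLI-3 port starts
 * with init_count and is topped up later through
 * lpfc_sli_hbqbuf_add_hbqs().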
2190 **/ 2191 static int 2192 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2193 { 2194 if (phba->sli_rev == LPFC_SLI_REV4) 2195 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2196 lpfc_hbq_defs[qno]->entry_count); 2197 else 2198 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2199 lpfc_hbq_defs[qno]->init_count); 2200 } 2201 2202 /** 2203 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2204 * @phba: Pointer to HBA context object. 2205 * @hbqno: HBQ number. 2206 * 2207 * This function removes the first hbq buffer on an hbq list and returns a 2208 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2209 **/ 2210 static struct hbq_dmabuf * 2211 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2212 { 2213 struct lpfc_dmabuf *d_buf; 2214 2215 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2216 if (!d_buf) 2217 return NULL; 2218 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2219 } 2220 2221 /** 2222 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2223 * @phba: Pointer to HBA context object. 2224 * @hbqno: HBQ number. 2225 * 2226 * This function removes the first RQ buffer on an RQ buffer list and returns a 2227 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2228 **/ 2229 static struct rqb_dmabuf * 2230 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2231 { 2232 struct lpfc_dmabuf *h_buf; 2233 struct lpfc_rqb *rqbp; 2234 2235 rqbp = hrq->rqbp; 2236 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2237 struct lpfc_dmabuf, list); 2238 if (!h_buf) 2239 return NULL; 2240 rqbp->buffer_count--; 2241 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2242 } 2243 2244 /** 2245 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2246 * @phba: Pointer to HBA context object. 2247 * @tag: Tag of the hbq buffer. 2248 * 2249 * This function searches for the hbq buffer associated with the given tag in 2250 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2251 * otherwise it returns NULL. 2252 **/ 2253 static struct hbq_dmabuf * 2254 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2255 { 2256 struct lpfc_dmabuf *d_buf; 2257 struct hbq_dmabuf *hbq_buf; 2258 uint32_t hbqno; 2259 2260 hbqno = tag >> 16; 2261 if (hbqno >= LPFC_MAX_HBQS) 2262 return NULL; 2263 2264 spin_lock_irq(&phba->hbalock); 2265 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2266 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2267 if (hbq_buf->tag == tag) { 2268 spin_unlock_irq(&phba->hbalock); 2269 return hbq_buf; 2270 } 2271 } 2272 spin_unlock_irq(&phba->hbalock); 2273 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2274 "1803 Bad hbq tag. Data: x%x x%x\n", 2275 tag, phba->hbqs[tag >> 16].buffer_count); 2276 return NULL; 2277 } 2278 2279 /** 2280 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2281 * @phba: Pointer to HBA context object. 2282 * @hbq_buffer: Pointer to HBQ buffer. 2283 * 2284 * This function is called with hbalock. This function gives back 2285 * the hbq buffer to firmware. If the HBQ does not have space to 2286 * post the buffer, it will free the buffer. 
2287 **/ 2288 void 2289 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2290 { 2291 uint32_t hbqno; 2292 2293 if (hbq_buffer) { 2294 hbqno = hbq_buffer->tag >> 16; 2295 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2296 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2297 } 2298 } 2299 2300 /** 2301 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2302 * @mbxCommand: mailbox command code. 2303 * 2304 * This function is called by the mailbox event handler function to verify 2305 * that the completed mailbox command is a legitimate mailbox command. If the 2306 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2307 * and the mailbox event handler will take the HBA offline. 2308 **/ 2309 static int 2310 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2311 { 2312 uint8_t ret; 2313 2314 switch (mbxCommand) { 2315 case MBX_LOAD_SM: 2316 case MBX_READ_NV: 2317 case MBX_WRITE_NV: 2318 case MBX_WRITE_VPARMS: 2319 case MBX_RUN_BIU_DIAG: 2320 case MBX_INIT_LINK: 2321 case MBX_DOWN_LINK: 2322 case MBX_CONFIG_LINK: 2323 case MBX_CONFIG_RING: 2324 case MBX_RESET_RING: 2325 case MBX_READ_CONFIG: 2326 case MBX_READ_RCONFIG: 2327 case MBX_READ_SPARM: 2328 case MBX_READ_STATUS: 2329 case MBX_READ_RPI: 2330 case MBX_READ_XRI: 2331 case MBX_READ_REV: 2332 case MBX_READ_LNK_STAT: 2333 case MBX_REG_LOGIN: 2334 case MBX_UNREG_LOGIN: 2335 case MBX_CLEAR_LA: 2336 case MBX_DUMP_MEMORY: 2337 case MBX_DUMP_CONTEXT: 2338 case MBX_RUN_DIAGS: 2339 case MBX_RESTART: 2340 case MBX_UPDATE_CFG: 2341 case MBX_DOWN_LOAD: 2342 case MBX_DEL_LD_ENTRY: 2343 case MBX_RUN_PROGRAM: 2344 case MBX_SET_MASK: 2345 case MBX_SET_VARIABLE: 2346 case MBX_UNREG_D_ID: 2347 case MBX_KILL_BOARD: 2348 case MBX_CONFIG_FARP: 2349 case MBX_BEACON: 2350 case MBX_LOAD_AREA: 2351 case MBX_RUN_BIU_DIAG64: 2352 case MBX_CONFIG_PORT: 2353 case MBX_READ_SPARM64: 2354 case MBX_READ_RPI64: 2355 case MBX_REG_LOGIN64: 2356 case MBX_READ_TOPOLOGY: 2357 case MBX_WRITE_WWN: 2358 case MBX_SET_DEBUG: 2359 case MBX_LOAD_EXP_ROM: 2360 case MBX_ASYNCEVT_ENABLE: 2361 case MBX_REG_VPI: 2362 case MBX_UNREG_VPI: 2363 case MBX_HEARTBEAT: 2364 case MBX_PORT_CAPABILITIES: 2365 case MBX_PORT_IOV_CONTROL: 2366 case MBX_SLI4_CONFIG: 2367 case MBX_SLI4_REQ_FTRS: 2368 case MBX_REG_FCFI: 2369 case MBX_UNREG_FCFI: 2370 case MBX_REG_VFI: 2371 case MBX_UNREG_VFI: 2372 case MBX_INIT_VPI: 2373 case MBX_INIT_VFI: 2374 case MBX_RESUME_RPI: 2375 case MBX_READ_EVENT_LOG_STATUS: 2376 case MBX_READ_EVENT_LOG: 2377 case MBX_SECURITY_MGMT: 2378 case MBX_AUTH_PORT: 2379 case MBX_ACCESS_VDATA: 2380 ret = mbxCommand; 2381 break; 2382 default: 2383 ret = MBX_SHUTDOWN; 2384 break; 2385 } 2386 return ret; 2387 } 2388 2389 /** 2390 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2391 * @phba: Pointer to HBA context object. 2392 * @pmboxq: Pointer to mailbox command. 2393 * 2394 * This is completion handler function for mailbox commands issued from 2395 * lpfc_sli_issue_mbox_wait function. This function is called by the 2396 * mailbox event handler function with no lock held. This function 2397 * will wake up thread waiting on the wait queue pointed by context1 2398 * of the mailbox. 2399 **/ 2400 void 2401 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2402 { 2403 unsigned long drvr_flag; 2404 struct completion *pmbox_done; 2405 2406 /* 2407 * If pmbox_done is empty, the driver thread gave up waiting and 2408 * continued running. 
2409 */ 2410 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2411 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2412 pmbox_done = (struct completion *)pmboxq->context3; 2413 if (pmbox_done) 2414 complete(pmbox_done); 2415 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2416 return; 2417 } 2418 2419 2420 /** 2421 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2422 * @phba: Pointer to HBA context object. 2423 * @pmb: Pointer to mailbox object. 2424 * 2425 * This function is the default mailbox completion handler. It 2426 * frees the memory resources associated with the completed mailbox 2427 * command. If the completed command is a REG_LOGIN mailbox command, 2428 * this function will issue a UREG_LOGIN to re-claim the RPI. 2429 **/ 2430 void 2431 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2432 { 2433 struct lpfc_vport *vport = pmb->vport; 2434 struct lpfc_dmabuf *mp; 2435 struct lpfc_nodelist *ndlp; 2436 struct Scsi_Host *shost; 2437 uint16_t rpi, vpi; 2438 int rc; 2439 2440 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 2441 2442 if (mp) { 2443 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2444 kfree(mp); 2445 } 2446 2447 /* 2448 * If a REG_LOGIN succeeded after node is destroyed or node 2449 * is in re-discovery driver need to cleanup the RPI. 2450 */ 2451 if (!(phba->pport->load_flag & FC_UNLOADING) && 2452 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2453 !pmb->u.mb.mbxStatus) { 2454 rpi = pmb->u.mb.un.varWords[0]; 2455 vpi = pmb->u.mb.un.varRegLogin.vpi; 2456 lpfc_unreg_login(phba, vpi, rpi, pmb); 2457 pmb->vport = vport; 2458 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2459 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2460 if (rc != MBX_NOT_FINISHED) 2461 return; 2462 } 2463 2464 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2465 !(phba->pport->load_flag & FC_UNLOADING) && 2466 !pmb->u.mb.mbxStatus) { 2467 shost = lpfc_shost_from_vport(vport); 2468 spin_lock_irq(shost->host_lock); 2469 vport->vpi_state |= LPFC_VPI_REGISTERED; 2470 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2471 spin_unlock_irq(shost->host_lock); 2472 } 2473 2474 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2475 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2476 lpfc_nlp_put(ndlp); 2477 pmb->ctx_buf = NULL; 2478 pmb->ctx_ndlp = NULL; 2479 } 2480 2481 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2482 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2483 2484 /* Check to see if there are any deferred events to process */ 2485 if (ndlp) { 2486 lpfc_printf_vlog( 2487 vport, 2488 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2489 "1438 UNREG cmpl deferred mbox x%x " 2490 "on NPort x%x Data: x%x x%x %p\n", 2491 ndlp->nlp_rpi, ndlp->nlp_DID, 2492 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2493 2494 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2495 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 2496 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2497 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2498 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2499 } else { 2500 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2501 } 2502 pmb->ctx_ndlp = NULL; 2503 } 2504 } 2505 2506 /* Check security permission status on INIT_LINK mailbox command */ 2507 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2508 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2509 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2510 "2860 SLI authentication is required " 2511 "for INIT_LINK but has not done yet\n"); 2512 2513 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2514 lpfc_sli4_mbox_cmd_free(phba, pmb); 2515 else 2516 mempool_free(pmb, 
phba->mbox_mem_pool); 2517 } 2518 /** 2519 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2520 * @phba: Pointer to HBA context object. 2521 * @pmb: Pointer to mailbox object. 2522 * 2523 * This function is the unreg rpi mailbox completion handler. It 2524 * frees the memory resources associated with the completed mailbox 2525 * command. An additional refrenece is put on the ndlp to prevent 2526 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2527 * the unreg mailbox command completes, this routine puts the 2528 * reference back. 2529 * 2530 **/ 2531 void 2532 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2533 { 2534 struct lpfc_vport *vport = pmb->vport; 2535 struct lpfc_nodelist *ndlp; 2536 2537 ndlp = pmb->ctx_ndlp; 2538 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2539 if (phba->sli_rev == LPFC_SLI_REV4 && 2540 (bf_get(lpfc_sli_intf_if_type, 2541 &phba->sli4_hba.sli_intf) >= 2542 LPFC_SLI_INTF_IF_TYPE_2)) { 2543 if (ndlp) { 2544 lpfc_printf_vlog( 2545 vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2546 "0010 UNREG_LOGIN vpi:%x " 2547 "rpi:%x DID:%x defer x%x flg x%x " 2548 "map:%x %p\n", 2549 vport->vpi, ndlp->nlp_rpi, 2550 ndlp->nlp_DID, ndlp->nlp_defer_did, 2551 ndlp->nlp_flag, 2552 ndlp->nlp_usg_map, ndlp); 2553 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2554 lpfc_nlp_put(ndlp); 2555 2556 /* Check to see if there are any deferred 2557 * events to process 2558 */ 2559 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2560 (ndlp->nlp_defer_did != 2561 NLP_EVT_NOTHING_PENDING)) { 2562 lpfc_printf_vlog( 2563 vport, KERN_INFO, LOG_DISCOVERY, 2564 "4111 UNREG cmpl deferred " 2565 "clr x%x on " 2566 "NPort x%x Data: x%x %p\n", 2567 ndlp->nlp_rpi, ndlp->nlp_DID, 2568 ndlp->nlp_defer_did, ndlp); 2569 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2570 ndlp->nlp_defer_did = 2571 NLP_EVT_NOTHING_PENDING; 2572 lpfc_issue_els_plogi( 2573 vport, ndlp->nlp_DID, 0); 2574 } else { 2575 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2576 } 2577 } 2578 } 2579 } 2580 2581 mempool_free(pmb, phba->mbox_mem_pool); 2582 } 2583 2584 /** 2585 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2586 * @phba: Pointer to HBA context object. 2587 * 2588 * This function is called with no lock held. This function processes all 2589 * the completed mailbox commands and gives it to upper layers. The interrupt 2590 * service routine processes mailbox completion interrupt and adds completed 2591 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2592 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2593 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2594 * function returns the mailbox commands to the upper layer by calling the 2595 * completion handler function of each mailbox. 
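 * In short: the interrupt handler queues finished mailboxes on
 * mboxq_cmpl and wakes the worker thread; the worker calls this
 * routine, which splices the list off under hbalock and then, for each
 * entry, validates the command, retries MBXERR_NO_RESOURCES failures,
 * and finally invokes pmb->mbox_cmpl when a handler is set.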
2596 **/ 2597 int 2598 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2599 { 2600 MAILBOX_t *pmbox; 2601 LPFC_MBOXQ_t *pmb; 2602 int rc; 2603 LIST_HEAD(cmplq); 2604 2605 phba->sli.slistat.mbox_event++; 2606 2607 /* Get all completed mailboxe buffers into the cmplq */ 2608 spin_lock_irq(&phba->hbalock); 2609 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2610 spin_unlock_irq(&phba->hbalock); 2611 2612 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2613 do { 2614 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2615 if (pmb == NULL) 2616 break; 2617 2618 pmbox = &pmb->u.mb; 2619 2620 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2621 if (pmb->vport) { 2622 lpfc_debugfs_disc_trc(pmb->vport, 2623 LPFC_DISC_TRC_MBOX_VPORT, 2624 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2625 (uint32_t)pmbox->mbxCommand, 2626 pmbox->un.varWords[0], 2627 pmbox->un.varWords[1]); 2628 } 2629 else { 2630 lpfc_debugfs_disc_trc(phba->pport, 2631 LPFC_DISC_TRC_MBOX, 2632 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2633 (uint32_t)pmbox->mbxCommand, 2634 pmbox->un.varWords[0], 2635 pmbox->un.varWords[1]); 2636 } 2637 } 2638 2639 /* 2640 * It is a fatal error if unknown mbox command completion. 2641 */ 2642 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2643 MBX_SHUTDOWN) { 2644 /* Unknown mailbox command compl */ 2645 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2646 "(%d):0323 Unknown Mailbox command " 2647 "x%x (x%x/x%x) Cmpl\n", 2648 pmb->vport ? pmb->vport->vpi : 0, 2649 pmbox->mbxCommand, 2650 lpfc_sli_config_mbox_subsys_get(phba, 2651 pmb), 2652 lpfc_sli_config_mbox_opcode_get(phba, 2653 pmb)); 2654 phba->link_state = LPFC_HBA_ERROR; 2655 phba->work_hs = HS_FFER3; 2656 lpfc_handle_eratt(phba); 2657 continue; 2658 } 2659 2660 if (pmbox->mbxStatus) { 2661 phba->sli.slistat.mbox_stat_err++; 2662 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2663 /* Mbox cmd cmpl error - RETRYing */ 2664 lpfc_printf_log(phba, KERN_INFO, 2665 LOG_MBOX | LOG_SLI, 2666 "(%d):0305 Mbox cmd cmpl " 2667 "error - RETRYing Data: x%x " 2668 "(x%x/x%x) x%x x%x x%x\n", 2669 pmb->vport ? pmb->vport->vpi : 0, 2670 pmbox->mbxCommand, 2671 lpfc_sli_config_mbox_subsys_get(phba, 2672 pmb), 2673 lpfc_sli_config_mbox_opcode_get(phba, 2674 pmb), 2675 pmbox->mbxStatus, 2676 pmbox->un.varWords[0], 2677 pmb->vport->port_state); 2678 pmbox->mbxStatus = 0; 2679 pmbox->mbxOwner = OWN_HOST; 2680 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2681 if (rc != MBX_NOT_FINISHED) 2682 continue; 2683 } 2684 } 2685 2686 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2687 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2688 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2689 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2690 "x%x x%x x%x\n", 2691 pmb->vport ? pmb->vport->vpi : 0, 2692 pmbox->mbxCommand, 2693 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2694 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2695 pmb->mbox_cmpl, 2696 *((uint32_t *) pmbox), 2697 pmbox->un.varWords[0], 2698 pmbox->un.varWords[1], 2699 pmbox->un.varWords[2], 2700 pmbox->un.varWords[3], 2701 pmbox->un.varWords[4], 2702 pmbox->un.varWords[5], 2703 pmbox->un.varWords[6], 2704 pmbox->un.varWords[7], 2705 pmbox->un.varWords[8], 2706 pmbox->un.varWords[9], 2707 pmbox->un.varWords[10]); 2708 2709 if (pmb->mbox_cmpl) 2710 pmb->mbox_cmpl(phba,pmb); 2711 } while (1); 2712 return 0; 2713 } 2714 2715 /** 2716 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2717 * @phba: Pointer to HBA context object. 2718 * @pring: Pointer to driver SLI ring object. 2719 * @tag: buffer tag. 
2720 * 2721 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2722 * is set in the tag the buffer is posted for a particular exchange, 2723 * the function will return the buffer without replacing the buffer. 2724 * If the buffer is for unsolicited ELS or CT traffic, this function 2725 * returns the buffer and also posts another buffer to the firmware. 2726 **/ 2727 static struct lpfc_dmabuf * 2728 lpfc_sli_get_buff(struct lpfc_hba *phba, 2729 struct lpfc_sli_ring *pring, 2730 uint32_t tag) 2731 { 2732 struct hbq_dmabuf *hbq_entry; 2733 2734 if (tag & QUE_BUFTAG_BIT) 2735 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2736 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2737 if (!hbq_entry) 2738 return NULL; 2739 return &hbq_entry->dbuf; 2740 } 2741 2742 /** 2743 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2744 * @phba: Pointer to HBA context object. 2745 * @pring: Pointer to driver SLI ring object. 2746 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2747 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2748 * @fch_type: the type for the first frame of the sequence. 2749 * 2750 * This function is called with no lock held. This function uses the r_ctl and 2751 * type of the received sequence to find the correct callback function to call 2752 * to process the sequence. 2753 **/ 2754 static int 2755 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2756 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2757 uint32_t fch_type) 2758 { 2759 int i; 2760 2761 switch (fch_type) { 2762 case FC_TYPE_NVME: 2763 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2764 return 1; 2765 default: 2766 break; 2767 } 2768 2769 /* unSolicited Responses */ 2770 if (pring->prt[0].profile) { 2771 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2772 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2773 saveq); 2774 return 1; 2775 } 2776 /* We must search, based on rctl / type 2777 for the right routine */ 2778 for (i = 0; i < pring->num_mask; i++) { 2779 if ((pring->prt[i].rctl == fch_r_ctl) && 2780 (pring->prt[i].type == fch_type)) { 2781 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2782 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2783 (phba, pring, saveq); 2784 return 1; 2785 } 2786 } 2787 return 0; 2788 } 2789 2790 /** 2791 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2792 * @phba: Pointer to HBA context object. 2793 * @pring: Pointer to driver SLI ring object. 2794 * @saveq: Pointer to the unsolicited iocb. 2795 * 2796 * This function is called with no lock held by the ring event handler 2797 * when there is an unsolicited iocb posted to the response ring by the 2798 * firmware. This function gets the buffer associated with the iocbs 2799 * and calls the event handler for the ring. This function handles both 2800 * qring buffers and hbq buffers. 2801 * When the function returns 1 the caller can free the iocb object otherwise 2802 * upper layer functions will free the iocb objects. 
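 * In the current code a return of 0 only happens while a multi-frame
 * sequence is still being accumulated on pring->iocb_continue_saveq
 * (intermediate responses); once the final frame is seen the whole
 * chain is handed to lpfc_complete_unsol_iocb() and 1 is returned.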
2803 **/ 2804 static int 2805 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2806 struct lpfc_iocbq *saveq) 2807 { 2808 IOCB_t * irsp; 2809 WORD5 * w5p; 2810 uint32_t Rctl, Type; 2811 struct lpfc_iocbq *iocbq; 2812 struct lpfc_dmabuf *dmzbuf; 2813 2814 irsp = &(saveq->iocb); 2815 2816 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2817 if (pring->lpfc_sli_rcv_async_status) 2818 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2819 else 2820 lpfc_printf_log(phba, 2821 KERN_WARNING, 2822 LOG_SLI, 2823 "0316 Ring %d handler: unexpected " 2824 "ASYNC_STATUS iocb received evt_code " 2825 "0x%x\n", 2826 pring->ringno, 2827 irsp->un.asyncstat.evt_code); 2828 return 1; 2829 } 2830 2831 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2832 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2833 if (irsp->ulpBdeCount > 0) { 2834 dmzbuf = lpfc_sli_get_buff(phba, pring, 2835 irsp->un.ulpWord[3]); 2836 lpfc_in_buf_free(phba, dmzbuf); 2837 } 2838 2839 if (irsp->ulpBdeCount > 1) { 2840 dmzbuf = lpfc_sli_get_buff(phba, pring, 2841 irsp->unsli3.sli3Words[3]); 2842 lpfc_in_buf_free(phba, dmzbuf); 2843 } 2844 2845 if (irsp->ulpBdeCount > 2) { 2846 dmzbuf = lpfc_sli_get_buff(phba, pring, 2847 irsp->unsli3.sli3Words[7]); 2848 lpfc_in_buf_free(phba, dmzbuf); 2849 } 2850 2851 return 1; 2852 } 2853 2854 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2855 if (irsp->ulpBdeCount != 0) { 2856 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2857 irsp->un.ulpWord[3]); 2858 if (!saveq->context2) 2859 lpfc_printf_log(phba, 2860 KERN_ERR, 2861 LOG_SLI, 2862 "0341 Ring %d Cannot find buffer for " 2863 "an unsolicited iocb. tag 0x%x\n", 2864 pring->ringno, 2865 irsp->un.ulpWord[3]); 2866 } 2867 if (irsp->ulpBdeCount == 2) { 2868 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2869 irsp->unsli3.sli3Words[7]); 2870 if (!saveq->context3) 2871 lpfc_printf_log(phba, 2872 KERN_ERR, 2873 LOG_SLI, 2874 "0342 Ring %d Cannot find buffer for an" 2875 " unsolicited iocb. tag 0x%x\n", 2876 pring->ringno, 2877 irsp->unsli3.sli3Words[7]); 2878 } 2879 list_for_each_entry(iocbq, &saveq->list, list) { 2880 irsp = &(iocbq->iocb); 2881 if (irsp->ulpBdeCount != 0) { 2882 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2883 irsp->un.ulpWord[3]); 2884 if (!iocbq->context2) 2885 lpfc_printf_log(phba, 2886 KERN_ERR, 2887 LOG_SLI, 2888 "0343 Ring %d Cannot find " 2889 "buffer for an unsolicited iocb" 2890 ". tag 0x%x\n", pring->ringno, 2891 irsp->un.ulpWord[3]); 2892 } 2893 if (irsp->ulpBdeCount == 2) { 2894 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2895 irsp->unsli3.sli3Words[7]); 2896 if (!iocbq->context3) 2897 lpfc_printf_log(phba, 2898 KERN_ERR, 2899 LOG_SLI, 2900 "0344 Ring %d Cannot find " 2901 "buffer for an unsolicited " 2902 "iocb. 
tag 0x%x\n", 2903 pring->ringno, 2904 irsp->unsli3.sli3Words[7]); 2905 } 2906 } 2907 } 2908 if (irsp->ulpBdeCount != 0 && 2909 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2910 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2911 int found = 0; 2912 2913 /* search continue save q for same XRI */ 2914 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2915 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2916 saveq->iocb.unsli3.rcvsli3.ox_id) { 2917 list_add_tail(&saveq->list, &iocbq->list); 2918 found = 1; 2919 break; 2920 } 2921 } 2922 if (!found) 2923 list_add_tail(&saveq->clist, 2924 &pring->iocb_continue_saveq); 2925 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2926 list_del_init(&iocbq->clist); 2927 saveq = iocbq; 2928 irsp = &(saveq->iocb); 2929 } else 2930 return 0; 2931 } 2932 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2933 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2934 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2935 Rctl = FC_RCTL_ELS_REQ; 2936 Type = FC_TYPE_ELS; 2937 } else { 2938 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2939 Rctl = w5p->hcsw.Rctl; 2940 Type = w5p->hcsw.Type; 2941 2942 /* Firmware Workaround */ 2943 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2944 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2945 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2946 Rctl = FC_RCTL_ELS_REQ; 2947 Type = FC_TYPE_ELS; 2948 w5p->hcsw.Rctl = Rctl; 2949 w5p->hcsw.Type = Type; 2950 } 2951 } 2952 2953 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2954 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2955 "0313 Ring %d handler: unexpected Rctl x%x " 2956 "Type x%x received\n", 2957 pring->ringno, Rctl, Type); 2958 2959 return 1; 2960 } 2961 2962 /** 2963 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2964 * @phba: Pointer to HBA context object. 2965 * @pring: Pointer to driver SLI ring object. 2966 * @prspiocb: Pointer to response iocb object. 2967 * 2968 * This function looks up the iocb_lookup table to get the command iocb 2969 * corresponding to the given response iocb using the iotag of the 2970 * response iocb. This function is called with the hbalock held 2971 * for sli3 devices or the ring_lock for sli4 devices. 2972 * This function returns the command iocb object if it finds the command 2973 * iocb else returns NULL. 2974 **/ 2975 static struct lpfc_iocbq * 2976 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2977 struct lpfc_sli_ring *pring, 2978 struct lpfc_iocbq *prspiocb) 2979 { 2980 struct lpfc_iocbq *cmd_iocb = NULL; 2981 uint16_t iotag; 2982 lockdep_assert_held(&phba->hbalock); 2983 2984 iotag = prspiocb->iocb.ulpIoTag; 2985 2986 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2987 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2988 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2989 /* remove from txcmpl queue list */ 2990 list_del_init(&cmd_iocb->list); 2991 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2992 pring->txcmplq_cnt--; 2993 return cmd_iocb; 2994 } 2995 } 2996 2997 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2998 "0317 iotag x%x is out of " 2999 "range: max iotag x%x wd0 x%x\n", 3000 iotag, phba->sli.last_iotag, 3001 *(((uint32_t *) &prspiocb->iocb) + 7)); 3002 return NULL; 3003 } 3004 3005 /** 3006 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3007 * @phba: Pointer to HBA context object. 3008 * @pring: Pointer to driver SLI ring object. 3009 * @iotag: IOCB tag. 
3010 * 3011 * This function looks up the iocb_lookup table to get the command iocb 3012 * corresponding to the given iotag. This function is called with the 3013 * hbalock held. 3014 * This function returns the command iocb object if it finds the command 3015 * iocb else returns NULL. 3016 **/ 3017 static struct lpfc_iocbq * 3018 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3019 struct lpfc_sli_ring *pring, uint16_t iotag) 3020 { 3021 struct lpfc_iocbq *cmd_iocb = NULL; 3022 3023 lockdep_assert_held(&phba->hbalock); 3024 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3025 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3026 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3027 /* remove from txcmpl queue list */ 3028 list_del_init(&cmd_iocb->list); 3029 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3030 pring->txcmplq_cnt--; 3031 return cmd_iocb; 3032 } 3033 } 3034 3035 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3036 "0372 iotag x%x lookup error: max iotag (x%x) " 3037 "iocb_flag x%x\n", 3038 iotag, phba->sli.last_iotag, 3039 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3040 return NULL; 3041 } 3042 3043 /** 3044 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3045 * @phba: Pointer to HBA context object. 3046 * @pring: Pointer to driver SLI ring object. 3047 * @saveq: Pointer to the response iocb to be processed. 3048 * 3049 * This function is called by the ring event handler for non-fcp 3050 * rings when there is a new response iocb in the response ring. 3051 * The caller is not required to hold any locks. This function 3052 * gets the command iocb associated with the response iocb and 3053 * calls the completion handler for the command iocb. If there 3054 * is no completion handler, the function will free the resources 3055 * associated with command iocb. If the response iocb is for 3056 * an already aborted command iocb, the status of the completion 3057 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3058 * This function always returns 1. 3059 **/ 3060 static int 3061 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3062 struct lpfc_iocbq *saveq) 3063 { 3064 struct lpfc_iocbq *cmdiocbp; 3065 int rc = 1; 3066 unsigned long iflag; 3067 3068 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 3069 if (phba->sli_rev == LPFC_SLI_REV4) 3070 spin_lock_irqsave(&pring->ring_lock, iflag); 3071 else 3072 spin_lock_irqsave(&phba->hbalock, iflag); 3073 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3074 if (phba->sli_rev == LPFC_SLI_REV4) 3075 spin_unlock_irqrestore(&pring->ring_lock, iflag); 3076 else 3077 spin_unlock_irqrestore(&phba->hbalock, iflag); 3078 3079 if (cmdiocbp) { 3080 if (cmdiocbp->iocb_cmpl) { 3081 /* 3082 * If an ELS command failed send an event to mgmt 3083 * application. 3084 */ 3085 if (saveq->iocb.ulpStatus && 3086 (pring->ringno == LPFC_ELS_RING) && 3087 (cmdiocbp->iocb.ulpCommand == 3088 CMD_ELS_REQUEST64_CR)) 3089 lpfc_send_els_failure_event(phba, 3090 cmdiocbp, saveq); 3091 3092 /* 3093 * Post all ELS completions to the worker thread. 3094 * All other are passed to the completion callback. 
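 * In the code below this means: on the ELS ring the driver-abort and
 * exchange-busy bookkeeping is finished first (under hbalock), and
 * only then is the command's iocb_cmpl invoked; completions on other
 * rings go straight to the callback.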
3095 */ 3096 if (pring->ringno == LPFC_ELS_RING) { 3097 if ((phba->sli_rev < LPFC_SLI_REV4) && 3098 (cmdiocbp->iocb_flag & 3099 LPFC_DRIVER_ABORTED)) { 3100 spin_lock_irqsave(&phba->hbalock, 3101 iflag); 3102 cmdiocbp->iocb_flag &= 3103 ~LPFC_DRIVER_ABORTED; 3104 spin_unlock_irqrestore(&phba->hbalock, 3105 iflag); 3106 saveq->iocb.ulpStatus = 3107 IOSTAT_LOCAL_REJECT; 3108 saveq->iocb.un.ulpWord[4] = 3109 IOERR_SLI_ABORTED; 3110 3111 /* Firmware could still be in progress 3112 * of DMAing payload, so don't free data 3113 * buffer till after a hbeat. 3114 */ 3115 spin_lock_irqsave(&phba->hbalock, 3116 iflag); 3117 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3118 spin_unlock_irqrestore(&phba->hbalock, 3119 iflag); 3120 } 3121 if (phba->sli_rev == LPFC_SLI_REV4) { 3122 if (saveq->iocb_flag & 3123 LPFC_EXCHANGE_BUSY) { 3124 /* Set cmdiocb flag for the 3125 * exchange busy so sgl (xri) 3126 * will not be released until 3127 * the abort xri is received 3128 * from hba. 3129 */ 3130 spin_lock_irqsave( 3131 &phba->hbalock, iflag); 3132 cmdiocbp->iocb_flag |= 3133 LPFC_EXCHANGE_BUSY; 3134 spin_unlock_irqrestore( 3135 &phba->hbalock, iflag); 3136 } 3137 if (cmdiocbp->iocb_flag & 3138 LPFC_DRIVER_ABORTED) { 3139 /* 3140 * Clear LPFC_DRIVER_ABORTED 3141 * bit in case it was driver 3142 * initiated abort. 3143 */ 3144 spin_lock_irqsave( 3145 &phba->hbalock, iflag); 3146 cmdiocbp->iocb_flag &= 3147 ~LPFC_DRIVER_ABORTED; 3148 spin_unlock_irqrestore( 3149 &phba->hbalock, iflag); 3150 cmdiocbp->iocb.ulpStatus = 3151 IOSTAT_LOCAL_REJECT; 3152 cmdiocbp->iocb.un.ulpWord[4] = 3153 IOERR_ABORT_REQUESTED; 3154 /* 3155 * For SLI4, irsiocb contains 3156 * NO_XRI in sli_xritag, it 3157 * shall not affect releasing 3158 * sgl (xri) process. 3159 */ 3160 saveq->iocb.ulpStatus = 3161 IOSTAT_LOCAL_REJECT; 3162 saveq->iocb.un.ulpWord[4] = 3163 IOERR_SLI_ABORTED; 3164 spin_lock_irqsave( 3165 &phba->hbalock, iflag); 3166 saveq->iocb_flag |= 3167 LPFC_DELAY_MEM_FREE; 3168 spin_unlock_irqrestore( 3169 &phba->hbalock, iflag); 3170 } 3171 } 3172 } 3173 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3174 } else 3175 lpfc_sli_release_iocbq(phba, cmdiocbp); 3176 } else { 3177 /* 3178 * Unknown initiating command based on the response iotag. 3179 * This could be the case on the ELS ring because of 3180 * lpfc_els_abort(). 3181 */ 3182 if (pring->ringno != LPFC_ELS_RING) { 3183 /* 3184 * Ring <ringno> handler: unexpected completion IoTag 3185 * <IoTag> 3186 */ 3187 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3188 "0322 Ring %d handler: " 3189 "unexpected completion IoTag x%x " 3190 "Data: x%x x%x x%x x%x\n", 3191 pring->ringno, 3192 saveq->iocb.ulpIoTag, 3193 saveq->iocb.ulpStatus, 3194 saveq->iocb.un.ulpWord[4], 3195 saveq->iocb.ulpCommand, 3196 saveq->iocb.ulpContext); 3197 } 3198 } 3199 3200 return rc; 3201 } 3202 3203 /** 3204 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3205 * @phba: Pointer to HBA context object. 3206 * @pring: Pointer to driver SLI ring object. 3207 * 3208 * This function is called from the iocb ring event handlers when 3209 * put pointer is ahead of the get pointer for a ring. This function signal 3210 * an error attention condition to the worker thread and the worker 3211 * thread will transition the HBA to offline state. 
3212 **/ 3213 static void 3214 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3215 { 3216 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3217 /* 3218 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3219 * rsp ring <portRspMax> 3220 */ 3221 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3222 "0312 Ring %d handler: portRspPut %d " 3223 "is bigger than rsp ring %d\n", 3224 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3225 pring->sli.sli3.numRiocb); 3226 3227 phba->link_state = LPFC_HBA_ERROR; 3228 3229 /* 3230 * All error attention handlers are posted to 3231 * worker thread 3232 */ 3233 phba->work_ha |= HA_ERATT; 3234 phba->work_hs = HS_FFER3; 3235 3236 lpfc_worker_wake_up(phba); 3237 3238 return; 3239 } 3240 3241 /** 3242 * lpfc_poll_eratt - Error attention polling timer timeout handler 3243 * @ptr: Pointer to address of HBA context object. 3244 * 3245 * This function is invoked by the Error Attention polling timer when the 3246 * timer times out. It will check the SLI Error Attention register for 3247 * possible attention events. If so, it will post an Error Attention event 3248 * and wake up worker thread to process it. Otherwise, it will set up the 3249 * Error Attention polling timer for the next poll. 3250 **/ 3251 void lpfc_poll_eratt(struct timer_list *t) 3252 { 3253 struct lpfc_hba *phba; 3254 uint32_t eratt = 0; 3255 uint64_t sli_intr, cnt; 3256 3257 phba = from_timer(phba, t, eratt_poll); 3258 3259 /* Here we will also keep track of interrupts per sec of the hba */ 3260 sli_intr = phba->sli.slistat.sli_intr; 3261 3262 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3263 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3264 sli_intr); 3265 else 3266 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3267 3268 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3269 do_div(cnt, phba->eratt_poll_interval); 3270 phba->sli.slistat.sli_ips = cnt; 3271 3272 phba->sli.slistat.sli_prev_intr = sli_intr; 3273 3274 /* Check chip HA register for error event */ 3275 eratt = lpfc_sli_check_eratt(phba); 3276 3277 if (eratt) 3278 /* Tell the worker thread there is work to do */ 3279 lpfc_worker_wake_up(phba); 3280 else 3281 /* Restart the timer for next eratt poll */ 3282 mod_timer(&phba->eratt_poll, 3283 jiffies + 3284 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3285 return; 3286 } 3287 3288 3289 /** 3290 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3291 * @phba: Pointer to HBA context object. 3292 * @pring: Pointer to driver SLI ring object. 3293 * @mask: Host attention register mask for this ring. 3294 * 3295 * This function is called from the interrupt context when there is a ring 3296 * event for the fcp ring. The caller does not hold any lock. 3297 * The function processes each response iocb in the response ring until it 3298 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3299 * LE bit set. The function will call the completion handler of the command iocb 3300 * if the response iocb indicates a completion for a command iocb or it is 3301 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3302 * function if this is an unsolicited iocb. 3303 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3304 * to check it explicitly. 
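 *
 * Outline of the loop below: snapshot portRspPut, then for every new
 * response entry copy it out of the ring (byte-swapping as needed),
 * classify it with lpfc_sli_iocb_cmd_type(), and either complete the
 * matching command iocb found through lpfc_sli_iocbq_lookup()
 * (solicited and abort types) or hand it to
 * lpfc_sli_process_unsol_iocb(); afterwards rspGetInx is updated and
 * the ring is re-armed if it had gone full.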
3305 */ 3306 int 3307 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3308 struct lpfc_sli_ring *pring, uint32_t mask) 3309 { 3310 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3311 IOCB_t *irsp = NULL; 3312 IOCB_t *entry = NULL; 3313 struct lpfc_iocbq *cmdiocbq = NULL; 3314 struct lpfc_iocbq rspiocbq; 3315 uint32_t status; 3316 uint32_t portRspPut, portRspMax; 3317 int rc = 1; 3318 lpfc_iocb_type type; 3319 unsigned long iflag; 3320 uint32_t rsp_cmpl = 0; 3321 3322 spin_lock_irqsave(&phba->hbalock, iflag); 3323 pring->stats.iocb_event++; 3324 3325 /* 3326 * The next available response entry should never exceed the maximum 3327 * entries. If it does, treat it as an adapter hardware error. 3328 */ 3329 portRspMax = pring->sli.sli3.numRiocb; 3330 portRspPut = le32_to_cpu(pgp->rspPutInx); 3331 if (unlikely(portRspPut >= portRspMax)) { 3332 lpfc_sli_rsp_pointers_error(phba, pring); 3333 spin_unlock_irqrestore(&phba->hbalock, iflag); 3334 return 1; 3335 } 3336 if (phba->fcp_ring_in_use) { 3337 spin_unlock_irqrestore(&phba->hbalock, iflag); 3338 return 1; 3339 } else 3340 phba->fcp_ring_in_use = 1; 3341 3342 rmb(); 3343 while (pring->sli.sli3.rspidx != portRspPut) { 3344 /* 3345 * Fetch an entry off the ring and copy it into a local data 3346 * structure. The copy involves a byte-swap since the 3347 * network byte order and pci byte orders are different. 3348 */ 3349 entry = lpfc_resp_iocb(phba, pring); 3350 phba->last_completion_time = jiffies; 3351 3352 if (++pring->sli.sli3.rspidx >= portRspMax) 3353 pring->sli.sli3.rspidx = 0; 3354 3355 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3356 (uint32_t *) &rspiocbq.iocb, 3357 phba->iocb_rsp_size); 3358 INIT_LIST_HEAD(&(rspiocbq.list)); 3359 irsp = &rspiocbq.iocb; 3360 3361 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3362 pring->stats.iocb_rsp++; 3363 rsp_cmpl++; 3364 3365 if (unlikely(irsp->ulpStatus)) { 3366 /* 3367 * If resource errors reported from HBA, reduce 3368 * queuedepths of the SCSI device. 3369 */ 3370 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3371 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3372 IOERR_NO_RESOURCES)) { 3373 spin_unlock_irqrestore(&phba->hbalock, iflag); 3374 phba->lpfc_rampdown_queue_depth(phba); 3375 spin_lock_irqsave(&phba->hbalock, iflag); 3376 } 3377 3378 /* Rsp ring <ringno> error: IOCB */ 3379 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3380 "0336 Rsp Ring %d error: IOCB Data: " 3381 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3382 pring->ringno, 3383 irsp->un.ulpWord[0], 3384 irsp->un.ulpWord[1], 3385 irsp->un.ulpWord[2], 3386 irsp->un.ulpWord[3], 3387 irsp->un.ulpWord[4], 3388 irsp->un.ulpWord[5], 3389 *(uint32_t *)&irsp->un1, 3390 *((uint32_t *)&irsp->un1 + 1)); 3391 } 3392 3393 switch (type) { 3394 case LPFC_ABORT_IOCB: 3395 case LPFC_SOL_IOCB: 3396 /* 3397 * Idle exchange closed via ABTS from port. No iocb 3398 * resources need to be recovered. 3399 */ 3400 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3401 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3402 "0333 IOCB cmd 0x%x" 3403 " processed. 
Skipping" 3404 " completion\n", 3405 irsp->ulpCommand); 3406 break; 3407 } 3408 3409 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3410 &rspiocbq); 3411 if (unlikely(!cmdiocbq)) 3412 break; 3413 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3414 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3415 if (cmdiocbq->iocb_cmpl) { 3416 spin_unlock_irqrestore(&phba->hbalock, iflag); 3417 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3418 &rspiocbq); 3419 spin_lock_irqsave(&phba->hbalock, iflag); 3420 } 3421 break; 3422 case LPFC_UNSOL_IOCB: 3423 spin_unlock_irqrestore(&phba->hbalock, iflag); 3424 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3425 spin_lock_irqsave(&phba->hbalock, iflag); 3426 break; 3427 default: 3428 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3429 char adaptermsg[LPFC_MAX_ADPTMSG]; 3430 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3431 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3432 MAX_MSG_DATA); 3433 dev_warn(&((phba->pcidev)->dev), 3434 "lpfc%d: %s\n", 3435 phba->brd_no, adaptermsg); 3436 } else { 3437 /* Unknown IOCB command */ 3438 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3439 "0334 Unknown IOCB command " 3440 "Data: x%x, x%x x%x x%x x%x\n", 3441 type, irsp->ulpCommand, 3442 irsp->ulpStatus, 3443 irsp->ulpIoTag, 3444 irsp->ulpContext); 3445 } 3446 break; 3447 } 3448 3449 /* 3450 * The response IOCB has been processed. Update the ring 3451 * pointer in SLIM. If the port response put pointer has not 3452 * been updated, sync the pgp->rspPutInx and fetch the new port 3453 * response put pointer. 3454 */ 3455 writel(pring->sli.sli3.rspidx, 3456 &phba->host_gp[pring->ringno].rspGetInx); 3457 3458 if (pring->sli.sli3.rspidx == portRspPut) 3459 portRspPut = le32_to_cpu(pgp->rspPutInx); 3460 } 3461 3462 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3463 pring->stats.iocb_rsp_full++; 3464 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3465 writel(status, phba->CAregaddr); 3466 readl(phba->CAregaddr); 3467 } 3468 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3469 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3470 pring->stats.iocb_cmd_empty++; 3471 3472 /* Force update of the local copy of cmdGetInx */ 3473 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3474 lpfc_sli_resume_iocb(phba, pring); 3475 3476 if ((pring->lpfc_sli_cmd_available)) 3477 (pring->lpfc_sli_cmd_available) (phba, pring); 3478 3479 } 3480 3481 phba->fcp_ring_in_use = 0; 3482 spin_unlock_irqrestore(&phba->hbalock, iflag); 3483 return rc; 3484 } 3485 3486 /** 3487 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3488 * @phba: Pointer to HBA context object. 3489 * @pring: Pointer to driver SLI ring object. 3490 * @rspiocbp: Pointer to driver response IOCB object. 3491 * 3492 * This function is called from the worker thread when there is a slow-path 3493 * response IOCB to process. This function chains all the response iocbs until 3494 * seeing the iocb with the LE bit set. The function will call 3495 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3496 * completion of a command iocb. The function will call the 3497 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3498 * The function frees the resources or calls the completion handler if this 3499 * iocb is an abort completion. The function returns NULL when the response 3500 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3501 * this function shall chain the iocb on to the iocb_continueq and return the 3502 * response iocb passed in. 
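 * Put differently: a NULL return means the entry carried the LE bit,
 * so the accumulated chain has been processed and, where appropriate,
 * freed; a non-NULL return is simply @rspiocbp handed back after being
 * parked on iocb_continueq to wait for the rest of the sequence.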
3503 **/ 3504 static struct lpfc_iocbq * 3505 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3506 struct lpfc_iocbq *rspiocbp) 3507 { 3508 struct lpfc_iocbq *saveq; 3509 struct lpfc_iocbq *cmdiocbp; 3510 struct lpfc_iocbq *next_iocb; 3511 IOCB_t *irsp = NULL; 3512 uint32_t free_saveq; 3513 uint8_t iocb_cmd_type; 3514 lpfc_iocb_type type; 3515 unsigned long iflag; 3516 int rc; 3517 3518 spin_lock_irqsave(&phba->hbalock, iflag); 3519 /* First add the response iocb to the countinueq list */ 3520 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3521 pring->iocb_continueq_cnt++; 3522 3523 /* Now, determine whether the list is completed for processing */ 3524 irsp = &rspiocbp->iocb; 3525 if (irsp->ulpLe) { 3526 /* 3527 * By default, the driver expects to free all resources 3528 * associated with this iocb completion. 3529 */ 3530 free_saveq = 1; 3531 saveq = list_get_first(&pring->iocb_continueq, 3532 struct lpfc_iocbq, list); 3533 irsp = &(saveq->iocb); 3534 list_del_init(&pring->iocb_continueq); 3535 pring->iocb_continueq_cnt = 0; 3536 3537 pring->stats.iocb_rsp++; 3538 3539 /* 3540 * If resource errors reported from HBA, reduce 3541 * queuedepths of the SCSI device. 3542 */ 3543 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3544 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3545 IOERR_NO_RESOURCES)) { 3546 spin_unlock_irqrestore(&phba->hbalock, iflag); 3547 phba->lpfc_rampdown_queue_depth(phba); 3548 spin_lock_irqsave(&phba->hbalock, iflag); 3549 } 3550 3551 if (irsp->ulpStatus) { 3552 /* Rsp ring <ringno> error: IOCB */ 3553 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3554 "0328 Rsp Ring %d error: " 3555 "IOCB Data: " 3556 "x%x x%x x%x x%x " 3557 "x%x x%x x%x x%x " 3558 "x%x x%x x%x x%x " 3559 "x%x x%x x%x x%x\n", 3560 pring->ringno, 3561 irsp->un.ulpWord[0], 3562 irsp->un.ulpWord[1], 3563 irsp->un.ulpWord[2], 3564 irsp->un.ulpWord[3], 3565 irsp->un.ulpWord[4], 3566 irsp->un.ulpWord[5], 3567 *(((uint32_t *) irsp) + 6), 3568 *(((uint32_t *) irsp) + 7), 3569 *(((uint32_t *) irsp) + 8), 3570 *(((uint32_t *) irsp) + 9), 3571 *(((uint32_t *) irsp) + 10), 3572 *(((uint32_t *) irsp) + 11), 3573 *(((uint32_t *) irsp) + 12), 3574 *(((uint32_t *) irsp) + 13), 3575 *(((uint32_t *) irsp) + 14), 3576 *(((uint32_t *) irsp) + 15)); 3577 } 3578 3579 /* 3580 * Fetch the IOCB command type and call the correct completion 3581 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3582 * get freed back to the lpfc_iocb_list by the discovery 3583 * kernel thread. 
3584 */ 3585 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3586 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3587 switch (type) { 3588 case LPFC_SOL_IOCB: 3589 spin_unlock_irqrestore(&phba->hbalock, iflag); 3590 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3591 spin_lock_irqsave(&phba->hbalock, iflag); 3592 break; 3593 3594 case LPFC_UNSOL_IOCB: 3595 spin_unlock_irqrestore(&phba->hbalock, iflag); 3596 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3597 spin_lock_irqsave(&phba->hbalock, iflag); 3598 if (!rc) 3599 free_saveq = 0; 3600 break; 3601 3602 case LPFC_ABORT_IOCB: 3603 cmdiocbp = NULL; 3604 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3605 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3606 saveq); 3607 if (cmdiocbp) { 3608 /* Call the specified completion routine */ 3609 if (cmdiocbp->iocb_cmpl) { 3610 spin_unlock_irqrestore(&phba->hbalock, 3611 iflag); 3612 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3613 saveq); 3614 spin_lock_irqsave(&phba->hbalock, 3615 iflag); 3616 } else 3617 __lpfc_sli_release_iocbq(phba, 3618 cmdiocbp); 3619 } 3620 break; 3621 3622 case LPFC_UNKNOWN_IOCB: 3623 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3624 char adaptermsg[LPFC_MAX_ADPTMSG]; 3625 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3626 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3627 MAX_MSG_DATA); 3628 dev_warn(&((phba->pcidev)->dev), 3629 "lpfc%d: %s\n", 3630 phba->brd_no, adaptermsg); 3631 } else { 3632 /* Unknown IOCB command */ 3633 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3634 "0335 Unknown IOCB " 3635 "command Data: x%x " 3636 "x%x x%x x%x\n", 3637 irsp->ulpCommand, 3638 irsp->ulpStatus, 3639 irsp->ulpIoTag, 3640 irsp->ulpContext); 3641 } 3642 break; 3643 } 3644 3645 if (free_saveq) { 3646 list_for_each_entry_safe(rspiocbp, next_iocb, 3647 &saveq->list, list) { 3648 list_del_init(&rspiocbp->list); 3649 __lpfc_sli_release_iocbq(phba, rspiocbp); 3650 } 3651 __lpfc_sli_release_iocbq(phba, saveq); 3652 } 3653 rspiocbp = NULL; 3654 } 3655 spin_unlock_irqrestore(&phba->hbalock, iflag); 3656 return rspiocbp; 3657 } 3658 3659 /** 3660 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3661 * @phba: Pointer to HBA context object. 3662 * @pring: Pointer to driver SLI ring object. 3663 * @mask: Host attention register mask for this ring. 3664 * 3665 * This routine wraps the actual slow_ring event process routine from the 3666 * API jump table function pointer from the lpfc_hba struct. 3667 **/ 3668 void 3669 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3670 struct lpfc_sli_ring *pring, uint32_t mask) 3671 { 3672 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3673 } 3674 3675 /** 3676 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3677 * @phba: Pointer to HBA context object. 3678 * @pring: Pointer to driver SLI ring object. 3679 * @mask: Host attention register mask for this ring. 3680 * 3681 * This function is called from the worker thread when there is a ring event 3682 * for non-fcp rings. The caller does not hold any lock. The function will 3683 * remove each response iocb in the response ring and calls the handle 3684 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
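 * Locking note (from the body below): the hbalock is held while walking the
 * response ring and is dropped around each lpfc_sli_sp_handle_rspiocb() call.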
3685 **/ 3686 static void 3687 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3688 struct lpfc_sli_ring *pring, uint32_t mask) 3689 { 3690 struct lpfc_pgp *pgp; 3691 IOCB_t *entry; 3692 IOCB_t *irsp = NULL; 3693 struct lpfc_iocbq *rspiocbp = NULL; 3694 uint32_t portRspPut, portRspMax; 3695 unsigned long iflag; 3696 uint32_t status; 3697 3698 pgp = &phba->port_gp[pring->ringno]; 3699 spin_lock_irqsave(&phba->hbalock, iflag); 3700 pring->stats.iocb_event++; 3701 3702 /* 3703 * The next available response entry should never exceed the maximum 3704 * entries. If it does, treat it as an adapter hardware error. 3705 */ 3706 portRspMax = pring->sli.sli3.numRiocb; 3707 portRspPut = le32_to_cpu(pgp->rspPutInx); 3708 if (portRspPut >= portRspMax) { 3709 /* 3710 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3711 * rsp ring <portRspMax> 3712 */ 3713 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3714 "0303 Ring %d handler: portRspPut %d " 3715 "is bigger than rsp ring %d\n", 3716 pring->ringno, portRspPut, portRspMax); 3717 3718 phba->link_state = LPFC_HBA_ERROR; 3719 spin_unlock_irqrestore(&phba->hbalock, iflag); 3720 3721 phba->work_hs = HS_FFER3; 3722 lpfc_handle_eratt(phba); 3723 3724 return; 3725 } 3726 3727 rmb(); 3728 while (pring->sli.sli3.rspidx != portRspPut) { 3729 /* 3730 * Build a completion list and call the appropriate handler. 3731 * The process is to get the next available response iocb, get 3732 * a free iocb from the list, copy the response data into the 3733 * free iocb, insert to the continuation list, and update the 3734 * next response index to slim. This process makes response 3735 * iocb's in the ring available to DMA as fast as possible but 3736 * pays a penalty for a copy operation. Since the iocb is 3737 * only 32 bytes, this penalty is considered small relative to 3738 * the PCI reads for register values and a slim write. When 3739 * the ulpLe field is set, the entire Command has been 3740 * received. 3741 */ 3742 entry = lpfc_resp_iocb(phba, pring); 3743 3744 phba->last_completion_time = jiffies; 3745 rspiocbp = __lpfc_sli_get_iocbq(phba); 3746 if (rspiocbp == NULL) { 3747 printk(KERN_ERR "%s: out of buffers! Failing " 3748 "completion.\n", __func__); 3749 break; 3750 } 3751 3752 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3753 phba->iocb_rsp_size); 3754 irsp = &rspiocbp->iocb; 3755 3756 if (++pring->sli.sli3.rspidx >= portRspMax) 3757 pring->sli.sli3.rspidx = 0; 3758 3759 if (pring->ringno == LPFC_ELS_RING) { 3760 lpfc_debugfs_slow_ring_trc(phba, 3761 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3762 *(((uint32_t *) irsp) + 4), 3763 *(((uint32_t *) irsp) + 6), 3764 *(((uint32_t *) irsp) + 7)); 3765 } 3766 3767 writel(pring->sli.sli3.rspidx, 3768 &phba->host_gp[pring->ringno].rspGetInx); 3769 3770 spin_unlock_irqrestore(&phba->hbalock, iflag); 3771 /* Handle the response IOCB */ 3772 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3773 spin_lock_irqsave(&phba->hbalock, iflag); 3774 3775 /* 3776 * If the port response put pointer has not been updated, sync 3777 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3778 * response put pointer. 
3779 */ 3780 if (pring->sli.sli3.rspidx == portRspPut) { 3781 portRspPut = le32_to_cpu(pgp->rspPutInx); 3782 } 3783 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3784 3785 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3786 /* At least one response entry has been freed */ 3787 pring->stats.iocb_rsp_full++; 3788 /* SET RxRE_RSP in Chip Att register */ 3789 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3790 writel(status, phba->CAregaddr); 3791 readl(phba->CAregaddr); /* flush */ 3792 } 3793 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3794 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3795 pring->stats.iocb_cmd_empty++; 3796 3797 /* Force update of the local copy of cmdGetInx */ 3798 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3799 lpfc_sli_resume_iocb(phba, pring); 3800 3801 if ((pring->lpfc_sli_cmd_available)) 3802 (pring->lpfc_sli_cmd_available) (phba, pring); 3803 3804 } 3805 3806 spin_unlock_irqrestore(&phba->hbalock, iflag); 3807 return; 3808 } 3809 3810 /** 3811 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3812 * @phba: Pointer to HBA context object. 3813 * @pring: Pointer to driver SLI ring object. 3814 * @mask: Host attention register mask for this ring. 3815 * 3816 * This function is called from the worker thread when there is a pending 3817 * ELS response iocb on the driver internal slow-path response iocb worker 3818 * queue. The caller does not hold any lock. The function will remove each 3819 * response iocb from the response worker queue and calls the handle 3820 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3821 **/ 3822 static void 3823 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3824 struct lpfc_sli_ring *pring, uint32_t mask) 3825 { 3826 struct lpfc_iocbq *irspiocbq; 3827 struct hbq_dmabuf *dmabuf; 3828 struct lpfc_cq_event *cq_event; 3829 unsigned long iflag; 3830 int count = 0; 3831 3832 spin_lock_irqsave(&phba->hbalock, iflag); 3833 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3834 spin_unlock_irqrestore(&phba->hbalock, iflag); 3835 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3836 /* Get the response iocb from the head of work queue */ 3837 spin_lock_irqsave(&phba->hbalock, iflag); 3838 list_remove_head(&phba->sli4_hba.sp_queue_event, 3839 cq_event, struct lpfc_cq_event, list); 3840 spin_unlock_irqrestore(&phba->hbalock, iflag); 3841 3842 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3843 case CQE_CODE_COMPL_WQE: 3844 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3845 cq_event); 3846 /* Translate ELS WCQE to response IOCBQ */ 3847 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3848 irspiocbq); 3849 if (irspiocbq) 3850 lpfc_sli_sp_handle_rspiocb(phba, pring, 3851 irspiocbq); 3852 count++; 3853 break; 3854 case CQE_CODE_RECEIVE: 3855 case CQE_CODE_RECEIVE_V1: 3856 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3857 cq_event); 3858 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3859 count++; 3860 break; 3861 default: 3862 break; 3863 } 3864 3865 /* Limit the number of events to 64 to avoid soft lockups */ 3866 if (count == 64) 3867 break; 3868 } 3869 } 3870 3871 /** 3872 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3873 * @phba: Pointer to HBA context object. 3874 * @pring: Pointer to driver SLI ring object. 3875 * 3876 * This function aborts all iocbs in the given ring and frees all the iocb 3877 * objects in txq. This function issues an abort iocb for all the iocb commands 3878 * in txcmplq. 
The iocbs in the txcmplq are not guaranteed to complete before
3879 * the return of this function. The caller is not required to hold any locks.
3880 **/
3881 void
3882 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3883 {
3884 LIST_HEAD(completions);
3885 struct lpfc_iocbq *iocb, *next_iocb;
3886
3887 if (pring->ringno == LPFC_ELS_RING) {
3888 lpfc_fabric_abort_hba(phba);
3889 }
3890
3891 /* Error everything on txq and txcmplq
3892 * First do the txq.
3893 */
3894 if (phba->sli_rev >= LPFC_SLI_REV4) {
3895 spin_lock_irq(&pring->ring_lock);
3896 list_splice_init(&pring->txq, &completions);
3897 pring->txq_cnt = 0;
3898 spin_unlock_irq(&pring->ring_lock);
3899
3900 spin_lock_irq(&phba->hbalock);
3901 /* Next issue ABTS for everything on the txcmplq */
3902 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3903 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3904 spin_unlock_irq(&phba->hbalock);
3905 } else {
3906 spin_lock_irq(&phba->hbalock);
3907 list_splice_init(&pring->txq, &completions);
3908 pring->txq_cnt = 0;
3909
3910 /* Next issue ABTS for everything on the txcmplq */
3911 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3912 lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3913 spin_unlock_irq(&phba->hbalock);
3914 }
3915
3916 /* Cancel all the IOCBs from the completions list */
3917 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3918 IOERR_SLI_ABORTED);
3919 }
3920
3921 /**
3922 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3923 * @phba: Pointer to HBA context object.
3925 *
3926 * This function aborts all iocbs in FCP rings and frees all the iocb
3927 * objects in txq. This function issues an abort iocb for all the iocb commands
3928 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
3929 * the return of this function. The caller is not required to hold any locks.
3930 **/
3931 void
3932 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3933 {
3934 struct lpfc_sli *psli = &phba->sli;
3935 struct lpfc_sli_ring *pring;
3936 uint32_t i;
3937
3938 /* Look on all the FCP Rings for the iotag */
3939 if (phba->sli_rev >= LPFC_SLI_REV4) {
3940 for (i = 0; i < phba->cfg_hdw_queue; i++) {
3941 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring;
3942 lpfc_sli_abort_iocb_ring(phba, pring);
3943 }
3944 } else {
3945 pring = &psli->sli3_ring[LPFC_FCP_RING];
3946 lpfc_sli_abort_iocb_ring(phba, pring);
3947 }
3948 }
3949
3950 /**
3951 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3952 * @phba: Pointer to HBA context object.
3953 *
3954 * This function flushes all iocbs in the fcp ring and frees all the iocb
3955 * objects in txq and txcmplq. This function will not issue abort iocbs
3956 * for all the iocb commands in txcmplq; they will just be returned with
3957 * IOERR_SLI_DOWN. This function is invoked during EEH handling when the
3958 * device's PCI slot has been permanently disabled.
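 * A rough summary of the SLI4 path implemented below: set HBA_FCP_IOQ_FLUSH
 * under the hbalock, then for each hardware queue splice txq and txcmplq off
 * their pring under the ring_lock and complete every entry with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN via lpfc_sli_cancel_iocbs().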
3959 **/ 3960 void 3961 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3962 { 3963 LIST_HEAD(txq); 3964 LIST_HEAD(txcmplq); 3965 struct lpfc_sli *psli = &phba->sli; 3966 struct lpfc_sli_ring *pring; 3967 uint32_t i; 3968 struct lpfc_iocbq *piocb, *next_iocb; 3969 3970 spin_lock_irq(&phba->hbalock); 3971 /* Indicate the I/O queues are flushed */ 3972 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3973 spin_unlock_irq(&phba->hbalock); 3974 3975 /* Look on all the FCP Rings for the iotag */ 3976 if (phba->sli_rev >= LPFC_SLI_REV4) { 3977 for (i = 0; i < phba->cfg_hdw_queue; i++) { 3978 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 3979 3980 spin_lock_irq(&pring->ring_lock); 3981 /* Retrieve everything on txq */ 3982 list_splice_init(&pring->txq, &txq); 3983 list_for_each_entry_safe(piocb, next_iocb, 3984 &pring->txcmplq, list) 3985 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3986 /* Retrieve everything on the txcmplq */ 3987 list_splice_init(&pring->txcmplq, &txcmplq); 3988 pring->txq_cnt = 0; 3989 pring->txcmplq_cnt = 0; 3990 spin_unlock_irq(&pring->ring_lock); 3991 3992 /* Flush the txq */ 3993 lpfc_sli_cancel_iocbs(phba, &txq, 3994 IOSTAT_LOCAL_REJECT, 3995 IOERR_SLI_DOWN); 3996 /* Flush the txcmpq */ 3997 lpfc_sli_cancel_iocbs(phba, &txcmplq, 3998 IOSTAT_LOCAL_REJECT, 3999 IOERR_SLI_DOWN); 4000 } 4001 } else { 4002 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4003 4004 spin_lock_irq(&phba->hbalock); 4005 /* Retrieve everything on txq */ 4006 list_splice_init(&pring->txq, &txq); 4007 list_for_each_entry_safe(piocb, next_iocb, 4008 &pring->txcmplq, list) 4009 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4010 /* Retrieve everything on the txcmplq */ 4011 list_splice_init(&pring->txcmplq, &txcmplq); 4012 pring->txq_cnt = 0; 4013 pring->txcmplq_cnt = 0; 4014 spin_unlock_irq(&phba->hbalock); 4015 4016 /* Flush the txq */ 4017 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4018 IOERR_SLI_DOWN); 4019 /* Flush the txcmpq */ 4020 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4021 IOERR_SLI_DOWN); 4022 } 4023 } 4024 4025 /** 4026 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 4027 * @phba: Pointer to HBA context object. 4028 * 4029 * This function flushes all wqes in the nvme rings and frees all resources 4030 * in the txcmplq. This function does not issue abort wqes for the IO 4031 * commands in txcmplq, they will just be returned with 4032 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 4033 * slot has been permanently disabled. 4034 **/ 4035 void 4036 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) 4037 { 4038 LIST_HEAD(txcmplq); 4039 struct lpfc_sli_ring *pring; 4040 uint32_t i; 4041 struct lpfc_iocbq *piocb, *next_iocb; 4042 4043 if ((phba->sli_rev < LPFC_SLI_REV4) || 4044 !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) 4045 return; 4046 4047 /* Hint to other driver operations that a flush is in progress. */ 4048 spin_lock_irq(&phba->hbalock); 4049 phba->hba_flag |= HBA_NVME_IOQ_FLUSH; 4050 spin_unlock_irq(&phba->hbalock); 4051 4052 /* Cycle through all NVME rings and complete each IO with 4053 * a local driver reason code. This is a flush so no 4054 * abort exchange to FW. 
4055 */
4056 for (i = 0; i < phba->cfg_hdw_queue; i++) {
4057 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
4058
4059 spin_lock_irq(&pring->ring_lock);
4060 list_for_each_entry_safe(piocb, next_iocb,
4061 &pring->txcmplq, list)
4062 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
4063 /* Retrieve everything on the txcmplq */
4064 list_splice_init(&pring->txcmplq, &txcmplq);
4065 pring->txcmplq_cnt = 0;
4066 spin_unlock_irq(&pring->ring_lock);
4067
4068 /* Flush the txcmplq */
4069 lpfc_sli_cancel_iocbs(phba, &txcmplq,
4070 IOSTAT_LOCAL_REJECT,
4071 IOERR_SLI_DOWN);
4072 }
4073 }
4074
4075 /**
4076 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4077 * @phba: Pointer to HBA context object.
4078 * @mask: Bit mask to be checked.
4079 *
4080 * This function reads the host status register and compares
4081 * with the provided bit mask to check if the HBA completed
4082 * the restart. This function will wait in a loop for the
4083 * HBA to complete restart. If the HBA does not restart within
4084 * 15 iterations, the function will reset the HBA again. The
4085 * function returns 1 when the HBA fails to restart, otherwise it
4086 * returns zero.
4087 **/
4088 static int
4089 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4090 {
4091 uint32_t status;
4092 int i = 0;
4093 int retval = 0;
4094
4095 /* Read the HBA Host Status Register */
4096 if (lpfc_readl(phba->HSregaddr, &status))
4097 return 1;
4098
4099 /*
4100 * Check the status register every 10ms for 5 retries, then every
4101 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4102 * check every 2.5 sec for 5 more.
4103 * Break out of the loop if errors occurred during init.
4104 */
4105 while (((status & mask) != mask) &&
4106 !(status & HS_FFERM) &&
4107 i++ < 20) {
4108
4109 if (i <= 5)
4110 msleep(10);
4111 else if (i <= 10)
4112 msleep(500);
4113 else
4114 msleep(2500);
4115
4116 if (i == 15) {
4117 /* Do post */
4118 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4119 lpfc_sli_brdrestart(phba);
4120 }
4121 /* Read the HBA Host Status Register */
4122 if (lpfc_readl(phba->HSregaddr, &status)) {
4123 retval = 1;
4124 break;
4125 }
4126 }
4127
4128 /* Check to see if any errors occurred during init */
4129 if ((status & HS_FFERM) || (i >= 20)) {
4130 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4131 "2751 Adapter failed to restart, "
4132 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4133 status,
4134 readl(phba->MBslimaddr + 0xa8),
4135 readl(phba->MBslimaddr + 0xac));
4136 phba->link_state = LPFC_HBA_ERROR;
4137 retval = 1;
4138 }
4139
4140 return retval;
4141 }
4142
4143 /**
4144 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4145 * @phba: Pointer to HBA context object.
4146 * @mask: Bit mask to be checked.
4147 *
4148 * This function checks the host status register to determine whether the
4149 * HBA is ready. It will wait in a loop for the HBA to become ready.
4150 * If the HBA is not ready, the function will reset the HBA PCI
4151 * function again. The function returns 1 when the HBA fails to become
4152 * ready, otherwise it returns zero.
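 * Note: when the port reports ready, this path also clears
 * phba->sli4_hba.intr_enable (see the body below).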
4153 **/
4154 static int
4155 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4156 {
4157 uint32_t status;
4158 int retval = 0;
4159
4160 /* Read the HBA Host Status Register */
4161 status = lpfc_sli4_post_status_check(phba);
4162
4163 if (status) {
4164 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4165 lpfc_sli_brdrestart(phba);
4166 status = lpfc_sli4_post_status_check(phba);
4167 }
4168
4169 /* Check to see if any errors occurred during init */
4170 if (status) {
4171 phba->link_state = LPFC_HBA_ERROR;
4172 retval = 1;
4173 } else
4174 phba->sli4_hba.intr_enable = 0;
4175
4176 return retval;
4177 }
4178
4179 /**
4180 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4181 * @phba: Pointer to HBA context object.
4182 * @mask: Bit mask to be checked.
4183 *
4184 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4185 * from the API jump table function pointer from the lpfc_hba struct.
4186 **/
4187 int
4188 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4189 {
4190 return phba->lpfc_sli_brdready(phba, mask);
4191 }
4192
4193 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4194
4195 /**
4196 * lpfc_reset_barrier - Make HBA ready for HBA reset
4197 * @phba: Pointer to HBA context object.
4198 *
4199 * This function is called before resetting an HBA. It is called with the
4200 * hbalock held and requests the HBA to quiesce DMAs before the reset.
4201 **/
4202 void lpfc_reset_barrier(struct lpfc_hba *phba)
4203 {
4204 uint32_t __iomem *resp_buf;
4205 uint32_t __iomem *mbox_buf;
4206 volatile uint32_t mbox;
4207 uint32_t hc_copy, ha_copy, resp_data;
4208 int i;
4209 uint8_t hdrtype;
4210
4211 lockdep_assert_held(&phba->hbalock);
4212
4213 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4214 if (hdrtype != 0x80 ||
4215 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4216 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4217 return;
4218
4219 /*
4220 * Tell the other part of the chip to suspend temporarily all
4221 * its DMA activity.
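 * This is done by writing an MBX_KILL_BOARD mailbox word, owned by the chip
 * (OWN_CHIP), directly into SLIM and then polling until the response area
 * reads back the complement of BARRIER_TEST_PATTERN.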
4222 */ 4223 resp_buf = phba->MBslimaddr; 4224 4225 /* Disable the error attention */ 4226 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4227 return; 4228 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4229 readl(phba->HCregaddr); /* flush */ 4230 phba->link_flag |= LS_IGNORE_ERATT; 4231 4232 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4233 return; 4234 if (ha_copy & HA_ERATT) { 4235 /* Clear Chip error bit */ 4236 writel(HA_ERATT, phba->HAregaddr); 4237 phba->pport->stopped = 1; 4238 } 4239 4240 mbox = 0; 4241 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4242 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4243 4244 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4245 mbox_buf = phba->MBslimaddr; 4246 writel(mbox, mbox_buf); 4247 4248 for (i = 0; i < 50; i++) { 4249 if (lpfc_readl((resp_buf + 1), &resp_data)) 4250 return; 4251 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4252 mdelay(1); 4253 else 4254 break; 4255 } 4256 resp_data = 0; 4257 if (lpfc_readl((resp_buf + 1), &resp_data)) 4258 return; 4259 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4260 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4261 phba->pport->stopped) 4262 goto restore_hc; 4263 else 4264 goto clear_errat; 4265 } 4266 4267 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4268 resp_data = 0; 4269 for (i = 0; i < 500; i++) { 4270 if (lpfc_readl(resp_buf, &resp_data)) 4271 return; 4272 if (resp_data != mbox) 4273 mdelay(1); 4274 else 4275 break; 4276 } 4277 4278 clear_errat: 4279 4280 while (++i < 500) { 4281 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4282 return; 4283 if (!(ha_copy & HA_ERATT)) 4284 mdelay(1); 4285 else 4286 break; 4287 } 4288 4289 if (readl(phba->HAregaddr) & HA_ERATT) { 4290 writel(HA_ERATT, phba->HAregaddr); 4291 phba->pport->stopped = 1; 4292 } 4293 4294 restore_hc: 4295 phba->link_flag &= ~LS_IGNORE_ERATT; 4296 writel(hc_copy, phba->HCregaddr); 4297 readl(phba->HCregaddr); /* flush */ 4298 } 4299 4300 /** 4301 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4302 * @phba: Pointer to HBA context object. 4303 * 4304 * This function issues a kill_board mailbox command and waits for 4305 * the error attention interrupt. This function is called for stopping 4306 * the firmware processing. The caller is not required to hold any 4307 * locks. This function calls lpfc_hba_down_post function to free 4308 * any pending commands after the kill. The function will return 1 when it 4309 * fails to kill the board else will return 0. 
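 * While the KILL_BOARD mailbox is outstanding, the error attention interrupt
 * is masked (HC_ERINT_ENA cleared) and LS_IGNORE_ERATT is set; HA_ERATT is
 * then polled every 100ms for up to 3 seconds.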
4310 **/ 4311 int 4312 lpfc_sli_brdkill(struct lpfc_hba *phba) 4313 { 4314 struct lpfc_sli *psli; 4315 LPFC_MBOXQ_t *pmb; 4316 uint32_t status; 4317 uint32_t ha_copy; 4318 int retval; 4319 int i = 0; 4320 4321 psli = &phba->sli; 4322 4323 /* Kill HBA */ 4324 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4325 "0329 Kill HBA Data: x%x x%x\n", 4326 phba->pport->port_state, psli->sli_flag); 4327 4328 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4329 if (!pmb) 4330 return 1; 4331 4332 /* Disable the error attention */ 4333 spin_lock_irq(&phba->hbalock); 4334 if (lpfc_readl(phba->HCregaddr, &status)) { 4335 spin_unlock_irq(&phba->hbalock); 4336 mempool_free(pmb, phba->mbox_mem_pool); 4337 return 1; 4338 } 4339 status &= ~HC_ERINT_ENA; 4340 writel(status, phba->HCregaddr); 4341 readl(phba->HCregaddr); /* flush */ 4342 phba->link_flag |= LS_IGNORE_ERATT; 4343 spin_unlock_irq(&phba->hbalock); 4344 4345 lpfc_kill_board(phba, pmb); 4346 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4347 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4348 4349 if (retval != MBX_SUCCESS) { 4350 if (retval != MBX_BUSY) 4351 mempool_free(pmb, phba->mbox_mem_pool); 4352 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4353 "2752 KILL_BOARD command failed retval %d\n", 4354 retval); 4355 spin_lock_irq(&phba->hbalock); 4356 phba->link_flag &= ~LS_IGNORE_ERATT; 4357 spin_unlock_irq(&phba->hbalock); 4358 return 1; 4359 } 4360 4361 spin_lock_irq(&phba->hbalock); 4362 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4363 spin_unlock_irq(&phba->hbalock); 4364 4365 mempool_free(pmb, phba->mbox_mem_pool); 4366 4367 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4368 * attention every 100ms for 3 seconds. If we don't get ERATT after 4369 * 3 seconds we still set HBA_ERROR state because the status of the 4370 * board is now undefined. 4371 */ 4372 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4373 return 1; 4374 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4375 mdelay(100); 4376 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4377 return 1; 4378 } 4379 4380 del_timer_sync(&psli->mbox_tmo); 4381 if (ha_copy & HA_ERATT) { 4382 writel(HA_ERATT, phba->HAregaddr); 4383 phba->pport->stopped = 1; 4384 } 4385 spin_lock_irq(&phba->hbalock); 4386 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4387 psli->mbox_active = NULL; 4388 phba->link_flag &= ~LS_IGNORE_ERATT; 4389 spin_unlock_irq(&phba->hbalock); 4390 4391 lpfc_hba_down_post(phba); 4392 phba->link_state = LPFC_HBA_ERROR; 4393 4394 return ha_copy & HA_ERATT ? 0 : 1; 4395 } 4396 4397 /** 4398 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4399 * @phba: Pointer to HBA context object. 4400 * 4401 * This function resets the HBA by writing HC_INITFF to the control 4402 * register. After the HBA resets, this function resets all the iocb ring 4403 * indices. This function disables PCI layer parity checking during 4404 * the reset. 4405 * This function returns 0 always. 4406 * The caller is not required to hold any locks. 4407 **/ 4408 int 4409 lpfc_sli_brdreset(struct lpfc_hba *phba) 4410 { 4411 struct lpfc_sli *psli; 4412 struct lpfc_sli_ring *pring; 4413 uint16_t cfg_value; 4414 int i; 4415 4416 psli = &phba->sli; 4417 4418 /* Reset HBA */ 4419 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4420 "0325 Reset HBA Data: x%x x%x\n", 4421 (phba->pport) ? 
phba->pport->port_state : 0, 4422 psli->sli_flag); 4423 4424 /* perform board reset */ 4425 phba->fc_eventTag = 0; 4426 phba->link_events = 0; 4427 if (phba->pport) { 4428 phba->pport->fc_myDID = 0; 4429 phba->pport->fc_prevDID = 0; 4430 } 4431 4432 /* Turn off parity checking and serr during the physical reset */ 4433 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) 4434 return -EIO; 4435 4436 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4437 (cfg_value & 4438 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4439 4440 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4441 4442 /* Now toggle INITFF bit in the Host Control Register */ 4443 writel(HC_INITFF, phba->HCregaddr); 4444 mdelay(1); 4445 readl(phba->HCregaddr); /* flush */ 4446 writel(0, phba->HCregaddr); 4447 readl(phba->HCregaddr); /* flush */ 4448 4449 /* Restore PCI cmd register */ 4450 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4451 4452 /* Initialize relevant SLI info */ 4453 for (i = 0; i < psli->num_rings; i++) { 4454 pring = &psli->sli3_ring[i]; 4455 pring->flag = 0; 4456 pring->sli.sli3.rspidx = 0; 4457 pring->sli.sli3.next_cmdidx = 0; 4458 pring->sli.sli3.local_getidx = 0; 4459 pring->sli.sli3.cmdidx = 0; 4460 pring->missbufcnt = 0; 4461 } 4462 4463 phba->link_state = LPFC_WARM_START; 4464 return 0; 4465 } 4466 4467 /** 4468 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4469 * @phba: Pointer to HBA context object. 4470 * 4471 * This function resets a SLI4 HBA. This function disables PCI layer parity 4472 * checking during resets the device. The caller is not required to hold 4473 * any locks. 4474 * 4475 * This function returns 0 always. 4476 **/ 4477 int 4478 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4479 { 4480 struct lpfc_sli *psli = &phba->sli; 4481 uint16_t cfg_value; 4482 int rc = 0; 4483 4484 /* Reset HBA */ 4485 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4486 "0295 Reset HBA Data: x%x x%x x%x\n", 4487 phba->pport->port_state, psli->sli_flag, 4488 phba->hba_flag); 4489 4490 /* perform board reset */ 4491 phba->fc_eventTag = 0; 4492 phba->link_events = 0; 4493 phba->pport->fc_myDID = 0; 4494 phba->pport->fc_prevDID = 0; 4495 4496 spin_lock_irq(&phba->hbalock); 4497 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4498 phba->fcf.fcf_flag = 0; 4499 spin_unlock_irq(&phba->hbalock); 4500 4501 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4502 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4503 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4504 return rc; 4505 } 4506 4507 /* Now physically reset the device */ 4508 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4509 "0389 Performing PCI function reset!\n"); 4510 4511 /* Turn off parity checking and serr during the physical reset */ 4512 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { 4513 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4514 "3205 PCI read Config failed\n"); 4515 return -EIO; 4516 } 4517 4518 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4519 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4520 4521 /* Perform FCoE PCI function reset before freeing queue memory */ 4522 rc = lpfc_pci_function_reset(phba); 4523 4524 /* Restore PCI cmd register */ 4525 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4526 4527 return rc; 4528 } 4529 4530 /** 4531 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4532 * @phba: Pointer to HBA context object. 4533 * 4534 * This function is called in the SLI initialization code path to 4535 * restart the HBA. The caller is not required to hold any lock. 
4536 * This function writes MBX_RESTART mailbox command to the SLIM and 4537 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4538 * function to free any pending commands. The function enables 4539 * POST only during the first initialization. The function returns zero. 4540 * The function does not guarantee completion of MBX_RESTART mailbox 4541 * command before the return of this function. 4542 **/ 4543 static int 4544 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4545 { 4546 MAILBOX_t *mb; 4547 struct lpfc_sli *psli; 4548 volatile uint32_t word0; 4549 void __iomem *to_slim; 4550 uint32_t hba_aer_enabled; 4551 4552 spin_lock_irq(&phba->hbalock); 4553 4554 /* Take PCIe device Advanced Error Reporting (AER) state */ 4555 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4556 4557 psli = &phba->sli; 4558 4559 /* Restart HBA */ 4560 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4561 "0337 Restart HBA Data: x%x x%x\n", 4562 (phba->pport) ? phba->pport->port_state : 0, 4563 psli->sli_flag); 4564 4565 word0 = 0; 4566 mb = (MAILBOX_t *) &word0; 4567 mb->mbxCommand = MBX_RESTART; 4568 mb->mbxHc = 1; 4569 4570 lpfc_reset_barrier(phba); 4571 4572 to_slim = phba->MBslimaddr; 4573 writel(*(uint32_t *) mb, to_slim); 4574 readl(to_slim); /* flush */ 4575 4576 /* Only skip post after fc_ffinit is completed */ 4577 if (phba->pport && phba->pport->port_state) 4578 word0 = 1; /* This is really setting up word1 */ 4579 else 4580 word0 = 0; /* This is really setting up word1 */ 4581 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4582 writel(*(uint32_t *) mb, to_slim); 4583 readl(to_slim); /* flush */ 4584 4585 lpfc_sli_brdreset(phba); 4586 if (phba->pport) 4587 phba->pport->stopped = 0; 4588 phba->link_state = LPFC_INIT_START; 4589 phba->hba_flag = 0; 4590 spin_unlock_irq(&phba->hbalock); 4591 4592 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4593 psli->stats_start = ktime_get_seconds(); 4594 4595 /* Give the INITFF and Post time to settle. */ 4596 mdelay(100); 4597 4598 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4599 if (hba_aer_enabled) 4600 pci_disable_pcie_error_reporting(phba->pcidev); 4601 4602 lpfc_hba_down_post(phba); 4603 4604 return 0; 4605 } 4606 4607 /** 4608 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4609 * @phba: Pointer to HBA context object. 4610 * 4611 * This function is called in the SLI initialization code path to restart 4612 * a SLI4 HBA. The caller is not required to hold any lock. 4613 * At the end of the function, it calls lpfc_hba_down_post function to 4614 * free any pending commands. 
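 * Unlike the SLI3 restart path, this routine also tears down the SLI4
 * queues via lpfc_sli4_queue_destroy() after lpfc_hba_down_post().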
4615 **/
4616 static int
4617 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4618 {
4619 struct lpfc_sli *psli = &phba->sli;
4620 uint32_t hba_aer_enabled;
4621 int rc;
4622
4623 /* Restart HBA */
4624 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4625 "0296 Restart HBA Data: x%x x%x\n",
4626 phba->pport->port_state, psli->sli_flag);
4627
4628 /* Take PCIe device Advanced Error Reporting (AER) state */
4629 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4630
4631 rc = lpfc_sli4_brdreset(phba);
4632 if (rc)
4633 return rc;
4634
4635 spin_lock_irq(&phba->hbalock);
4636 phba->pport->stopped = 0;
4637 phba->link_state = LPFC_INIT_START;
4638 phba->hba_flag = 0;
4639 spin_unlock_irq(&phba->hbalock);
4640
4641 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4642 psli->stats_start = ktime_get_seconds();
4643
4644 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4645 if (hba_aer_enabled)
4646 pci_disable_pcie_error_reporting(phba->pcidev);
4647
4648 lpfc_hba_down_post(phba);
4649 lpfc_sli4_queue_destroy(phba);
4650
4651 return rc;
4652 }
4653
4654 /**
4655 * lpfc_sli_brdrestart - Wrapper func for restarting hba
4656 * @phba: Pointer to HBA context object.
4657 *
4658 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4659 * API jump table function pointer from the lpfc_hba struct.
4660 **/
4661 int
4662 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4663 {
4664 return phba->lpfc_sli_brdrestart(phba);
4665 }
4666
4667 /**
4668 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
4669 * @phba: Pointer to HBA context object.
4670 *
4671 * This function is called after an HBA restart to wait for successful
4672 * restart of the HBA. Successful restart of the HBA is indicated by the
4673 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
4674 * iterations, the function will restart the HBA again. The function returns
4675 * zero if the HBA successfully restarted, else it returns a negative error code.
4676 **/
4677 int
4678 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4679 {
4680 uint32_t status, i = 0;
4681
4682 /* Read the HBA Host Status Register */
4683 if (lpfc_readl(phba->HSregaddr, &status))
4684 return -EIO;
4685
4686 /* Check status register to see what current state is */
4687 i = 0;
4688 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4689
4690 /* Check every 10ms for 10 retries, then every 100ms for 90
4691 * retries, then every 1 sec for 50 retries, for a total of
4692 * ~60 seconds before resetting the board again and checking
4693 * every 1 sec for 50 more retries. The up-to-60-second wait before
4694 * the board is ready is required for the Falcon FIPS zeroization to
4695 * complete; any board reset in between would restart the
4696 * zeroization and further delay board readiness.
4697 */ 4698 if (i++ >= 200) { 4699 /* Adapter failed to init, timeout, status reg 4700 <status> */ 4701 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4702 "0436 Adapter failed to init, " 4703 "timeout, status reg x%x, " 4704 "FW Data: A8 x%x AC x%x\n", status, 4705 readl(phba->MBslimaddr + 0xa8), 4706 readl(phba->MBslimaddr + 0xac)); 4707 phba->link_state = LPFC_HBA_ERROR; 4708 return -ETIMEDOUT; 4709 } 4710 4711 /* Check to see if any errors occurred during init */ 4712 if (status & HS_FFERM) { 4713 /* ERROR: During chipset initialization */ 4714 /* Adapter failed to init, chipset, status reg 4715 <status> */ 4716 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4717 "0437 Adapter failed to init, " 4718 "chipset, status reg x%x, " 4719 "FW Data: A8 x%x AC x%x\n", status, 4720 readl(phba->MBslimaddr + 0xa8), 4721 readl(phba->MBslimaddr + 0xac)); 4722 phba->link_state = LPFC_HBA_ERROR; 4723 return -EIO; 4724 } 4725 4726 if (i <= 10) 4727 msleep(10); 4728 else if (i <= 100) 4729 msleep(100); 4730 else 4731 msleep(1000); 4732 4733 if (i == 150) { 4734 /* Do post */ 4735 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4736 lpfc_sli_brdrestart(phba); 4737 } 4738 /* Read the HBA Host Status Register */ 4739 if (lpfc_readl(phba->HSregaddr, &status)) 4740 return -EIO; 4741 } 4742 4743 /* Check to see if any errors occurred during init */ 4744 if (status & HS_FFERM) { 4745 /* ERROR: During chipset initialization */ 4746 /* Adapter failed to init, chipset, status reg <status> */ 4747 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4748 "0438 Adapter failed to init, chipset, " 4749 "status reg x%x, " 4750 "FW Data: A8 x%x AC x%x\n", status, 4751 readl(phba->MBslimaddr + 0xa8), 4752 readl(phba->MBslimaddr + 0xac)); 4753 phba->link_state = LPFC_HBA_ERROR; 4754 return -EIO; 4755 } 4756 4757 /* Clear all interrupt enable conditions */ 4758 writel(0, phba->HCregaddr); 4759 readl(phba->HCregaddr); /* flush */ 4760 4761 /* setup host attn register */ 4762 writel(0xffffffff, phba->HAregaddr); 4763 readl(phba->HAregaddr); /* flush */ 4764 return 0; 4765 } 4766 4767 /** 4768 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4769 * 4770 * This function calculates and returns the number of HBQs required to be 4771 * configured. 4772 **/ 4773 int 4774 lpfc_sli_hbq_count(void) 4775 { 4776 return ARRAY_SIZE(lpfc_hbq_defs); 4777 } 4778 4779 /** 4780 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4781 * 4782 * This function adds the number of hbq entries in every HBQ to get 4783 * the total number of hbq entries required for the HBA and returns 4784 * the total count. 4785 **/ 4786 static int 4787 lpfc_sli_hbq_entry_count(void) 4788 { 4789 int hbq_count = lpfc_sli_hbq_count(); 4790 int count = 0; 4791 int i; 4792 4793 for (i = 0; i < hbq_count; ++i) 4794 count += lpfc_hbq_defs[i]->entry_count; 4795 return count; 4796 } 4797 4798 /** 4799 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4800 * 4801 * This function calculates amount of memory required for all hbq entries 4802 * to be configured and returns the total memory required. 4803 **/ 4804 int 4805 lpfc_sli_hbq_size(void) 4806 { 4807 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4808 } 4809 4810 /** 4811 * lpfc_sli_hbq_setup - configure and initialize HBQs 4812 * @phba: Pointer to HBA context object. 4813 * 4814 * This function is called during the SLI initialization to configure 4815 * all the HBQs and post buffers to the HBQ. The caller is not 4816 * required to hold any locks. 
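 * Each HBQ is configured with its own CONFIG_HBQ mailbox issued in MBX_POLL
 * mode, and the HBQs are then initially populated via
 * lpfc_sli_hbqbuf_init_hbqs().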
This function will return zero if successful 4817 * else it will return negative error code. 4818 **/ 4819 static int 4820 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4821 { 4822 int hbq_count = lpfc_sli_hbq_count(); 4823 LPFC_MBOXQ_t *pmb; 4824 MAILBOX_t *pmbox; 4825 uint32_t hbqno; 4826 uint32_t hbq_entry_index; 4827 4828 /* Get a Mailbox buffer to setup mailbox 4829 * commands for HBA initialization 4830 */ 4831 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4832 4833 if (!pmb) 4834 return -ENOMEM; 4835 4836 pmbox = &pmb->u.mb; 4837 4838 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4839 phba->link_state = LPFC_INIT_MBX_CMDS; 4840 phba->hbq_in_use = 1; 4841 4842 hbq_entry_index = 0; 4843 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4844 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4845 phba->hbqs[hbqno].hbqPutIdx = 0; 4846 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4847 phba->hbqs[hbqno].entry_count = 4848 lpfc_hbq_defs[hbqno]->entry_count; 4849 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4850 hbq_entry_index, pmb); 4851 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4852 4853 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4854 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4855 mbxStatus <status>, ring <num> */ 4856 4857 lpfc_printf_log(phba, KERN_ERR, 4858 LOG_SLI | LOG_VPORT, 4859 "1805 Adapter failed to init. " 4860 "Data: x%x x%x x%x\n", 4861 pmbox->mbxCommand, 4862 pmbox->mbxStatus, hbqno); 4863 4864 phba->link_state = LPFC_HBA_ERROR; 4865 mempool_free(pmb, phba->mbox_mem_pool); 4866 return -ENXIO; 4867 } 4868 } 4869 phba->hbq_count = hbq_count; 4870 4871 mempool_free(pmb, phba->mbox_mem_pool); 4872 4873 /* Initially populate or replenish the HBQs */ 4874 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4875 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4876 return 0; 4877 } 4878 4879 /** 4880 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4881 * @phba: Pointer to HBA context object. 4882 * 4883 * This function is called during the SLI initialization to configure 4884 * all the HBQs and post buffers to the HBQ. The caller is not 4885 * required to hold any locks. This function will return zero if successful 4886 * else it will return negative error code. 4887 **/ 4888 static int 4889 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4890 { 4891 phba->hbq_in_use = 1; 4892 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4893 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4894 phba->hbq_count = 1; 4895 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4896 /* Initially populate or replenish the HBQs */ 4897 return 0; 4898 } 4899 4900 /** 4901 * lpfc_sli_config_port - Issue config port mailbox command 4902 * @phba: Pointer to HBA context object. 4903 * @sli_mode: sli mode - 2/3 4904 * 4905 * This function is called by the sli initialization code path 4906 * to issue config_port mailbox command. This function restarts the 4907 * HBA firmware and issues a config_port mailbox command to configure 4908 * the SLI interface in the sli mode specified by sli_mode 4909 * variable. The caller is not required to hold any locks. 4910 * The function returns 0 if successful, else returns negative error 4911 * code. 
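 *
 * A rough caller sketch (simplified from lpfc_sli_hba_setup() further below),
 * falling back to SLI-2 if SLI-3 is refused:
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		rc = lpfc_sli_config_port(phba, 2);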
4912 **/ 4913 int 4914 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4915 { 4916 LPFC_MBOXQ_t *pmb; 4917 uint32_t resetcount = 0, rc = 0, done = 0; 4918 4919 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4920 if (!pmb) { 4921 phba->link_state = LPFC_HBA_ERROR; 4922 return -ENOMEM; 4923 } 4924 4925 phba->sli_rev = sli_mode; 4926 while (resetcount < 2 && !done) { 4927 spin_lock_irq(&phba->hbalock); 4928 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4929 spin_unlock_irq(&phba->hbalock); 4930 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4931 lpfc_sli_brdrestart(phba); 4932 rc = lpfc_sli_chipset_init(phba); 4933 if (rc) 4934 break; 4935 4936 spin_lock_irq(&phba->hbalock); 4937 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4938 spin_unlock_irq(&phba->hbalock); 4939 resetcount++; 4940 4941 /* Call pre CONFIG_PORT mailbox command initialization. A 4942 * value of 0 means the call was successful. Any other 4943 * nonzero value is a failure, but if ERESTART is returned, 4944 * the driver may reset the HBA and try again. 4945 */ 4946 rc = lpfc_config_port_prep(phba); 4947 if (rc == -ERESTART) { 4948 phba->link_state = LPFC_LINK_UNKNOWN; 4949 continue; 4950 } else if (rc) 4951 break; 4952 4953 phba->link_state = LPFC_INIT_MBX_CMDS; 4954 lpfc_config_port(phba, pmb); 4955 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4956 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4957 LPFC_SLI3_HBQ_ENABLED | 4958 LPFC_SLI3_CRP_ENABLED | 4959 LPFC_SLI3_DSS_ENABLED); 4960 if (rc != MBX_SUCCESS) { 4961 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4962 "0442 Adapter failed to init, mbxCmd x%x " 4963 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4964 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4965 spin_lock_irq(&phba->hbalock); 4966 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4967 spin_unlock_irq(&phba->hbalock); 4968 rc = -ENXIO; 4969 } else { 4970 /* Allow asynchronous mailbox command to go through */ 4971 spin_lock_irq(&phba->hbalock); 4972 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4973 spin_unlock_irq(&phba->hbalock); 4974 done = 1; 4975 4976 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4977 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4978 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4979 "3110 Port did not grant ASABT\n"); 4980 } 4981 } 4982 if (!done) { 4983 rc = -EINVAL; 4984 goto do_prep_failed; 4985 } 4986 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4987 if (!pmb->u.mb.un.varCfgPort.cMA) { 4988 rc = -ENXIO; 4989 goto do_prep_failed; 4990 } 4991 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4992 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4993 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4994 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4995 phba->max_vpi : phba->max_vports; 4996 4997 } else 4998 phba->max_vpi = 0; 4999 phba->fips_level = 0; 5000 phba->fips_spec_rev = 0; 5001 if (pmb->u.mb.un.varCfgPort.gdss) { 5002 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5003 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5004 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5005 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5006 "2850 Security Crypto Active. 
FIPS x%d " 5007 "(Spec Rev: x%d)", 5008 phba->fips_level, phba->fips_spec_rev); 5009 } 5010 if (pmb->u.mb.un.varCfgPort.sec_err) { 5011 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5012 "2856 Config Port Security Crypto " 5013 "Error: x%x ", 5014 pmb->u.mb.un.varCfgPort.sec_err); 5015 } 5016 if (pmb->u.mb.un.varCfgPort.gerbm) 5017 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5018 if (pmb->u.mb.un.varCfgPort.gcrp) 5019 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5020 5021 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5022 phba->port_gp = phba->mbox->us.s3_pgp.port; 5023 5024 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5025 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5026 phba->cfg_enable_bg = 0; 5027 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5029 "0443 Adapter did not grant " 5030 "BlockGuard\n"); 5031 } 5032 } 5033 } else { 5034 phba->hbq_get = NULL; 5035 phba->port_gp = phba->mbox->us.s2.port; 5036 phba->max_vpi = 0; 5037 } 5038 do_prep_failed: 5039 mempool_free(pmb, phba->mbox_mem_pool); 5040 return rc; 5041 } 5042 5043 5044 /** 5045 * lpfc_sli_hba_setup - SLI initialization function 5046 * @phba: Pointer to HBA context object. 5047 * 5048 * This function is the main SLI initialization function. This function 5049 * is called by the HBA initialization code, HBA reset code and HBA 5050 * error attention handler code. Caller is not required to hold any 5051 * locks. This function issues config_port mailbox command to configure 5052 * the SLI, setup iocb rings and HBQ rings. In the end the function 5053 * calls the config_port_post function to issue init_link mailbox 5054 * command and to start the discovery. The function will return zero 5055 * if successful, else it will return negative error code. 5056 **/ 5057 int 5058 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5059 { 5060 uint32_t rc; 5061 int mode = 3, i; 5062 int longs; 5063 5064 switch (phba->cfg_sli_mode) { 5065 case 2: 5066 if (phba->cfg_enable_npiv) { 5067 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5068 "1824 NPIV enabled: Override sli_mode " 5069 "parameter (%d) to auto (0).\n", 5070 phba->cfg_sli_mode); 5071 break; 5072 } 5073 mode = 2; 5074 break; 5075 case 0: 5076 case 3: 5077 break; 5078 default: 5079 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5080 "1819 Unrecognized sli_mode parameter: %d.\n", 5081 phba->cfg_sli_mode); 5082 5083 break; 5084 } 5085 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5086 5087 rc = lpfc_sli_config_port(phba, mode); 5088 5089 if (rc && phba->cfg_sli_mode == 3) 5090 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5091 "1820 Unable to select SLI-3. 
" 5092 "Not supported by adapter.\n"); 5093 if (rc && mode != 2) 5094 rc = lpfc_sli_config_port(phba, 2); 5095 else if (rc && mode == 2) 5096 rc = lpfc_sli_config_port(phba, 3); 5097 if (rc) 5098 goto lpfc_sli_hba_setup_error; 5099 5100 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5101 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5102 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5103 if (!rc) { 5104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5105 "2709 This device supports " 5106 "Advanced Error Reporting (AER)\n"); 5107 spin_lock_irq(&phba->hbalock); 5108 phba->hba_flag |= HBA_AER_ENABLED; 5109 spin_unlock_irq(&phba->hbalock); 5110 } else { 5111 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5112 "2708 This device does not support " 5113 "Advanced Error Reporting (AER): %d\n", 5114 rc); 5115 phba->cfg_aer_support = 0; 5116 } 5117 } 5118 5119 if (phba->sli_rev == 3) { 5120 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5121 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5122 } else { 5123 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5124 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5125 phba->sli3_options = 0; 5126 } 5127 5128 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5129 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5130 phba->sli_rev, phba->max_vpi); 5131 rc = lpfc_sli_ring_map(phba); 5132 5133 if (rc) 5134 goto lpfc_sli_hba_setup_error; 5135 5136 /* Initialize VPIs. */ 5137 if (phba->sli_rev == LPFC_SLI_REV3) { 5138 /* 5139 * The VPI bitmask and physical ID array are allocated 5140 * and initialized once only - at driver load. A port 5141 * reset doesn't need to reinitialize this memory. 5142 */ 5143 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5144 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5145 phba->vpi_bmask = kcalloc(longs, 5146 sizeof(unsigned long), 5147 GFP_KERNEL); 5148 if (!phba->vpi_bmask) { 5149 rc = -ENOMEM; 5150 goto lpfc_sli_hba_setup_error; 5151 } 5152 5153 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5154 sizeof(uint16_t), 5155 GFP_KERNEL); 5156 if (!phba->vpi_ids) { 5157 kfree(phba->vpi_bmask); 5158 rc = -ENOMEM; 5159 goto lpfc_sli_hba_setup_error; 5160 } 5161 for (i = 0; i < phba->max_vpi; i++) 5162 phba->vpi_ids[i] = i; 5163 } 5164 } 5165 5166 /* Init HBQs */ 5167 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5168 rc = lpfc_sli_hbq_setup(phba); 5169 if (rc) 5170 goto lpfc_sli_hba_setup_error; 5171 } 5172 spin_lock_irq(&phba->hbalock); 5173 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5174 spin_unlock_irq(&phba->hbalock); 5175 5176 rc = lpfc_config_port_post(phba); 5177 if (rc) 5178 goto lpfc_sli_hba_setup_error; 5179 5180 return rc; 5181 5182 lpfc_sli_hba_setup_error: 5183 phba->link_state = LPFC_HBA_ERROR; 5184 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5185 "0445 Firmware initialization failed\n"); 5186 return rc; 5187 } 5188 5189 /** 5190 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5191 * @phba: Pointer to HBA context object. 5192 * @mboxq: mailbox pointer. 5193 * This function issue a dump mailbox command to read config region 5194 * 23 and parse the records in the region and populate driver 5195 * data structure. 
5196 **/ 5197 static int 5198 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5199 { 5200 LPFC_MBOXQ_t *mboxq; 5201 struct lpfc_dmabuf *mp; 5202 struct lpfc_mqe *mqe; 5203 uint32_t data_length; 5204 int rc; 5205 5206 /* Program the default value of vlan_id and fc_map */ 5207 phba->valid_vlan = 0; 5208 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5209 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5210 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5211 5212 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5213 if (!mboxq) 5214 return -ENOMEM; 5215 5216 mqe = &mboxq->u.mqe; 5217 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5218 rc = -ENOMEM; 5219 goto out_free_mboxq; 5220 } 5221 5222 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 5223 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5224 5225 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5226 "(%d):2571 Mailbox cmd x%x Status x%x " 5227 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5228 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5229 "CQ: x%x x%x x%x x%x\n", 5230 mboxq->vport ? mboxq->vport->vpi : 0, 5231 bf_get(lpfc_mqe_command, mqe), 5232 bf_get(lpfc_mqe_status, mqe), 5233 mqe->un.mb_words[0], mqe->un.mb_words[1], 5234 mqe->un.mb_words[2], mqe->un.mb_words[3], 5235 mqe->un.mb_words[4], mqe->un.mb_words[5], 5236 mqe->un.mb_words[6], mqe->un.mb_words[7], 5237 mqe->un.mb_words[8], mqe->un.mb_words[9], 5238 mqe->un.mb_words[10], mqe->un.mb_words[11], 5239 mqe->un.mb_words[12], mqe->un.mb_words[13], 5240 mqe->un.mb_words[14], mqe->un.mb_words[15], 5241 mqe->un.mb_words[16], mqe->un.mb_words[50], 5242 mboxq->mcqe.word0, 5243 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5244 mboxq->mcqe.trailer); 5245 5246 if (rc) { 5247 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5248 kfree(mp); 5249 rc = -EIO; 5250 goto out_free_mboxq; 5251 } 5252 data_length = mqe->un.mb_words[5]; 5253 if (data_length > DMP_RGN23_SIZE) { 5254 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5255 kfree(mp); 5256 rc = -EIO; 5257 goto out_free_mboxq; 5258 } 5259 5260 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5261 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5262 kfree(mp); 5263 rc = 0; 5264 5265 out_free_mboxq: 5266 mempool_free(mboxq, phba->mbox_mem_pool); 5267 return rc; 5268 } 5269 5270 /** 5271 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5272 * @phba: pointer to lpfc hba data structure. 5273 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5274 * @vpd: pointer to the memory to hold resulting port vpd data. 5275 * @vpd_size: On input, the number of bytes allocated to @vpd. 5276 * On output, the number of data bytes in @vpd. 5277 * 5278 * This routine executes a READ_REV SLI4 mailbox command. In 5279 * addition, this routine gets the port vpd data. 5280 * 5281 * Return codes 5282 * 0 - successful 5283 * -ENOMEM - could not allocated memory. 5284 **/ 5285 static int 5286 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5287 uint8_t *vpd, uint32_t *vpd_size) 5288 { 5289 int rc = 0; 5290 uint32_t dma_size; 5291 struct lpfc_dmabuf *dmabuf; 5292 struct lpfc_mqe *mqe; 5293 5294 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5295 if (!dmabuf) 5296 return -ENOMEM; 5297 5298 /* 5299 * Get a DMA buffer for the vpd data resulting from the READ_REV 5300 * mailbox command. 
5301 */ 5302 dma_size = *vpd_size; 5303 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5304 &dmabuf->phys, GFP_KERNEL); 5305 if (!dmabuf->virt) { 5306 kfree(dmabuf); 5307 return -ENOMEM; 5308 } 5309 5310 /* 5311 * The SLI4 implementation of READ_REV conflicts at word1, 5312 * bits 31:16 and SLI4 adds vpd functionality not present 5313 * in SLI3. This code corrects the conflicts. 5314 */ 5315 lpfc_read_rev(phba, mboxq); 5316 mqe = &mboxq->u.mqe; 5317 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5318 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5319 mqe->un.read_rev.word1 &= 0x0000FFFF; 5320 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5321 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5322 5323 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5324 if (rc) { 5325 dma_free_coherent(&phba->pcidev->dev, dma_size, 5326 dmabuf->virt, dmabuf->phys); 5327 kfree(dmabuf); 5328 return -EIO; 5329 } 5330 5331 /* 5332 * The available vpd length cannot be bigger than the 5333 * DMA buffer passed to the port. Catch the less than 5334 * case and update the caller's size. 5335 */ 5336 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5337 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5338 5339 memcpy(vpd, dmabuf->virt, *vpd_size); 5340 5341 dma_free_coherent(&phba->pcidev->dev, dma_size, 5342 dmabuf->virt, dmabuf->phys); 5343 kfree(dmabuf); 5344 return 0; 5345 } 5346 5347 /** 5348 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes 5349 * @phba: pointer to lpfc hba data structure. 5350 * 5351 * This routine retrieves SLI4 device physical port name this PCI function 5352 * is attached to. 5353 * 5354 * Return codes 5355 * 0 - successful 5356 * otherwise - failed to retrieve controller attributes 5357 **/ 5358 static int 5359 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) 5360 { 5361 LPFC_MBOXQ_t *mboxq; 5362 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5363 struct lpfc_controller_attribute *cntl_attr; 5364 void *virtaddr = NULL; 5365 uint32_t alloclen, reqlen; 5366 uint32_t shdr_status, shdr_add_status; 5367 union lpfc_sli4_cfg_shdr *shdr; 5368 int rc; 5369 5370 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5371 if (!mboxq) 5372 return -ENOMEM; 5373 5374 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ 5375 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5376 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5377 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5378 LPFC_SLI4_MBX_NEMBED); 5379 5380 if (alloclen < reqlen) { 5381 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5382 "3084 Allocated DMA memory size (%d) is " 5383 "less than the requested DMA memory size " 5384 "(%d)\n", alloclen, reqlen); 5385 rc = -ENOMEM; 5386 goto out_free_mboxq; 5387 } 5388 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5389 virtaddr = mboxq->sge_array->addr[0]; 5390 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5391 shdr = &mbx_cntl_attr->cfg_shdr; 5392 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5393 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5394 if (shdr_status || shdr_add_status || rc) { 5395 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5396 "3085 Mailbox x%x (x%x/x%x) failed, " 5397 "rc:x%x, status:x%x, add_status:x%x\n", 5398 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5399 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5400 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5401 rc, shdr_status, shdr_add_status); 5402 rc = -ENXIO; 
5403 goto out_free_mboxq; 5404 } 5405 5406 cntl_attr = &mbx_cntl_attr->cntl_attr; 5407 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5408 phba->sli4_hba.lnk_info.lnk_tp = 5409 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5410 phba->sli4_hba.lnk_info.lnk_no = 5411 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5412 5413 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); 5414 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, 5415 sizeof(phba->BIOSVersion)); 5416 5417 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5418 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", 5419 phba->sli4_hba.lnk_info.lnk_tp, 5420 phba->sli4_hba.lnk_info.lnk_no, 5421 phba->BIOSVersion); 5422 out_free_mboxq: 5423 if (rc != MBX_TIMEOUT) { 5424 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5425 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5426 else 5427 mempool_free(mboxq, phba->mbox_mem_pool); 5428 } 5429 return rc; 5430 } 5431 5432 /** 5433 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5434 * @phba: pointer to lpfc hba data structure. 5435 * 5436 * This routine retrieves SLI4 device physical port name this PCI function 5437 * is attached to. 5438 * 5439 * Return codes 5440 * 0 - successful 5441 * otherwise - failed to retrieve physical port name 5442 **/ 5443 static int 5444 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5445 { 5446 LPFC_MBOXQ_t *mboxq; 5447 struct lpfc_mbx_get_port_name *get_port_name; 5448 uint32_t shdr_status, shdr_add_status; 5449 union lpfc_sli4_cfg_shdr *shdr; 5450 char cport_name = 0; 5451 int rc; 5452 5453 /* We assume nothing at this point */ 5454 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5455 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5456 5457 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5458 if (!mboxq) 5459 return -ENOMEM; 5460 /* obtain link type and link number via READ_CONFIG */ 5461 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5462 lpfc_sli4_read_config(phba); 5463 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5464 goto retrieve_ppname; 5465 5466 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5467 rc = lpfc_sli4_get_ctl_attr(phba); 5468 if (rc) 5469 goto out_free_mboxq; 5470 5471 retrieve_ppname: 5472 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5473 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5474 sizeof(struct lpfc_mbx_get_port_name) - 5475 sizeof(struct lpfc_sli4_cfg_mhdr), 5476 LPFC_SLI4_MBX_EMBED); 5477 get_port_name = &mboxq->u.mqe.un.get_port_name; 5478 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5479 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5480 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5481 phba->sli4_hba.lnk_info.lnk_tp); 5482 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5483 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5484 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5485 if (shdr_status || shdr_add_status || rc) { 5486 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5487 "3087 Mailbox x%x (x%x/x%x) failed: " 5488 "rc:x%x, status:x%x, add_status:x%x\n", 5489 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5490 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5491 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5492 rc, shdr_status, shdr_add_status); 5493 rc = -ENXIO; 5494 goto out_free_mboxq; 5495 } 5496 switch (phba->sli4_hba.lnk_info.lnk_no) { 5497 case LPFC_LINK_NUMBER_0: 5498 cport_name = 
bf_get(lpfc_mbx_get_port_name_name0, 5499 &get_port_name->u.response); 5500 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5501 break; 5502 case LPFC_LINK_NUMBER_1: 5503 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5504 &get_port_name->u.response); 5505 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5506 break; 5507 case LPFC_LINK_NUMBER_2: 5508 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5509 &get_port_name->u.response); 5510 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5511 break; 5512 case LPFC_LINK_NUMBER_3: 5513 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5514 &get_port_name->u.response); 5515 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5516 break; 5517 default: 5518 break; 5519 } 5520 5521 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5522 phba->Port[0] = cport_name; 5523 phba->Port[1] = '\0'; 5524 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5525 "3091 SLI get port name: %s\n", phba->Port); 5526 } 5527 5528 out_free_mboxq: 5529 if (rc != MBX_TIMEOUT) { 5530 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5531 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5532 else 5533 mempool_free(mboxq, phba->mbox_mem_pool); 5534 } 5535 return rc; 5536 } 5537 5538 /** 5539 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 5540 * @phba: pointer to lpfc hba data structure. 5541 * 5542 * This routine is called to explicitly arm the SLI4 device's completion and 5543 * event queues 5544 **/ 5545 static void 5546 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5547 { 5548 int qidx; 5549 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5550 struct lpfc_sli4_hdw_queue *qp; 5551 5552 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); 5553 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); 5554 if (sli4_hba->nvmels_cq) 5555 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, 5556 LPFC_QUEUE_REARM); 5557 5558 qp = sli4_hba->hdwq; 5559 if (sli4_hba->hdwq) { 5560 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 5561 sli4_hba->sli4_write_cq_db(phba, qp[qidx].fcp_cq, 0, 5562 LPFC_QUEUE_REARM); 5563 sli4_hba->sli4_write_cq_db(phba, qp[qidx].nvme_cq, 0, 5564 LPFC_QUEUE_REARM); 5565 } 5566 5567 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) 5568 sli4_hba->sli4_write_eq_db(phba, qp[qidx].hba_eq, 5569 0, LPFC_QUEUE_REARM); 5570 } 5571 5572 if (phba->nvmet_support) { 5573 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5574 sli4_hba->sli4_write_cq_db(phba, 5575 sli4_hba->nvmet_cqset[qidx], 0, 5576 LPFC_QUEUE_REARM); 5577 } 5578 } 5579 } 5580 5581 /** 5582 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5583 * @phba: Pointer to HBA context object. 5584 * @type: The resource extent type. 5585 * @extnt_count: buffer to hold port available extent count. 5586 * @extnt_size: buffer to hold element count per extent. 5587 * 5588 * This function calls the port and retrievs the number of available 5589 * extents and their size for a particular extent type. 5590 * 5591 * Returns: 0 if successful. Nonzero otherwise. 
5592 **/ 5593 int 5594 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5595 uint16_t *extnt_count, uint16_t *extnt_size) 5596 { 5597 int rc = 0; 5598 uint32_t length; 5599 uint32_t mbox_tmo; 5600 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5601 LPFC_MBOXQ_t *mbox; 5602 5603 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5604 if (!mbox) 5605 return -ENOMEM; 5606 5607 /* Find out how many extents are available for this resource type */ 5608 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5609 sizeof(struct lpfc_sli4_cfg_mhdr)); 5610 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5611 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5612 length, LPFC_SLI4_MBX_EMBED); 5613 5614 /* Send an extents count of 0 - the GET doesn't use it. */ 5615 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5616 LPFC_SLI4_MBX_EMBED); 5617 if (unlikely(rc)) { 5618 rc = -EIO; 5619 goto err_exit; 5620 } 5621 5622 if (!phba->sli4_hba.intr_enable) 5623 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5624 else { 5625 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5626 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5627 } 5628 if (unlikely(rc)) { 5629 rc = -EIO; 5630 goto err_exit; 5631 } 5632 5633 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5634 if (bf_get(lpfc_mbox_hdr_status, 5635 &rsrc_info->header.cfg_shdr.response)) { 5636 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5637 "2930 Failed to get resource extents " 5638 "Status 0x%x Add'l Status 0x%x\n", 5639 bf_get(lpfc_mbox_hdr_status, 5640 &rsrc_info->header.cfg_shdr.response), 5641 bf_get(lpfc_mbox_hdr_add_status, 5642 &rsrc_info->header.cfg_shdr.response)); 5643 rc = -EIO; 5644 goto err_exit; 5645 } 5646 5647 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5648 &rsrc_info->u.rsp); 5649 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5650 &rsrc_info->u.rsp); 5651 5652 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5653 "3162 Retrieved extents type-%d from port: count:%d, " 5654 "size:%d\n", type, *extnt_count, *extnt_size); 5655 5656 err_exit: 5657 mempool_free(mbox, phba->mbox_mem_pool); 5658 return rc; 5659 } 5660 5661 /** 5662 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5663 * @phba: Pointer to HBA context object. 5664 * @type: The extent type to check. 5665 * 5666 * This function reads the current available extents from the port and checks 5667 * if the extent count or extent size has changed since the last access. 5668 * Callers use this routine post port reset to understand if there is a 5669 * extent reprovisioning requirement. 5670 * 5671 * Returns: 5672 * -Error: error indicates problem. 5673 * 1: Extent count or size has changed. 5674 * 0: No changes. 
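 *
 * For example, lpfc_sli4_alloc_resource_identifiers() (later in this file)
 * calls this routine for each of the VFI, VPI, XRI and RPI types after a
 * port reset; any non-zero return is treated as "reprovisioning required"
 * and triggers a full deallocation and reallocation of the extent-based
 * resources.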
5675 **/ 5676 static int 5677 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5678 { 5679 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5680 uint16_t size_diff, rsrc_ext_size; 5681 int rc = 0; 5682 struct lpfc_rsrc_blks *rsrc_entry; 5683 struct list_head *rsrc_blk_list = NULL; 5684 5685 size_diff = 0; 5686 curr_ext_cnt = 0; 5687 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5688 &rsrc_ext_cnt, 5689 &rsrc_ext_size); 5690 if (unlikely(rc)) 5691 return -EIO; 5692 5693 switch (type) { 5694 case LPFC_RSC_TYPE_FCOE_RPI: 5695 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5696 break; 5697 case LPFC_RSC_TYPE_FCOE_VPI: 5698 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5699 break; 5700 case LPFC_RSC_TYPE_FCOE_XRI: 5701 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5702 break; 5703 case LPFC_RSC_TYPE_FCOE_VFI: 5704 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5705 break; 5706 default: 5707 break; 5708 } 5709 5710 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5711 curr_ext_cnt++; 5712 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5713 size_diff++; 5714 } 5715 5716 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5717 rc = 1; 5718 5719 return rc; 5720 } 5721 5722 /** 5723 * lpfc_sli4_cfg_post_extnts - 5724 * @phba: Pointer to HBA context object. 5725 * @extnt_cnt - number of available extents. 5726 * @type - the extent type (rpi, xri, vfi, vpi). 5727 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5728 * @mbox - pointer to the caller's allocated mailbox structure. 5729 * 5730 * This function executes the extents allocation request. It also 5731 * takes care of the amount of memory needed to allocate or get the 5732 * allocated extents. It is the caller's responsibility to evaluate 5733 * the response. 5734 * 5735 * Returns: 5736 * -Error: Error value describes the condition found. 5737 * 0: if successful 5738 **/ 5739 static int 5740 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5741 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5742 { 5743 int rc = 0; 5744 uint32_t req_len; 5745 uint32_t emb_len; 5746 uint32_t alloc_len, mbox_tmo; 5747 5748 /* Calculate the total requested length of the dma memory */ 5749 req_len = extnt_cnt * sizeof(uint16_t); 5750 5751 /* 5752 * Calculate the size of an embedded mailbox. The uint32_t 5753 * accounts for extents-specific word. 5754 */ 5755 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5756 sizeof(uint32_t); 5757 5758 /* 5759 * Presume the allocation and response will fit into an embedded 5760 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
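	 *
	 * Illustrative sizing (hypothetical numbers): with extnt_cnt = 16 the
	 * payload is 16 * sizeof(uint16_t) = 32 bytes, which fits within
	 * emb_len; a request larger than emb_len switches to the non-embedded
	 * form and adds the config-header and extent-word overhead to req_len.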
5761 */ 5762 *emb = LPFC_SLI4_MBX_EMBED; 5763 if (req_len > emb_len) { 5764 req_len = extnt_cnt * sizeof(uint16_t) + 5765 sizeof(union lpfc_sli4_cfg_shdr) + 5766 sizeof(uint32_t); 5767 *emb = LPFC_SLI4_MBX_NEMBED; 5768 } 5769 5770 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5771 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5772 req_len, *emb); 5773 if (alloc_len < req_len) { 5774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5775 "2982 Allocated DMA memory size (x%x) is " 5776 "less than the requested DMA memory " 5777 "size (x%x)\n", alloc_len, req_len); 5778 return -ENOMEM; 5779 } 5780 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5781 if (unlikely(rc)) 5782 return -EIO; 5783 5784 if (!phba->sli4_hba.intr_enable) 5785 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5786 else { 5787 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5788 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5789 } 5790 5791 if (unlikely(rc)) 5792 rc = -EIO; 5793 return rc; 5794 } 5795 5796 /** 5797 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5798 * @phba: Pointer to HBA context object. 5799 * @type: The resource extent type to allocate. 5800 * 5801 * This function allocates the number of elements for the specified 5802 * resource type. 5803 **/ 5804 static int 5805 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5806 { 5807 bool emb = false; 5808 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5809 uint16_t rsrc_id, rsrc_start, j, k; 5810 uint16_t *ids; 5811 int i, rc; 5812 unsigned long longs; 5813 unsigned long *bmask; 5814 struct lpfc_rsrc_blks *rsrc_blks; 5815 LPFC_MBOXQ_t *mbox; 5816 uint32_t length; 5817 struct lpfc_id_range *id_array = NULL; 5818 void *virtaddr = NULL; 5819 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5820 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5821 struct list_head *ext_blk_list; 5822 5823 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5824 &rsrc_cnt, 5825 &rsrc_size); 5826 if (unlikely(rc)) 5827 return -EIO; 5828 5829 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5830 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5831 "3009 No available Resource Extents " 5832 "for resource type 0x%x: Count: 0x%x, " 5833 "Size 0x%x\n", type, rsrc_cnt, 5834 rsrc_size); 5835 return -ENOMEM; 5836 } 5837 5838 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5839 "2903 Post resource extents type-0x%x: " 5840 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5841 5842 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5843 if (!mbox) 5844 return -ENOMEM; 5845 5846 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5847 if (unlikely(rc)) { 5848 rc = -EIO; 5849 goto err_exit; 5850 } 5851 5852 /* 5853 * Figure out where the response is located. Then get local pointers 5854 * to the response data. The port does not guarantee to respond to 5855 * all extents counts request so update the local variable with the 5856 * allocated count from the port. 
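	 * (Each 32-bit word of the returned id array packs two 16-bit resource
	 * IDs; the decode loop further below extracts them with the
	 * lpfc_mbx_rsrc_id_word4_0/word4_1 accessors.)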
5857 */ 5858 if (emb == LPFC_SLI4_MBX_EMBED) { 5859 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5860 id_array = &rsrc_ext->u.rsp.id[0]; 5861 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5862 } else { 5863 virtaddr = mbox->sge_array->addr[0]; 5864 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5865 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5866 id_array = &n_rsrc->id; 5867 } 5868 5869 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5870 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5871 5872 /* 5873 * Based on the resource size and count, correct the base and max 5874 * resource values. 5875 */ 5876 length = sizeof(struct lpfc_rsrc_blks); 5877 switch (type) { 5878 case LPFC_RSC_TYPE_FCOE_RPI: 5879 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5880 sizeof(unsigned long), 5881 GFP_KERNEL); 5882 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5883 rc = -ENOMEM; 5884 goto err_exit; 5885 } 5886 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5887 sizeof(uint16_t), 5888 GFP_KERNEL); 5889 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5890 kfree(phba->sli4_hba.rpi_bmask); 5891 rc = -ENOMEM; 5892 goto err_exit; 5893 } 5894 5895 /* 5896 * The next_rpi was initialized with the maximum available 5897 * count but the port may allocate a smaller number. Catch 5898 * that case and update the next_rpi. 5899 */ 5900 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5901 5902 /* Initialize local ptrs for common extent processing later. */ 5903 bmask = phba->sli4_hba.rpi_bmask; 5904 ids = phba->sli4_hba.rpi_ids; 5905 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5906 break; 5907 case LPFC_RSC_TYPE_FCOE_VPI: 5908 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5909 GFP_KERNEL); 5910 if (unlikely(!phba->vpi_bmask)) { 5911 rc = -ENOMEM; 5912 goto err_exit; 5913 } 5914 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5915 GFP_KERNEL); 5916 if (unlikely(!phba->vpi_ids)) { 5917 kfree(phba->vpi_bmask); 5918 rc = -ENOMEM; 5919 goto err_exit; 5920 } 5921 5922 /* Initialize local ptrs for common extent processing later. */ 5923 bmask = phba->vpi_bmask; 5924 ids = phba->vpi_ids; 5925 ext_blk_list = &phba->lpfc_vpi_blk_list; 5926 break; 5927 case LPFC_RSC_TYPE_FCOE_XRI: 5928 phba->sli4_hba.xri_bmask = kcalloc(longs, 5929 sizeof(unsigned long), 5930 GFP_KERNEL); 5931 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5932 rc = -ENOMEM; 5933 goto err_exit; 5934 } 5935 phba->sli4_hba.max_cfg_param.xri_used = 0; 5936 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5937 sizeof(uint16_t), 5938 GFP_KERNEL); 5939 if (unlikely(!phba->sli4_hba.xri_ids)) { 5940 kfree(phba->sli4_hba.xri_bmask); 5941 rc = -ENOMEM; 5942 goto err_exit; 5943 } 5944 5945 /* Initialize local ptrs for common extent processing later. */ 5946 bmask = phba->sli4_hba.xri_bmask; 5947 ids = phba->sli4_hba.xri_ids; 5948 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5949 break; 5950 case LPFC_RSC_TYPE_FCOE_VFI: 5951 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5952 sizeof(unsigned long), 5953 GFP_KERNEL); 5954 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5955 rc = -ENOMEM; 5956 goto err_exit; 5957 } 5958 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5959 sizeof(uint16_t), 5960 GFP_KERNEL); 5961 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5962 kfree(phba->sli4_hba.vfi_bmask); 5963 rc = -ENOMEM; 5964 goto err_exit; 5965 } 5966 5967 /* Initialize local ptrs for common extent processing later. 
*/ 5968 bmask = phba->sli4_hba.vfi_bmask; 5969 ids = phba->sli4_hba.vfi_ids; 5970 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5971 break; 5972 default: 5973 /* Unsupported Opcode. Fail call. */ 5974 id_array = NULL; 5975 bmask = NULL; 5976 ids = NULL; 5977 ext_blk_list = NULL; 5978 goto err_exit; 5979 } 5980 5981 /* 5982 * Complete initializing the extent configuration with the 5983 * allocated ids assigned to this function. The bitmask serves 5984 * as an index into the array and manages the available ids. The 5985 * array just stores the ids communicated to the port via the wqes. 5986 */ 5987 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5988 if ((i % 2) == 0) 5989 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5990 &id_array[k]); 5991 else 5992 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5993 &id_array[k]); 5994 5995 rsrc_blks = kzalloc(length, GFP_KERNEL); 5996 if (unlikely(!rsrc_blks)) { 5997 rc = -ENOMEM; 5998 kfree(bmask); 5999 kfree(ids); 6000 goto err_exit; 6001 } 6002 rsrc_blks->rsrc_start = rsrc_id; 6003 rsrc_blks->rsrc_size = rsrc_size; 6004 list_add_tail(&rsrc_blks->list, ext_blk_list); 6005 rsrc_start = rsrc_id; 6006 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 6007 phba->sli4_hba.io_xri_start = rsrc_start + 6008 lpfc_sli4_get_iocb_cnt(phba); 6009 } 6010 6011 while (rsrc_id < (rsrc_start + rsrc_size)) { 6012 ids[j] = rsrc_id; 6013 rsrc_id++; 6014 j++; 6015 } 6016 /* Entire word processed. Get next word.*/ 6017 if ((i % 2) == 1) 6018 k++; 6019 } 6020 err_exit: 6021 lpfc_sli4_mbox_cmd_free(phba, mbox); 6022 return rc; 6023 } 6024 6025 6026 6027 /** 6028 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6029 * @phba: Pointer to HBA context object. 6030 * @type: the extent's type. 6031 * 6032 * This function deallocates all extents of a particular resource type. 6033 * SLI4 does not allow for deallocating a particular extent range. It 6034 * is the caller's responsibility to release all kernel memory resources. 6035 **/ 6036 static int 6037 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6038 { 6039 int rc; 6040 uint32_t length, mbox_tmo = 0; 6041 LPFC_MBOXQ_t *mbox; 6042 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6043 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6044 6045 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6046 if (!mbox) 6047 return -ENOMEM; 6048 6049 /* 6050 * This function sends an embedded mailbox because it only sends the 6051 * the resource type. All extents of this type are released by the 6052 * port. 6053 */ 6054 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6055 sizeof(struct lpfc_sli4_cfg_mhdr)); 6056 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6057 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6058 length, LPFC_SLI4_MBX_EMBED); 6059 6060 /* Send an extents count of 0 - the dealloc doesn't use it. 
*/ 6061 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6062 LPFC_SLI4_MBX_EMBED); 6063 if (unlikely(rc)) { 6064 rc = -EIO; 6065 goto out_free_mbox; 6066 } 6067 if (!phba->sli4_hba.intr_enable) 6068 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6069 else { 6070 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6071 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6072 } 6073 if (unlikely(rc)) { 6074 rc = -EIO; 6075 goto out_free_mbox; 6076 } 6077 6078 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6079 if (bf_get(lpfc_mbox_hdr_status, 6080 &dealloc_rsrc->header.cfg_shdr.response)) { 6081 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6082 "2919 Failed to release resource extents " 6083 "for type %d - Status 0x%x Add'l Status 0x%x. " 6084 "Resource memory not released.\n", 6085 type, 6086 bf_get(lpfc_mbox_hdr_status, 6087 &dealloc_rsrc->header.cfg_shdr.response), 6088 bf_get(lpfc_mbox_hdr_add_status, 6089 &dealloc_rsrc->header.cfg_shdr.response)); 6090 rc = -EIO; 6091 goto out_free_mbox; 6092 } 6093 6094 /* Release kernel memory resources for the specific type. */ 6095 switch (type) { 6096 case LPFC_RSC_TYPE_FCOE_VPI: 6097 kfree(phba->vpi_bmask); 6098 kfree(phba->vpi_ids); 6099 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6100 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6101 &phba->lpfc_vpi_blk_list, list) { 6102 list_del_init(&rsrc_blk->list); 6103 kfree(rsrc_blk); 6104 } 6105 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6106 break; 6107 case LPFC_RSC_TYPE_FCOE_XRI: 6108 kfree(phba->sli4_hba.xri_bmask); 6109 kfree(phba->sli4_hba.xri_ids); 6110 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6111 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6112 list_del_init(&rsrc_blk->list); 6113 kfree(rsrc_blk); 6114 } 6115 break; 6116 case LPFC_RSC_TYPE_FCOE_VFI: 6117 kfree(phba->sli4_hba.vfi_bmask); 6118 kfree(phba->sli4_hba.vfi_ids); 6119 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6120 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6121 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6122 list_del_init(&rsrc_blk->list); 6123 kfree(rsrc_blk); 6124 } 6125 break; 6126 case LPFC_RSC_TYPE_FCOE_RPI: 6127 /* RPI bitmask and physical id array are cleaned up earlier. 
*/
6128 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6129 &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6130 list_del_init(&rsrc_blk->list);
6131 kfree(rsrc_blk);
6132 }
6133 break;
6134 default:
6135 break;
6136 }
6137
6138 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6139
6140 out_free_mbox:
6141 mempool_free(mbox, phba->mbox_mem_pool);
6142 return rc;
6143 }
6144
6145 static void
6146 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6147 uint32_t feature)
6148 {
6149 uint32_t len;
6150
6151 len = sizeof(struct lpfc_mbx_set_feature) -
6152 sizeof(struct lpfc_sli4_cfg_mhdr);
6153 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6154 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6155 LPFC_SLI4_MBX_EMBED);
6156
6157 switch (feature) {
6158 case LPFC_SET_UE_RECOVERY:
6159 bf_set(lpfc_mbx_set_feature_UER,
6160 &mbox->u.mqe.un.set_feature, 1);
6161 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6162 mbox->u.mqe.un.set_feature.param_len = 8;
6163 break;
6164 case LPFC_SET_MDS_DIAGS:
6165 bf_set(lpfc_mbx_set_feature_mds,
6166 &mbox->u.mqe.un.set_feature, 1);
6167 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6168 &mbox->u.mqe.un.set_feature, 1);
6169 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6170 mbox->u.mqe.un.set_feature.param_len = 8;
6171 break;
6172 }
6173
6174 return;
6175 }
6176
6177 /**
6178 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6179 * @phba: Pointer to HBA context object.
6180 *
6181 * Disable FW logging into host memory on the adapter. To
6182 * be done before reading logs from the host memory.
6183 **/
6184 void
6185 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6186 {
6187 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6188
6189 ras_fwlog->ras_active = false;
6190
6191 /* Disable FW logging to host memory */
6192 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6193 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6194 }
6195
6196 /**
6197 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6198 * @phba: Pointer to HBA context object.
6199 *
6200 * This function is called to free memory allocated for RAS FW logging
6201 * support in the driver.
6202 **/
6203 void
6204 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6205 {
6206 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6207 struct lpfc_dmabuf *dmabuf, *next;
6208
6209 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6210 list_for_each_entry_safe(dmabuf, next,
6211 &ras_fwlog->fwlog_buff_list,
6212 list) {
6213 list_del(&dmabuf->list);
6214 dma_free_coherent(&phba->pcidev->dev,
6215 LPFC_RAS_MAX_ENTRY_SIZE,
6216 dmabuf->virt, dmabuf->phys);
6217 kfree(dmabuf);
6218 }
6219 }
6220
6221 if (ras_fwlog->lwpd.virt) {
6222 dma_free_coherent(&phba->pcidev->dev,
6223 sizeof(uint32_t) * 2,
6224 ras_fwlog->lwpd.virt,
6225 ras_fwlog->lwpd.phys);
6226 ras_fwlog->lwpd.virt = NULL;
6227 }
6228
6229 ras_fwlog->ras_active = false;
6230 }
6231
6232 /**
6233 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6234 * @phba: Pointer to HBA context object.
6235 * @fwlog_buff_count: Count of buffers to be created.
6236 *
6237 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6238 * and for the buffers that are posted to the adapter to receive FW log data.
6239 * The buffer count is calculated by the caller from the module parameter
6240 * ras_fwlog_buffsize; each buffer posted to the FW is 64K in size.
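 *
 * Worked example (illustrative values only): if the module parameter selects
 * a total of 1 MB of log space, the caller passes fwlog_buff_count =
 * 1 MB / 64 KB = 16, and this routine allocates the LWPD plus sixteen 64 KB
 * DMA buffers and queues them on fwlog_buff_list.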
6241 **/ 6242 6243 static int 6244 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, 6245 uint32_t fwlog_buff_count) 6246 { 6247 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6248 struct lpfc_dmabuf *dmabuf; 6249 int rc = 0, i = 0; 6250 6251 /* Initialize List */ 6252 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); 6253 6254 /* Allocate memory for the LWPD */ 6255 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, 6256 sizeof(uint32_t) * 2, 6257 &ras_fwlog->lwpd.phys, 6258 GFP_KERNEL); 6259 if (!ras_fwlog->lwpd.virt) { 6260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6261 "6185 LWPD Memory Alloc Failed\n"); 6262 6263 return -ENOMEM; 6264 } 6265 6266 ras_fwlog->fw_buffcount = fwlog_buff_count; 6267 for (i = 0; i < ras_fwlog->fw_buffcount; i++) { 6268 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 6269 GFP_KERNEL); 6270 if (!dmabuf) { 6271 rc = -ENOMEM; 6272 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6273 "6186 Memory Alloc failed FW logging"); 6274 goto free_mem; 6275 } 6276 6277 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6278 LPFC_RAS_MAX_ENTRY_SIZE, 6279 &dmabuf->phys, GFP_KERNEL); 6280 if (!dmabuf->virt) { 6281 kfree(dmabuf); 6282 rc = -ENOMEM; 6283 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6284 "6187 DMA Alloc Failed FW logging"); 6285 goto free_mem; 6286 } 6287 dmabuf->buffer_tag = i; 6288 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); 6289 } 6290 6291 free_mem: 6292 if (rc) 6293 lpfc_sli4_ras_dma_free(phba); 6294 6295 return rc; 6296 } 6297 6298 /** 6299 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command 6300 * @phba: pointer to lpfc hba data structure. 6301 * @pmboxq: pointer to the driver internal queue element for mailbox command. 6302 * 6303 * Completion handler for driver's RAS MBX command to the device. 6304 **/ 6305 static void 6306 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6307 { 6308 MAILBOX_t *mb; 6309 union lpfc_sli4_cfg_shdr *shdr; 6310 uint32_t shdr_status, shdr_add_status; 6311 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6312 6313 mb = &pmb->u.mb; 6314 6315 shdr = (union lpfc_sli4_cfg_shdr *) 6316 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6317 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6318 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6319 6320 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6321 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6322 "6188 FW LOG mailbox " 6323 "completed with status x%x add_status x%x," 6324 " mbx status x%x\n", 6325 shdr_status, shdr_add_status, mb->mbxStatus); 6326 6327 ras_fwlog->ras_hwsupport = false; 6328 goto disable_ras; 6329 } 6330 6331 ras_fwlog->ras_active = true; 6332 mempool_free(pmb, phba->mbox_mem_pool); 6333 6334 return; 6335 6336 disable_ras: 6337 /* Free RAS DMA memory */ 6338 lpfc_sli4_ras_dma_free(phba); 6339 mempool_free(pmb, phba->mbox_mem_pool); 6340 } 6341 6342 /** 6343 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6344 * @phba: pointer to lpfc hba data structure. 6345 * @fwlog_level: Logging verbosity level. 6346 * @fwlog_enable: Enable/Disable logging. 6347 * 6348 * Initialize memory and post mailbox command to enable FW logging in host 6349 * memory. 
6350 **/ 6351 int 6352 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6353 uint32_t fwlog_level, 6354 uint32_t fwlog_enable) 6355 { 6356 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6357 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6358 struct lpfc_dmabuf *dmabuf; 6359 LPFC_MBOXQ_t *mbox; 6360 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6361 int rc = 0; 6362 6363 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6364 phba->cfg_ras_fwlog_buffsize); 6365 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6366 6367 /* 6368 * If re-enabling FW logging support use earlier allocated 6369 * DMA buffers while posting MBX command. 6370 **/ 6371 if (!ras_fwlog->lwpd.virt) { 6372 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6373 if (rc) { 6374 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6375 "6189 FW Log Memory Allocation Failed"); 6376 return rc; 6377 } 6378 } 6379 6380 /* Setup Mailbox command */ 6381 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6382 if (!mbox) { 6383 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6384 "6190 RAS MBX Alloc Failed"); 6385 rc = -ENOMEM; 6386 goto mem_free; 6387 } 6388 6389 ras_fwlog->fw_loglevel = fwlog_level; 6390 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6391 sizeof(struct lpfc_sli4_cfg_mhdr)); 6392 6393 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6394 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6395 len, LPFC_SLI4_MBX_EMBED); 6396 6397 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6398 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6399 fwlog_enable); 6400 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6401 ras_fwlog->fw_loglevel); 6402 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6403 ras_fwlog->fw_buffcount); 6404 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6405 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6406 6407 /* Update DMA buffer address */ 6408 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6409 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6410 6411 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6412 putPaddrLow(dmabuf->phys); 6413 6414 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6415 putPaddrHigh(dmabuf->phys); 6416 } 6417 6418 /* Update LPWD address */ 6419 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6420 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6421 6422 mbox->vport = phba->pport; 6423 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6424 6425 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6426 6427 if (rc == MBX_NOT_FINISHED) { 6428 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6429 "6191 FW-Log Mailbox failed. " 6430 "status %d mbxStatus : x%x", rc, 6431 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6432 mempool_free(mbox, phba->mbox_mem_pool); 6433 rc = -EIO; 6434 goto mem_free; 6435 } else 6436 rc = 0; 6437 mem_free: 6438 if (rc) 6439 lpfc_sli4_ras_dma_free(phba); 6440 6441 return rc; 6442 } 6443 6444 /** 6445 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6446 * @phba: Pointer to HBA context object. 6447 * 6448 * Check if RAS is supported on the adapter and initialize it. 
6449 **/ 6450 void 6451 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6452 { 6453 /* Check RAS FW Log needs to be enabled or not */ 6454 if (lpfc_check_fwlog_support(phba)) 6455 return; 6456 6457 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6458 LPFC_RAS_ENABLE_LOGGING); 6459 } 6460 6461 /** 6462 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6463 * @phba: Pointer to HBA context object. 6464 * 6465 * This function allocates all SLI4 resource identifiers. 6466 **/ 6467 int 6468 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6469 { 6470 int i, rc, error = 0; 6471 uint16_t count, base; 6472 unsigned long longs; 6473 6474 if (!phba->sli4_hba.rpi_hdrs_in_use) 6475 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6476 if (phba->sli4_hba.extents_in_use) { 6477 /* 6478 * The port supports resource extents. The XRI, VPI, VFI, RPI 6479 * resource extent count must be read and allocated before 6480 * provisioning the resource id arrays. 6481 */ 6482 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6483 LPFC_IDX_RSRC_RDY) { 6484 /* 6485 * Extent-based resources are set - the driver could 6486 * be in a port reset. Figure out if any corrective 6487 * actions need to be taken. 6488 */ 6489 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6490 LPFC_RSC_TYPE_FCOE_VFI); 6491 if (rc != 0) 6492 error++; 6493 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6494 LPFC_RSC_TYPE_FCOE_VPI); 6495 if (rc != 0) 6496 error++; 6497 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6498 LPFC_RSC_TYPE_FCOE_XRI); 6499 if (rc != 0) 6500 error++; 6501 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6502 LPFC_RSC_TYPE_FCOE_RPI); 6503 if (rc != 0) 6504 error++; 6505 6506 /* 6507 * It's possible that the number of resources 6508 * provided to this port instance changed between 6509 * resets. Detect this condition and reallocate 6510 * resources. Otherwise, there is no action. 6511 */ 6512 if (error) { 6513 lpfc_printf_log(phba, KERN_INFO, 6514 LOG_MBOX | LOG_INIT, 6515 "2931 Detected extent resource " 6516 "change. Reallocating all " 6517 "extents.\n"); 6518 rc = lpfc_sli4_dealloc_extent(phba, 6519 LPFC_RSC_TYPE_FCOE_VFI); 6520 rc = lpfc_sli4_dealloc_extent(phba, 6521 LPFC_RSC_TYPE_FCOE_VPI); 6522 rc = lpfc_sli4_dealloc_extent(phba, 6523 LPFC_RSC_TYPE_FCOE_XRI); 6524 rc = lpfc_sli4_dealloc_extent(phba, 6525 LPFC_RSC_TYPE_FCOE_RPI); 6526 } else 6527 return 0; 6528 } 6529 6530 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6531 if (unlikely(rc)) 6532 goto err_exit; 6533 6534 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6535 if (unlikely(rc)) 6536 goto err_exit; 6537 6538 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6539 if (unlikely(rc)) 6540 goto err_exit; 6541 6542 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6543 if (unlikely(rc)) 6544 goto err_exit; 6545 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6546 LPFC_IDX_RSRC_RDY); 6547 return rc; 6548 } else { 6549 /* 6550 * The port does not support resource extents. The XRI, VPI, 6551 * VFI, RPI resource ids were determined from READ_CONFIG. 6552 * Just allocate the bitmasks and provision the resource id 6553 * arrays. If a port reset is active, the resources don't 6554 * need any action - just exit. 6555 */ 6556 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6557 LPFC_IDX_RSRC_RDY) { 6558 lpfc_sli4_dealloc_resource_identifiers(phba); 6559 lpfc_sli4_remove_rpis(phba); 6560 } 6561 /* RPIs. 
*/ 6562 count = phba->sli4_hba.max_cfg_param.max_rpi; 6563 if (count <= 0) { 6564 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6565 "3279 Invalid provisioning of " 6566 "rpi:%d\n", count); 6567 rc = -EINVAL; 6568 goto err_exit; 6569 } 6570 base = phba->sli4_hba.max_cfg_param.rpi_base; 6571 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6572 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6573 sizeof(unsigned long), 6574 GFP_KERNEL); 6575 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6576 rc = -ENOMEM; 6577 goto err_exit; 6578 } 6579 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6580 GFP_KERNEL); 6581 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6582 rc = -ENOMEM; 6583 goto free_rpi_bmask; 6584 } 6585 6586 for (i = 0; i < count; i++) 6587 phba->sli4_hba.rpi_ids[i] = base + i; 6588 6589 /* VPIs. */ 6590 count = phba->sli4_hba.max_cfg_param.max_vpi; 6591 if (count <= 0) { 6592 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6593 "3280 Invalid provisioning of " 6594 "vpi:%d\n", count); 6595 rc = -EINVAL; 6596 goto free_rpi_ids; 6597 } 6598 base = phba->sli4_hba.max_cfg_param.vpi_base; 6599 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6600 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6601 GFP_KERNEL); 6602 if (unlikely(!phba->vpi_bmask)) { 6603 rc = -ENOMEM; 6604 goto free_rpi_ids; 6605 } 6606 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6607 GFP_KERNEL); 6608 if (unlikely(!phba->vpi_ids)) { 6609 rc = -ENOMEM; 6610 goto free_vpi_bmask; 6611 } 6612 6613 for (i = 0; i < count; i++) 6614 phba->vpi_ids[i] = base + i; 6615 6616 /* XRIs. */ 6617 count = phba->sli4_hba.max_cfg_param.max_xri; 6618 if (count <= 0) { 6619 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6620 "3281 Invalid provisioning of " 6621 "xri:%d\n", count); 6622 rc = -EINVAL; 6623 goto free_vpi_ids; 6624 } 6625 base = phba->sli4_hba.max_cfg_param.xri_base; 6626 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6627 phba->sli4_hba.xri_bmask = kcalloc(longs, 6628 sizeof(unsigned long), 6629 GFP_KERNEL); 6630 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6631 rc = -ENOMEM; 6632 goto free_vpi_ids; 6633 } 6634 phba->sli4_hba.max_cfg_param.xri_used = 0; 6635 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6636 GFP_KERNEL); 6637 if (unlikely(!phba->sli4_hba.xri_ids)) { 6638 rc = -ENOMEM; 6639 goto free_xri_bmask; 6640 } 6641 6642 for (i = 0; i < count; i++) 6643 phba->sli4_hba.xri_ids[i] = base + i; 6644 6645 /* VFIs. */ 6646 count = phba->sli4_hba.max_cfg_param.max_vfi; 6647 if (count <= 0) { 6648 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6649 "3282 Invalid provisioning of " 6650 "vfi:%d\n", count); 6651 rc = -EINVAL; 6652 goto free_xri_ids; 6653 } 6654 base = phba->sli4_hba.max_cfg_param.vfi_base; 6655 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6656 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6657 sizeof(unsigned long), 6658 GFP_KERNEL); 6659 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6660 rc = -ENOMEM; 6661 goto free_xri_ids; 6662 } 6663 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6664 GFP_KERNEL); 6665 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6666 rc = -ENOMEM; 6667 goto free_vfi_bmask; 6668 } 6669 6670 for (i = 0; i < count; i++) 6671 phba->sli4_hba.vfi_ids[i] = base + i; 6672 6673 /* 6674 * Mark all resources ready. An HBA reset doesn't need 6675 * to reset the initialization. 
*/
6677 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6678 LPFC_IDX_RSRC_RDY);
6679 return 0;
6680 }
6681
6682 free_vfi_bmask:
6683 kfree(phba->sli4_hba.vfi_bmask);
6684 phba->sli4_hba.vfi_bmask = NULL;
6685 free_xri_ids:
6686 kfree(phba->sli4_hba.xri_ids);
6687 phba->sli4_hba.xri_ids = NULL;
6688 free_xri_bmask:
6689 kfree(phba->sli4_hba.xri_bmask);
6690 phba->sli4_hba.xri_bmask = NULL;
6691 free_vpi_ids:
6692 kfree(phba->vpi_ids);
6693 phba->vpi_ids = NULL;
6694 free_vpi_bmask:
6695 kfree(phba->vpi_bmask);
6696 phba->vpi_bmask = NULL;
6697 free_rpi_ids:
6698 kfree(phba->sli4_hba.rpi_ids);
6699 phba->sli4_hba.rpi_ids = NULL;
6700 free_rpi_bmask:
6701 kfree(phba->sli4_hba.rpi_bmask);
6702 phba->sli4_hba.rpi_bmask = NULL;
6703 err_exit:
6704 return rc;
6705 }
6706
6707 /**
6708 * lpfc_sli4_dealloc_resource_identifiers - Release all SLI4 resource identifiers.
6709 * @phba: Pointer to HBA context object.
6710 *
6711 * This function releases all SLI4 resource identifiers, freeing either the
6712 * allocated extents or the locally provisioned bitmasks and ID arrays.
6713 **/
6714 int
6715 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6716 {
6717 if (phba->sli4_hba.extents_in_use) {
6718 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6719 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6720 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6721 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6722 } else {
6723 kfree(phba->vpi_bmask);
6724 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6725 kfree(phba->vpi_ids);
6726 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6727 kfree(phba->sli4_hba.xri_bmask);
6728 kfree(phba->sli4_hba.xri_ids);
6729 kfree(phba->sli4_hba.vfi_bmask);
6730 kfree(phba->sli4_hba.vfi_ids);
6731 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6732 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6733 }
6734
6735 return 0;
6736 }
6737
6738 /**
6739 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6740 * @phba: Pointer to HBA context object.
6741 * @type: The resource extent type.
6742 * @extnt_cnt: buffer to hold port extent count response
6743 * @extnt_size: buffer to hold port extent size response.
6744 *
6745 * This function calls the port to read the host allocated extents
6746 * for a particular type.
6747 **/
6748 int
6749 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6750 uint16_t *extnt_cnt, uint16_t *extnt_size)
6751 {
6752 bool emb;
6753 int rc = 0;
6754 uint16_t curr_blks = 0;
6755 uint32_t req_len, emb_len;
6756 uint32_t alloc_len, mbox_tmo;
6757 struct list_head *blk_list_head;
6758 struct lpfc_rsrc_blks *rsrc_blk;
6759 LPFC_MBOXQ_t *mbox;
6760 void *virtaddr = NULL;
6761 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6762 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6763 union lpfc_sli4_cfg_shdr *shdr;
6764
6765 switch (type) {
6766 case LPFC_RSC_TYPE_FCOE_VPI:
6767 blk_list_head = &phba->lpfc_vpi_blk_list;
6768 break;
6769 case LPFC_RSC_TYPE_FCOE_XRI:
6770 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6771 break;
6772 case LPFC_RSC_TYPE_FCOE_VFI:
6773 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6774 break;
6775 case LPFC_RSC_TYPE_FCOE_RPI:
6776 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6777 break;
6778 default:
6779 return -EIO;
6780 }
6781
6782 /* Count the number of extents currently allocated for this type.
*/ 6783 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6784 if (curr_blks == 0) { 6785 /* 6786 * The GET_ALLOCATED mailbox does not return the size, 6787 * just the count. The size should be just the size 6788 * stored in the current allocated block and all sizes 6789 * for an extent type are the same so set the return 6790 * value now. 6791 */ 6792 *extnt_size = rsrc_blk->rsrc_size; 6793 } 6794 curr_blks++; 6795 } 6796 6797 /* 6798 * Calculate the size of an embedded mailbox. The uint32_t 6799 * accounts for extents-specific word. 6800 */ 6801 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6802 sizeof(uint32_t); 6803 6804 /* 6805 * Presume the allocation and response will fit into an embedded 6806 * mailbox. If not true, reconfigure to a non-embedded mailbox. 6807 */ 6808 emb = LPFC_SLI4_MBX_EMBED; 6809 req_len = emb_len; 6810 if (req_len > emb_len) { 6811 req_len = curr_blks * sizeof(uint16_t) + 6812 sizeof(union lpfc_sli4_cfg_shdr) + 6813 sizeof(uint32_t); 6814 emb = LPFC_SLI4_MBX_NEMBED; 6815 } 6816 6817 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6818 if (!mbox) 6819 return -ENOMEM; 6820 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6821 6822 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6823 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6824 req_len, emb); 6825 if (alloc_len < req_len) { 6826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6827 "2983 Allocated DMA memory size (x%x) is " 6828 "less than the requested DMA memory " 6829 "size (x%x)\n", alloc_len, req_len); 6830 rc = -ENOMEM; 6831 goto err_exit; 6832 } 6833 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6834 if (unlikely(rc)) { 6835 rc = -EIO; 6836 goto err_exit; 6837 } 6838 6839 if (!phba->sli4_hba.intr_enable) 6840 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6841 else { 6842 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6843 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6844 } 6845 6846 if (unlikely(rc)) { 6847 rc = -EIO; 6848 goto err_exit; 6849 } 6850 6851 /* 6852 * Figure out where the response is located. Then get local pointers 6853 * to the response data. The port does not guarantee to respond to 6854 * all extents counts request so update the local variable with the 6855 * allocated count from the port. 6856 */ 6857 if (emb == LPFC_SLI4_MBX_EMBED) { 6858 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6859 shdr = &rsrc_ext->header.cfg_shdr; 6860 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6861 } else { 6862 virtaddr = mbox->sge_array->addr[0]; 6863 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6864 shdr = &n_rsrc->cfg_shdr; 6865 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6866 } 6867 6868 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6869 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6870 "2984 Failed to read allocated resources " 6871 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6872 type, 6873 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6874 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6875 rc = -EIO; 6876 goto err_exit; 6877 } 6878 err_exit: 6879 lpfc_sli4_mbox_cmd_free(phba, mbox); 6880 return rc; 6881 } 6882 6883 /** 6884 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6885 * @phba: pointer to lpfc hba data structure. 6886 * @pring: Pointer to driver SLI ring object. 
6887 * @sgl_list: linked link of sgl buffers to post 6888 * @cnt: number of linked list buffers 6889 * 6890 * This routine walks the list of buffers that have been allocated and 6891 * repost them to the port by using SGL block post. This is needed after a 6892 * pci_function_reset/warm_start or start. It attempts to construct blocks 6893 * of buffer sgls which contains contiguous xris and uses the non-embedded 6894 * SGL block post mailbox commands to post them to the port. For single 6895 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6896 * mailbox command for posting. 6897 * 6898 * Returns: 0 = success, non-zero failure. 6899 **/ 6900 static int 6901 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6902 struct list_head *sgl_list, int cnt) 6903 { 6904 struct lpfc_sglq *sglq_entry = NULL; 6905 struct lpfc_sglq *sglq_entry_next = NULL; 6906 struct lpfc_sglq *sglq_entry_first = NULL; 6907 int status, total_cnt; 6908 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6909 int last_xritag = NO_XRI; 6910 LIST_HEAD(prep_sgl_list); 6911 LIST_HEAD(blck_sgl_list); 6912 LIST_HEAD(allc_sgl_list); 6913 LIST_HEAD(post_sgl_list); 6914 LIST_HEAD(free_sgl_list); 6915 6916 spin_lock_irq(&phba->hbalock); 6917 spin_lock(&phba->sli4_hba.sgl_list_lock); 6918 list_splice_init(sgl_list, &allc_sgl_list); 6919 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6920 spin_unlock_irq(&phba->hbalock); 6921 6922 total_cnt = cnt; 6923 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6924 &allc_sgl_list, list) { 6925 list_del_init(&sglq_entry->list); 6926 block_cnt++; 6927 if ((last_xritag != NO_XRI) && 6928 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6929 /* a hole in xri block, form a sgl posting block */ 6930 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6931 post_cnt = block_cnt - 1; 6932 /* prepare list for next posting block */ 6933 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6934 block_cnt = 1; 6935 } else { 6936 /* prepare list for next posting block */ 6937 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6938 /* enough sgls for non-embed sgl mbox command */ 6939 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6940 list_splice_init(&prep_sgl_list, 6941 &blck_sgl_list); 6942 post_cnt = block_cnt; 6943 block_cnt = 0; 6944 } 6945 } 6946 num_posted++; 6947 6948 /* keep track of last sgl's xritag */ 6949 last_xritag = sglq_entry->sli4_xritag; 6950 6951 /* end of repost sgl list condition for buffers */ 6952 if (num_posted == total_cnt) { 6953 if (post_cnt == 0) { 6954 list_splice_init(&prep_sgl_list, 6955 &blck_sgl_list); 6956 post_cnt = block_cnt; 6957 } else if (block_cnt == 1) { 6958 status = lpfc_sli4_post_sgl(phba, 6959 sglq_entry->phys, 0, 6960 sglq_entry->sli4_xritag); 6961 if (!status) { 6962 /* successful, put sgl to posted list */ 6963 list_add_tail(&sglq_entry->list, 6964 &post_sgl_list); 6965 } else { 6966 /* Failure, put sgl to free list */ 6967 lpfc_printf_log(phba, KERN_WARNING, 6968 LOG_SLI, 6969 "3159 Failed to post " 6970 "sgl, xritag:x%x\n", 6971 sglq_entry->sli4_xritag); 6972 list_add_tail(&sglq_entry->list, 6973 &free_sgl_list); 6974 total_cnt--; 6975 } 6976 } 6977 } 6978 6979 /* continue until a nembed page worth of sgls */ 6980 if (post_cnt == 0) 6981 continue; 6982 6983 /* post the buffer list sgls as a block */ 6984 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6985 post_cnt); 6986 6987 if (!status) { 6988 /* success, put sgl list to posted sgl list */ 6989 list_splice_init(&blck_sgl_list, &post_sgl_list); 6990 } else { 6991 /* Failure, put 
sgl list to free sgl list */ 6992 sglq_entry_first = list_first_entry(&blck_sgl_list, 6993 struct lpfc_sglq, 6994 list); 6995 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6996 "3160 Failed to post sgl-list, " 6997 "xritag:x%x-x%x\n", 6998 sglq_entry_first->sli4_xritag, 6999 (sglq_entry_first->sli4_xritag + 7000 post_cnt - 1)); 7001 list_splice_init(&blck_sgl_list, &free_sgl_list); 7002 total_cnt -= post_cnt; 7003 } 7004 7005 /* don't reset xirtag due to hole in xri block */ 7006 if (block_cnt == 0) 7007 last_xritag = NO_XRI; 7008 7009 /* reset sgl post count for next round of posting */ 7010 post_cnt = 0; 7011 } 7012 7013 /* free the sgls failed to post */ 7014 lpfc_free_sgl_list(phba, &free_sgl_list); 7015 7016 /* push sgls posted to the available list */ 7017 if (!list_empty(&post_sgl_list)) { 7018 spin_lock_irq(&phba->hbalock); 7019 spin_lock(&phba->sli4_hba.sgl_list_lock); 7020 list_splice_init(&post_sgl_list, sgl_list); 7021 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7022 spin_unlock_irq(&phba->hbalock); 7023 } else { 7024 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7025 "3161 Failure to post sgl to port.\n"); 7026 return -EIO; 7027 } 7028 7029 /* return the number of XRIs actually posted */ 7030 return total_cnt; 7031 } 7032 7033 /** 7034 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7035 * @phba: pointer to lpfc hba data structure. 7036 * 7037 * This routine walks the list of nvme buffers that have been allocated and 7038 * repost them to the port by using SGL block post. This is needed after a 7039 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7040 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7041 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7042 * 7043 * Returns: 0 = success, non-zero failure. 7044 **/ 7045 static int 7046 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7047 { 7048 LIST_HEAD(post_nblist); 7049 int num_posted, rc = 0; 7050 7051 /* get all NVME buffers need to repost to a local list */ 7052 lpfc_io_buf_flush(phba, &post_nblist); 7053 7054 /* post the list of nvme buffer sgls to port if available */ 7055 if (!list_empty(&post_nblist)) { 7056 num_posted = lpfc_sli4_post_io_sgl_list( 7057 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7058 /* failed to post any nvme buffer, return error */ 7059 if (num_posted == 0) 7060 rc = -EIO; 7061 } 7062 return rc; 7063 } 7064 7065 static void 7066 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7067 { 7068 uint32_t len; 7069 7070 len = sizeof(struct lpfc_mbx_set_host_data) - 7071 sizeof(struct lpfc_sli4_cfg_mhdr); 7072 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7073 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7074 LPFC_SLI4_MBX_EMBED); 7075 7076 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7077 mbox->u.mqe.un.set_host_data.param_len = 7078 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7079 snprintf(mbox->u.mqe.un.set_host_data.data, 7080 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7081 "Linux %s v"LPFC_DRIVER_VERSION, 7082 (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); 7083 } 7084 7085 int 7086 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7087 struct lpfc_queue *drq, int count, int idx) 7088 { 7089 int rc, i; 7090 struct lpfc_rqe hrqe; 7091 struct lpfc_rqe drqe; 7092 struct lpfc_rqb *rqbp; 7093 unsigned long flags; 7094 struct rqb_dmabuf *rqb_buffer; 7095 LIST_HEAD(rqb_buf_list); 7096 7097 spin_lock_irqsave(&phba->hbalock, flags); 7098 rqbp = hrq->rqbp; 7099 for (i = 0; i < count; i++) { 7100 /* IF RQ is already full, don't bother */ 7101 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7102 break; 7103 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7104 if (!rqb_buffer) 7105 break; 7106 rqb_buffer->hrq = hrq; 7107 rqb_buffer->drq = drq; 7108 rqb_buffer->idx = idx; 7109 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7110 } 7111 while (!list_empty(&rqb_buf_list)) { 7112 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7113 hbuf.list); 7114 7115 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7116 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7117 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7118 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7119 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7120 if (rc < 0) { 7121 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7122 "6421 Cannot post to HRQ %d: %x %x %x " 7123 "DRQ %x %x\n", 7124 hrq->queue_id, 7125 hrq->host_index, 7126 hrq->hba_index, 7127 hrq->entry_count, 7128 drq->host_index, 7129 drq->hba_index); 7130 rqbp->rqb_free_buffer(phba, rqb_buffer); 7131 } else { 7132 list_add_tail(&rqb_buffer->hbuf.list, 7133 &rqbp->rqb_buffer_list); 7134 rqbp->buffer_count++; 7135 } 7136 } 7137 spin_unlock_irqrestore(&phba->hbalock, flags); 7138 return 1; 7139 } 7140 7141 /** 7142 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7143 * @phba: Pointer to HBA context object. 7144 * 7145 * This function is the main SLI4 device initialization PCI function. This 7146 * function is called by the HBA initialization code, HBA reset code and 7147 * HBA error attention handler code. Caller is not required to hold any 7148 * locks. 7149 **/ 7150 int 7151 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7152 { 7153 int rc, i, cnt, len; 7154 LPFC_MBOXQ_t *mboxq; 7155 struct lpfc_mqe *mqe; 7156 uint8_t *vpd; 7157 uint32_t vpd_size; 7158 uint32_t ftr_rsp = 0; 7159 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7160 struct lpfc_vport *vport = phba->pport; 7161 struct lpfc_dmabuf *mp; 7162 struct lpfc_rqb *rqbp; 7163 7164 /* Perform a PCI function reset to start from clean */ 7165 rc = lpfc_pci_function_reset(phba); 7166 if (unlikely(rc)) 7167 return -ENODEV; 7168 7169 /* Check the HBA Host Status Register for readyness */ 7170 rc = lpfc_sli4_post_status_check(phba); 7171 if (unlikely(rc)) 7172 return -ENODEV; 7173 else { 7174 spin_lock_irq(&phba->hbalock); 7175 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7176 spin_unlock_irq(&phba->hbalock); 7177 } 7178 7179 /* 7180 * Allocate a single mailbox container for initializing the 7181 * port. 7182 */ 7183 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7184 if (!mboxq) 7185 return -ENOMEM; 7186 7187 /* Issue READ_REV to collect vpd and FW information. 
*/ 7188 vpd_size = SLI4_PAGE_SIZE; 7189 vpd = kzalloc(vpd_size, GFP_KERNEL); 7190 if (!vpd) { 7191 rc = -ENOMEM; 7192 goto out_free_mbox; 7193 } 7194 7195 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7196 if (unlikely(rc)) { 7197 kfree(vpd); 7198 goto out_free_mbox; 7199 } 7200 7201 mqe = &mboxq->u.mqe; 7202 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7203 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7204 phba->hba_flag |= HBA_FCOE_MODE; 7205 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7206 } else { 7207 phba->hba_flag &= ~HBA_FCOE_MODE; 7208 } 7209 7210 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7211 LPFC_DCBX_CEE_MODE) 7212 phba->hba_flag |= HBA_FIP_SUPPORT; 7213 else 7214 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7215 7216 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 7217 7218 if (phba->sli_rev != LPFC_SLI_REV4) { 7219 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7220 "0376 READ_REV Error. SLI Level %d " 7221 "FCoE enabled %d\n", 7222 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7223 rc = -EIO; 7224 kfree(vpd); 7225 goto out_free_mbox; 7226 } 7227 7228 /* 7229 * Continue initialization with default values even if driver failed 7230 * to read FCoE param config regions, only read parameters if the 7231 * board is FCoE 7232 */ 7233 if (phba->hba_flag & HBA_FCOE_MODE && 7234 lpfc_sli4_read_fcoe_params(phba)) 7235 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7236 "2570 Failed to read FCoE parameters\n"); 7237 7238 /* 7239 * Retrieve sli4 device physical port name, failure of doing it 7240 * is considered as non-fatal. 7241 */ 7242 rc = lpfc_sli4_retrieve_pport_name(phba); 7243 if (!rc) 7244 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7245 "3080 Successful retrieving SLI4 device " 7246 "physical port name: %s.\n", phba->Port); 7247 7248 rc = lpfc_sli4_get_ctl_attr(phba); 7249 if (!rc) 7250 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7251 "8351 Successful retrieving SLI4 device " 7252 "CTL ATTR\n"); 7253 7254 /* 7255 * Evaluate the read rev and vpd data. Populate the driver 7256 * state with the results. If this routine fails, the failure 7257 * is not fatal as the driver will use generic values. 7258 */ 7259 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7260 if (unlikely(!rc)) { 7261 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7262 "0377 Error %d parsing vpd. 
" 7263 "Using defaults.\n", rc); 7264 rc = 0; 7265 } 7266 kfree(vpd); 7267 7268 /* Save information as VPD data */ 7269 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7270 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7271 7272 /* 7273 * This is because first G7 ASIC doesn't support the standard 7274 * 0x5a NVME cmd descriptor type/subtype 7275 */ 7276 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7277 LPFC_SLI_INTF_IF_TYPE_6) && 7278 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7279 (phba->vpd.rev.smRev == 0) && 7280 (phba->cfg_nvme_embed_cmd == 1)) 7281 phba->cfg_nvme_embed_cmd = 0; 7282 7283 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7284 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7285 &mqe->un.read_rev); 7286 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7287 &mqe->un.read_rev); 7288 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7289 &mqe->un.read_rev); 7290 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7291 &mqe->un.read_rev); 7292 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7293 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7294 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7295 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7296 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7297 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7298 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7299 "(%d):0380 READ_REV Status x%x " 7300 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7301 mboxq->vport ? mboxq->vport->vpi : 0, 7302 bf_get(lpfc_mqe_status, mqe), 7303 phba->vpd.rev.opFwName, 7304 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7305 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7306 7307 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7308 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7309 if (phba->pport->cfg_lun_queue_depth > rc) { 7310 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7311 "3362 LUN queue depth changed from %d to %d\n", 7312 phba->pport->cfg_lun_queue_depth, rc); 7313 phba->pport->cfg_lun_queue_depth = rc; 7314 } 7315 7316 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7317 LPFC_SLI_INTF_IF_TYPE_0) { 7318 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7319 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7320 if (rc == MBX_SUCCESS) { 7321 phba->hba_flag |= HBA_RECOVERABLE_UE; 7322 /* Set 1Sec interval to detect UE */ 7323 phba->eratt_poll_interval = 1; 7324 phba->sli4_hba.ue_to_sr = bf_get( 7325 lpfc_mbx_set_feature_UESR, 7326 &mboxq->u.mqe.un.set_feature); 7327 phba->sli4_hba.ue_to_rp = bf_get( 7328 lpfc_mbx_set_feature_UERP, 7329 &mboxq->u.mqe.un.set_feature); 7330 } 7331 } 7332 7333 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7334 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7335 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7336 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7337 if (rc != MBX_SUCCESS) 7338 phba->mds_diags_support = 0; 7339 } 7340 7341 /* 7342 * Discover the port's supported feature set and match it against the 7343 * hosts requests. 7344 */ 7345 lpfc_request_features(phba, mboxq); 7346 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7347 if (unlikely(rc)) { 7348 rc = -EIO; 7349 goto out_free_mbox; 7350 } 7351 7352 /* 7353 * The port must support FCP initiator mode as this is the 7354 * only mode running in the host. 
7355 */ 7356 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7357 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7358 "0378 No support for fcpi mode.\n"); 7359 ftr_rsp++; 7360 } 7361 7362 /* Performance Hints are ONLY for FCoE */ 7363 if (phba->hba_flag & HBA_FCOE_MODE) { 7364 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7365 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7366 else 7367 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7368 } 7369 7370 /* 7371 * If the port cannot support the host's requested features 7372 * then turn off the global config parameters to disable the 7373 * feature in the driver. This is not a fatal error. 7374 */ 7375 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7376 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7377 phba->cfg_enable_bg = 0; 7378 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7379 ftr_rsp++; 7380 } 7381 } 7382 7383 if (phba->max_vpi && phba->cfg_enable_npiv && 7384 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7385 ftr_rsp++; 7386 7387 if (ftr_rsp) { 7388 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7389 "0379 Feature Mismatch Data: x%08x %08x " 7390 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7391 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7392 phba->cfg_enable_npiv, phba->max_vpi); 7393 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7394 phba->cfg_enable_bg = 0; 7395 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7396 phba->cfg_enable_npiv = 0; 7397 } 7398 7399 /* These SLI3 features are assumed in SLI4 */ 7400 spin_lock_irq(&phba->hbalock); 7401 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7402 spin_unlock_irq(&phba->hbalock); 7403 7404 /* 7405 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7406 * calls depends on these resources to complete port setup. 7407 */ 7408 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7409 if (rc) { 7410 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7411 "2920 Failed to alloc Resource IDs " 7412 "rc = x%x\n", rc); 7413 goto out_free_mbox; 7414 } 7415 7416 lpfc_set_host_data(phba, mboxq); 7417 7418 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7419 if (rc) { 7420 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7421 "2134 Failed to set host os driver version %x", 7422 rc); 7423 } 7424 7425 /* Read the port's service parameters. */ 7426 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7427 if (rc) { 7428 phba->link_state = LPFC_HBA_ERROR; 7429 rc = -ENOMEM; 7430 goto out_free_mbox; 7431 } 7432 7433 mboxq->vport = vport; 7434 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7435 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 7436 if (rc == MBX_SUCCESS) { 7437 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7438 rc = 0; 7439 } 7440 7441 /* 7442 * This memory was allocated by the lpfc_read_sparam routine. Release 7443 * it to the mbuf pool. 7444 */ 7445 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7446 kfree(mp); 7447 mboxq->ctx_buf = NULL; 7448 if (unlikely(rc)) { 7449 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7450 "0382 READ_SPARAM command failed " 7451 "status %d, mbxStatus x%x\n", 7452 rc, bf_get(lpfc_mqe_status, mqe)); 7453 phba->link_state = LPFC_HBA_ERROR; 7454 rc = -EIO; 7455 goto out_free_mbox; 7456 } 7457 7458 lpfc_update_vport_wwn(vport); 7459 7460 /* Update the fc_host data structures with new wwn. 
*/ 7461 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7462 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7463 7464 /* Create all the SLI4 queues */ 7465 rc = lpfc_sli4_queue_create(phba); 7466 if (rc) { 7467 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7468 "3089 Failed to allocate queues\n"); 7469 rc = -ENODEV; 7470 goto out_free_mbox; 7471 } 7472 /* Set up all the queues to the device */ 7473 rc = lpfc_sli4_queue_setup(phba); 7474 if (unlikely(rc)) { 7475 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7476 "0381 Error %d during queue setup.\n ", rc); 7477 goto out_stop_timers; 7478 } 7479 /* Initialize the driver internal SLI layer lists. */ 7480 lpfc_sli4_setup(phba); 7481 lpfc_sli4_queue_init(phba); 7482 7483 /* update host els xri-sgl sizes and mappings */ 7484 rc = lpfc_sli4_els_sgl_update(phba); 7485 if (unlikely(rc)) { 7486 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7487 "1400 Failed to update xri-sgl size and " 7488 "mapping: %d\n", rc); 7489 goto out_destroy_queue; 7490 } 7491 7492 /* register the els sgl pool to the port */ 7493 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7494 phba->sli4_hba.els_xri_cnt); 7495 if (unlikely(rc < 0)) { 7496 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7497 "0582 Error %d during els sgl post " 7498 "operation\n", rc); 7499 rc = -ENODEV; 7500 goto out_destroy_queue; 7501 } 7502 phba->sli4_hba.els_xri_cnt = rc; 7503 7504 if (phba->nvmet_support) { 7505 /* update host nvmet xri-sgl sizes and mappings */ 7506 rc = lpfc_sli4_nvmet_sgl_update(phba); 7507 if (unlikely(rc)) { 7508 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7509 "6308 Failed to update nvmet-sgl size " 7510 "and mapping: %d\n", rc); 7511 goto out_destroy_queue; 7512 } 7513 7514 /* register the nvmet sgl pool to the port */ 7515 rc = lpfc_sli4_repost_sgl_list( 7516 phba, 7517 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7518 phba->sli4_hba.nvmet_xri_cnt); 7519 if (unlikely(rc < 0)) { 7520 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7521 "3117 Error %d during nvmet " 7522 "sgl post\n", rc); 7523 rc = -ENODEV; 7524 goto out_destroy_queue; 7525 } 7526 phba->sli4_hba.nvmet_xri_cnt = rc; 7527 7528 cnt = phba->cfg_iocb_cnt * 1024; 7529 /* We need 1 iocbq for every SGL, for IO processing */ 7530 cnt += phba->sli4_hba.nvmet_xri_cnt; 7531 } else { 7532 /* update host common xri-sgl sizes and mappings */ 7533 rc = lpfc_sli4_io_sgl_update(phba); 7534 if (unlikely(rc)) { 7535 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7536 "6082 Failed to update nvme-sgl size " 7537 "and mapping: %d\n", rc); 7538 goto out_destroy_queue; 7539 } 7540 7541 /* register the allocated common sgl pool to the port */ 7542 rc = lpfc_sli4_repost_io_sgl_list(phba); 7543 if (unlikely(rc)) { 7544 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7545 "6116 Error %d during nvme sgl post " 7546 "operation\n", rc); 7547 /* Some NVME buffers were moved to abort nvme list */ 7548 /* A pci function reset will repost them */ 7549 rc = -ENODEV; 7550 goto out_destroy_queue; 7551 } 7552 cnt = phba->cfg_iocb_cnt * 1024; 7553 } 7554 7555 if (!phba->sli.iocbq_lookup) { 7556 /* Initialize and populate the iocb list per host */ 7557 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7558 "2821 initialize iocb list %d total %d\n", 7559 phba->cfg_iocb_cnt, cnt); 7560 rc = lpfc_init_iocb_list(phba, cnt); 7561 if (rc) { 7562 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7563 "1413 Failed to init iocb list.\n"); 7564 goto out_destroy_queue; 7565 } 7566 } 7567 
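	/*
	 * Sizing example (hypothetical values): with cfg_iocb_cnt = 2 the
	 * non-nvmet path above computes cnt = 2 * 1024 = 2048 iocbqs; in
	 * nvmet mode with, say, nvmet_xri_cnt = 512 it becomes
	 * cnt = 2 * 1024 + 512 = 2560, i.e. one extra iocbq per nvmet SGL.
	 */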
7568 if (phba->nvmet_support) 7569 lpfc_nvmet_create_targetport(phba); 7570 7571 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 7572 /* Post initial buffers to all RQs created */ 7573 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7574 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7575 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7576 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7577 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7578 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7579 rqbp->buffer_count = 0; 7580 7581 lpfc_post_rq_buffer( 7582 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7583 phba->sli4_hba.nvmet_mrq_data[i], 7584 phba->cfg_nvmet_mrq_post, i); 7585 } 7586 } 7587 7588 /* Post the rpi header region to the device. */ 7589 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7590 if (unlikely(rc)) { 7591 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7592 "0393 Error %d during rpi post operation\n", 7593 rc); 7594 rc = -ENODEV; 7595 goto out_destroy_queue; 7596 } 7597 lpfc_sli4_node_prep(phba); 7598 7599 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7600 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7601 /* 7602 * The FC Port needs to register FCFI (index 0) 7603 */ 7604 lpfc_reg_fcfi(phba, mboxq); 7605 mboxq->vport = phba->pport; 7606 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7607 if (rc != MBX_SUCCESS) 7608 goto out_unset_queue; 7609 rc = 0; 7610 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7611 &mboxq->u.mqe.un.reg_fcfi); 7612 } else { 7613 /* We are a NVME Target mode with MRQ > 1 */ 7614 7615 /* First register the FCFI */ 7616 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7617 mboxq->vport = phba->pport; 7618 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7619 if (rc != MBX_SUCCESS) 7620 goto out_unset_queue; 7621 rc = 0; 7622 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7623 &mboxq->u.mqe.un.reg_fcfi_mrq); 7624 7625 /* Next register the MRQs */ 7626 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7627 mboxq->vport = phba->pport; 7628 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7629 if (rc != MBX_SUCCESS) 7630 goto out_unset_queue; 7631 rc = 0; 7632 } 7633 /* Check if the port is configured to be disabled */ 7634 lpfc_sli_read_link_ste(phba); 7635 } 7636 7637 /* Don't post more new bufs if repost already recovered 7638 * the nvme sgls. 
7639 */ 7640 if (phba->nvmet_support == 0) { 7641 if (phba->sli4_hba.io_xri_cnt == 0) { 7642 len = lpfc_new_io_buf( 7643 phba, phba->sli4_hba.io_xri_max); 7644 if (len == 0) { 7645 rc = -ENOMEM; 7646 goto out_unset_queue; 7647 } 7648 7649 if (phba->cfg_xri_rebalancing) 7650 lpfc_create_multixri_pools(phba); 7651 } 7652 } else { 7653 phba->cfg_xri_rebalancing = 0; 7654 } 7655 7656 /* Allow asynchronous mailbox command to go through */ 7657 spin_lock_irq(&phba->hbalock); 7658 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7659 spin_unlock_irq(&phba->hbalock); 7660 7661 /* Post receive buffers to the device */ 7662 lpfc_sli4_rb_setup(phba); 7663 7664 /* Reset HBA FCF states after HBA reset */ 7665 phba->fcf.fcf_flag = 0; 7666 phba->fcf.current_rec.flag = 0; 7667 7668 /* Start the ELS watchdog timer */ 7669 mod_timer(&vport->els_tmofunc, 7670 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7671 7672 /* Start heart beat timer */ 7673 mod_timer(&phba->hb_tmofunc, 7674 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7675 phba->hb_outstanding = 0; 7676 phba->last_completion_time = jiffies; 7677 7678 /* start eq_delay heartbeat */ 7679 if (phba->cfg_auto_imax) 7680 queue_delayed_work(phba->wq, &phba->eq_delay_work, 7681 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 7682 7683 /* Start error attention (ERATT) polling timer */ 7684 mod_timer(&phba->eratt_poll, 7685 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7686 7687 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7688 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7689 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7690 if (!rc) { 7691 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7692 "2829 This device supports " 7693 "Advanced Error Reporting (AER)\n"); 7694 spin_lock_irq(&phba->hbalock); 7695 phba->hba_flag |= HBA_AER_ENABLED; 7696 spin_unlock_irq(&phba->hbalock); 7697 } else { 7698 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7699 "2830 This device does not support " 7700 "Advanced Error Reporting (AER)\n"); 7701 phba->cfg_aer_support = 0; 7702 } 7703 rc = 0; 7704 } 7705 7706 /* 7707 * The port is ready, set the host's link state to LINK_DOWN 7708 * in preparation for link interrupts. 
7709 */ 7710 spin_lock_irq(&phba->hbalock); 7711 phba->link_state = LPFC_LINK_DOWN; 7712 7713 /* Check if physical ports are trunked */ 7714 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 7715 phba->trunk_link.link0.state = LPFC_LINK_DOWN; 7716 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 7717 phba->trunk_link.link1.state = LPFC_LINK_DOWN; 7718 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 7719 phba->trunk_link.link2.state = LPFC_LINK_DOWN; 7720 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 7721 phba->trunk_link.link3.state = LPFC_LINK_DOWN; 7722 spin_unlock_irq(&phba->hbalock); 7723 7724 /* Arm the CQs and then EQs on device */ 7725 lpfc_sli4_arm_cqeq_intr(phba); 7726 7727 /* Indicate device interrupt mode */ 7728 phba->sli4_hba.intr_enable = 1; 7729 7730 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7731 (phba->hba_flag & LINK_DISABLED)) { 7732 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7733 "3103 Adapter Link is disabled.\n"); 7734 lpfc_down_link(phba, mboxq); 7735 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7736 if (rc != MBX_SUCCESS) { 7737 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7738 "3104 Adapter failed to issue " 7739 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7740 goto out_io_buff_free; 7741 } 7742 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7743 /* don't perform init_link on SLI4 FC port loopback test */ 7744 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7745 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7746 if (rc) 7747 goto out_io_buff_free; 7748 } 7749 } 7750 mempool_free(mboxq, phba->mbox_mem_pool); 7751 return rc; 7752 out_io_buff_free: 7753 /* Free allocated IO Buffers */ 7754 lpfc_io_free(phba); 7755 out_unset_queue: 7756 /* Unset all the queues set up in this routine when error out */ 7757 lpfc_sli4_queue_unset(phba); 7758 out_destroy_queue: 7759 lpfc_free_iocb_list(phba); 7760 lpfc_sli4_queue_destroy(phba); 7761 out_stop_timers: 7762 lpfc_stop_hba_timers(phba); 7763 out_free_mbox: 7764 mempool_free(mboxq, phba->mbox_mem_pool); 7765 return rc; 7766 } 7767 7768 /** 7769 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7770 * @ptr: context object - pointer to hba structure. 7771 * 7772 * This is the callback function for mailbox timer. The mailbox 7773 * timer is armed when a new mailbox command is issued and the timer 7774 * is deleted when the mailbox complete. The function is called by 7775 * the kernel timer code when a mailbox does not complete within 7776 * expected time. This function wakes up the worker thread to 7777 * process the mailbox timeout and returns. All the processing is 7778 * done by the worker thread function lpfc_mbox_timeout_handler. 7779 **/ 7780 void 7781 lpfc_mbox_timeout(struct timer_list *t) 7782 { 7783 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7784 unsigned long iflag; 7785 uint32_t tmo_posted; 7786 7787 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7788 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7789 if (!tmo_posted) 7790 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7791 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7792 7793 if (!tmo_posted) 7794 lpfc_worker_wake_up(phba); 7795 return; 7796 } 7797 7798 /** 7799 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7800 * are pending 7801 * @phba: Pointer to HBA context object. 7802 * 7803 * This function checks if any mailbox completions are present on the mailbox 7804 * completion queue. 
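 * Returns true when at least one completed, non-asynchronous mailbox CQE is
 * found on the queue, false otherwise.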
7805 **/ 7806 static bool 7807 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7808 { 7809 7810 uint32_t idx; 7811 struct lpfc_queue *mcq; 7812 struct lpfc_mcqe *mcqe; 7813 bool pending_completions = false; 7814 uint8_t qe_valid; 7815 7816 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7817 return false; 7818 7819 /* Check for completions on mailbox completion queue */ 7820 7821 mcq = phba->sli4_hba.mbx_cq; 7822 idx = mcq->hba_index; 7823 qe_valid = mcq->qe_valid; 7824 while (bf_get_le32(lpfc_cqe_valid, 7825 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { 7826 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); 7827 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7828 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7829 pending_completions = true; 7830 break; 7831 } 7832 idx = (idx + 1) % mcq->entry_count; 7833 if (mcq->hba_index == idx) 7834 break; 7835 7836 /* if the index wrapped around, toggle the valid bit */ 7837 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7838 qe_valid = (qe_valid) ? 0 : 1; 7839 } 7840 return pending_completions; 7841 7842 } 7843 7844 /** 7845 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7846 * that were missed. 7847 * @phba: Pointer to HBA context object. 7848 * 7849 * For sli4, it is possible to miss an interrupt. As such mbox completions 7850 * maybe missed causing erroneous mailbox timeouts to occur. This function 7851 * checks to see if mbox completions are on the mailbox completion queue 7852 * and will process all the completions associated with the eq for the 7853 * mailbox completion queue. 7854 **/ 7855 bool 7856 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7857 { 7858 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7859 uint32_t eqidx; 7860 struct lpfc_queue *fpeq = NULL; 7861 bool mbox_pending; 7862 7863 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7864 return false; 7865 7866 /* Find the eq associated with the mcq */ 7867 7868 if (sli4_hba->hdwq) 7869 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) 7870 if (sli4_hba->hdwq[eqidx].hba_eq->queue_id == 7871 sli4_hba->mbx_cq->assoc_qid) { 7872 fpeq = sli4_hba->hdwq[eqidx].hba_eq; 7873 break; 7874 } 7875 if (!fpeq) 7876 return false; 7877 7878 /* Turn off interrupts from this EQ */ 7879 7880 sli4_hba->sli4_eq_clr_intr(fpeq); 7881 7882 /* Check to see if a mbox completion is pending */ 7883 7884 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7885 7886 /* 7887 * If a mbox completion is pending, process all the events on EQ 7888 * associated with the mbox completion queue (this could include 7889 * mailbox commands, async events, els commands, receive queue data 7890 * and fcp commands) 7891 */ 7892 7893 if (mbox_pending) 7894 /* process and rearm the EQ */ 7895 lpfc_sli4_process_eq(phba, fpeq); 7896 else 7897 /* Always clear and re-arm the EQ */ 7898 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 7899 7900 return mbox_pending; 7901 7902 } 7903 7904 /** 7905 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7906 * @phba: Pointer to HBA context object. 7907 * 7908 * This function is called from worker thread when a mailbox command times out. 7909 * The caller is not required to hold any locks. This function will reset the 7910 * HBA and recover all the pending commands. 
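 * Before resetting, the handler first checks whether the mailbox actually
 * completed and the completion was simply missed (via
 * lpfc_sli4_process_missed_mbox_completions); if so, it returns without
 * touching the HBA.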
7911 **/ 7912 void 7913 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7914 { 7915 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7916 MAILBOX_t *mb = NULL; 7917 7918 struct lpfc_sli *psli = &phba->sli; 7919 7920 /* If the mailbox completed, process the completion and return */ 7921 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7922 return; 7923 7924 if (pmbox != NULL) 7925 mb = &pmbox->u.mb; 7926 /* Check the pmbox pointer first. There is a race condition 7927 * between the mbox timeout handler getting executed in the 7928 * worklist and the mailbox actually completing. When this 7929 * race condition occurs, the mbox_active will be NULL. 7930 */ 7931 spin_lock_irq(&phba->hbalock); 7932 if (pmbox == NULL) { 7933 lpfc_printf_log(phba, KERN_WARNING, 7934 LOG_MBOX | LOG_SLI, 7935 "0353 Active Mailbox cleared - mailbox timeout " 7936 "exiting\n"); 7937 spin_unlock_irq(&phba->hbalock); 7938 return; 7939 } 7940 7941 /* Mbox cmd <mbxCommand> timeout */ 7942 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7943 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7944 mb->mbxCommand, 7945 phba->pport->port_state, 7946 phba->sli.sli_flag, 7947 phba->sli.mbox_active); 7948 spin_unlock_irq(&phba->hbalock); 7949 7950 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7951 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7952 * it to fail all outstanding SCSI IO. 7953 */ 7954 spin_lock_irq(&phba->pport->work_port_lock); 7955 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7956 spin_unlock_irq(&phba->pport->work_port_lock); 7957 spin_lock_irq(&phba->hbalock); 7958 phba->link_state = LPFC_LINK_UNKNOWN; 7959 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7960 spin_unlock_irq(&phba->hbalock); 7961 7962 lpfc_sli_abort_fcp_rings(phba); 7963 7964 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7965 "0345 Resetting board due to mailbox timeout\n"); 7966 7967 /* Reset the HBA device */ 7968 lpfc_reset_hba(phba); 7969 } 7970 7971 /** 7972 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7973 * @phba: Pointer to HBA context object. 7974 * @pmbox: Pointer to mailbox object. 7975 * @flag: Flag indicating how the mailbox need to be processed. 7976 * 7977 * This function is called by discovery code and HBA management code 7978 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7979 * function gets the hbalock to protect the data structures. 7980 * The mailbox command can be submitted in polling mode, in which case 7981 * this function will wait in a polling loop for the completion of the 7982 * mailbox. 7983 * If the mailbox is submitted in no_wait mode (not polling) the 7984 * function will submit the command and returns immediately without waiting 7985 * for the mailbox completion. The no_wait is supported only when HBA 7986 * is in SLI2/SLI3 mode - interrupts are enabled. 7987 * The SLI interface allows only one mailbox pending at a time. If the 7988 * mailbox is issued in polling mode and there is already a mailbox 7989 * pending, then the function will return an error. If the mailbox is issued 7990 * in NO_WAIT mode and there is a mailbox pending already, the function 7991 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7992 * The sli layer owns the mailbox object until the completion of mailbox 7993 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7994 * return codes the caller owns the mailbox command after the return of 7995 * the function. 
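 *
 * A minimal polling-mode usage sketch, illustrative only (callers normally
 * go through the lpfc_sli_issue_mbox wrapper; command setup details,
 * context-buffer cleanup and error handling are elided, and a valid
 * phba/vport pair is assumed):
 *
 *   LPFC_MBOXQ_t *mboxq;
 *   int rc;
 *
 *   mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *   if (!mboxq)
 *       return -ENOMEM;
 *   lpfc_read_sparam(phba, mboxq, vport->vpi);   // build the command
 *   mboxq->vport = vport;
 *   rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *   if (rc == MBX_SUCCESS)
 *       ;                                        // consume mboxq->u.mb here
 *   mempool_free(mboxq, phba->mbox_mem_pool);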
7996 **/ 7997 static int 7998 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7999 uint32_t flag) 8000 { 8001 MAILBOX_t *mbx; 8002 struct lpfc_sli *psli = &phba->sli; 8003 uint32_t status, evtctr; 8004 uint32_t ha_copy, hc_copy; 8005 int i; 8006 unsigned long timeout; 8007 unsigned long drvr_flag = 0; 8008 uint32_t word0, ldata; 8009 void __iomem *to_slim; 8010 int processing_queue = 0; 8011 8012 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8013 if (!pmbox) { 8014 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8015 /* processing mbox queue from intr_handler */ 8016 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8017 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8018 return MBX_SUCCESS; 8019 } 8020 processing_queue = 1; 8021 pmbox = lpfc_mbox_get(phba); 8022 if (!pmbox) { 8023 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8024 return MBX_SUCCESS; 8025 } 8026 } 8027 8028 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 8029 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 8030 if(!pmbox->vport) { 8031 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8032 lpfc_printf_log(phba, KERN_ERR, 8033 LOG_MBOX | LOG_VPORT, 8034 "1806 Mbox x%x failed. No vport\n", 8035 pmbox->u.mb.mbxCommand); 8036 dump_stack(); 8037 goto out_not_finished; 8038 } 8039 } 8040 8041 /* If the PCI channel is in offline state, do not post mbox. */ 8042 if (unlikely(pci_channel_offline(phba->pcidev))) { 8043 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8044 goto out_not_finished; 8045 } 8046 8047 /* If HBA has a deferred error attention, fail the iocb. */ 8048 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8049 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8050 goto out_not_finished; 8051 } 8052 8053 psli = &phba->sli; 8054 8055 mbx = &pmbox->u.mb; 8056 status = MBX_SUCCESS; 8057 8058 if (phba->link_state == LPFC_HBA_ERROR) { 8059 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8060 8061 /* Mbox command <mbxCommand> cannot issue */ 8062 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8063 "(%d):0311 Mailbox command x%x cannot " 8064 "issue Data: x%x x%x\n", 8065 pmbox->vport ? pmbox->vport->vpi : 0, 8066 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8067 goto out_not_finished; 8068 } 8069 8070 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 8071 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8072 !(hc_copy & HC_MBINT_ENA)) { 8073 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8074 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8075 "(%d):2528 Mailbox command x%x cannot " 8076 "issue Data: x%x x%x\n", 8077 pmbox->vport ? pmbox->vport->vpi : 0, 8078 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8079 goto out_not_finished; 8080 } 8081 } 8082 8083 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8084 /* Polling for a mbox command when another one is already active 8085 * is not allowed in SLI. Also, the driver must have established 8086 * SLI2 mode to queue and process multiple mbox commands. 8087 */ 8088 8089 if (flag & MBX_POLL) { 8090 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8091 8092 /* Mbox command <mbxCommand> cannot issue */ 8093 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8094 "(%d):2529 Mailbox command x%x " 8095 "cannot issue Data: x%x x%x\n", 8096 pmbox->vport ? 
pmbox->vport->vpi : 0, 8097 pmbox->u.mb.mbxCommand, 8098 psli->sli_flag, flag); 8099 goto out_not_finished; 8100 } 8101 8102 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8103 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8104 /* Mbox command <mbxCommand> cannot issue */ 8105 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8106 "(%d):2530 Mailbox command x%x " 8107 "cannot issue Data: x%x x%x\n", 8108 pmbox->vport ? pmbox->vport->vpi : 0, 8109 pmbox->u.mb.mbxCommand, 8110 psli->sli_flag, flag); 8111 goto out_not_finished; 8112 } 8113 8114 /* Another mailbox command is still being processed, queue this 8115 * command to be processed later. 8116 */ 8117 lpfc_mbox_put(phba, pmbox); 8118 8119 /* Mbox cmd issue - BUSY */ 8120 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8121 "(%d):0308 Mbox cmd issue - BUSY Data: " 8122 "x%x x%x x%x x%x\n", 8123 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8124 mbx->mbxCommand, 8125 phba->pport ? phba->pport->port_state : 0xff, 8126 psli->sli_flag, flag); 8127 8128 psli->slistat.mbox_busy++; 8129 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8130 8131 if (pmbox->vport) { 8132 lpfc_debugfs_disc_trc(pmbox->vport, 8133 LPFC_DISC_TRC_MBOX_VPORT, 8134 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8135 (uint32_t)mbx->mbxCommand, 8136 mbx->un.varWords[0], mbx->un.varWords[1]); 8137 } 8138 else { 8139 lpfc_debugfs_disc_trc(phba->pport, 8140 LPFC_DISC_TRC_MBOX, 8141 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8142 (uint32_t)mbx->mbxCommand, 8143 mbx->un.varWords[0], mbx->un.varWords[1]); 8144 } 8145 8146 return MBX_BUSY; 8147 } 8148 8149 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8150 8151 /* If we are not polling, we MUST be in SLI2 mode */ 8152 if (flag != MBX_POLL) { 8153 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8154 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8155 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8156 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8157 /* Mbox command <mbxCommand> cannot issue */ 8158 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8159 "(%d):2531 Mailbox command x%x " 8160 "cannot issue Data: x%x x%x\n", 8161 pmbox->vport ? pmbox->vport->vpi : 0, 8162 pmbox->u.mb.mbxCommand, 8163 psli->sli_flag, flag); 8164 goto out_not_finished; 8165 } 8166 /* timeout active mbox command */ 8167 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8168 1000); 8169 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8170 } 8171 8172 /* Mailbox cmd <cmd> issue */ 8173 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8174 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8175 "x%x\n", 8176 pmbox->vport ? pmbox->vport->vpi : 0, 8177 mbx->mbxCommand, 8178 phba->pport ? phba->pport->port_state : 0xff, 8179 psli->sli_flag, flag); 8180 8181 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8182 if (pmbox->vport) { 8183 lpfc_debugfs_disc_trc(pmbox->vport, 8184 LPFC_DISC_TRC_MBOX_VPORT, 8185 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8186 (uint32_t)mbx->mbxCommand, 8187 mbx->un.varWords[0], mbx->un.varWords[1]); 8188 } 8189 else { 8190 lpfc_debugfs_disc_trc(phba->pport, 8191 LPFC_DISC_TRC_MBOX, 8192 "MBOX Send: cmd:x%x mb:x%x x%x", 8193 (uint32_t)mbx->mbxCommand, 8194 mbx->un.varWords[0], mbx->un.varWords[1]); 8195 } 8196 } 8197 8198 psli->slistat.mbox_cmd++; 8199 evtctr = psli->slistat.mbox_event; 8200 8201 /* next set own bit for the adapter and copy over command word */ 8202 mbx->mbxOwner = OWN_CHIP; 8203 8204 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8205 /* Populate mbox extension offset word. 
*/ 8206 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8207 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8208 = (uint8_t *)phba->mbox_ext 8209 - (uint8_t *)phba->mbox; 8210 } 8211 8212 /* Copy the mailbox extension data */ 8213 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { 8214 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, 8215 (uint8_t *)phba->mbox_ext, 8216 pmbox->in_ext_byte_len); 8217 } 8218 /* Copy command data to host SLIM area */ 8219 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8220 } else { 8221 /* Populate mbox extension offset word. */ 8222 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8223 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8224 = MAILBOX_HBA_EXT_OFFSET; 8225 8226 /* Copy the mailbox extension data */ 8227 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) 8228 lpfc_memcpy_to_slim(phba->MBslimaddr + 8229 MAILBOX_HBA_EXT_OFFSET, 8230 pmbox->ctx_buf, pmbox->in_ext_byte_len); 8231 8232 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8233 /* copy command data into host mbox for cmpl */ 8234 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8235 MAILBOX_CMD_SIZE); 8236 8237 /* First copy mbox command data to HBA SLIM, skip past first 8238 word */ 8239 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8240 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8241 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8242 8243 /* Next copy over first word, with mbxOwner set */ 8244 ldata = *((uint32_t *)mbx); 8245 to_slim = phba->MBslimaddr; 8246 writel(ldata, to_slim); 8247 readl(to_slim); /* flush */ 8248 8249 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8250 /* switch over to host mailbox */ 8251 psli->sli_flag |= LPFC_SLI_ACTIVE; 8252 } 8253 8254 wmb(); 8255 8256 switch (flag) { 8257 case MBX_NOWAIT: 8258 /* Set up reference to mailbox command */ 8259 psli->mbox_active = pmbox; 8260 /* Interrupt board to do it */ 8261 writel(CA_MBATT, phba->CAregaddr); 8262 readl(phba->CAregaddr); /* flush */ 8263 /* Don't wait for it to finish, just return */ 8264 break; 8265 8266 case MBX_POLL: 8267 /* Set up null reference to mailbox command */ 8268 psli->mbox_active = NULL; 8269 /* Interrupt board to do it */ 8270 writel(CA_MBATT, phba->CAregaddr); 8271 readl(phba->CAregaddr); /* flush */ 8272 8273 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8274 /* First read mbox status word */ 8275 word0 = *((uint32_t *)phba->mbox); 8276 word0 = le32_to_cpu(word0); 8277 } else { 8278 /* First read mbox status word */ 8279 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8280 spin_unlock_irqrestore(&phba->hbalock, 8281 drvr_flag); 8282 goto out_not_finished; 8283 } 8284 } 8285 8286 /* Read the HBA Host Attention Register */ 8287 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8288 spin_unlock_irqrestore(&phba->hbalock, 8289 drvr_flag); 8290 goto out_not_finished; 8291 } 8292 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8293 1000) + jiffies; 8294 i = 0; 8295 /* Wait for command to complete */ 8296 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8297 (!(ha_copy & HA_MBATT) && 8298 (phba->link_state > LPFC_WARM_START))) { 8299 if (time_after(jiffies, timeout)) { 8300 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8301 spin_unlock_irqrestore(&phba->hbalock, 8302 drvr_flag); 8303 goto out_not_finished; 8304 } 8305 8306 /* Check if we took a mbox interrupt while we were 8307 polling */ 8308 if (((word0 & OWN_CHIP) != OWN_CHIP) 8309 && (evtctr != psli->slistat.mbox_event)) 8310 break; 8311 8312 if (i++ > 10) { 8313 spin_unlock_irqrestore(&phba->hbalock, 8314 drvr_flag); 8315 msleep(1); 8316 spin_lock_irqsave(&phba->hbalock, 
drvr_flag); 8317 } 8318 8319 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8320 /* First copy command data */ 8321 word0 = *((uint32_t *)phba->mbox); 8322 word0 = le32_to_cpu(word0); 8323 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8324 MAILBOX_t *slimmb; 8325 uint32_t slimword0; 8326 /* Check real SLIM for any errors */ 8327 slimword0 = readl(phba->MBslimaddr); 8328 slimmb = (MAILBOX_t *) & slimword0; 8329 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8330 && slimmb->mbxStatus) { 8331 psli->sli_flag &= 8332 ~LPFC_SLI_ACTIVE; 8333 word0 = slimword0; 8334 } 8335 } 8336 } else { 8337 /* First copy command data */ 8338 word0 = readl(phba->MBslimaddr); 8339 } 8340 /* Read the HBA Host Attention Register */ 8341 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8342 spin_unlock_irqrestore(&phba->hbalock, 8343 drvr_flag); 8344 goto out_not_finished; 8345 } 8346 } 8347 8348 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8349 /* copy results back to user */ 8350 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8351 MAILBOX_CMD_SIZE); 8352 /* Copy the mailbox extension data */ 8353 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8354 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8355 pmbox->ctx_buf, 8356 pmbox->out_ext_byte_len); 8357 } 8358 } else { 8359 /* First copy command data */ 8360 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8361 MAILBOX_CMD_SIZE); 8362 /* Copy the mailbox extension data */ 8363 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8364 lpfc_memcpy_from_slim( 8365 pmbox->ctx_buf, 8366 phba->MBslimaddr + 8367 MAILBOX_HBA_EXT_OFFSET, 8368 pmbox->out_ext_byte_len); 8369 } 8370 } 8371 8372 writel(HA_MBATT, phba->HAregaddr); 8373 readl(phba->HAregaddr); /* flush */ 8374 8375 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8376 status = mbx->mbxStatus; 8377 } 8378 8379 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8380 return status; 8381 8382 out_not_finished: 8383 if (processing_queue) { 8384 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8385 lpfc_mbox_cmpl_put(phba, pmbox); 8386 } 8387 return MBX_NOT_FINISHED; 8388 } 8389 8390 /** 8391 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8392 * @phba: Pointer to HBA context object. 8393 * 8394 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8395 * the driver internal pending mailbox queue. It will then try to wait out the 8396 * possible outstanding mailbox command before return. 8397 * 8398 * Returns: 8399 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8400 * the outstanding mailbox command timed out. 8401 **/ 8402 static int 8403 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8404 { 8405 struct lpfc_sli *psli = &phba->sli; 8406 int rc = 0; 8407 unsigned long timeout = 0; 8408 8409 /* Mark the asynchronous mailbox command posting as blocked */ 8410 spin_lock_irq(&phba->hbalock); 8411 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8412 /* Determine how long we might wait for the active mailbox 8413 * command to be gracefully completed by firmware. 
8414	 */
8415	if (phba->sli.mbox_active)
8416		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8417					phba->sli.mbox_active) *
8418					1000) + jiffies;
8419	spin_unlock_irq(&phba->hbalock);
8420
8421	/* Make sure the mailbox is really active */
8422	if (timeout)
8423		lpfc_sli4_process_missed_mbox_completions(phba);
8424
8425	/* Wait for the outstanding mailbox command to complete */
8426	while (phba->sli.mbox_active) {
8427		/* Check active mailbox complete status every 2ms */
8428		msleep(2);
8429		if (time_after(jiffies, timeout)) {
8430			/* Timeout, mark the outstanding cmd as not complete */
8431			rc = 1;
8432			break;
8433		}
8434	}
8435
8436	/* Could not cleanly block async mailbox commands, fail the request */
8437	if (rc) {
8438		spin_lock_irq(&phba->hbalock);
8439		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8440		spin_unlock_irq(&phba->hbalock);
8441	}
8442	return rc;
8443 }
8444
8445 /**
8446 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
8447 * @phba: Pointer to HBA context object.
8448 *
8449 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8450 * commands from the driver internal pending mailbox queue. It makes sure
8451 * that there is no outstanding mailbox command before resuming posting
8452 * asynchronous mailbox commands. If, for any reason, an outstanding
8453 * mailbox command remains, it will try to wait it out before resuming
8454 * asynchronous mailbox command posting.
8455 **/
8456 static void
8457 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8458 {
8459	struct lpfc_sli *psli = &phba->sli;
8460
8461	spin_lock_irq(&phba->hbalock);
8462	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8463		/* Asynchronous mailbox posting is not blocked, do nothing */
8464		spin_unlock_irq(&phba->hbalock);
8465		return;
8466	}
8467
8468	/* The outstanding synchronous mailbox command is guaranteed to be
8469	 * done, successfully or by timeout; after timing out, the outstanding
8470	 * command is always removed, so just unblock posting of async
8471	 * mailbox commands and resume.
8472	 */
8473	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8474	spin_unlock_irq(&phba->hbalock);
8475
8476	/* wake up worker thread to post asynchronous mailbox command */
8477	lpfc_worker_wake_up(phba);
8478 }
8479
8480 /**
8481 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8482 * @phba: Pointer to HBA context object.
8483 * @mboxq: Pointer to mailbox object.
8484 *
8485 * The function waits for the bootstrap mailbox register ready bit from
8486 * the port for twice the regular mailbox command timeout value.
8487 *
8488 * Returns: 0 - no timeout on waiting for bootstrap mailbox register ready.
8489 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8490 **/
8491 static int
8492 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8493 {
8494	uint32_t db_ready;
8495	unsigned long timeout;
8496	struct lpfc_register bmbx_reg;
8497
8498	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8499				   * 1000) + jiffies;
8500
8501	do {
8502		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8503		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8504		if (!db_ready)
8505			mdelay(2);
8506
8507		if (time_after(jiffies, timeout))
8508			return MBXERR_ERROR;
8509	} while (!db_ready);
8510
8511	return 0;
8512 }
8513
8514 /**
8515 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8516 * @phba: Pointer to HBA context object.
8517 * @mboxq: Pointer to mailbox object.
8518 *
8519 * The function posts a mailbox to the port.
The mailbox is expected 8520 * to be comletely filled in and ready for the port to operate on it. 8521 * This routine executes a synchronous completion operation on the 8522 * mailbox by polling for its completion. 8523 * 8524 * The caller must not be holding any locks when calling this routine. 8525 * 8526 * Returns: 8527 * MBX_SUCCESS - mailbox posted successfully 8528 * Any of the MBX error values. 8529 **/ 8530 static int 8531 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8532 { 8533 int rc = MBX_SUCCESS; 8534 unsigned long iflag; 8535 uint32_t mcqe_status; 8536 uint32_t mbx_cmnd; 8537 struct lpfc_sli *psli = &phba->sli; 8538 struct lpfc_mqe *mb = &mboxq->u.mqe; 8539 struct lpfc_bmbx_create *mbox_rgn; 8540 struct dma_address *dma_address; 8541 8542 /* 8543 * Only one mailbox can be active to the bootstrap mailbox region 8544 * at a time and there is no queueing provided. 8545 */ 8546 spin_lock_irqsave(&phba->hbalock, iflag); 8547 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8548 spin_unlock_irqrestore(&phba->hbalock, iflag); 8549 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8550 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8551 "cannot issue Data: x%x x%x\n", 8552 mboxq->vport ? mboxq->vport->vpi : 0, 8553 mboxq->u.mb.mbxCommand, 8554 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8555 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8556 psli->sli_flag, MBX_POLL); 8557 return MBXERR_ERROR; 8558 } 8559 /* The server grabs the token and owns it until release */ 8560 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8561 phba->sli.mbox_active = mboxq; 8562 spin_unlock_irqrestore(&phba->hbalock, iflag); 8563 8564 /* wait for bootstrap mbox register for readyness */ 8565 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8566 if (rc) 8567 goto exit; 8568 /* 8569 * Initialize the bootstrap memory region to avoid stale data areas 8570 * in the mailbox post. Then copy the caller's mailbox contents to 8571 * the bmbx mailbox region. 8572 */ 8573 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8574 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8575 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8576 sizeof(struct lpfc_mqe)); 8577 8578 /* Post the high mailbox dma address to the port and wait for ready. */ 8579 dma_address = &phba->sli4_hba.bmbx.dma_address; 8580 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 8581 8582 /* wait for bootstrap mbox register for hi-address write done */ 8583 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8584 if (rc) 8585 goto exit; 8586 8587 /* Post the low mailbox dma address to the port. */ 8588 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 8589 8590 /* wait for bootstrap mbox register for low address write done */ 8591 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8592 if (rc) 8593 goto exit; 8594 8595 /* 8596 * Read the CQ to ensure the mailbox has completed. 8597 * If so, update the mailbox status so that the upper layers 8598 * can complete the request normally. 8599 */ 8600 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8601 sizeof(struct lpfc_mqe)); 8602 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8603 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8604 sizeof(struct lpfc_mcqe)); 8605 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8606 /* 8607 * When the CQE status indicates a failure and the mailbox status 8608 * indicates success then copy the CQE status into the mailbox status 8609 * (and prefix it with x4000). 
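	 * For example, a (hypothetical) MCQE status of 0x2 would then be
	 * reported back as mailbox status 0x4002.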
8610 */ 8611 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8612 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8613 bf_set(lpfc_mqe_status, mb, 8614 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8615 rc = MBXERR_ERROR; 8616 } else 8617 lpfc_sli4_swap_str(phba, mboxq); 8618 8619 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8620 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8621 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8622 " x%x x%x CQ: x%x x%x x%x x%x\n", 8623 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8624 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8625 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8626 bf_get(lpfc_mqe_status, mb), 8627 mb->un.mb_words[0], mb->un.mb_words[1], 8628 mb->un.mb_words[2], mb->un.mb_words[3], 8629 mb->un.mb_words[4], mb->un.mb_words[5], 8630 mb->un.mb_words[6], mb->un.mb_words[7], 8631 mb->un.mb_words[8], mb->un.mb_words[9], 8632 mb->un.mb_words[10], mb->un.mb_words[11], 8633 mb->un.mb_words[12], mboxq->mcqe.word0, 8634 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8635 mboxq->mcqe.trailer); 8636 exit: 8637 /* We are holding the token, no needed for lock when release */ 8638 spin_lock_irqsave(&phba->hbalock, iflag); 8639 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8640 phba->sli.mbox_active = NULL; 8641 spin_unlock_irqrestore(&phba->hbalock, iflag); 8642 return rc; 8643 } 8644 8645 /** 8646 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8647 * @phba: Pointer to HBA context object. 8648 * @pmbox: Pointer to mailbox object. 8649 * @flag: Flag indicating how the mailbox need to be processed. 8650 * 8651 * This function is called by discovery code and HBA management code to submit 8652 * a mailbox command to firmware with SLI-4 interface spec. 8653 * 8654 * Return codes the caller owns the mailbox command after the return of the 8655 * function. 8656 **/ 8657 static int 8658 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8659 uint32_t flag) 8660 { 8661 struct lpfc_sli *psli = &phba->sli; 8662 unsigned long iflags; 8663 int rc; 8664 8665 /* dump from issue mailbox command if setup */ 8666 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8667 8668 rc = lpfc_mbox_dev_check(phba); 8669 if (unlikely(rc)) { 8670 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8671 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8672 "cannot issue Data: x%x x%x\n", 8673 mboxq->vport ? mboxq->vport->vpi : 0, 8674 mboxq->u.mb.mbxCommand, 8675 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8676 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8677 psli->sli_flag, flag); 8678 goto out_not_finished; 8679 } 8680 8681 /* Detect polling mode and jump to a handler */ 8682 if (!phba->sli4_hba.intr_enable) { 8683 if (flag == MBX_POLL) 8684 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8685 else 8686 rc = -EIO; 8687 if (rc != MBX_SUCCESS) 8688 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8689 "(%d):2541 Mailbox command x%x " 8690 "(x%x/x%x) failure: " 8691 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8692 "Data: x%x x%x\n,", 8693 mboxq->vport ? 
mboxq->vport->vpi : 0, 8694 mboxq->u.mb.mbxCommand, 8695 lpfc_sli_config_mbox_subsys_get(phba, 8696 mboxq), 8697 lpfc_sli_config_mbox_opcode_get(phba, 8698 mboxq), 8699 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8700 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8701 bf_get(lpfc_mcqe_ext_status, 8702 &mboxq->mcqe), 8703 psli->sli_flag, flag); 8704 return rc; 8705 } else if (flag == MBX_POLL) { 8706 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8707 "(%d):2542 Try to issue mailbox command " 8708 "x%x (x%x/x%x) synchronously ahead of async " 8709 "mailbox command queue: x%x x%x\n", 8710 mboxq->vport ? mboxq->vport->vpi : 0, 8711 mboxq->u.mb.mbxCommand, 8712 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8713 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8714 psli->sli_flag, flag); 8715 /* Try to block the asynchronous mailbox posting */ 8716 rc = lpfc_sli4_async_mbox_block(phba); 8717 if (!rc) { 8718 /* Successfully blocked, now issue sync mbox cmd */ 8719 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8720 if (rc != MBX_SUCCESS) 8721 lpfc_printf_log(phba, KERN_WARNING, 8722 LOG_MBOX | LOG_SLI, 8723 "(%d):2597 Sync Mailbox command " 8724 "x%x (x%x/x%x) failure: " 8725 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8726 "Data: x%x x%x\n,", 8727 mboxq->vport ? mboxq->vport->vpi : 0, 8728 mboxq->u.mb.mbxCommand, 8729 lpfc_sli_config_mbox_subsys_get(phba, 8730 mboxq), 8731 lpfc_sli_config_mbox_opcode_get(phba, 8732 mboxq), 8733 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8734 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8735 bf_get(lpfc_mcqe_ext_status, 8736 &mboxq->mcqe), 8737 psli->sli_flag, flag); 8738 /* Unblock the async mailbox posting afterward */ 8739 lpfc_sli4_async_mbox_unblock(phba); 8740 } 8741 return rc; 8742 } 8743 8744 /* Now, interrupt mode asynchrous mailbox command */ 8745 rc = lpfc_mbox_cmd_check(phba, mboxq); 8746 if (rc) { 8747 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8748 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8749 "cannot issue Data: x%x x%x\n", 8750 mboxq->vport ? mboxq->vport->vpi : 0, 8751 mboxq->u.mb.mbxCommand, 8752 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8753 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8754 psli->sli_flag, flag); 8755 goto out_not_finished; 8756 } 8757 8758 /* Put the mailbox command to the driver internal FIFO */ 8759 psli->slistat.mbox_busy++; 8760 spin_lock_irqsave(&phba->hbalock, iflags); 8761 lpfc_mbox_put(phba, mboxq); 8762 spin_unlock_irqrestore(&phba->hbalock, iflags); 8763 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8764 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8765 "x%x (x%x/x%x) x%x x%x x%x\n", 8766 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8767 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8768 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8769 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8770 phba->pport->port_state, 8771 psli->sli_flag, MBX_NOWAIT); 8772 /* Wake up worker thread to transport mailbox command from head */ 8773 lpfc_worker_wake_up(phba); 8774 8775 return MBX_BUSY; 8776 8777 out_not_finished: 8778 return MBX_NOT_FINISHED; 8779 } 8780 8781 /** 8782 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8783 * @phba: Pointer to HBA context object. 8784 * 8785 * This function is called by worker thread to send a mailbox command to 8786 * SLI4 HBA firmware. 
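 * It returns MBX_SUCCESS when a command was posted (or none was waiting),
 * and MBX_NOT_FINISHED when posting is blocked, another mailbox command is
 * still active, the device check fails, or the post itself fails.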
8787 * 8788 **/ 8789 int 8790 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8791 { 8792 struct lpfc_sli *psli = &phba->sli; 8793 LPFC_MBOXQ_t *mboxq; 8794 int rc = MBX_SUCCESS; 8795 unsigned long iflags; 8796 struct lpfc_mqe *mqe; 8797 uint32_t mbx_cmnd; 8798 8799 /* Check interrupt mode before post async mailbox command */ 8800 if (unlikely(!phba->sli4_hba.intr_enable)) 8801 return MBX_NOT_FINISHED; 8802 8803 /* Check for mailbox command service token */ 8804 spin_lock_irqsave(&phba->hbalock, iflags); 8805 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8806 spin_unlock_irqrestore(&phba->hbalock, iflags); 8807 return MBX_NOT_FINISHED; 8808 } 8809 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8810 spin_unlock_irqrestore(&phba->hbalock, iflags); 8811 return MBX_NOT_FINISHED; 8812 } 8813 if (unlikely(phba->sli.mbox_active)) { 8814 spin_unlock_irqrestore(&phba->hbalock, iflags); 8815 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8816 "0384 There is pending active mailbox cmd\n"); 8817 return MBX_NOT_FINISHED; 8818 } 8819 /* Take the mailbox command service token */ 8820 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8821 8822 /* Get the next mailbox command from head of queue */ 8823 mboxq = lpfc_mbox_get(phba); 8824 8825 /* If no more mailbox command waiting for post, we're done */ 8826 if (!mboxq) { 8827 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8828 spin_unlock_irqrestore(&phba->hbalock, iflags); 8829 return MBX_SUCCESS; 8830 } 8831 phba->sli.mbox_active = mboxq; 8832 spin_unlock_irqrestore(&phba->hbalock, iflags); 8833 8834 /* Check device readiness for posting mailbox command */ 8835 rc = lpfc_mbox_dev_check(phba); 8836 if (unlikely(rc)) 8837 /* Driver clean routine will clean up pending mailbox */ 8838 goto out_not_finished; 8839 8840 /* Prepare the mbox command to be posted */ 8841 mqe = &mboxq->u.mqe; 8842 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8843 8844 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8845 mod_timer(&psli->mbox_tmo, (jiffies + 8846 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8847 8848 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8849 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8850 "x%x x%x\n", 8851 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8852 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8853 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8854 phba->pport->port_state, psli->sli_flag); 8855 8856 if (mbx_cmnd != MBX_HEARTBEAT) { 8857 if (mboxq->vport) { 8858 lpfc_debugfs_disc_trc(mboxq->vport, 8859 LPFC_DISC_TRC_MBOX_VPORT, 8860 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8861 mbx_cmnd, mqe->un.mb_words[0], 8862 mqe->un.mb_words[1]); 8863 } else { 8864 lpfc_debugfs_disc_trc(phba->pport, 8865 LPFC_DISC_TRC_MBOX, 8866 "MBOX Send: cmd:x%x mb:x%x x%x", 8867 mbx_cmnd, mqe->un.mb_words[0], 8868 mqe->un.mb_words[1]); 8869 } 8870 } 8871 psli->slistat.mbox_cmd++; 8872 8873 /* Post the mailbox command to the port */ 8874 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8875 if (rc != MBX_SUCCESS) { 8876 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8877 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8878 "cannot issue Data: x%x x%x\n", 8879 mboxq->vport ? 
mboxq->vport->vpi : 0, 8880 mboxq->u.mb.mbxCommand, 8881 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8882 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8883 psli->sli_flag, MBX_NOWAIT); 8884 goto out_not_finished; 8885 } 8886 8887 return rc; 8888 8889 out_not_finished: 8890 spin_lock_irqsave(&phba->hbalock, iflags); 8891 if (phba->sli.mbox_active) { 8892 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8893 __lpfc_mbox_cmpl_put(phba, mboxq); 8894 /* Release the token */ 8895 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8896 phba->sli.mbox_active = NULL; 8897 } 8898 spin_unlock_irqrestore(&phba->hbalock, iflags); 8899 8900 return MBX_NOT_FINISHED; 8901 } 8902 8903 /** 8904 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8905 * @phba: Pointer to HBA context object. 8906 * @pmbox: Pointer to mailbox object. 8907 * @flag: Flag indicating how the mailbox need to be processed. 8908 * 8909 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8910 * the API jump table function pointer from the lpfc_hba struct. 8911 * 8912 * Return codes the caller owns the mailbox command after the return of the 8913 * function. 8914 **/ 8915 int 8916 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8917 { 8918 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8919 } 8920 8921 /** 8922 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8923 * @phba: The hba struct for which this call is being executed. 8924 * @dev_grp: The HBA PCI-Device group number. 8925 * 8926 * This routine sets up the mbox interface API function jump table in @phba 8927 * struct. 8928 * Returns: 0 - success, -ENODEV - failure. 8929 **/ 8930 int 8931 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8932 { 8933 8934 switch (dev_grp) { 8935 case LPFC_PCI_DEV_LP: 8936 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8937 phba->lpfc_sli_handle_slow_ring_event = 8938 lpfc_sli_handle_slow_ring_event_s3; 8939 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8940 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8941 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8942 break; 8943 case LPFC_PCI_DEV_OC: 8944 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8945 phba->lpfc_sli_handle_slow_ring_event = 8946 lpfc_sli_handle_slow_ring_event_s4; 8947 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8948 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8949 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8950 break; 8951 default: 8952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8953 "1420 Invalid HBA PCI-device group: 0x%x\n", 8954 dev_grp); 8955 return -ENODEV; 8956 break; 8957 } 8958 return 0; 8959 } 8960 8961 /** 8962 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8963 * @phba: Pointer to HBA context object. 8964 * @pring: Pointer to driver SLI ring object. 8965 * @piocb: Pointer to address of newly added command iocb. 8966 * 8967 * This function is called with hbalock held to add a command 8968 * iocb to the txq when SLI layer cannot submit the command iocb 8969 * to the ring. 8970 **/ 8971 void 8972 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8973 struct lpfc_iocbq *piocb) 8974 { 8975 lockdep_assert_held(&phba->hbalock); 8976 /* Insert the caller's iocb in the txq tail for later processing. */ 8977 list_add_tail(&piocb->list, &pring->txq); 8978 } 8979 8980 /** 8981 * lpfc_sli_next_iocb - Get the next iocb in the txq 8982 * @phba: Pointer to HBA context object. 
8983 * @pring: Pointer to driver SLI ring object. 8984 * @piocb: Pointer to address of newly added command iocb. 8985 * 8986 * This function is called with hbalock held before a new 8987 * iocb is submitted to the firmware. This function checks 8988 * txq to flush the iocbs in txq to Firmware before 8989 * submitting new iocbs to the Firmware. 8990 * If there are iocbs in the txq which need to be submitted 8991 * to firmware, lpfc_sli_next_iocb returns the first element 8992 * of the txq after dequeuing it from txq. 8993 * If there is no iocb in the txq then the function will return 8994 * *piocb and *piocb is set to NULL. Caller needs to check 8995 * *piocb to find if there are more commands in the txq. 8996 **/ 8997 static struct lpfc_iocbq * 8998 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8999 struct lpfc_iocbq **piocb) 9000 { 9001 struct lpfc_iocbq * nextiocb; 9002 9003 lockdep_assert_held(&phba->hbalock); 9004 9005 nextiocb = lpfc_sli_ringtx_get(phba, pring); 9006 if (!nextiocb) { 9007 nextiocb = *piocb; 9008 *piocb = NULL; 9009 } 9010 9011 return nextiocb; 9012 } 9013 9014 /** 9015 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 9016 * @phba: Pointer to HBA context object. 9017 * @ring_number: SLI ring number to issue iocb on. 9018 * @piocb: Pointer to command iocb. 9019 * @flag: Flag indicating if this command can be put into txq. 9020 * 9021 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 9022 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 9023 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 9024 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 9025 * this function allows only iocbs for posting buffers. This function finds 9026 * next available slot in the command ring and posts the command to the 9027 * available slot and writes the port attention register to request HBA start 9028 * processing new iocb. If there is no slot available in the ring and 9029 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 9030 * the function returns IOCB_BUSY. 9031 * 9032 * This function is called with hbalock held. The function will return success 9033 * after it successfully submit the iocb to firmware or after adding to the 9034 * txq. 9035 **/ 9036 static int 9037 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 9038 struct lpfc_iocbq *piocb, uint32_t flag) 9039 { 9040 struct lpfc_iocbq *nextiocb; 9041 IOCB_t *iocb; 9042 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 9043 9044 lockdep_assert_held(&phba->hbalock); 9045 9046 if (piocb->iocb_cmpl && (!piocb->vport) && 9047 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 9048 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 9049 lpfc_printf_log(phba, KERN_ERR, 9050 LOG_SLI | LOG_VPORT, 9051 "1807 IOCB x%x failed. No vport\n", 9052 piocb->iocb.ulpCommand); 9053 dump_stack(); 9054 return IOCB_ERROR; 9055 } 9056 9057 9058 /* If the PCI channel is in offline state, do not post iocbs. */ 9059 if (unlikely(pci_channel_offline(phba->pcidev))) 9060 return IOCB_ERROR; 9061 9062 /* If HBA has a deferred error attention, fail the iocb. 
*/ 9063 if (unlikely(phba->hba_flag & DEFER_ERATT)) 9064 return IOCB_ERROR; 9065 9066 /* 9067 * We should never get an IOCB if we are in a < LINK_DOWN state 9068 */ 9069 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 9070 return IOCB_ERROR; 9071 9072 /* 9073 * Check to see if we are blocking IOCB processing because of a 9074 * outstanding event. 9075 */ 9076 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 9077 goto iocb_busy; 9078 9079 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 9080 /* 9081 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 9082 * can be issued if the link is not up. 9083 */ 9084 switch (piocb->iocb.ulpCommand) { 9085 case CMD_GEN_REQUEST64_CR: 9086 case CMD_GEN_REQUEST64_CX: 9087 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 9088 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 9089 FC_RCTL_DD_UNSOL_CMD) || 9090 (piocb->iocb.un.genreq64.w5.hcsw.Type != 9091 MENLO_TRANSPORT_TYPE)) 9092 9093 goto iocb_busy; 9094 break; 9095 case CMD_QUE_RING_BUF_CN: 9096 case CMD_QUE_RING_BUF64_CN: 9097 /* 9098 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 9099 * completion, iocb_cmpl MUST be 0. 9100 */ 9101 if (piocb->iocb_cmpl) 9102 piocb->iocb_cmpl = NULL; 9103 /*FALLTHROUGH*/ 9104 case CMD_CREATE_XRI_CR: 9105 case CMD_CLOSE_XRI_CN: 9106 case CMD_CLOSE_XRI_CX: 9107 break; 9108 default: 9109 goto iocb_busy; 9110 } 9111 9112 /* 9113 * For FCP commands, we must be in a state where we can process link 9114 * attention events. 9115 */ 9116 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 9117 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 9118 goto iocb_busy; 9119 } 9120 9121 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 9122 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 9123 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 9124 9125 if (iocb) 9126 lpfc_sli_update_ring(phba, pring); 9127 else 9128 lpfc_sli_update_full_ring(phba, pring); 9129 9130 if (!piocb) 9131 return IOCB_SUCCESS; 9132 9133 goto out_busy; 9134 9135 iocb_busy: 9136 pring->stats.iocb_cmd_delay++; 9137 9138 out_busy: 9139 9140 if (!(flag & SLI_IOCB_RET_IOCB)) { 9141 __lpfc_sli_ringtx_put(phba, pring, piocb); 9142 return IOCB_SUCCESS; 9143 } 9144 9145 return IOCB_BUSY; 9146 } 9147 9148 /** 9149 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 9150 * @phba: Pointer to HBA context object. 9151 * @piocb: Pointer to command iocb. 9152 * @sglq: Pointer to the scatter gather queue object. 9153 * 9154 * This routine converts the bpl or bde that is in the IOCB 9155 * to a sgl list for the sli4 hardware. The physical address 9156 * of the bpl/bde is converted back to a virtual address. 9157 * If the IOCB contains a BPL then the list of BDE's is 9158 * converted to sli4_sge's. If the IOCB contains a single 9159 * BDE then it is converted to a single sli_sge. 9160 * The IOCB is still in cpu endianess so the contents of 9161 * the bpl can be used without byte swapping. 9162 * 9163 * Returns valid XRI = Success, NO_XRI = Failure. 
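 *
 * A minimal sketch of the per-BDE conversion performed in the loop below,
 * assuming bpl points at one little-endian BDE and sgl at the SGE being
 * built (is_last_bde is a hypothetical flag for the final entry):
 *
 *	sgl->addr_hi = bpl->addrHigh;		// already little endian
 *	sgl->addr_lo = bpl->addrLow;
 *	sgl->word2 = le32_to_cpu(sgl->word2);
 *	bf_set(lpfc_sli4_sge_last, sgl, is_last_bde ? 1 : 0);
 *	bde.tus.w = le32_to_cpu(bpl->tus.w);	// size back to CPU order
 *	sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *	sgl->word2 = cpu_to_le32(sgl->word2);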
9164 **/ 9165 static uint16_t 9166 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 9167 struct lpfc_sglq *sglq) 9168 { 9169 uint16_t xritag = NO_XRI; 9170 struct ulp_bde64 *bpl = NULL; 9171 struct ulp_bde64 bde; 9172 struct sli4_sge *sgl = NULL; 9173 struct lpfc_dmabuf *dmabuf; 9174 IOCB_t *icmd; 9175 int numBdes = 0; 9176 int i = 0; 9177 uint32_t offset = 0; /* accumulated offset in the sg request list */ 9178 int inbound = 0; /* number of sg reply entries inbound from firmware */ 9179 9180 if (!piocbq || !sglq) 9181 return xritag; 9182 9183 sgl = (struct sli4_sge *)sglq->sgl; 9184 icmd = &piocbq->iocb; 9185 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 9186 return sglq->sli4_xritag; 9187 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9188 numBdes = icmd->un.genreq64.bdl.bdeSize / 9189 sizeof(struct ulp_bde64); 9190 /* The addrHigh and addrLow fields within the IOCB 9191 * have not been byteswapped yet so there is no 9192 * need to swap them back. 9193 */ 9194 if (piocbq->context3) 9195 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 9196 else 9197 return xritag; 9198 9199 bpl = (struct ulp_bde64 *)dmabuf->virt; 9200 if (!bpl) 9201 return xritag; 9202 9203 for (i = 0; i < numBdes; i++) { 9204 /* Should already be byte swapped. */ 9205 sgl->addr_hi = bpl->addrHigh; 9206 sgl->addr_lo = bpl->addrLow; 9207 9208 sgl->word2 = le32_to_cpu(sgl->word2); 9209 if ((i+1) == numBdes) 9210 bf_set(lpfc_sli4_sge_last, sgl, 1); 9211 else 9212 bf_set(lpfc_sli4_sge_last, sgl, 0); 9213 /* swap the size field back to the cpu so we 9214 * can assign it to the sgl. 9215 */ 9216 bde.tus.w = le32_to_cpu(bpl->tus.w); 9217 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 9218 /* The offsets in the sgl need to be accumulated 9219 * separately for the request and reply lists. 9220 * The request is always first, the reply follows. 9221 */ 9222 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 9223 /* add up the reply sg entries */ 9224 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 9225 inbound++; 9226 /* first inbound? reset the offset */ 9227 if (inbound == 1) 9228 offset = 0; 9229 bf_set(lpfc_sli4_sge_offset, sgl, offset); 9230 bf_set(lpfc_sli4_sge_type, sgl, 9231 LPFC_SGE_TYPE_DATA); 9232 offset += bde.tus.f.bdeSize; 9233 } 9234 sgl->word2 = cpu_to_le32(sgl->word2); 9235 bpl++; 9236 sgl++; 9237 } 9238 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 9239 /* The addrHigh and addrLow fields of the BDE have not 9240 * been byteswapped yet so they need to be swapped 9241 * before putting them in the sgl. 9242 */ 9243 sgl->addr_hi = 9244 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 9245 sgl->addr_lo = 9246 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 9247 sgl->word2 = le32_to_cpu(sgl->word2); 9248 bf_set(lpfc_sli4_sge_last, sgl, 1); 9249 sgl->word2 = cpu_to_le32(sgl->word2); 9250 sgl->sge_len = 9251 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 9252 } 9253 return sglq->sli4_xritag; 9254 } 9255 9256 /** 9257 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 9258 * @phba: Pointer to HBA context object. 9259 * @piocb: Pointer to command iocb. 9260 * @wqe: Pointer to the work queue entry. 9261 * 9262 * This routine converts the iocb command to its Work Queue Entry 9263 * equivalent. The wqe pointer should not have any fields set when 9264 * this routine is called because it will memcpy over them. 9265 * This routine does not set the CQ_ID or the WQEC bits in the 9266 * wqe. 9267 * 9268 * Returns: 0 = Success, IOCB_ERROR = Failure. 
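 *
 * Illustrative usage, mirroring the SLI4 issue path later in this file
 * (__lpfc_sli_issue_iocb_s4): build the WQE on the stack and hand it to
 * the work queue.
 *
 *	union lpfc_wqe128 wqe;
 *
 *	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
 *		return IOCB_ERROR;
 *	if (lpfc_sli4_wq_put(wq, &wqe))
 *		return IOCB_ERROR;
 *	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);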
9269 **/ 9270 static int 9271 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9272 union lpfc_wqe128 *wqe) 9273 { 9274 uint32_t xmit_len = 0, total_len = 0; 9275 uint8_t ct = 0; 9276 uint32_t fip; 9277 uint32_t abort_tag; 9278 uint8_t command_type = ELS_COMMAND_NON_FIP; 9279 uint8_t cmnd; 9280 uint16_t xritag; 9281 uint16_t abrt_iotag; 9282 struct lpfc_iocbq *abrtiocbq; 9283 struct ulp_bde64 *bpl = NULL; 9284 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9285 int numBdes, i; 9286 struct ulp_bde64 bde; 9287 struct lpfc_nodelist *ndlp; 9288 uint32_t *pcmd; 9289 uint32_t if_type; 9290 9291 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9292 /* The fcp commands will set command type */ 9293 if (iocbq->iocb_flag & LPFC_IO_FCP) 9294 command_type = FCP_COMMAND; 9295 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9296 command_type = ELS_COMMAND_FIP; 9297 else 9298 command_type = ELS_COMMAND_NON_FIP; 9299 9300 if (phba->fcp_embed_io) 9301 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9302 /* Some of the fields are in the right position already */ 9303 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9304 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 9305 /* The ct field has moved so reset */ 9306 wqe->generic.wqe_com.word7 = 0; 9307 wqe->generic.wqe_com.word10 = 0; 9308 } 9309 9310 abort_tag = (uint32_t) iocbq->iotag; 9311 xritag = iocbq->sli4_xritag; 9312 /* words0-2 bpl convert bde */ 9313 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9314 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9315 sizeof(struct ulp_bde64); 9316 bpl = (struct ulp_bde64 *) 9317 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9318 if (!bpl) 9319 return IOCB_ERROR; 9320 9321 /* Should already be byte swapped. */ 9322 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9323 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9324 /* swap the size field back to the cpu so we 9325 * can assign it to the sgl. 
9326 */ 9327 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9328 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9329 total_len = 0; 9330 for (i = 0; i < numBdes; i++) { 9331 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9332 total_len += bde.tus.f.bdeSize; 9333 } 9334 } else 9335 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9336 9337 iocbq->iocb.ulpIoTag = iocbq->iotag; 9338 cmnd = iocbq->iocb.ulpCommand; 9339 9340 switch (iocbq->iocb.ulpCommand) { 9341 case CMD_ELS_REQUEST64_CR: 9342 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9343 ndlp = iocbq->context_un.ndlp; 9344 else 9345 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9346 if (!iocbq->iocb.ulpLe) { 9347 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9348 "2007 Only Limited Edition cmd Format" 9349 " supported 0x%x\n", 9350 iocbq->iocb.ulpCommand); 9351 return IOCB_ERROR; 9352 } 9353 9354 wqe->els_req.payload_len = xmit_len; 9355 /* Els_reguest64 has a TMO */ 9356 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9357 iocbq->iocb.ulpTimeout); 9358 /* Need a VF for word 4 set the vf bit*/ 9359 bf_set(els_req64_vf, &wqe->els_req, 0); 9360 /* And a VFID for word 12 */ 9361 bf_set(els_req64_vfid, &wqe->els_req, 0); 9362 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9363 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9364 iocbq->iocb.ulpContext); 9365 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9366 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9367 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9368 if (command_type == ELS_COMMAND_FIP) 9369 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9370 >> LPFC_FIP_ELS_ID_SHIFT); 9371 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9372 iocbq->context2)->virt); 9373 if_type = bf_get(lpfc_sli_intf_if_type, 9374 &phba->sli4_hba.sli_intf); 9375 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9376 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9377 *pcmd == ELS_CMD_SCR || 9378 *pcmd == ELS_CMD_FDISC || 9379 *pcmd == ELS_CMD_LOGO || 9380 *pcmd == ELS_CMD_PLOGI)) { 9381 bf_set(els_req64_sp, &wqe->els_req, 1); 9382 bf_set(els_req64_sid, &wqe->els_req, 9383 iocbq->vport->fc_myDID); 9384 if ((*pcmd == ELS_CMD_FLOGI) && 9385 !(phba->fc_topology == 9386 LPFC_TOPOLOGY_LOOP)) 9387 bf_set(els_req64_sid, &wqe->els_req, 0); 9388 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9389 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9390 phba->vpi_ids[iocbq->vport->vpi]); 9391 } else if (pcmd && iocbq->context1) { 9392 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9393 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9394 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9395 } 9396 } 9397 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9398 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9399 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9400 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9401 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9402 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9403 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9404 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9405 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9406 break; 9407 case CMD_XMIT_SEQUENCE64_CX: 9408 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9409 iocbq->iocb.un.ulpWord[3]); 9410 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9411 iocbq->iocb.unsli3.rcvsli3.ox_id); 9412 /* The entire sequence is transmitted for this IOCB */ 9413 xmit_len = total_len; 9414 cmnd = CMD_XMIT_SEQUENCE64_CR; 9415 if (phba->link_flag & LS_LOOPBACK_MODE) 9416 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9417 /* fall through */ 9418 case CMD_XMIT_SEQUENCE64_CR: 
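		/*
		 * CMD_XMIT_SEQUENCE64_CX falls through to this point after
		 * priming the context tag, OX_ID and transfer length above;
		 * from here the XMIT_SEQUENCE WQE is built identically for
		 * both variants.
		 */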
9419 /* word3 iocb=io_tag32 wqe=reserved */ 9420 wqe->xmit_sequence.rsvd3 = 0; 9421 /* word4 relative_offset memcpy */ 9422 /* word5 r_ctl/df_ctl memcpy */ 9423 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9424 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9425 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9426 LPFC_WQE_IOD_WRITE); 9427 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9428 LPFC_WQE_LENLOC_WORD12); 9429 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9430 wqe->xmit_sequence.xmit_len = xmit_len; 9431 command_type = OTHER_COMMAND; 9432 break; 9433 case CMD_XMIT_BCAST64_CN: 9434 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9435 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9436 /* word4 iocb=rsvd wqe=rsvd */ 9437 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9438 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9439 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9440 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9441 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9442 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9443 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9444 LPFC_WQE_LENLOC_WORD3); 9445 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9446 break; 9447 case CMD_FCP_IWRITE64_CR: 9448 command_type = FCP_COMMAND_DATA_OUT; 9449 /* word3 iocb=iotag wqe=payload_offset_len */ 9450 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9451 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9452 xmit_len + sizeof(struct fcp_rsp)); 9453 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9454 0); 9455 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9456 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9457 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9458 iocbq->iocb.ulpFCP2Rcvy); 9459 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9460 /* Always open the exchange */ 9461 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9462 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9463 LPFC_WQE_LENLOC_WORD4); 9464 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9465 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9466 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9467 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9468 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9469 if (iocbq->priority) { 9470 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9471 (iocbq->priority << 1)); 9472 } else { 9473 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9474 (phba->cfg_XLanePriority << 1)); 9475 } 9476 } 9477 /* Note, word 10 is already initialized to 0 */ 9478 9479 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9480 if (phba->cfg_enable_pbde) 9481 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9482 else 9483 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9484 9485 if (phba->fcp_embed_io) { 9486 struct lpfc_io_buf *lpfc_cmd; 9487 struct sli4_sge *sgl; 9488 struct fcp_cmnd *fcp_cmnd; 9489 uint32_t *ptr; 9490 9491 /* 128 byte wqe support here */ 9492 9493 lpfc_cmd = iocbq->context1; 9494 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9495 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9496 9497 /* Word 0-2 - FCP_CMND */ 9498 wqe->generic.bde.tus.f.bdeFlags = 9499 BUFF_TYPE_BDE_IMMED; 9500 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9501 wqe->generic.bde.addrHigh = 0; 9502 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9503 9504 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9505 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9506 9507 /* Word 22-29 FCP CMND Payload */ 9508 ptr = &wqe->words[22]; 9509 memcpy(ptr, fcp_cmnd, 
sizeof(struct fcp_cmnd)); 9510 } 9511 break; 9512 case CMD_FCP_IREAD64_CR: 9513 /* word3 iocb=iotag wqe=payload_offset_len */ 9514 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9515 bf_set(payload_offset_len, &wqe->fcp_iread, 9516 xmit_len + sizeof(struct fcp_rsp)); 9517 bf_set(cmd_buff_len, &wqe->fcp_iread, 9518 0); 9519 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9520 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9521 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9522 iocbq->iocb.ulpFCP2Rcvy); 9523 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9524 /* Always open the exchange */ 9525 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9526 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9527 LPFC_WQE_LENLOC_WORD4); 9528 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9529 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9530 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9531 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9532 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9533 if (iocbq->priority) { 9534 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9535 (iocbq->priority << 1)); 9536 } else { 9537 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9538 (phba->cfg_XLanePriority << 1)); 9539 } 9540 } 9541 /* Note, word 10 is already initialized to 0 */ 9542 9543 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9544 if (phba->cfg_enable_pbde) 9545 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9546 else 9547 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9548 9549 if (phba->fcp_embed_io) { 9550 struct lpfc_io_buf *lpfc_cmd; 9551 struct sli4_sge *sgl; 9552 struct fcp_cmnd *fcp_cmnd; 9553 uint32_t *ptr; 9554 9555 /* 128 byte wqe support here */ 9556 9557 lpfc_cmd = iocbq->context1; 9558 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9559 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9560 9561 /* Word 0-2 - FCP_CMND */ 9562 wqe->generic.bde.tus.f.bdeFlags = 9563 BUFF_TYPE_BDE_IMMED; 9564 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9565 wqe->generic.bde.addrHigh = 0; 9566 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9567 9568 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9569 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9570 9571 /* Word 22-29 FCP CMND Payload */ 9572 ptr = &wqe->words[22]; 9573 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9574 } 9575 break; 9576 case CMD_FCP_ICMND64_CR: 9577 /* word3 iocb=iotag wqe=payload_offset_len */ 9578 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9579 bf_set(payload_offset_len, &wqe->fcp_icmd, 9580 xmit_len + sizeof(struct fcp_rsp)); 9581 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9582 0); 9583 /* word3 iocb=IO_TAG wqe=reserved */ 9584 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9585 /* Always open the exchange */ 9586 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9587 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9588 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9589 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9590 LPFC_WQE_LENLOC_NONE); 9591 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9592 iocbq->iocb.ulpFCP2Rcvy); 9593 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9594 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9595 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9596 if (iocbq->priority) { 9597 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9598 (iocbq->priority << 1)); 9599 } else { 9600 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9601 (phba->cfg_XLanePriority << 1)); 9602 } 9603 } 9604 /* Note, word 10 is already initialized to 0 */ 9605 9606 if (phba->fcp_embed_io) { 9607 struct lpfc_io_buf *lpfc_cmd; 9608 struct 
sli4_sge *sgl; 9609 struct fcp_cmnd *fcp_cmnd; 9610 uint32_t *ptr; 9611 9612 /* 128 byte wqe support here */ 9613 9614 lpfc_cmd = iocbq->context1; 9615 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9616 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9617 9618 /* Word 0-2 - FCP_CMND */ 9619 wqe->generic.bde.tus.f.bdeFlags = 9620 BUFF_TYPE_BDE_IMMED; 9621 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9622 wqe->generic.bde.addrHigh = 0; 9623 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9624 9625 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9626 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9627 9628 /* Word 22-29 FCP CMND Payload */ 9629 ptr = &wqe->words[22]; 9630 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9631 } 9632 break; 9633 case CMD_GEN_REQUEST64_CR: 9634 /* For this command calculate the xmit length of the 9635 * request bde. 9636 */ 9637 xmit_len = 0; 9638 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9639 sizeof(struct ulp_bde64); 9640 for (i = 0; i < numBdes; i++) { 9641 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9642 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9643 break; 9644 xmit_len += bde.tus.f.bdeSize; 9645 } 9646 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9647 wqe->gen_req.request_payload_len = xmit_len; 9648 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9649 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9650 /* word6 context tag copied in memcpy */ 9651 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9652 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9653 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9654 "2015 Invalid CT %x command 0x%x\n", 9655 ct, iocbq->iocb.ulpCommand); 9656 return IOCB_ERROR; 9657 } 9658 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9659 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9660 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9661 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9662 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9663 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9664 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9665 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9666 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9667 command_type = OTHER_COMMAND; 9668 break; 9669 case CMD_XMIT_ELS_RSP64_CX: 9670 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9671 /* words0-2 BDE memcpy */ 9672 /* word3 iocb=iotag32 wqe=response_payload_len */ 9673 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9674 /* word4 */ 9675 wqe->xmit_els_rsp.word4 = 0; 9676 /* word5 iocb=rsvd wge=did */ 9677 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9678 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9679 9680 if_type = bf_get(lpfc_sli_intf_if_type, 9681 &phba->sli4_hba.sli_intf); 9682 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9683 if (iocbq->vport->fc_flag & FC_PT2PT) { 9684 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9685 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9686 iocbq->vport->fc_myDID); 9687 if (iocbq->vport->fc_myDID == Fabric_DID) { 9688 bf_set(wqe_els_did, 9689 &wqe->xmit_els_rsp.wqe_dest, 0); 9690 } 9691 } 9692 } 9693 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9694 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9695 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9696 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9697 iocbq->iocb.unsli3.rcvsli3.ox_id); 9698 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9699 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9700 phba->vpi_ids[iocbq->vport->vpi]); 9701 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 
1); 9702 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9703 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9704 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9705 LPFC_WQE_LENLOC_WORD3); 9706 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9707 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9708 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9709 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9710 iocbq->context2)->virt); 9711 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9712 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9713 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9714 iocbq->vport->fc_myDID); 9715 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9716 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9717 phba->vpi_ids[phba->pport->vpi]); 9718 } 9719 command_type = OTHER_COMMAND; 9720 break; 9721 case CMD_CLOSE_XRI_CN: 9722 case CMD_ABORT_XRI_CN: 9723 case CMD_ABORT_XRI_CX: 9724 /* words 0-2 memcpy should be 0 rserved */ 9725 /* port will send abts */ 9726 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9727 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9728 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9729 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9730 } else 9731 fip = 0; 9732 9733 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9734 /* 9735 * The link is down, or the command was ELS_FIP 9736 * so the fw does not need to send abts 9737 * on the wire. 9738 */ 9739 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9740 else 9741 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9742 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9743 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9744 wqe->abort_cmd.rsrvd5 = 0; 9745 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9746 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9747 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9748 /* 9749 * The abort handler will send us CMD_ABORT_XRI_CN or 9750 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9751 */ 9752 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9753 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9754 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9755 LPFC_WQE_LENLOC_NONE); 9756 cmnd = CMD_ABORT_XRI_CX; 9757 command_type = OTHER_COMMAND; 9758 xritag = 0; 9759 break; 9760 case CMD_XMIT_BLS_RSP64_CX: 9761 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9762 /* As BLS ABTS RSP WQE is very different from other WQEs, 9763 * we re-construct this WQE here based on information in 9764 * iocbq from scratch. 9765 */ 9766 memset(wqe, 0, sizeof(union lpfc_wqe)); 9767 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9768 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9769 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9770 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9771 LPFC_ABTS_UNSOL_INT) { 9772 /* ABTS sent by initiator to CT exchange, the 9773 * RX_ID field will be filled with the newly 9774 * allocated responder XRI. 9775 */ 9776 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9777 iocbq->sli4_xritag); 9778 } else { 9779 /* ABTS sent by responder to CT exchange, the 9780 * RX_ID field will be filled with the responder 9781 * RX_ID from ABTS. 
9782 */ 9783 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9784 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9785 } 9786 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9787 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9788 9789 /* Use CT=VPI */ 9790 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9791 ndlp->nlp_DID); 9792 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9793 iocbq->iocb.ulpContext); 9794 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9795 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9796 phba->vpi_ids[phba->pport->vpi]); 9797 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9798 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9799 LPFC_WQE_LENLOC_NONE); 9800 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9801 command_type = OTHER_COMMAND; 9802 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9803 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9804 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9805 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9806 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9807 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9808 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9809 } 9810 9811 break; 9812 case CMD_SEND_FRAME: 9813 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9814 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9815 return 0; 9816 case CMD_XRI_ABORTED_CX: 9817 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9818 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9819 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9820 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9821 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9822 default: 9823 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9824 "2014 Invalid command 0x%x\n", 9825 iocbq->iocb.ulpCommand); 9826 return IOCB_ERROR; 9827 break; 9828 } 9829 9830 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9831 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9832 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9833 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9834 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9835 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9836 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9837 LPFC_IO_DIF_INSERT); 9838 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9839 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9840 wqe->generic.wqe_com.abort_tag = abort_tag; 9841 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9842 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9843 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9844 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9845 return 0; 9846 } 9847 9848 /** 9849 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9850 * @phba: Pointer to HBA context object. 9851 * @ring_number: SLI ring number to issue iocb on. 9852 * @piocb: Pointer to command iocb. 9853 * @flag: Flag indicating if this command can be put into txq. 9854 * 9855 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9856 * an iocb command to an HBA with SLI-4 interface spec. 9857 * 9858 * This function is called with hbalock held. The function will return success 9859 * after it successfully submit the iocb to firmware or after adding to the 9860 * txq. 
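 *
 * For SLI4 ports the lock in question is the ring's ring_lock; a sketch of
 * what the lpfc_sli_issue_iocb() wrapper further down does before calling
 * this routine through the jump table:
 *
 *	pring = lpfc_sli4_calc_ring(phba, piocb);
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);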
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes.
	 */

	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
							pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless IOCB issue routine through the
 * function pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
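 *
 * Sketch of how the jump table is consumed once it has been set up; the
 * wrappers in this file simply indirect through the stored pointers:
 *
 *	if (lpfc_sli_api_table_setup(phba, dev_grp))
 *		return -ENODEV;
 *	...
 *	rc = phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);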
9970 **/ 9971 int 9972 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 9973 { 9974 9975 switch (dev_grp) { 9976 case LPFC_PCI_DEV_LP: 9977 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 9978 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 9979 break; 9980 case LPFC_PCI_DEV_OC: 9981 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 9982 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 9983 break; 9984 default: 9985 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9986 "1419 Invalid HBA PCI-device group: 0x%x\n", 9987 dev_grp); 9988 return -ENODEV; 9989 break; 9990 } 9991 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 9992 return 0; 9993 } 9994 9995 /** 9996 * lpfc_sli4_calc_ring - Calculates which ring to use 9997 * @phba: Pointer to HBA context object. 9998 * @piocb: Pointer to command iocb. 9999 * 10000 * For SLI4 only, FCP IO can deferred to one fo many WQs, based on 10001 * hba_wqidx, thus we need to calculate the corresponding ring. 10002 * Since ABORTS must go on the same WQ of the command they are 10003 * aborting, we use command's hba_wqidx. 10004 */ 10005 struct lpfc_sli_ring * 10006 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 10007 { 10008 struct lpfc_io_buf *lpfc_cmd; 10009 10010 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 10011 if (unlikely(!phba->sli4_hba.hdwq)) 10012 return NULL; 10013 /* 10014 * for abort iocb hba_wqidx should already 10015 * be setup based on what work queue we used. 10016 */ 10017 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 10018 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; 10019 piocb->hba_wqidx = lpfc_cmd->hdwq_no; 10020 } 10021 return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; 10022 } else { 10023 if (unlikely(!phba->sli4_hba.els_wq)) 10024 return NULL; 10025 piocb->hba_wqidx = 0; 10026 return phba->sli4_hba.els_wq->pring; 10027 } 10028 } 10029 10030 /** 10031 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 10032 * @phba: Pointer to HBA context object. 10033 * @pring: Pointer to driver SLI ring object. 10034 * @piocb: Pointer to command iocb. 10035 * @flag: Flag indicating if this command can be put into txq. 10036 * 10037 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 10038 * function. This function gets the hbalock and calls 10039 * __lpfc_sli_issue_iocb function and will return the error returned 10040 * by __lpfc_sli_issue_iocb function. This wrapper is used by 10041 * functions which do not hold hbalock. 10042 **/ 10043 int 10044 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 10045 struct lpfc_iocbq *piocb, uint32_t flag) 10046 { 10047 struct lpfc_sli_ring *pring; 10048 unsigned long iflags; 10049 int rc; 10050 10051 if (phba->sli_rev == LPFC_SLI_REV4) { 10052 pring = lpfc_sli4_calc_ring(phba, piocb); 10053 if (unlikely(pring == NULL)) 10054 return IOCB_ERROR; 10055 10056 spin_lock_irqsave(&pring->ring_lock, iflags); 10057 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10058 spin_unlock_irqrestore(&pring->ring_lock, iflags); 10059 } else { 10060 /* For now, SLI2/3 will still use hbalock */ 10061 spin_lock_irqsave(&phba->hbalock, iflags); 10062 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10063 spin_unlock_irqrestore(&phba->hbalock, iflags); 10064 } 10065 return rc; 10066 } 10067 10068 /** 10069 * lpfc_extra_ring_setup - Extra ring setup function 10070 * @phba: Pointer to HBA context object. 
10071 * 10072 * This function is called while driver attaches with the 10073 * HBA to setup the extra ring. The extra ring is used 10074 * only when driver needs to support target mode functionality 10075 * or IP over FC functionalities. 10076 * 10077 * This function is called with no lock held. SLI3 only. 10078 **/ 10079 static int 10080 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10081 { 10082 struct lpfc_sli *psli; 10083 struct lpfc_sli_ring *pring; 10084 10085 psli = &phba->sli; 10086 10087 /* Adjust cmd/rsp ring iocb entries more evenly */ 10088 10089 /* Take some away from the FCP ring */ 10090 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10091 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10092 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10093 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10094 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10095 10096 /* and give them to the extra ring */ 10097 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10098 10099 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10100 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10101 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10102 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10103 10104 /* Setup default profile for this ring */ 10105 pring->iotag_max = 4096; 10106 pring->num_mask = 1; 10107 pring->prt[0].profile = 0; /* Mask 0 */ 10108 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10109 pring->prt[0].type = phba->cfg_multi_ring_type; 10110 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10111 return 0; 10112 } 10113 10114 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 10115 * @phba: Pointer to HBA context object. 10116 * @iocbq: Pointer to iocb object. 10117 * 10118 * The async_event handler calls this routine when it receives 10119 * an ASYNC_STATUS_CN event from the port. The port generates 10120 * this event when an Abort Sequence request to an rport fails 10121 * twice in succession. The abort could be originated by the 10122 * driver or by the port. The ABTS could have been for an ELS 10123 * or FCP IO. The port only generates this event when an ABTS 10124 * fails to complete after one retry. 10125 */ 10126 static void 10127 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 10128 struct lpfc_iocbq *iocbq) 10129 { 10130 struct lpfc_nodelist *ndlp = NULL; 10131 uint16_t rpi = 0, vpi = 0; 10132 struct lpfc_vport *vport = NULL; 10133 10134 /* The rpi in the ulpContext is vport-sensitive. */ 10135 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 10136 rpi = iocbq->iocb.ulpContext; 10137 10138 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10139 "3092 Port generated ABTS async event " 10140 "on vpi %d rpi %d status 0x%x\n", 10141 vpi, rpi, iocbq->iocb.ulpStatus); 10142 10143 vport = lpfc_find_vport_by_vpid(phba, vpi); 10144 if (!vport) 10145 goto err_exit; 10146 ndlp = lpfc_findnode_rpi(vport, rpi); 10147 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 10148 goto err_exit; 10149 10150 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 10151 lpfc_sli_abts_recover_port(vport, ndlp); 10152 return; 10153 10154 err_exit: 10155 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10156 "3095 Event Context not found, no " 10157 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 10158 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 10159 vpi, rpi); 10160 } 10161 10162 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 10163 * @phba: pointer to HBA context object. 
10164 * @ndlp: nodelist pointer for the impacted rport. 10165 * @axri: pointer to the wcqe containing the failed exchange. 10166 * 10167 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 10168 * port. The port generates this event when an abort exchange request to an 10169 * rport fails twice in succession with no reply. The abort could be originated 10170 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 10171 */ 10172 void 10173 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 10174 struct lpfc_nodelist *ndlp, 10175 struct sli4_wcqe_xri_aborted *axri) 10176 { 10177 struct lpfc_vport *vport; 10178 uint32_t ext_status = 0; 10179 10180 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 10181 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10182 "3115 Node Context not found, driver " 10183 "ignoring abts err event\n"); 10184 return; 10185 } 10186 10187 vport = ndlp->vport; 10188 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10189 "3116 Port generated FCP XRI ABORT event on " 10190 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 10191 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 10192 bf_get(lpfc_wcqe_xa_xri, axri), 10193 bf_get(lpfc_wcqe_xa_status, axri), 10194 axri->parameter); 10195 10196 /* 10197 * Catch the ABTS protocol failure case. Older OCe FW releases returned 10198 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 10199 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 10200 */ 10201 ext_status = axri->parameter & IOERR_PARAM_MASK; 10202 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 10203 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 10204 lpfc_sli_abts_recover_port(vport, ndlp); 10205 } 10206 10207 /** 10208 * lpfc_sli_async_event_handler - ASYNC iocb handler function 10209 * @phba: Pointer to HBA context object. 10210 * @pring: Pointer to driver SLI ring object. 10211 * @iocbq: Pointer to iocb object. 10212 * 10213 * This function is called by the slow ring event handler 10214 * function when there is an ASYNC event iocb in the ring. 10215 * This function is called with no lock held. 10216 * Currently this function handles only temperature related 10217 * ASYNC events. The function decodes the temperature sensor 10218 * event message and posts events for the management applications. 10219 **/ 10220 static void 10221 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 10222 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 10223 { 10224 IOCB_t *icmd; 10225 uint16_t evt_code; 10226 struct temp_event temp_event_data; 10227 struct Scsi_Host *shost; 10228 uint32_t *iocb_w; 10229 10230 icmd = &iocbq->iocb; 10231 evt_code = icmd->un.asyncstat.evt_code; 10232 10233 switch (evt_code) { 10234 case ASYNC_TEMP_WARN: 10235 case ASYNC_TEMP_SAFE: 10236 temp_event_data.data = (uint32_t) icmd->ulpContext; 10237 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10238 if (evt_code == ASYNC_TEMP_WARN) { 10239 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10240 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10241 "0347 Adapter is very hot, please take " 10242 "corrective action. temperature : %d Celsius\n", 10243 (uint32_t) icmd->ulpContext); 10244 } else { 10245 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10246 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10247 "0340 Adapter temperature is OK now. 
" 10248 "temperature : %d Celsius\n", 10249 (uint32_t) icmd->ulpContext); 10250 } 10251 10252 /* Send temperature change event to applications */ 10253 shost = lpfc_shost_from_vport(phba->pport); 10254 fc_host_post_vendor_event(shost, fc_get_event_number(), 10255 sizeof(temp_event_data), (char *) &temp_event_data, 10256 LPFC_NL_VENDOR_ID); 10257 break; 10258 case ASYNC_STATUS_CN: 10259 lpfc_sli_abts_err_handler(phba, iocbq); 10260 break; 10261 default: 10262 iocb_w = (uint32_t *) icmd; 10263 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10264 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10265 " evt_code 0x%x\n" 10266 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 10267 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 10268 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 10269 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 10270 pring->ringno, icmd->un.asyncstat.evt_code, 10271 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 10272 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 10273 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 10274 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 10275 10276 break; 10277 } 10278 } 10279 10280 10281 /** 10282 * lpfc_sli4_setup - SLI ring setup function 10283 * @phba: Pointer to HBA context object. 10284 * 10285 * lpfc_sli_setup sets up rings of the SLI interface with 10286 * number of iocbs per ring and iotags. This function is 10287 * called while driver attach to the HBA and before the 10288 * interrupts are enabled. So there is no need for locking. 10289 * 10290 * This function always returns 0. 10291 **/ 10292 int 10293 lpfc_sli4_setup(struct lpfc_hba *phba) 10294 { 10295 struct lpfc_sli_ring *pring; 10296 10297 pring = phba->sli4_hba.els_wq->pring; 10298 pring->num_mask = LPFC_MAX_RING_MASK; 10299 pring->prt[0].profile = 0; /* Mask 0 */ 10300 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10301 pring->prt[0].type = FC_TYPE_ELS; 10302 pring->prt[0].lpfc_sli_rcv_unsol_event = 10303 lpfc_els_unsol_event; 10304 pring->prt[1].profile = 0; /* Mask 1 */ 10305 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10306 pring->prt[1].type = FC_TYPE_ELS; 10307 pring->prt[1].lpfc_sli_rcv_unsol_event = 10308 lpfc_els_unsol_event; 10309 pring->prt[2].profile = 0; /* Mask 2 */ 10310 /* NameServer Inquiry */ 10311 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10312 /* NameServer */ 10313 pring->prt[2].type = FC_TYPE_CT; 10314 pring->prt[2].lpfc_sli_rcv_unsol_event = 10315 lpfc_ct_unsol_event; 10316 pring->prt[3].profile = 0; /* Mask 3 */ 10317 /* NameServer response */ 10318 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10319 /* NameServer */ 10320 pring->prt[3].type = FC_TYPE_CT; 10321 pring->prt[3].lpfc_sli_rcv_unsol_event = 10322 lpfc_ct_unsol_event; 10323 return 0; 10324 } 10325 10326 /** 10327 * lpfc_sli_setup - SLI ring setup function 10328 * @phba: Pointer to HBA context object. 10329 * 10330 * lpfc_sli_setup sets up rings of the SLI interface with 10331 * number of iocbs per ring and iotags. This function is 10332 * called while driver attach to the HBA and before the 10333 * interrupts are enabled. So there is no need for locking. 10334 * 10335 * This function always returns 0. SLI3 only. 
10336 **/ 10337 int 10338 lpfc_sli_setup(struct lpfc_hba *phba) 10339 { 10340 int i, totiocbsize = 0; 10341 struct lpfc_sli *psli = &phba->sli; 10342 struct lpfc_sli_ring *pring; 10343 10344 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10345 psli->sli_flag = 0; 10346 10347 psli->iocbq_lookup = NULL; 10348 psli->iocbq_lookup_len = 0; 10349 psli->last_iotag = 0; 10350 10351 for (i = 0; i < psli->num_rings; i++) { 10352 pring = &psli->sli3_ring[i]; 10353 switch (i) { 10354 case LPFC_FCP_RING: /* ring 0 - FCP */ 10355 /* numCiocb and numRiocb are used in config_port */ 10356 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10357 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10358 pring->sli.sli3.numCiocb += 10359 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10360 pring->sli.sli3.numRiocb += 10361 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10362 pring->sli.sli3.numCiocb += 10363 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10364 pring->sli.sli3.numRiocb += 10365 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10366 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10367 SLI3_IOCB_CMD_SIZE : 10368 SLI2_IOCB_CMD_SIZE; 10369 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10370 SLI3_IOCB_RSP_SIZE : 10371 SLI2_IOCB_RSP_SIZE; 10372 pring->iotag_ctr = 0; 10373 pring->iotag_max = 10374 (phba->cfg_hba_queue_depth * 2); 10375 pring->fast_iotag = pring->iotag_max; 10376 pring->num_mask = 0; 10377 break; 10378 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10379 /* numCiocb and numRiocb are used in config_port */ 10380 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10381 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10382 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10383 SLI3_IOCB_CMD_SIZE : 10384 SLI2_IOCB_CMD_SIZE; 10385 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10386 SLI3_IOCB_RSP_SIZE : 10387 SLI2_IOCB_RSP_SIZE; 10388 pring->iotag_max = phba->cfg_hba_queue_depth; 10389 pring->num_mask = 0; 10390 break; 10391 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10392 /* numCiocb and numRiocb are used in config_port */ 10393 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10394 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10395 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10396 SLI3_IOCB_CMD_SIZE : 10397 SLI2_IOCB_CMD_SIZE; 10398 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up the driver's mailbox command lists and the
 * iocb queues (txq and txcmplq) of each SLI4 work queue ring. This
 * function is called during the initialization of the SLI interface of
 * an HBA. It is called with no lock held; the hbalock is taken internally.
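 *
 * For each FCP hardware queue pring, for the ELS work queue pring and,
 * when NVME is enabled, for the NVME work queue prings, the txq, txcmplq
 * and iocb_continueq list heads are initialized and the per-ring
 * ring_lock is set up.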
10460 **/ 10461 void 10462 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10463 { 10464 struct lpfc_sli *psli; 10465 struct lpfc_sli_ring *pring; 10466 int i; 10467 10468 psli = &phba->sli; 10469 spin_lock_irq(&phba->hbalock); 10470 INIT_LIST_HEAD(&psli->mboxq); 10471 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10472 /* Initialize list headers for txq and txcmplq as double linked lists */ 10473 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10474 pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; 10475 pring->flag = 0; 10476 pring->ringno = LPFC_FCP_RING; 10477 pring->txcmplq_cnt = 0; 10478 INIT_LIST_HEAD(&pring->txq); 10479 INIT_LIST_HEAD(&pring->txcmplq); 10480 INIT_LIST_HEAD(&pring->iocb_continueq); 10481 spin_lock_init(&pring->ring_lock); 10482 } 10483 pring = phba->sli4_hba.els_wq->pring; 10484 pring->flag = 0; 10485 pring->ringno = LPFC_ELS_RING; 10486 pring->txcmplq_cnt = 0; 10487 INIT_LIST_HEAD(&pring->txq); 10488 INIT_LIST_HEAD(&pring->txcmplq); 10489 INIT_LIST_HEAD(&pring->iocb_continueq); 10490 spin_lock_init(&pring->ring_lock); 10491 10492 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10493 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10494 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; 10495 pring->flag = 0; 10496 pring->ringno = LPFC_FCP_RING; 10497 pring->txcmplq_cnt = 0; 10498 INIT_LIST_HEAD(&pring->txq); 10499 INIT_LIST_HEAD(&pring->txcmplq); 10500 INIT_LIST_HEAD(&pring->iocb_continueq); 10501 spin_lock_init(&pring->ring_lock); 10502 } 10503 pring = phba->sli4_hba.nvmels_wq->pring; 10504 pring->flag = 0; 10505 pring->ringno = LPFC_ELS_RING; 10506 pring->txcmplq_cnt = 0; 10507 INIT_LIST_HEAD(&pring->txq); 10508 INIT_LIST_HEAD(&pring->txcmplq); 10509 INIT_LIST_HEAD(&pring->iocb_continueq); 10510 spin_lock_init(&pring->ring_lock); 10511 } 10512 10513 spin_unlock_irq(&phba->hbalock); 10514 } 10515 10516 /** 10517 * lpfc_sli_queue_init - Queue initialization function 10518 * @phba: Pointer to HBA context object. 10519 * 10520 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10521 * ring. This function also initializes ring indices of each ring. 10522 * This function is called during the initialization of the SLI 10523 * interface of an HBA. 10524 * This function is called with no lock held and always returns 10525 * 1. 10526 **/ 10527 void 10528 lpfc_sli_queue_init(struct lpfc_hba *phba) 10529 { 10530 struct lpfc_sli *psli; 10531 struct lpfc_sli_ring *pring; 10532 int i; 10533 10534 psli = &phba->sli; 10535 spin_lock_irq(&phba->hbalock); 10536 INIT_LIST_HEAD(&psli->mboxq); 10537 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10538 /* Initialize list headers for txq and txcmplq as double linked lists */ 10539 for (i = 0; i < psli->num_rings; i++) { 10540 pring = &psli->sli3_ring[i]; 10541 pring->ringno = i; 10542 pring->sli.sli3.next_cmdidx = 0; 10543 pring->sli.sli3.local_getidx = 0; 10544 pring->sli.sli3.cmdidx = 0; 10545 INIT_LIST_HEAD(&pring->iocb_continueq); 10546 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10547 INIT_LIST_HEAD(&pring->postbufq); 10548 pring->flag = 0; 10549 INIT_LIST_HEAD(&pring->txq); 10550 INIT_LIST_HEAD(&pring->txcmplq); 10551 spin_lock_init(&pring->ring_lock); 10552 } 10553 spin_unlock_irq(&phba->hbalock); 10554 } 10555 10556 /** 10557 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10558 * @phba: Pointer to HBA context object. 10559 * 10560 * This routine flushes the mailbox command subsystem. 
It will unconditionally 10561 * flush all the mailbox commands in the three possible stages in the mailbox 10562 * command sub-system: pending mailbox command queue; the outstanding mailbox 10563 * command; and completed mailbox command queue. It is caller's responsibility 10564 * to make sure that the driver is in the proper state to flush the mailbox 10565 * command sub-system. Namely, the posting of mailbox commands into the 10566 * pending mailbox command queue from the various clients must be stopped; 10567 * either the HBA is in a state that it will never works on the outstanding 10568 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10569 * mailbox command has been completed. 10570 **/ 10571 static void 10572 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10573 { 10574 LIST_HEAD(completions); 10575 struct lpfc_sli *psli = &phba->sli; 10576 LPFC_MBOXQ_t *pmb; 10577 unsigned long iflag; 10578 10579 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10580 local_bh_disable(); 10581 10582 /* Flush all the mailbox commands in the mbox system */ 10583 spin_lock_irqsave(&phba->hbalock, iflag); 10584 10585 /* The pending mailbox command queue */ 10586 list_splice_init(&phba->sli.mboxq, &completions); 10587 /* The outstanding active mailbox command */ 10588 if (psli->mbox_active) { 10589 list_add_tail(&psli->mbox_active->list, &completions); 10590 psli->mbox_active = NULL; 10591 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10592 } 10593 /* The completed mailbox command queue */ 10594 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10595 spin_unlock_irqrestore(&phba->hbalock, iflag); 10596 10597 /* Enable softirqs again, done with phba->hbalock */ 10598 local_bh_enable(); 10599 10600 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10601 while (!list_empty(&completions)) { 10602 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10603 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10604 if (pmb->mbox_cmpl) 10605 pmb->mbox_cmpl(phba, pmb); 10606 } 10607 } 10608 10609 /** 10610 * lpfc_sli_host_down - Vport cleanup function 10611 * @vport: Pointer to virtual port object. 10612 * 10613 * lpfc_sli_host_down is called to clean up the resources 10614 * associated with a vport before destroying virtual 10615 * port data structures. 10616 * This function does following operations: 10617 * - Free discovery resources associated with this virtual 10618 * port. 10619 * - Free iocbs associated with this virtual port in 10620 * the txq. 10621 * - Send abort for all iocb commands associated with this 10622 * vport in txcmplq. 10623 * 10624 * This function is called with no lock held and always returns 1. 10625 **/ 10626 int 10627 lpfc_sli_host_down(struct lpfc_vport *vport) 10628 { 10629 LIST_HEAD(completions); 10630 struct lpfc_hba *phba = vport->phba; 10631 struct lpfc_sli *psli = &phba->sli; 10632 struct lpfc_queue *qp = NULL; 10633 struct lpfc_sli_ring *pring; 10634 struct lpfc_iocbq *iocb, *next_iocb; 10635 int i; 10636 unsigned long flags = 0; 10637 uint16_t prev_pring_flag; 10638 10639 lpfc_cleanup_discovery_resources(vport); 10640 10641 spin_lock_irqsave(&phba->hbalock, flags); 10642 10643 /* 10644 * Error everything on the txq since these iocbs 10645 * have not been given to the FW yet. 
10646 * Also issue ABTS for everything on the txcmplq 10647 */ 10648 if (phba->sli_rev != LPFC_SLI_REV4) { 10649 for (i = 0; i < psli->num_rings; i++) { 10650 pring = &psli->sli3_ring[i]; 10651 prev_pring_flag = pring->flag; 10652 /* Only slow rings */ 10653 if (pring->ringno == LPFC_ELS_RING) { 10654 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10655 /* Set the lpfc data pending flag */ 10656 set_bit(LPFC_DATA_READY, &phba->data_flags); 10657 } 10658 list_for_each_entry_safe(iocb, next_iocb, 10659 &pring->txq, list) { 10660 if (iocb->vport != vport) 10661 continue; 10662 list_move_tail(&iocb->list, &completions); 10663 } 10664 list_for_each_entry_safe(iocb, next_iocb, 10665 &pring->txcmplq, list) { 10666 if (iocb->vport != vport) 10667 continue; 10668 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10669 } 10670 pring->flag = prev_pring_flag; 10671 } 10672 } else { 10673 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10674 pring = qp->pring; 10675 if (!pring) 10676 continue; 10677 if (pring == phba->sli4_hba.els_wq->pring) { 10678 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10679 /* Set the lpfc data pending flag */ 10680 set_bit(LPFC_DATA_READY, &phba->data_flags); 10681 } 10682 prev_pring_flag = pring->flag; 10683 spin_lock_irq(&pring->ring_lock); 10684 list_for_each_entry_safe(iocb, next_iocb, 10685 &pring->txq, list) { 10686 if (iocb->vport != vport) 10687 continue; 10688 list_move_tail(&iocb->list, &completions); 10689 } 10690 spin_unlock_irq(&pring->ring_lock); 10691 list_for_each_entry_safe(iocb, next_iocb, 10692 &pring->txcmplq, list) { 10693 if (iocb->vport != vport) 10694 continue; 10695 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10696 } 10697 pring->flag = prev_pring_flag; 10698 } 10699 } 10700 spin_unlock_irqrestore(&phba->hbalock, flags); 10701 10702 /* Cancel all the IOCBs from the completions list */ 10703 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10704 IOERR_SLI_DOWN); 10705 return 1; 10706 } 10707 10708 /** 10709 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10710 * @phba: Pointer to HBA context object. 10711 * 10712 * This function cleans up all iocb, buffers, mailbox commands 10713 * while shutting down the HBA. This function is called with no 10714 * lock held and always returns 1. 10715 * This function does the following to cleanup driver resources: 10716 * - Free discovery resources for each virtual port 10717 * - Cleanup any pending fabric iocbs 10718 * - Iterate through the iocb txq and free each entry 10719 * in the list. 10720 * - Free up any buffer posted to the HBA 10721 * - Free mailbox commands in the mailbox queue. 10722 **/ 10723 int 10724 lpfc_sli_hba_down(struct lpfc_hba *phba) 10725 { 10726 LIST_HEAD(completions); 10727 struct lpfc_sli *psli = &phba->sli; 10728 struct lpfc_queue *qp = NULL; 10729 struct lpfc_sli_ring *pring; 10730 struct lpfc_dmabuf *buf_ptr; 10731 unsigned long flags = 0; 10732 int i; 10733 10734 /* Shutdown the mailbox command sub-system */ 10735 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10736 10737 lpfc_hba_down_prep(phba); 10738 10739 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10740 local_bh_disable(); 10741 10742 lpfc_fabric_abort_hba(phba); 10743 10744 spin_lock_irqsave(&phba->hbalock, flags); 10745 10746 /* 10747 * Error everything on the txq since these iocbs 10748 * have not been given to the FW yet. 
10749 */ 10750 if (phba->sli_rev != LPFC_SLI_REV4) { 10751 for (i = 0; i < psli->num_rings; i++) { 10752 pring = &psli->sli3_ring[i]; 10753 /* Only slow rings */ 10754 if (pring->ringno == LPFC_ELS_RING) { 10755 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10756 /* Set the lpfc data pending flag */ 10757 set_bit(LPFC_DATA_READY, &phba->data_flags); 10758 } 10759 list_splice_init(&pring->txq, &completions); 10760 } 10761 } else { 10762 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10763 pring = qp->pring; 10764 if (!pring) 10765 continue; 10766 spin_lock_irq(&pring->ring_lock); 10767 list_splice_init(&pring->txq, &completions); 10768 spin_unlock_irq(&pring->ring_lock); 10769 if (pring == phba->sli4_hba.els_wq->pring) { 10770 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10771 /* Set the lpfc data pending flag */ 10772 set_bit(LPFC_DATA_READY, &phba->data_flags); 10773 } 10774 } 10775 } 10776 spin_unlock_irqrestore(&phba->hbalock, flags); 10777 10778 /* Cancel all the IOCBs from the completions list */ 10779 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10780 IOERR_SLI_DOWN); 10781 10782 spin_lock_irqsave(&phba->hbalock, flags); 10783 list_splice_init(&phba->elsbuf, &completions); 10784 phba->elsbuf_cnt = 0; 10785 phba->elsbuf_prev_cnt = 0; 10786 spin_unlock_irqrestore(&phba->hbalock, flags); 10787 10788 while (!list_empty(&completions)) { 10789 list_remove_head(&completions, buf_ptr, 10790 struct lpfc_dmabuf, list); 10791 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10792 kfree(buf_ptr); 10793 } 10794 10795 /* Enable softirqs again, done with phba->hbalock */ 10796 local_bh_enable(); 10797 10798 /* Return any active mbox cmds */ 10799 del_timer_sync(&psli->mbox_tmo); 10800 10801 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10802 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10803 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10804 10805 return 1; 10806 } 10807 10808 /** 10809 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10810 * @srcp: Source memory pointer. 10811 * @destp: Destination memory pointer. 10812 * @cnt: Number of words required to be copied. 10813 * 10814 * This function is used for copying data between driver memory 10815 * and the SLI memory. This function also changes the endianness 10816 * of each word if native endianness is different from SLI 10817 * endianness. This function can be called with or without 10818 * lock. 10819 **/ 10820 void 10821 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10822 { 10823 uint32_t *src = srcp; 10824 uint32_t *dest = destp; 10825 uint32_t ldata; 10826 int i; 10827 10828 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10829 ldata = *src; 10830 ldata = le32_to_cpu(ldata); 10831 *dest = ldata; 10832 src++; 10833 dest++; 10834 } 10835 } 10836 10837 10838 /** 10839 * lpfc_sli_bemem_bcopy - SLI memory copy function 10840 * @srcp: Source memory pointer. 10841 * @destp: Destination memory pointer. 10842 * @cnt: Number of words required to be copied. 10843 * 10844 * This function is used for copying data between a data structure 10845 * with big endian representation to local endianness. 10846 * This function can be called with or without lock. 
10847 **/ 10848 void 10849 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 10850 { 10851 uint32_t *src = srcp; 10852 uint32_t *dest = destp; 10853 uint32_t ldata; 10854 int i; 10855 10856 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 10857 ldata = *src; 10858 ldata = be32_to_cpu(ldata); 10859 *dest = ldata; 10860 src++; 10861 dest++; 10862 } 10863 } 10864 10865 /** 10866 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 10867 * @phba: Pointer to HBA context object. 10868 * @pring: Pointer to driver SLI ring object. 10869 * @mp: Pointer to driver buffer object. 10870 * 10871 * This function is called with no lock held. 10872 * It always return zero after adding the buffer to the postbufq 10873 * buffer list. 10874 **/ 10875 int 10876 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10877 struct lpfc_dmabuf *mp) 10878 { 10879 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 10880 later */ 10881 spin_lock_irq(&phba->hbalock); 10882 list_add_tail(&mp->list, &pring->postbufq); 10883 pring->postbufq_cnt++; 10884 spin_unlock_irq(&phba->hbalock); 10885 return 0; 10886 } 10887 10888 /** 10889 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 10890 * @phba: Pointer to HBA context object. 10891 * 10892 * When HBQ is enabled, buffers are searched based on tags. This function 10893 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 10894 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 10895 * does not conflict with tags of buffer posted for unsolicited events. 10896 * The function returns the allocated tag. The function is called with 10897 * no locks held. 10898 **/ 10899 uint32_t 10900 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 10901 { 10902 spin_lock_irq(&phba->hbalock); 10903 phba->buffer_tag_count++; 10904 /* 10905 * Always set the QUE_BUFTAG_BIT to distiguish between 10906 * a tag assigned by HBQ. 10907 */ 10908 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 10909 spin_unlock_irq(&phba->hbalock); 10910 return phba->buffer_tag_count; 10911 } 10912 10913 /** 10914 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 10915 * @phba: Pointer to HBA context object. 10916 * @pring: Pointer to driver SLI ring object. 10917 * @tag: Buffer tag. 10918 * 10919 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 10920 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 10921 * iocb is posted to the response ring with the tag of the buffer. 10922 * This function searches the pring->postbufq list using the tag 10923 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 10924 * iocb. If the buffer is found then lpfc_dmabuf object of the 10925 * buffer is returned to the caller else NULL is returned. 10926 * This function is called with no lock held. 
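 *
 * The tags matched here are the ones handed out by lpfc_sli_get_buffer_tag()
 * above; they always have QUE_BUFTAG_BIT set, so they cannot collide with
 * the tags used for buffers posted for unsolicited events.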
10927 **/ 10928 struct lpfc_dmabuf * 10929 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10930 uint32_t tag) 10931 { 10932 struct lpfc_dmabuf *mp, *next_mp; 10933 struct list_head *slp = &pring->postbufq; 10934 10935 /* Search postbufq, from the beginning, looking for a match on tag */ 10936 spin_lock_irq(&phba->hbalock); 10937 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10938 if (mp->buffer_tag == tag) { 10939 list_del_init(&mp->list); 10940 pring->postbufq_cnt--; 10941 spin_unlock_irq(&phba->hbalock); 10942 return mp; 10943 } 10944 } 10945 10946 spin_unlock_irq(&phba->hbalock); 10947 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10948 "0402 Cannot find virtual addr for buffer tag on " 10949 "ring %d Data x%lx x%p x%p x%x\n", 10950 pring->ringno, (unsigned long) tag, 10951 slp->next, slp->prev, pring->postbufq_cnt); 10952 10953 return NULL; 10954 } 10955 10956 /** 10957 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10958 * @phba: Pointer to HBA context object. 10959 * @pring: Pointer to driver SLI ring object. 10960 * @phys: DMA address of the buffer. 10961 * 10962 * This function searches the buffer list using the dma_address 10963 * of unsolicited event to find the driver's lpfc_dmabuf object 10964 * corresponding to the dma_address. The function returns the 10965 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10966 * This function is called by the ct and els unsolicited event 10967 * handlers to get the buffer associated with the unsolicited 10968 * event. 10969 * 10970 * This function is called with no lock held. 10971 **/ 10972 struct lpfc_dmabuf * 10973 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10974 dma_addr_t phys) 10975 { 10976 struct lpfc_dmabuf *mp, *next_mp; 10977 struct list_head *slp = &pring->postbufq; 10978 10979 /* Search postbufq, from the beginning, looking for a match on phys */ 10980 spin_lock_irq(&phba->hbalock); 10981 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10982 if (mp->phys == phys) { 10983 list_del_init(&mp->list); 10984 pring->postbufq_cnt--; 10985 spin_unlock_irq(&phba->hbalock); 10986 return mp; 10987 } 10988 } 10989 10990 spin_unlock_irq(&phba->hbalock); 10991 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10992 "0410 Cannot find virtual addr for mapped buf on " 10993 "ring %d Data x%llx x%p x%p x%x\n", 10994 pring->ringno, (unsigned long long)phys, 10995 slp->next, slp->prev, pring->postbufq_cnt); 10996 return NULL; 10997 } 10998 10999 /** 11000 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 11001 * @phba: Pointer to HBA context object. 11002 * @cmdiocb: Pointer to driver command iocb object. 11003 * @rspiocb: Pointer to driver response iocb object. 11004 * 11005 * This function is the completion handler for the abort iocbs for 11006 * ELS commands. This function is called from the ELS ring event 11007 * handler with no lock held. This function frees memory resources 11008 * associated with the abort iocb. 11009 **/ 11010 static void 11011 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11012 struct lpfc_iocbq *rspiocb) 11013 { 11014 IOCB_t *irsp = &rspiocb->iocb; 11015 uint16_t abort_iotag, abort_context; 11016 struct lpfc_iocbq *abort_iocb = NULL; 11017 11018 if (irsp->ulpStatus) { 11019 11020 /* 11021 * Assume that the port already completed and returned, or 11022 * will return the iocb. Just Log the message. 
11023 */ 11024 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 11025 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 11026 11027 spin_lock_irq(&phba->hbalock); 11028 if (phba->sli_rev < LPFC_SLI_REV4) { 11029 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 11030 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11031 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 11032 spin_unlock_irq(&phba->hbalock); 11033 goto release_iocb; 11034 } 11035 if (abort_iotag != 0 && 11036 abort_iotag <= phba->sli.last_iotag) 11037 abort_iocb = 11038 phba->sli.iocbq_lookup[abort_iotag]; 11039 } else 11040 /* For sli4 the abort_tag is the XRI, 11041 * so the abort routine puts the iotag of the iocb 11042 * being aborted in the context field of the abort 11043 * IOCB. 11044 */ 11045 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11046 11047 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11048 "0327 Cannot abort els iocb %p " 11049 "with tag %x context %x, abort status %x, " 11050 "abort code %x\n", 11051 abort_iocb, abort_iotag, abort_context, 11052 irsp->ulpStatus, irsp->un.ulpWord[4]); 11053 11054 spin_unlock_irq(&phba->hbalock); 11055 } 11056 release_iocb: 11057 lpfc_sli_release_iocbq(phba, cmdiocb); 11058 return; 11059 } 11060 11061 /** 11062 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11063 * @phba: Pointer to HBA context object. 11064 * @cmdiocb: Pointer to driver command iocb object. 11065 * @rspiocb: Pointer to driver response iocb object. 11066 * 11067 * The function is called from SLI ring event handler with no 11068 * lock held. This function is the completion handler for ELS commands 11069 * which are aborted. The function frees memory resources used for 11070 * the aborted ELS commands. 11071 **/ 11072 static void 11073 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11074 struct lpfc_iocbq *rspiocb) 11075 { 11076 IOCB_t *irsp = &rspiocb->iocb; 11077 11078 /* ELS cmd tag <ulpIoTag> completes */ 11079 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11080 "0139 Ignoring ELS cmd tag x%x completion Data: " 11081 "x%x x%x x%x\n", 11082 irsp->ulpIoTag, irsp->ulpStatus, 11083 irsp->un.ulpWord[4], irsp->ulpTimeout); 11084 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11085 lpfc_ct_free_iocb(phba, cmdiocb); 11086 else 11087 lpfc_els_free_iocb(phba, cmdiocb); 11088 return; 11089 } 11090 11091 /** 11092 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11093 * @phba: Pointer to HBA context object. 11094 * @pring: Pointer to driver SLI ring object. 11095 * @cmdiocb: Pointer to driver command iocb object. 11096 * 11097 * This function issues an abort iocb for the provided command iocb down to 11098 * the port. Other than the case the outstanding command iocb is an abort 11099 * request, this function issues abort out unconditionally. This function is 11100 * called with hbalock held. The function returns 0 when it fails due to 11101 * memory allocation failure or when the command iocb is an abort request. 11102 **/ 11103 static int 11104 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11105 struct lpfc_iocbq *cmdiocb) 11106 { 11107 struct lpfc_vport *vport = cmdiocb->vport; 11108 struct lpfc_iocbq *abtsiocbp; 11109 IOCB_t *icmd = NULL; 11110 IOCB_t *iabt = NULL; 11111 int retval; 11112 unsigned long iflags; 11113 struct lpfc_nodelist *ndlp; 11114 11115 lockdep_assert_held(&phba->hbalock); 11116 11117 /* 11118 * There are certain command types we don't want to abort. 
And we 11119 * don't want to abort commands that are already in the process of 11120 * being aborted. 11121 */ 11122 icmd = &cmdiocb->iocb; 11123 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11124 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11125 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11126 return 0; 11127 11128 /* issue ABTS for this IOCB based on iotag */ 11129 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11130 if (abtsiocbp == NULL) 11131 return 0; 11132 11133 /* This signals the response to set the correct status 11134 * before calling the completion handler 11135 */ 11136 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11137 11138 iabt = &abtsiocbp->iocb; 11139 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 11140 iabt->un.acxri.abortContextTag = icmd->ulpContext; 11141 if (phba->sli_rev == LPFC_SLI_REV4) { 11142 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 11143 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 11144 } else { 11145 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 11146 if (pring->ringno == LPFC_ELS_RING) { 11147 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); 11148 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; 11149 } 11150 } 11151 iabt->ulpLe = 1; 11152 iabt->ulpClass = icmd->ulpClass; 11153 11154 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11155 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 11156 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 11157 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 11158 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 11159 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 11160 11161 if (phba->link_state >= LPFC_LINK_UP) 11162 iabt->ulpCommand = CMD_ABORT_XRI_CN; 11163 else 11164 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 11165 11166 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 11167 abtsiocbp->vport = vport; 11168 11169 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 11170 "0339 Abort xri x%x, original iotag x%x, " 11171 "abort cmd iotag x%x\n", 11172 iabt->un.acxri.abortIoTag, 11173 iabt->un.acxri.abortContextTag, 11174 abtsiocbp->iotag); 11175 11176 if (phba->sli_rev == LPFC_SLI_REV4) { 11177 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 11178 if (unlikely(pring == NULL)) 11179 return 0; 11180 /* Note: both hbalock and ring_lock need to be set here */ 11181 spin_lock_irqsave(&pring->ring_lock, iflags); 11182 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11183 abtsiocbp, 0); 11184 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11185 } else { 11186 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11187 abtsiocbp, 0); 11188 } 11189 11190 if (retval) 11191 __lpfc_sli_release_iocbq(phba, abtsiocbp); 11192 11193 /* 11194 * Caller to this routine should check for IOCB_ERROR 11195 * and handle it properly. This routine no longer removes 11196 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11197 */ 11198 return retval; 11199 } 11200 11201 /** 11202 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 11203 * @phba: Pointer to HBA context object. 11204 * @pring: Pointer to driver SLI ring object. 11205 * @cmdiocb: Pointer to driver command iocb object. 11206 * 11207 * This function issues an abort iocb for the provided command iocb. In case 11208 * of unloading, the abort iocb will not be issued to commands on the ELS 11209 * ring. Instead, the callback function shall be changed to those commands 11210 * so that nothing happens when them finishes. This function is called with 11211 * hbalock held. The function returns 0 when the command iocb is an abort 11212 * request. 
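 *
 * An illustrative caller sketch, modeled on lpfc_sli_host_down() above,
 * with hbalock assumed to be held:
 *
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
 *		if (iocb->vport != vport)
 *			continue;
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	}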
11213 **/ 11214 int 11215 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11216 struct lpfc_iocbq *cmdiocb) 11217 { 11218 struct lpfc_vport *vport = cmdiocb->vport; 11219 int retval = IOCB_ERROR; 11220 IOCB_t *icmd = NULL; 11221 11222 lockdep_assert_held(&phba->hbalock); 11223 11224 /* 11225 * There are certain command types we don't want to abort. And we 11226 * don't want to abort commands that are already in the process of 11227 * being aborted. 11228 */ 11229 icmd = &cmdiocb->iocb; 11230 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11231 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11232 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11233 return 0; 11234 11235 if (!pring) { 11236 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11237 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11238 else 11239 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11240 goto abort_iotag_exit; 11241 } 11242 11243 /* 11244 * If we're unloading, don't abort iocb on the ELS ring, but change 11245 * the callback so that nothing happens when it finishes. 11246 */ 11247 if ((vport->load_flag & FC_UNLOADING) && 11248 (pring->ringno == LPFC_ELS_RING)) { 11249 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11250 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11251 else 11252 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11253 goto abort_iotag_exit; 11254 } 11255 11256 /* Now, we try to issue the abort to the cmdiocb out */ 11257 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11258 11259 abort_iotag_exit: 11260 /* 11261 * Caller to this routine should check for IOCB_ERROR 11262 * and handle it properly. This routine no longer removes 11263 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11264 */ 11265 return retval; 11266 } 11267 11268 /** 11269 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11270 * @phba: pointer to lpfc HBA data structure. 11271 * 11272 * This routine will abort all pending and outstanding iocbs to an HBA. 11273 **/ 11274 void 11275 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11276 { 11277 struct lpfc_sli *psli = &phba->sli; 11278 struct lpfc_sli_ring *pring; 11279 struct lpfc_queue *qp = NULL; 11280 int i; 11281 11282 if (phba->sli_rev != LPFC_SLI_REV4) { 11283 for (i = 0; i < psli->num_rings; i++) { 11284 pring = &psli->sli3_ring[i]; 11285 lpfc_sli_abort_iocb_ring(phba, pring); 11286 } 11287 return; 11288 } 11289 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11290 pring = qp->pring; 11291 if (!pring) 11292 continue; 11293 lpfc_sli_abort_iocb_ring(phba, pring); 11294 } 11295 } 11296 11297 /** 11298 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11299 * @iocbq: Pointer to driver iocb object. 11300 * @vport: Pointer to driver virtual port object. 11301 * @tgt_id: SCSI ID of the target. 11302 * @lun_id: LUN ID of the scsi device. 11303 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11304 * 11305 * This function acts as an iocb filter for functions which abort or count 11306 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11307 * 0 if the filtering criteria is met for the given iocb and will return 11308 * 1 if the filtering criteria is not met. 11309 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11310 * given iocb is for the SCSI device specified by vport, tgt_id and 11311 * lun_id parameter. 11312 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11313 * given iocb is for the SCSI target specified by vport and tgt_id 11314 * parameters. 
11315 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11316 * given iocb is for the SCSI host associated with the given vport. 11317 * This function is called with no locks held. 11318 **/ 11319 static int 11320 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11321 uint16_t tgt_id, uint64_t lun_id, 11322 lpfc_ctx_cmd ctx_cmd) 11323 { 11324 struct lpfc_io_buf *lpfc_cmd; 11325 int rc = 1; 11326 11327 if (iocbq->vport != vport) 11328 return rc; 11329 11330 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11331 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11332 return rc; 11333 11334 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11335 11336 if (lpfc_cmd->pCmd == NULL) 11337 return rc; 11338 11339 switch (ctx_cmd) { 11340 case LPFC_CTX_LUN: 11341 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11342 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11343 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11344 rc = 0; 11345 break; 11346 case LPFC_CTX_TGT: 11347 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11348 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11349 rc = 0; 11350 break; 11351 case LPFC_CTX_HOST: 11352 rc = 0; 11353 break; 11354 default: 11355 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11356 __func__, ctx_cmd); 11357 break; 11358 } 11359 11360 return rc; 11361 } 11362 11363 /** 11364 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11365 * @vport: Pointer to virtual port. 11366 * @tgt_id: SCSI ID of the target. 11367 * @lun_id: LUN ID of the scsi device. 11368 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11369 * 11370 * This function returns number of FCP commands pending for the vport. 11371 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11372 * commands pending on the vport associated with SCSI device specified 11373 * by tgt_id and lun_id parameters. 11374 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11375 * commands pending on the vport associated with SCSI target specified 11376 * by tgt_id parameter. 11377 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11378 * commands pending on the vport. 11379 * This function returns the number of iocbs which satisfy the filter. 11380 * This function is called without any lock held. 11381 **/ 11382 int 11383 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11384 lpfc_ctx_cmd ctx_cmd) 11385 { 11386 struct lpfc_hba *phba = vport->phba; 11387 struct lpfc_iocbq *iocbq; 11388 int sum, i; 11389 11390 spin_lock_irq(&phba->hbalock); 11391 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11392 iocbq = phba->sli.iocbq_lookup[i]; 11393 11394 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11395 ctx_cmd) == 0) 11396 sum++; 11397 } 11398 spin_unlock_irq(&phba->hbalock); 11399 11400 return sum; 11401 } 11402 11403 /** 11404 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11405 * @phba: Pointer to HBA context object 11406 * @cmdiocb: Pointer to command iocb object. 11407 * @rspiocb: Pointer to response iocb object. 11408 * 11409 * This function is called when an aborted FCP iocb completes. This 11410 * function is called by the ring event handler with no lock held. 11411 * This function frees the iocb. 
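 *
 * Only the abort request iocbq itself is released here; the aborted FCP
 * command is completed separately through its own completion path.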
11412 **/ 11413 void 11414 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11415 struct lpfc_iocbq *rspiocb) 11416 { 11417 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11418 "3096 ABORT_XRI_CN completing on rpi x%x " 11419 "original iotag x%x, abort cmd iotag x%x " 11420 "status 0x%x, reason 0x%x\n", 11421 cmdiocb->iocb.un.acxri.abortContextTag, 11422 cmdiocb->iocb.un.acxri.abortIoTag, 11423 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11424 rspiocb->iocb.un.ulpWord[4]); 11425 lpfc_sli_release_iocbq(phba, cmdiocb); 11426 return; 11427 } 11428 11429 /** 11430 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11431 * @vport: Pointer to virtual port. 11432 * @pring: Pointer to driver SLI ring object. 11433 * @tgt_id: SCSI ID of the target. 11434 * @lun_id: LUN ID of the scsi device. 11435 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11436 * 11437 * This function sends an abort command for every SCSI command 11438 * associated with the given virtual port pending on the ring 11439 * filtered by lpfc_sli_validate_fcp_iocb function. 11440 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11441 * FCP iocbs associated with lun specified by tgt_id and lun_id 11442 * parameters 11443 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11444 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11445 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11446 * FCP iocbs associated with virtual port. 11447 * This function returns number of iocbs it failed to abort. 11448 * This function is called with no locks held. 11449 **/ 11450 int 11451 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11452 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11453 { 11454 struct lpfc_hba *phba = vport->phba; 11455 struct lpfc_iocbq *iocbq; 11456 struct lpfc_iocbq *abtsiocb; 11457 struct lpfc_sli_ring *pring_s4; 11458 IOCB_t *cmd = NULL; 11459 int errcnt = 0, ret_val = 0; 11460 int i; 11461 11462 /* all I/Os are in process of being flushed */ 11463 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) 11464 return errcnt; 11465 11466 for (i = 1; i <= phba->sli.last_iotag; i++) { 11467 iocbq = phba->sli.iocbq_lookup[i]; 11468 11469 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11470 abort_cmd) != 0) 11471 continue; 11472 11473 /* 11474 * If the iocbq is already being aborted, don't take a second 11475 * action, but do count it. 11476 */ 11477 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11478 continue; 11479 11480 /* issue ABTS for this IOCB based on iotag */ 11481 abtsiocb = lpfc_sli_get_iocbq(phba); 11482 if (abtsiocb == NULL) { 11483 errcnt++; 11484 continue; 11485 } 11486 11487 /* indicate the IO is being aborted by the driver. 
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
			if (!pring_s4)
				continue;
			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						      abtsiocb, 0);
		} else
			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
						      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}

/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
 * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
 * parameters.
 * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When cmd == LPFC_CTX_HOST, the function sends an abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it successfully aborted.
 * This function is called with no locks held right after a task
 * management command is sent.
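 *
 * Unlike lpfc_sli_abort_iocb() above, this routine takes the hbalock
 * itself while scanning the iotag table, acquires each command's buf_lock
 * (and, on SLI4, the destination ring's ring_lock) before issuing an
 * abort, and counts the aborts it successfully issued rather than the
 * failures.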
11555 **/ 11556 int 11557 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11558 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11559 { 11560 struct lpfc_hba *phba = vport->phba; 11561 struct lpfc_io_buf *lpfc_cmd; 11562 struct lpfc_iocbq *abtsiocbq; 11563 struct lpfc_nodelist *ndlp; 11564 struct lpfc_iocbq *iocbq; 11565 IOCB_t *icmd; 11566 int sum, i, ret_val; 11567 unsigned long iflags; 11568 struct lpfc_sli_ring *pring_s4 = NULL; 11569 11570 spin_lock_irqsave(&phba->hbalock, iflags); 11571 11572 /* all I/Os are in process of being flushed */ 11573 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11574 spin_unlock_irqrestore(&phba->hbalock, iflags); 11575 return 0; 11576 } 11577 sum = 0; 11578 11579 for (i = 1; i <= phba->sli.last_iotag; i++) { 11580 iocbq = phba->sli.iocbq_lookup[i]; 11581 11582 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11583 cmd) != 0) 11584 continue; 11585 11586 /* Guard against IO completion being called at same time */ 11587 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11588 spin_lock(&lpfc_cmd->buf_lock); 11589 11590 if (!lpfc_cmd->pCmd) { 11591 spin_unlock(&lpfc_cmd->buf_lock); 11592 continue; 11593 } 11594 11595 if (phba->sli_rev == LPFC_SLI_REV4) { 11596 pring_s4 = 11597 phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; 11598 if (!pring_s4) { 11599 spin_unlock(&lpfc_cmd->buf_lock); 11600 continue; 11601 } 11602 /* Note: both hbalock and ring_lock must be set here */ 11603 spin_lock(&pring_s4->ring_lock); 11604 } 11605 11606 /* 11607 * If the iocbq is already being aborted, don't take a second 11608 * action, but do count it. 11609 */ 11610 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 11611 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 11612 if (phba->sli_rev == LPFC_SLI_REV4) 11613 spin_unlock(&pring_s4->ring_lock); 11614 spin_unlock(&lpfc_cmd->buf_lock); 11615 continue; 11616 } 11617 11618 /* issue ABTS for this IOCB based on iotag */ 11619 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11620 if (!abtsiocbq) { 11621 if (phba->sli_rev == LPFC_SLI_REV4) 11622 spin_unlock(&pring_s4->ring_lock); 11623 spin_unlock(&lpfc_cmd->buf_lock); 11624 continue; 11625 } 11626 11627 icmd = &iocbq->iocb; 11628 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11629 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11630 if (phba->sli_rev == LPFC_SLI_REV4) 11631 abtsiocbq->iocb.un.acxri.abortIoTag = 11632 iocbq->sli4_xritag; 11633 else 11634 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11635 abtsiocbq->iocb.ulpLe = 1; 11636 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11637 abtsiocbq->vport = vport; 11638 11639 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11640 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11641 if (iocbq->iocb_flag & LPFC_IO_FCP) 11642 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11643 if (iocbq->iocb_flag & LPFC_IO_FOF) 11644 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11645 11646 ndlp = lpfc_cmd->rdata->pnode; 11647 11648 if (lpfc_is_link_up(phba) && 11649 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11650 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11651 else 11652 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11653 11654 /* Setup callback routine and issue the command. */ 11655 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11656 11657 /* 11658 * Indicate the IO is being aborted by the driver and set 11659 * the caller's flag into the aborted IO. 
11660 */ 11661 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11662 11663 if (phba->sli_rev == LPFC_SLI_REV4) { 11664 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11665 abtsiocbq, 0); 11666 spin_unlock(&pring_s4->ring_lock); 11667 } else { 11668 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11669 abtsiocbq, 0); 11670 } 11671 11672 spin_unlock(&lpfc_cmd->buf_lock); 11673 11674 if (ret_val == IOCB_ERROR) 11675 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11676 else 11677 sum++; 11678 } 11679 spin_unlock_irqrestore(&phba->hbalock, iflags); 11680 return sum; 11681 } 11682 11683 /** 11684 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11685 * @phba: Pointer to HBA context object. 11686 * @cmdiocbq: Pointer to command iocb. 11687 * @rspiocbq: Pointer to response iocb. 11688 * 11689 * This function is the completion handler for iocbs issued using 11690 * lpfc_sli_issue_iocb_wait function. This function is called by the 11691 * ring event handler function without any lock held. This function 11692 * can be called from both worker thread context and interrupt 11693 * context. This function also can be called from other thread which 11694 * cleans up the SLI layer objects. 11695 * This function copy the contents of the response iocb to the 11696 * response iocb memory object provided by the caller of 11697 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11698 * sleeps for the iocb completion. 11699 **/ 11700 static void 11701 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11702 struct lpfc_iocbq *cmdiocbq, 11703 struct lpfc_iocbq *rspiocbq) 11704 { 11705 wait_queue_head_t *pdone_q; 11706 unsigned long iflags; 11707 struct lpfc_io_buf *lpfc_cmd; 11708 11709 spin_lock_irqsave(&phba->hbalock, iflags); 11710 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11711 11712 /* 11713 * A time out has occurred for the iocb. If a time out 11714 * completion handler has been supplied, call it. Otherwise, 11715 * just free the iocbq. 11716 */ 11717 11718 spin_unlock_irqrestore(&phba->hbalock, iflags); 11719 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11720 cmdiocbq->wait_iocb_cmpl = NULL; 11721 if (cmdiocbq->iocb_cmpl) 11722 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11723 else 11724 lpfc_sli_release_iocbq(phba, cmdiocbq); 11725 return; 11726 } 11727 11728 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11729 if (cmdiocbq->context2 && rspiocbq) 11730 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11731 &rspiocbq->iocb, sizeof(IOCB_t)); 11732 11733 /* Set the exchange busy flag for task management commands */ 11734 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11735 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11736 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 11737 cur_iocbq); 11738 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11739 } 11740 11741 pdone_q = cmdiocbq->context_un.wait_queue; 11742 if (pdone_q) 11743 wake_up(pdone_q); 11744 spin_unlock_irqrestore(&phba->hbalock, iflags); 11745 return; 11746 } 11747 11748 /** 11749 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11750 * @phba: Pointer to HBA context object.. 11751 * @piocbq: Pointer to command iocb. 11752 * @flag: Flag to test. 11753 * 11754 * This routine grabs the hbalock and then test the iocb_flag to 11755 * see if the passed in flag is set. 11756 * Returns: 11757 * 1 if flag is set. 11758 * 0 if flag is not set. 
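 *
 * It is used as the wait condition in lpfc_sli_issue_iocb_wait() below,
 * so the LPFC_IO_WAKE test is always performed under hbalock, e.g.:
 *
 *	wait_event_timeout(done_q,
 *			   lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			   timeout_req);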
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it is an error.
11835 */ 11836 if (prspiocbq) { 11837 if (piocb->context2) 11838 return IOCB_ERROR; 11839 piocb->context2 = prspiocbq; 11840 } 11841 11842 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11843 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11844 piocb->context_un.wait_queue = &done_q; 11845 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11846 11847 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11848 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11849 return IOCB_ERROR; 11850 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11851 writel(creg_val, phba->HCregaddr); 11852 readl(phba->HCregaddr); /* flush */ 11853 } 11854 11855 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11856 SLI_IOCB_RET_IOCB); 11857 if (retval == IOCB_SUCCESS) { 11858 timeout_req = msecs_to_jiffies(timeout * 1000); 11859 timeleft = wait_event_timeout(done_q, 11860 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11861 timeout_req); 11862 spin_lock_irqsave(&phba->hbalock, iflags); 11863 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11864 11865 /* 11866 * IOCB timed out. Inform the wake iocb wait 11867 * completion function and set local status 11868 */ 11869 11870 iocb_completed = false; 11871 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11872 } 11873 spin_unlock_irqrestore(&phba->hbalock, iflags); 11874 if (iocb_completed) { 11875 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11876 "0331 IOCB wake signaled\n"); 11877 /* Note: we are not indicating if the IOCB has a success 11878 * status or not - that's for the caller to check. 11879 * IOCB_SUCCESS means just that the command was sent and 11880 * completed. Not that it completed successfully. 11881 * */ 11882 } else if (timeleft == 0) { 11883 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11884 "0338 IOCB wait timeout error - no " 11885 "wake response Data x%x\n", timeout); 11886 retval = IOCB_TIMEDOUT; 11887 } else { 11888 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11889 "0330 IOCB wake NOT set, " 11890 "Data x%x x%lx\n", 11891 timeout, (timeleft / jiffies)); 11892 retval = IOCB_TIMEDOUT; 11893 } 11894 } else if (retval == IOCB_BUSY) { 11895 if (phba->cfg_log_verbose & LOG_SLI) { 11896 list_for_each_entry(iocb, &pring->txq, list) { 11897 txq_cnt++; 11898 } 11899 list_for_each_entry(iocb, &pring->txcmplq, list) { 11900 txcmplq_cnt++; 11901 } 11902 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11903 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11904 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11905 } 11906 return retval; 11907 } else { 11908 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11909 "0332 IOCB wait issue failed, Data x%x\n", 11910 retval); 11911 retval = IOCB_ERROR; 11912 } 11913 11914 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11915 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11916 return IOCB_ERROR; 11917 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11918 writel(creg_val, phba->HCregaddr); 11919 readl(phba->HCregaddr); /* flush */ 11920 } 11921 11922 if (prspiocbq) 11923 piocb->context2 = NULL; 11924 11925 piocb->context_un.wait_queue = NULL; 11926 piocb->iocb_cmpl = NULL; 11927 return retval; 11928 } 11929 11930 /** 11931 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11932 * @phba: Pointer to HBA context object. 11933 * @pmboxq: Pointer to driver mailbox object. 11934 * @timeout: Timeout in number of seconds. 11935 * 11936 * This function issues the mailbox to firmware and waits for the 11937 * mailbox command to complete. If the mailbox command is not 11938 * completed within timeout seconds, it returns MBX_TIMEOUT. 
 * The function waits for the mailbox completion using a
 * non-interruptible wait. The caller should not free the mailbox
 * resources if this function returns MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. For the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as mailbox callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass the completion to the wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 *
 * This function is called to shut down the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent further asynchronous mailbox commands from being issued from
 * the pending mailbox command queue. If the mailbox command sub-system
 * shutdown is due to HBA error conditions such as EEH or ERATT, this
 * routine invokes the mailbox sub-system flush routine to forcefully
 * bring down the mailbox sub-system. Otherwise, if it is due to a normal
 * condition (such as offline or HBA function reset), this routine waits
 * for the outstanding mailbox command to complete before invoking the
 * mailbox sub-system flush routine to gracefully bring down the mailbox
 * sub-system.
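 *
 * The @mbx_action argument selects the behavior: LPFC_MBX_NO_WAIT flushes
 * the mailbox sub-system immediately after a short delay for the port
 * state, while LPFC_MBX_WAIT allows an outstanding mailbox command to
 * complete, bounded by the mailbox command timeout, before the flush.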
12008 **/ 12009 void 12010 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 12011 { 12012 struct lpfc_sli *psli = &phba->sli; 12013 unsigned long timeout; 12014 12015 if (mbx_action == LPFC_MBX_NO_WAIT) { 12016 /* delay 100ms for port state */ 12017 msleep(100); 12018 lpfc_sli_mbox_sys_flush(phba); 12019 return; 12020 } 12021 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 12022 12023 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12024 local_bh_disable(); 12025 12026 spin_lock_irq(&phba->hbalock); 12027 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12028 12029 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 12030 /* Determine how long we might wait for the active mailbox 12031 * command to be gracefully completed by firmware. 12032 */ 12033 if (phba->sli.mbox_active) 12034 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 12035 phba->sli.mbox_active) * 12036 1000) + jiffies; 12037 spin_unlock_irq(&phba->hbalock); 12038 12039 /* Enable softirqs again, done with phba->hbalock */ 12040 local_bh_enable(); 12041 12042 while (phba->sli.mbox_active) { 12043 /* Check active mailbox complete status every 2ms */ 12044 msleep(2); 12045 if (time_after(jiffies, timeout)) 12046 /* Timeout, let the mailbox flush routine to 12047 * forcefully release active mailbox command 12048 */ 12049 break; 12050 } 12051 } else { 12052 spin_unlock_irq(&phba->hbalock); 12053 12054 /* Enable softirqs again, done with phba->hbalock */ 12055 local_bh_enable(); 12056 } 12057 12058 lpfc_sli_mbox_sys_flush(phba); 12059 } 12060 12061 /** 12062 * lpfc_sli_eratt_read - read sli-3 error attention events 12063 * @phba: Pointer to HBA context. 12064 * 12065 * This function is called to read the SLI3 device error attention registers 12066 * for possible error attention events. The caller must hold the hostlock 12067 * with spin_lock_irq(). 12068 * 12069 * This function returns 1 when there is Error Attention in the Host Attention 12070 * Register and returns 0 otherwise. 12071 **/ 12072 static int 12073 lpfc_sli_eratt_read(struct lpfc_hba *phba) 12074 { 12075 uint32_t ha_copy; 12076 12077 /* Read chip Host Attention (HA) register */ 12078 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12079 goto unplug_err; 12080 12081 if (ha_copy & HA_ERATT) { 12082 /* Read host status register to retrieve error event */ 12083 if (lpfc_sli_read_hs(phba)) 12084 goto unplug_err; 12085 12086 /* Check if there is a deferred error condition is active */ 12087 if ((HS_FFER1 & phba->work_hs) && 12088 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12089 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 12090 phba->hba_flag |= DEFER_ERATT; 12091 /* Clear all interrupt enable conditions */ 12092 writel(0, phba->HCregaddr); 12093 readl(phba->HCregaddr); 12094 } 12095 12096 /* Set the driver HA work bitmap */ 12097 phba->work_ha |= HA_ERATT; 12098 /* Indicate polling handles this ERATT */ 12099 phba->hba_flag |= HBA_ERATT_HANDLED; 12100 return 1; 12101 } 12102 return 0; 12103 12104 unplug_err: 12105 /* Set the driver HS work bitmap */ 12106 phba->work_hs |= UNPLUG_ERR; 12107 /* Set the driver HA work bitmap */ 12108 phba->work_ha |= HA_ERATT; 12109 /* Indicate polling handles this ERATT */ 12110 phba->hba_flag |= HBA_ERATT_HANDLED; 12111 return 1; 12112 } 12113 12114 /** 12115 * lpfc_sli4_eratt_read - read sli-4 error attention events 12116 * @phba: Pointer to HBA context. 12117 * 12118 * This function is called to read the SLI4 device error attention registers 12119 * for possible error attention events. 
The caller must hold the hostlock 12120 * with spin_lock_irq(). 12121 * 12122 * This function returns 1 when there is Error Attention in the Host Attention 12123 * Register and returns 0 otherwise. 12124 **/ 12125 static int 12126 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12127 { 12128 uint32_t uerr_sta_hi, uerr_sta_lo; 12129 uint32_t if_type, portsmphr; 12130 struct lpfc_register portstat_reg; 12131 12132 /* 12133 * For now, use the SLI4 device internal unrecoverable error 12134 * registers for error attention. This can be changed later. 12135 */ 12136 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12137 switch (if_type) { 12138 case LPFC_SLI_INTF_IF_TYPE_0: 12139 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12140 &uerr_sta_lo) || 12141 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12142 &uerr_sta_hi)) { 12143 phba->work_hs |= UNPLUG_ERR; 12144 phba->work_ha |= HA_ERATT; 12145 phba->hba_flag |= HBA_ERATT_HANDLED; 12146 return 1; 12147 } 12148 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12149 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12150 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12151 "1423 HBA Unrecoverable error: " 12152 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12153 "ue_mask_lo_reg=0x%x, " 12154 "ue_mask_hi_reg=0x%x\n", 12155 uerr_sta_lo, uerr_sta_hi, 12156 phba->sli4_hba.ue_mask_lo, 12157 phba->sli4_hba.ue_mask_hi); 12158 phba->work_status[0] = uerr_sta_lo; 12159 phba->work_status[1] = uerr_sta_hi; 12160 phba->work_ha |= HA_ERATT; 12161 phba->hba_flag |= HBA_ERATT_HANDLED; 12162 return 1; 12163 } 12164 break; 12165 case LPFC_SLI_INTF_IF_TYPE_2: 12166 case LPFC_SLI_INTF_IF_TYPE_6: 12167 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12168 &portstat_reg.word0) || 12169 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12170 &portsmphr)){ 12171 phba->work_hs |= UNPLUG_ERR; 12172 phba->work_ha |= HA_ERATT; 12173 phba->hba_flag |= HBA_ERATT_HANDLED; 12174 return 1; 12175 } 12176 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12177 phba->work_status[0] = 12178 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12179 phba->work_status[1] = 12180 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12181 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12182 "2885 Port Status Event: " 12183 "port status reg 0x%x, " 12184 "port smphr reg 0x%x, " 12185 "error 1=0x%x, error 2=0x%x\n", 12186 portstat_reg.word0, 12187 portsmphr, 12188 phba->work_status[0], 12189 phba->work_status[1]); 12190 phba->work_ha |= HA_ERATT; 12191 phba->hba_flag |= HBA_ERATT_HANDLED; 12192 return 1; 12193 } 12194 break; 12195 case LPFC_SLI_INTF_IF_TYPE_1: 12196 default: 12197 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12198 "2886 HBA Error Attention on unsupported " 12199 "if type %d.", if_type); 12200 return 1; 12201 } 12202 12203 return 0; 12204 } 12205 12206 /** 12207 * lpfc_sli_check_eratt - check error attention events 12208 * @phba: Pointer to HBA context. 12209 * 12210 * This function is called from timer soft interrupt context to check HBA's 12211 * error attention register bit for error attention events. 12212 * 12213 * This function returns 1 when there is Error Attention in the Host Attention 12214 * Register and returns 0 otherwise. 12215 **/ 12216 int 12217 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12218 { 12219 uint32_t ha_copy; 12220 12221 /* If somebody is waiting to handle an eratt, don't process it 12222 * here. The brdkill function will do this. 
12223 */ 12224 if (phba->link_flag & LS_IGNORE_ERATT) 12225 return 0; 12226 12227 /* Check if interrupt handler handles this ERATT */ 12228 spin_lock_irq(&phba->hbalock); 12229 if (phba->hba_flag & HBA_ERATT_HANDLED) { 12230 /* Interrupt handler has handled ERATT */ 12231 spin_unlock_irq(&phba->hbalock); 12232 return 0; 12233 } 12234 12235 /* 12236 * If there is deferred error attention, do not check for error 12237 * attention 12238 */ 12239 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12240 spin_unlock_irq(&phba->hbalock); 12241 return 0; 12242 } 12243 12244 /* If PCI channel is offline, don't process it */ 12245 if (unlikely(pci_channel_offline(phba->pcidev))) { 12246 spin_unlock_irq(&phba->hbalock); 12247 return 0; 12248 } 12249 12250 switch (phba->sli_rev) { 12251 case LPFC_SLI_REV2: 12252 case LPFC_SLI_REV3: 12253 /* Read chip Host Attention (HA) register */ 12254 ha_copy = lpfc_sli_eratt_read(phba); 12255 break; 12256 case LPFC_SLI_REV4: 12257 /* Read device Uncoverable Error (UERR) registers */ 12258 ha_copy = lpfc_sli4_eratt_read(phba); 12259 break; 12260 default: 12261 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12262 "0299 Invalid SLI revision (%d)\n", 12263 phba->sli_rev); 12264 ha_copy = 0; 12265 break; 12266 } 12267 spin_unlock_irq(&phba->hbalock); 12268 12269 return ha_copy; 12270 } 12271 12272 /** 12273 * lpfc_intr_state_check - Check device state for interrupt handling 12274 * @phba: Pointer to HBA context. 12275 * 12276 * This inline routine checks whether a device or its PCI slot is in a state 12277 * that the interrupt should be handled. 12278 * 12279 * This function returns 0 if the device or the PCI slot is in a state that 12280 * interrupt should be handled, otherwise -EIO. 12281 */ 12282 static inline int 12283 lpfc_intr_state_check(struct lpfc_hba *phba) 12284 { 12285 /* If the pci channel is offline, ignore all the interrupts */ 12286 if (unlikely(pci_channel_offline(phba->pcidev))) 12287 return -EIO; 12288 12289 /* Update device level interrupt statistics */ 12290 phba->sli.slistat.sli_intr++; 12291 12292 /* Ignore all interrupts during initialization. */ 12293 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 12294 return -EIO; 12295 12296 return 0; 12297 } 12298 12299 /** 12300 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 12301 * @irq: Interrupt number. 12302 * @dev_id: The device context pointer. 12303 * 12304 * This function is directly called from the PCI layer as an interrupt 12305 * service routine when device with SLI-3 interface spec is enabled with 12306 * MSI-X multi-message interrupt mode and there are slow-path events in 12307 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 12308 * interrupt mode, this function is called as part of the device-level 12309 * interrupt handler. When the PCI slot is in error recovery or the HBA 12310 * is undergoing initialization, the interrupt handler will not process 12311 * the interrupt. The link attention and ELS ring attention events are 12312 * handled by the worker thread. The interrupt handler signals the worker 12313 * thread and returns for these events. This function is called without 12314 * any lock held. It gets the hbalock to access and update SLI data 12315 * structures. 12316 * 12317 * This function returns IRQ_HANDLED when interrupt is handled else it 12318 * returns IRQ_NONE. 
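 *
 * A registration sketch for the MSI-X case (the vector index, flags and
 * handler-name macro below are assumptions for illustration, not taken
 * from this file):
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *			 lpfc_sli_sp_intr_handler, 0,
 *			 LPFC_SP_DRIVER_HANDLER_NAME, phba);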
12319 **/ 12320 irqreturn_t 12321 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12322 { 12323 struct lpfc_hba *phba; 12324 uint32_t ha_copy, hc_copy; 12325 uint32_t work_ha_copy; 12326 unsigned long status; 12327 unsigned long iflag; 12328 uint32_t control; 12329 12330 MAILBOX_t *mbox, *pmbox; 12331 struct lpfc_vport *vport; 12332 struct lpfc_nodelist *ndlp; 12333 struct lpfc_dmabuf *mp; 12334 LPFC_MBOXQ_t *pmb; 12335 int rc; 12336 12337 /* 12338 * Get the driver's phba structure from the dev_id and 12339 * assume the HBA is not interrupting. 12340 */ 12341 phba = (struct lpfc_hba *)dev_id; 12342 12343 if (unlikely(!phba)) 12344 return IRQ_NONE; 12345 12346 /* 12347 * Stuff needs to be attented to when this function is invoked as an 12348 * individual interrupt handler in MSI-X multi-message interrupt mode 12349 */ 12350 if (phba->intr_type == MSIX) { 12351 /* Check device state for handling interrupt */ 12352 if (lpfc_intr_state_check(phba)) 12353 return IRQ_NONE; 12354 /* Need to read HA REG for slow-path events */ 12355 spin_lock_irqsave(&phba->hbalock, iflag); 12356 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12357 goto unplug_error; 12358 /* If somebody is waiting to handle an eratt don't process it 12359 * here. The brdkill function will do this. 12360 */ 12361 if (phba->link_flag & LS_IGNORE_ERATT) 12362 ha_copy &= ~HA_ERATT; 12363 /* Check the need for handling ERATT in interrupt handler */ 12364 if (ha_copy & HA_ERATT) { 12365 if (phba->hba_flag & HBA_ERATT_HANDLED) 12366 /* ERATT polling has handled ERATT */ 12367 ha_copy &= ~HA_ERATT; 12368 else 12369 /* Indicate interrupt handler handles ERATT */ 12370 phba->hba_flag |= HBA_ERATT_HANDLED; 12371 } 12372 12373 /* 12374 * If there is deferred error attention, do not check for any 12375 * interrupt. 12376 */ 12377 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12378 spin_unlock_irqrestore(&phba->hbalock, iflag); 12379 return IRQ_NONE; 12380 } 12381 12382 /* Clear up only attention source related to slow-path */ 12383 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12384 goto unplug_error; 12385 12386 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12387 HC_LAINT_ENA | HC_ERINT_ENA), 12388 phba->HCregaddr); 12389 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12390 phba->HAregaddr); 12391 writel(hc_copy, phba->HCregaddr); 12392 readl(phba->HAregaddr); /* flush */ 12393 spin_unlock_irqrestore(&phba->hbalock, iflag); 12394 } else 12395 ha_copy = phba->ha_copy; 12396 12397 work_ha_copy = ha_copy & phba->work_ha_mask; 12398 12399 if (work_ha_copy) { 12400 if (work_ha_copy & HA_LATT) { 12401 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12402 /* 12403 * Turn off Link Attention interrupts 12404 * until CLEAR_LA done 12405 */ 12406 spin_lock_irqsave(&phba->hbalock, iflag); 12407 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12408 if (lpfc_readl(phba->HCregaddr, &control)) 12409 goto unplug_error; 12410 control &= ~HC_LAINT_ENA; 12411 writel(control, phba->HCregaddr); 12412 readl(phba->HCregaddr); /* flush */ 12413 spin_unlock_irqrestore(&phba->hbalock, iflag); 12414 } 12415 else 12416 work_ha_copy &= ~HA_LATT; 12417 } 12418 12419 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12420 /* 12421 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12422 * the only slow ring. 
12423 */ 12424 status = (work_ha_copy & 12425 (HA_RXMASK << (4*LPFC_ELS_RING))); 12426 status >>= (4*LPFC_ELS_RING); 12427 if (status & HA_RXMASK) { 12428 spin_lock_irqsave(&phba->hbalock, iflag); 12429 if (lpfc_readl(phba->HCregaddr, &control)) 12430 goto unplug_error; 12431 12432 lpfc_debugfs_slow_ring_trc(phba, 12433 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12434 control, status, 12435 (uint32_t)phba->sli.slistat.sli_intr); 12436 12437 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12438 lpfc_debugfs_slow_ring_trc(phba, 12439 "ISR Disable ring:" 12440 "pwork:x%x hawork:x%x wait:x%x", 12441 phba->work_ha, work_ha_copy, 12442 (uint32_t)((unsigned long) 12443 &phba->work_waitq)); 12444 12445 control &= 12446 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12447 writel(control, phba->HCregaddr); 12448 readl(phba->HCregaddr); /* flush */ 12449 } 12450 else { 12451 lpfc_debugfs_slow_ring_trc(phba, 12452 "ISR slow ring: pwork:" 12453 "x%x hawork:x%x wait:x%x", 12454 phba->work_ha, work_ha_copy, 12455 (uint32_t)((unsigned long) 12456 &phba->work_waitq)); 12457 } 12458 spin_unlock_irqrestore(&phba->hbalock, iflag); 12459 } 12460 } 12461 spin_lock_irqsave(&phba->hbalock, iflag); 12462 if (work_ha_copy & HA_ERATT) { 12463 if (lpfc_sli_read_hs(phba)) 12464 goto unplug_error; 12465 /* 12466 * Check if there is a deferred error condition 12467 * is active 12468 */ 12469 if ((HS_FFER1 & phba->work_hs) && 12470 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12471 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12472 phba->work_hs)) { 12473 phba->hba_flag |= DEFER_ERATT; 12474 /* Clear all interrupt enable conditions */ 12475 writel(0, phba->HCregaddr); 12476 readl(phba->HCregaddr); 12477 } 12478 } 12479 12480 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12481 pmb = phba->sli.mbox_active; 12482 pmbox = &pmb->u.mb; 12483 mbox = phba->mbox; 12484 vport = pmb->vport; 12485 12486 /* First check out the status word */ 12487 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12488 if (pmbox->mbxOwner != OWN_HOST) { 12489 spin_unlock_irqrestore(&phba->hbalock, iflag); 12490 /* 12491 * Stray Mailbox Interrupt, mbxCommand <cmd> 12492 * mbxStatus <status> 12493 */ 12494 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12495 LOG_SLI, 12496 "(%d):0304 Stray Mailbox " 12497 "Interrupt mbxCommand x%x " 12498 "mbxStatus x%x\n", 12499 (vport ? vport->vpi : 0), 12500 pmbox->mbxCommand, 12501 pmbox->mbxStatus); 12502 /* clear mailbox attention bit */ 12503 work_ha_copy &= ~HA_MBATT; 12504 } else { 12505 phba->sli.mbox_active = NULL; 12506 spin_unlock_irqrestore(&phba->hbalock, iflag); 12507 phba->last_completion_time = jiffies; 12508 del_timer(&phba->sli.mbox_tmo); 12509 if (pmb->mbox_cmpl) { 12510 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12511 MAILBOX_CMD_SIZE); 12512 if (pmb->out_ext_byte_len && 12513 pmb->ctx_buf) 12514 lpfc_sli_pcimem_bcopy( 12515 phba->mbox_ext, 12516 pmb->ctx_buf, 12517 pmb->out_ext_byte_len); 12518 } 12519 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12520 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12521 12522 lpfc_debugfs_disc_trc(vport, 12523 LPFC_DISC_TRC_MBOX_VPORT, 12524 "MBOX dflt rpi: : " 12525 "status:x%x rpi:x%x", 12526 (uint32_t)pmbox->mbxStatus, 12527 pmbox->un.varWords[0], 0); 12528 12529 if (!pmbox->mbxStatus) { 12530 mp = (struct lpfc_dmabuf *) 12531 (pmb->ctx_buf); 12532 ndlp = (struct lpfc_nodelist *) 12533 pmb->ctx_ndlp; 12534 12535 /* Reg_LOGIN of dflt RPI was 12536 * successful. new lets get 12537 * rid of the RPI using the 12538 * same mbox buffer. 
12539 */ 12540 lpfc_unreg_login(phba, 12541 vport->vpi, 12542 pmbox->un.varWords[0], 12543 pmb); 12544 pmb->mbox_cmpl = 12545 lpfc_mbx_cmpl_dflt_rpi; 12546 pmb->ctx_buf = mp; 12547 pmb->ctx_ndlp = ndlp; 12548 pmb->vport = vport; 12549 rc = lpfc_sli_issue_mbox(phba, 12550 pmb, 12551 MBX_NOWAIT); 12552 if (rc != MBX_BUSY) 12553 lpfc_printf_log(phba, 12554 KERN_ERR, 12555 LOG_MBOX | LOG_SLI, 12556 "0350 rc should have" 12557 "been MBX_BUSY\n"); 12558 if (rc != MBX_NOT_FINISHED) 12559 goto send_current_mbox; 12560 } 12561 } 12562 spin_lock_irqsave( 12563 &phba->pport->work_port_lock, 12564 iflag); 12565 phba->pport->work_port_events &= 12566 ~WORKER_MBOX_TMO; 12567 spin_unlock_irqrestore( 12568 &phba->pport->work_port_lock, 12569 iflag); 12570 lpfc_mbox_cmpl_put(phba, pmb); 12571 } 12572 } else 12573 spin_unlock_irqrestore(&phba->hbalock, iflag); 12574 12575 if ((work_ha_copy & HA_MBATT) && 12576 (phba->sli.mbox_active == NULL)) { 12577 send_current_mbox: 12578 /* Process next mailbox command if there is one */ 12579 do { 12580 rc = lpfc_sli_issue_mbox(phba, NULL, 12581 MBX_NOWAIT); 12582 } while (rc == MBX_NOT_FINISHED); 12583 if (rc != MBX_SUCCESS) 12584 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12585 LOG_SLI, "0349 rc should be " 12586 "MBX_SUCCESS\n"); 12587 } 12588 12589 spin_lock_irqsave(&phba->hbalock, iflag); 12590 phba->work_ha |= work_ha_copy; 12591 spin_unlock_irqrestore(&phba->hbalock, iflag); 12592 lpfc_worker_wake_up(phba); 12593 } 12594 return IRQ_HANDLED; 12595 unplug_error: 12596 spin_unlock_irqrestore(&phba->hbalock, iflag); 12597 return IRQ_HANDLED; 12598 12599 } /* lpfc_sli_sp_intr_handler */ 12600 12601 /** 12602 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12603 * @irq: Interrupt number. 12604 * @dev_id: The device context pointer. 12605 * 12606 * This function is directly called from the PCI layer as an interrupt 12607 * service routine when device with SLI-3 interface spec is enabled with 12608 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12609 * ring event in the HBA. However, when the device is enabled with either 12610 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12611 * device-level interrupt handler. When the PCI slot is in error recovery 12612 * or the HBA is undergoing initialization, the interrupt handler will not 12613 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12614 * the intrrupt context. This function is called without any lock held. 12615 * It gets the hbalock to access and update SLI data structures. 12616 * 12617 * This function returns IRQ_HANDLED when interrupt is handled else it 12618 * returns IRQ_NONE. 12619 **/ 12620 irqreturn_t 12621 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12622 { 12623 struct lpfc_hba *phba; 12624 uint32_t ha_copy; 12625 unsigned long status; 12626 unsigned long iflag; 12627 struct lpfc_sli_ring *pring; 12628 12629 /* Get the driver's phba structure from the dev_id and 12630 * assume the HBA is not interrupting. 
12631 */ 12632 phba = (struct lpfc_hba *) dev_id; 12633 12634 if (unlikely(!phba)) 12635 return IRQ_NONE; 12636 12637 /* 12638 * Stuff needs to be attented to when this function is invoked as an 12639 * individual interrupt handler in MSI-X multi-message interrupt mode 12640 */ 12641 if (phba->intr_type == MSIX) { 12642 /* Check device state for handling interrupt */ 12643 if (lpfc_intr_state_check(phba)) 12644 return IRQ_NONE; 12645 /* Need to read HA REG for FCP ring and other ring events */ 12646 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12647 return IRQ_HANDLED; 12648 /* Clear up only attention source related to fast-path */ 12649 spin_lock_irqsave(&phba->hbalock, iflag); 12650 /* 12651 * If there is deferred error attention, do not check for 12652 * any interrupt. 12653 */ 12654 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12655 spin_unlock_irqrestore(&phba->hbalock, iflag); 12656 return IRQ_NONE; 12657 } 12658 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12659 phba->HAregaddr); 12660 readl(phba->HAregaddr); /* flush */ 12661 spin_unlock_irqrestore(&phba->hbalock, iflag); 12662 } else 12663 ha_copy = phba->ha_copy; 12664 12665 /* 12666 * Process all events on FCP ring. Take the optimized path for FCP IO. 12667 */ 12668 ha_copy &= ~(phba->work_ha_mask); 12669 12670 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12671 status >>= (4*LPFC_FCP_RING); 12672 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12673 if (status & HA_RXMASK) 12674 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12675 12676 if (phba->cfg_multi_ring_support == 2) { 12677 /* 12678 * Process all events on extra ring. Take the optimized path 12679 * for extra ring IO. 12680 */ 12681 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12682 status >>= (4*LPFC_EXTRA_RING); 12683 if (status & HA_RXMASK) { 12684 lpfc_sli_handle_fast_ring_event(phba, 12685 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12686 status); 12687 } 12688 } 12689 return IRQ_HANDLED; 12690 } /* lpfc_sli_fp_intr_handler */ 12691 12692 /** 12693 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12694 * @irq: Interrupt number. 12695 * @dev_id: The device context pointer. 12696 * 12697 * This function is the HBA device-level interrupt handler to device with 12698 * SLI-3 interface spec, called from the PCI layer when either MSI or 12699 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12700 * requires driver attention. This function invokes the slow-path interrupt 12701 * attention handling function and fast-path interrupt attention handling 12702 * function in turn to process the relevant HBA attention events. This 12703 * function is called without any lock held. It gets the hbalock to access 12704 * and update SLI data structures. 12705 * 12706 * This function returns IRQ_HANDLED when interrupt is handled, else it 12707 * returns IRQ_NONE. 12708 **/ 12709 irqreturn_t 12710 lpfc_sli_intr_handler(int irq, void *dev_id) 12711 { 12712 struct lpfc_hba *phba; 12713 irqreturn_t sp_irq_rc, fp_irq_rc; 12714 unsigned long status1, status2; 12715 uint32_t hc_copy; 12716 12717 /* 12718 * Get the driver's phba structure from the dev_id and 12719 * assume the HBA is not interrupting. 
12720 */ 12721 phba = (struct lpfc_hba *) dev_id; 12722 12723 if (unlikely(!phba)) 12724 return IRQ_NONE; 12725 12726 /* Check device state for handling interrupt */ 12727 if (lpfc_intr_state_check(phba)) 12728 return IRQ_NONE; 12729 12730 spin_lock(&phba->hbalock); 12731 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12732 spin_unlock(&phba->hbalock); 12733 return IRQ_HANDLED; 12734 } 12735 12736 if (unlikely(!phba->ha_copy)) { 12737 spin_unlock(&phba->hbalock); 12738 return IRQ_NONE; 12739 } else if (phba->ha_copy & HA_ERATT) { 12740 if (phba->hba_flag & HBA_ERATT_HANDLED) 12741 /* ERATT polling has handled ERATT */ 12742 phba->ha_copy &= ~HA_ERATT; 12743 else 12744 /* Indicate interrupt handler handles ERATT */ 12745 phba->hba_flag |= HBA_ERATT_HANDLED; 12746 } 12747 12748 /* 12749 * If there is deferred error attention, do not check for any interrupt. 12750 */ 12751 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12752 spin_unlock(&phba->hbalock); 12753 return IRQ_NONE; 12754 } 12755 12756 /* Clear attention sources except link and error attentions */ 12757 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12758 spin_unlock(&phba->hbalock); 12759 return IRQ_HANDLED; 12760 } 12761 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12762 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12763 phba->HCregaddr); 12764 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12765 writel(hc_copy, phba->HCregaddr); 12766 readl(phba->HAregaddr); /* flush */ 12767 spin_unlock(&phba->hbalock); 12768 12769 /* 12770 * Invokes slow-path host attention interrupt handling as appropriate. 12771 */ 12772 12773 /* status of events with mailbox and link attention */ 12774 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12775 12776 /* status of events with ELS ring */ 12777 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12778 status2 >>= (4*LPFC_ELS_RING); 12779 12780 if (status1 || (status2 & HA_RXMASK)) 12781 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12782 else 12783 sp_irq_rc = IRQ_NONE; 12784 12785 /* 12786 * Invoke fast-path host attention interrupt handling as appropriate. 12787 */ 12788 12789 /* status of events with FCP ring */ 12790 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12791 status1 >>= (4*LPFC_FCP_RING); 12792 12793 /* status of events with extra ring */ 12794 if (phba->cfg_multi_ring_support == 2) { 12795 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12796 status2 >>= (4*LPFC_EXTRA_RING); 12797 } else 12798 status2 = 0; 12799 12800 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12801 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12802 else 12803 fp_irq_rc = IRQ_NONE; 12804 12805 /* Return device-level interrupt handling status */ 12806 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12807 } /* lpfc_sli_intr_handler */ 12808 12809 /** 12810 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12811 * @phba: pointer to lpfc hba data structure. 12812 * 12813 * This routine is invoked by the worker thread to process all the pending 12814 * SLI4 els abort xri events. 
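 *
 * The worker thread typically reaches this routine when the
 * ELS_XRI_ABORT_EVENT flag is set on the HBA, along the lines of the
 * following dispatch (a sketch assumed from the worker path, not shown
 * in this routine):
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);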
12815 **/ 12816 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12817 { 12818 struct lpfc_cq_event *cq_event; 12819 12820 /* First, declare the els xri abort event has been handled */ 12821 spin_lock_irq(&phba->hbalock); 12822 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12823 spin_unlock_irq(&phba->hbalock); 12824 /* Now, handle all the els xri abort events */ 12825 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12826 /* Get the first event from the head of the event queue */ 12827 spin_lock_irq(&phba->hbalock); 12828 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12829 cq_event, struct lpfc_cq_event, list); 12830 spin_unlock_irq(&phba->hbalock); 12831 /* Notify aborted XRI for ELS work queue */ 12832 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12833 /* Free the event processed back to the free pool */ 12834 lpfc_sli4_cq_event_release(phba, cq_event); 12835 } 12836 } 12837 12838 /** 12839 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12840 * @phba: pointer to lpfc hba data structure 12841 * @pIocbIn: pointer to the rspiocbq 12842 * @pIocbOut: pointer to the cmdiocbq 12843 * @wcqe: pointer to the complete wcqe 12844 * 12845 * This routine transfers the fields of a command iocbq to a response iocbq 12846 * by copying all the IOCB fields from command iocbq and transferring the 12847 * completion status information from the complete wcqe. 12848 **/ 12849 static void 12850 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12851 struct lpfc_iocbq *pIocbIn, 12852 struct lpfc_iocbq *pIocbOut, 12853 struct lpfc_wcqe_complete *wcqe) 12854 { 12855 int numBdes, i; 12856 unsigned long iflags; 12857 uint32_t status, max_response; 12858 struct lpfc_dmabuf *dmabuf; 12859 struct ulp_bde64 *bpl, bde; 12860 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12861 12862 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12863 sizeof(struct lpfc_iocbq) - offset); 12864 /* Map WCQE parameters into irspiocb parameters */ 12865 status = bf_get(lpfc_wcqe_c_status, wcqe); 12866 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12867 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12868 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12869 pIocbIn->iocb.un.fcpi.fcpi_parm = 12870 pIocbOut->iocb.un.fcpi.fcpi_parm - 12871 wcqe->total_data_placed; 12872 else 12873 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12874 else { 12875 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12876 switch (pIocbOut->iocb.ulpCommand) { 12877 case CMD_ELS_REQUEST64_CR: 12878 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12879 bpl = (struct ulp_bde64 *)dmabuf->virt; 12880 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12881 max_response = bde.tus.f.bdeSize; 12882 break; 12883 case CMD_GEN_REQUEST64_CR: 12884 max_response = 0; 12885 if (!pIocbOut->context3) 12886 break; 12887 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12888 sizeof(struct ulp_bde64); 12889 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12890 bpl = (struct ulp_bde64 *)dmabuf->virt; 12891 for (i = 0; i < numBdes; i++) { 12892 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12893 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12894 max_response += bde.tus.f.bdeSize; 12895 } 12896 break; 12897 default: 12898 max_response = wcqe->total_data_placed; 12899 break; 12900 } 12901 if (max_response < wcqe->total_data_placed) 12902 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12903 else 12904 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12905 wcqe->total_data_placed; 12906 } 
12907 
12908 	/* Convert BG errors for completion status */
12909 	if (status == CQE_STATUS_DI_ERROR) {
12910 		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12911 
12912 		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12913 			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12914 		else
12915 			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12916 
12917 		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12918 		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12919 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12920 				BGS_GUARD_ERR_MASK;
12921 		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12922 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12923 				BGS_APPTAG_ERR_MASK;
12924 		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12925 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12926 				BGS_REFTAG_ERR_MASK;
12927 
12928 		/* Check to see if there was any good data before the error */
12929 		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12930 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12931 				BGS_HI_WATER_MARK_PRESENT_MASK;
12932 			pIocbIn->iocb.unsli3.sli3_bg.bghm =
12933 				wcqe->total_data_placed;
12934 		}
12935 
12936 		/*
12937 		 * Set ALL the error bits to indicate we don't know what
12938 		 * type of error it is.
12939 		 */
12940 		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12941 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12942 				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12943 				BGS_GUARD_ERR_MASK);
12944 	}
12945 
12946 	/* Pick up HBA exchange busy condition */
12947 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12948 		spin_lock_irqsave(&phba->hbalock, iflags);
12949 		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12950 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12951 	}
12952 }
12953 
12954 /**
12955  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12956  * @phba: Pointer to HBA context object.
12957  * @irspiocbq: Pointer to the response iocbq carrying the saved WCQE.
12958  *
12959  * This routine handles an ELS work-queue completion event and constructs
12960  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
12961  * discovery engine to handle.
12962  *
12963  * Return: Pointer to the response IOCBQ, NULL otherwise.
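 *
 * Callers own the returned iocbq: on success the same @irspiocbq is
 * returned with the command-side fields and WCQE status filled in; on
 * lookup failure the iocbq has already been released, so a NULL check
 * is all a caller needs (illustrative pattern, not taken from this file):
 *
 *	irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, irspiocbq);
 *	if (!irspiocbq)
 *		return;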
12964 **/ 12965 static struct lpfc_iocbq * 12966 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 12967 struct lpfc_iocbq *irspiocbq) 12968 { 12969 struct lpfc_sli_ring *pring; 12970 struct lpfc_iocbq *cmdiocbq; 12971 struct lpfc_wcqe_complete *wcqe; 12972 unsigned long iflags; 12973 12974 pring = lpfc_phba_elsring(phba); 12975 if (unlikely(!pring)) 12976 return NULL; 12977 12978 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 12979 spin_lock_irqsave(&pring->ring_lock, iflags); 12980 pring->stats.iocb_event++; 12981 /* Look up the ELS command IOCB and create pseudo response IOCB */ 12982 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12983 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12984 if (unlikely(!cmdiocbq)) { 12985 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12986 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12987 "0386 ELS complete with no corresponding " 12988 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 12989 wcqe->word0, wcqe->total_data_placed, 12990 wcqe->parameter, wcqe->word3); 12991 lpfc_sli_release_iocbq(phba, irspiocbq); 12992 return NULL; 12993 } 12994 12995 /* Put the iocb back on the txcmplq */ 12996 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 12997 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12998 12999 /* Fake the irspiocbq and copy necessary response information */ 13000 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 13001 13002 return irspiocbq; 13003 } 13004 13005 inline struct lpfc_cq_event * 13006 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 13007 { 13008 struct lpfc_cq_event *cq_event; 13009 13010 /* Allocate a new internal CQ_EVENT entry */ 13011 cq_event = lpfc_sli4_cq_event_alloc(phba); 13012 if (!cq_event) { 13013 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13014 "0602 Failed to alloc CQ_EVENT entry\n"); 13015 return NULL; 13016 } 13017 13018 /* Move the CQE into the event */ 13019 memcpy(&cq_event->cqe, entry, size); 13020 return cq_event; 13021 } 13022 13023 /** 13024 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 13025 * @phba: Pointer to HBA context object. 13026 * @cqe: Pointer to mailbox completion queue entry. 13027 * 13028 * This routine process a mailbox completion queue entry with asynchrous 13029 * event. 13030 * 13031 * Return: true if work posted to worker thread, otherwise false. 13032 **/ 13033 static bool 13034 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13035 { 13036 struct lpfc_cq_event *cq_event; 13037 unsigned long iflags; 13038 13039 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13040 "0392 Async Event: word0:x%x, word1:x%x, " 13041 "word2:x%x, word3:x%x\n", mcqe->word0, 13042 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 13043 13044 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 13045 if (!cq_event) 13046 return false; 13047 spin_lock_irqsave(&phba->hbalock, iflags); 13048 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 13049 /* Set the async event flag */ 13050 phba->hba_flag |= ASYNC_EVENT; 13051 spin_unlock_irqrestore(&phba->hbalock, iflags); 13052 13053 return true; 13054 } 13055 13056 /** 13057 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 13058 * @phba: Pointer to HBA context object. 13059 * @cqe: Pointer to mailbox completion queue entry. 13060 * 13061 * This routine process a mailbox completion queue entry with mailbox 13062 * completion event. 13063 * 13064 * Return: true if work posted to worker thread, otherwise false. 
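 *
 * Note that when the MCQE reports an error, the error code is folded into
 * the MQE status word as (LPFC_MBX_ERROR_RANGE | mcqe_status), so a
 * mailbox completion handler can spot a CQE-reported failure with the
 * usual accessor (an illustrative check only, not taken from this routine):
 *
 *	status = bf_get(lpfc_mqe_status, &pmb->u.mqe);
 *	if (status & LPFC_MBX_ERROR_RANGE)
 *		... the failure was reported by the MCQE, not the MQE ...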
13065  **/
13066 static bool
13067 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13068 {
13069 	uint32_t mcqe_status;
13070 	MAILBOX_t *mbox, *pmbox;
13071 	struct lpfc_mqe *mqe;
13072 	struct lpfc_vport *vport;
13073 	struct lpfc_nodelist *ndlp;
13074 	struct lpfc_dmabuf *mp;
13075 	unsigned long iflags;
13076 	LPFC_MBOXQ_t *pmb;
13077 	bool workposted = false;
13078 	int rc;
13079 
13080 	/* If not a mailbox completion MCQE, only check if the mailbox was consumed */
13081 	if (!bf_get(lpfc_trailer_completed, mcqe))
13082 		goto out_no_mqe_complete;
13083 
13084 	/* Get the reference to the active mbox command */
13085 	spin_lock_irqsave(&phba->hbalock, iflags);
13086 	pmb = phba->sli.mbox_active;
13087 	if (unlikely(!pmb)) {
13088 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13089 				"1832 No pending MBOX command to handle\n");
13090 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13091 		goto out_no_mqe_complete;
13092 	}
13093 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13094 	mqe = &pmb->u.mqe;
13095 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
13096 	mbox = phba->mbox;
13097 	vport = pmb->vport;
13098 
13099 	/* Reset heartbeat timer */
13100 	phba->last_completion_time = jiffies;
13101 	del_timer(&phba->sli.mbox_tmo);
13102 
13103 	/* Move mbox data to caller's mailbox region, do endian swapping */
13104 	if (pmb->mbox_cmpl && mbox)
13105 		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13106 
13107 	/*
13108 	 * For mcqe errors, conditionally move a modified error code to
13109 	 * the mbox so that the error will not be missed.
13110 	 */
13111 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13112 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13113 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13114 			bf_set(lpfc_mqe_status, mqe,
13115 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
13116 	}
13117 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13118 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13119 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13120 				      "MBOX dflt rpi: status:x%x rpi:x%x",
13121 				      mcqe_status,
13122 				      pmbox->un.varWords[0], 0);
13123 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13124 			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13125 			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13126 			/* Reg_LOGIN of dflt RPI was successful. Now let's get
13127 			 * rid of the RPI using the same mbox buffer.
13128 */ 13129 lpfc_unreg_login(phba, vport->vpi, 13130 pmbox->un.varWords[0], pmb); 13131 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13132 pmb->ctx_buf = mp; 13133 pmb->ctx_ndlp = ndlp; 13134 pmb->vport = vport; 13135 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13136 if (rc != MBX_BUSY) 13137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13138 LOG_SLI, "0385 rc should " 13139 "have been MBX_BUSY\n"); 13140 if (rc != MBX_NOT_FINISHED) 13141 goto send_current_mbox; 13142 } 13143 } 13144 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13145 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13146 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13147 13148 /* There is mailbox completion work to do */ 13149 spin_lock_irqsave(&phba->hbalock, iflags); 13150 __lpfc_mbox_cmpl_put(phba, pmb); 13151 phba->work_ha |= HA_MBATT; 13152 spin_unlock_irqrestore(&phba->hbalock, iflags); 13153 workposted = true; 13154 13155 send_current_mbox: 13156 spin_lock_irqsave(&phba->hbalock, iflags); 13157 /* Release the mailbox command posting token */ 13158 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13159 /* Setting active mailbox pointer need to be in sync to flag clear */ 13160 phba->sli.mbox_active = NULL; 13161 spin_unlock_irqrestore(&phba->hbalock, iflags); 13162 /* Wake up worker thread to post the next pending mailbox command */ 13163 lpfc_worker_wake_up(phba); 13164 out_no_mqe_complete: 13165 if (bf_get(lpfc_trailer_consumed, mcqe)) 13166 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13167 return workposted; 13168 } 13169 13170 /** 13171 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13172 * @phba: Pointer to HBA context object. 13173 * @cqe: Pointer to mailbox completion queue entry. 13174 * 13175 * This routine process a mailbox completion queue entry, it invokes the 13176 * proper mailbox complete handling or asynchrous event handling routine 13177 * according to the MCQE's async bit. 13178 * 13179 * Return: true if work posted to worker thread, otherwise false. 13180 **/ 13181 static bool 13182 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13183 struct lpfc_cqe *cqe) 13184 { 13185 struct lpfc_mcqe mcqe; 13186 bool workposted; 13187 13188 cq->CQ_mbox++; 13189 13190 /* Copy the mailbox MCQE and convert endian order as needed */ 13191 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13192 13193 /* Invoke the proper event handling routine */ 13194 if (!bf_get(lpfc_trailer_async, &mcqe)) 13195 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13196 else 13197 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13198 return workposted; 13199 } 13200 13201 /** 13202 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13203 * @phba: Pointer to HBA context object. 13204 * @cq: Pointer to associated CQ 13205 * @wcqe: Pointer to work-queue completion queue entry. 13206 * 13207 * This routine handles an ELS work-queue completion event. 13208 * 13209 * Return: true if work posted to worker thread, otherwise false. 
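 *
 * The saved event is later drained by the worker thread, which pairs the
 * WCQE with its originating command iocb (a consumer-side sketch, assumed
 * from the slow-path worker rather than shown in this routine):
 *
 *	irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, irspiocbq);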
13210 **/ 13211 static bool 13212 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13213 struct lpfc_wcqe_complete *wcqe) 13214 { 13215 struct lpfc_iocbq *irspiocbq; 13216 unsigned long iflags; 13217 struct lpfc_sli_ring *pring = cq->pring; 13218 int txq_cnt = 0; 13219 int txcmplq_cnt = 0; 13220 int fcp_txcmplq_cnt = 0; 13221 13222 /* Check for response status */ 13223 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13224 /* Log the error status */ 13225 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13226 "0357 ELS CQE error: status=x%x: " 13227 "CQE: %08x %08x %08x %08x\n", 13228 bf_get(lpfc_wcqe_c_status, wcqe), 13229 wcqe->word0, wcqe->total_data_placed, 13230 wcqe->parameter, wcqe->word3); 13231 } 13232 13233 /* Get an irspiocbq for later ELS response processing use */ 13234 irspiocbq = lpfc_sli_get_iocbq(phba); 13235 if (!irspiocbq) { 13236 if (!list_empty(&pring->txq)) 13237 txq_cnt++; 13238 if (!list_empty(&pring->txcmplq)) 13239 txcmplq_cnt++; 13240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13241 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13242 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 13243 txq_cnt, phba->iocb_cnt, 13244 fcp_txcmplq_cnt, 13245 txcmplq_cnt); 13246 return false; 13247 } 13248 13249 /* Save off the slow-path queue event for work thread to process */ 13250 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13251 spin_lock_irqsave(&phba->hbalock, iflags); 13252 list_add_tail(&irspiocbq->cq_event.list, 13253 &phba->sli4_hba.sp_queue_event); 13254 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13255 spin_unlock_irqrestore(&phba->hbalock, iflags); 13256 13257 return true; 13258 } 13259 13260 /** 13261 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13262 * @phba: Pointer to HBA context object. 13263 * @wcqe: Pointer to work-queue completion queue entry. 13264 * 13265 * This routine handles slow-path WQ entry consumed event by invoking the 13266 * proper WQ release routine to the slow-path WQ. 13267 **/ 13268 static void 13269 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13270 struct lpfc_wcqe_release *wcqe) 13271 { 13272 /* sanity check on queue memory */ 13273 if (unlikely(!phba->sli4_hba.els_wq)) 13274 return; 13275 /* Check for the slow-path ELS work queue */ 13276 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13277 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13278 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13279 else 13280 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13281 "2579 Slow-path wqe consume event carries " 13282 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13283 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13284 phba->sli4_hba.els_wq->queue_id); 13285 } 13286 13287 /** 13288 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13289 * @phba: Pointer to HBA context object. 13290 * @cq: Pointer to a WQ completion queue. 13291 * @wcqe: Pointer to work-queue completion queue entry. 13292 * 13293 * This routine handles an XRI abort event. 13294 * 13295 * Return: true if work posted to worker thread, otherwise false. 
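 *
 * Only the ELS/NVME-LS case defers work to the worker thread (by queuing
 * a cq_event and setting ELS_XRI_ABORT_EVENT); FCP and NVME aborts are
 * handled inline, so those paths return false.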
13296 **/ 13297 static bool 13298 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13299 struct lpfc_queue *cq, 13300 struct sli4_wcqe_xri_aborted *wcqe) 13301 { 13302 bool workposted = false; 13303 struct lpfc_cq_event *cq_event; 13304 unsigned long iflags; 13305 13306 switch (cq->subtype) { 13307 case LPFC_FCP: 13308 lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); 13309 workposted = false; 13310 break; 13311 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13312 case LPFC_ELS: 13313 cq_event = lpfc_cq_event_setup( 13314 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13315 if (!cq_event) 13316 return false; 13317 cq_event->hdwq = cq->hdwq; 13318 spin_lock_irqsave(&phba->hbalock, iflags); 13319 list_add_tail(&cq_event->list, 13320 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13321 /* Set the els xri abort event flag */ 13322 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13323 spin_unlock_irqrestore(&phba->hbalock, iflags); 13324 workposted = true; 13325 break; 13326 case LPFC_NVME: 13327 /* Notify aborted XRI for NVME work queue */ 13328 if (phba->nvmet_support) 13329 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13330 else 13331 lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); 13332 13333 workposted = false; 13334 break; 13335 default: 13336 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13337 "0603 Invalid CQ subtype %d: " 13338 "%08x %08x %08x %08x\n", 13339 cq->subtype, wcqe->word0, wcqe->parameter, 13340 wcqe->word2, wcqe->word3); 13341 workposted = false; 13342 break; 13343 } 13344 return workposted; 13345 } 13346 13347 #define FC_RCTL_MDS_DIAGS 0xF4 13348 13349 /** 13350 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13351 * @phba: Pointer to HBA context object. 13352 * @rcqe: Pointer to receive-queue completion queue entry. 13353 * 13354 * This routine process a receive-queue completion queue entry. 13355 * 13356 * Return: true if work posted to worker thread, otherwise false. 
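 *
 * Frames arrive on the header/data RQ pair (hdr_rq/dat_rq). MDS diagnostic
 * and unsolicited-data frames are handed straight to the MDS loopback
 * handler, while all other frames are queued on sp_queue_event for the
 * worker thread. Work is posted (true returned) for queued frames and for
 * the insufficient-buffer cases that ask the worker to post more receive
 * buffers.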
13357 **/ 13358 static bool 13359 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13360 { 13361 bool workposted = false; 13362 struct fc_frame_header *fc_hdr; 13363 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13364 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13365 struct lpfc_nvmet_tgtport *tgtp; 13366 struct hbq_dmabuf *dma_buf; 13367 uint32_t status, rq_id; 13368 unsigned long iflags; 13369 13370 /* sanity check on queue memory */ 13371 if (unlikely(!hrq) || unlikely(!drq)) 13372 return workposted; 13373 13374 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13375 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13376 else 13377 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13378 if (rq_id != hrq->queue_id) 13379 goto out; 13380 13381 status = bf_get(lpfc_rcqe_status, rcqe); 13382 switch (status) { 13383 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13384 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13385 "2537 Receive Frame Truncated!!\n"); 13386 /* fall through */ 13387 case FC_STATUS_RQ_SUCCESS: 13388 spin_lock_irqsave(&phba->hbalock, iflags); 13389 lpfc_sli4_rq_release(hrq, drq); 13390 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13391 if (!dma_buf) { 13392 hrq->RQ_no_buf_found++; 13393 spin_unlock_irqrestore(&phba->hbalock, iflags); 13394 goto out; 13395 } 13396 hrq->RQ_rcv_buf++; 13397 hrq->RQ_buf_posted--; 13398 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13399 13400 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13401 13402 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 13403 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 13404 spin_unlock_irqrestore(&phba->hbalock, iflags); 13405 /* Handle MDS Loopback frames */ 13406 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); 13407 break; 13408 } 13409 13410 /* save off the frame for the work thread to process */ 13411 list_add_tail(&dma_buf->cq_event.list, 13412 &phba->sli4_hba.sp_queue_event); 13413 /* Frame received */ 13414 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13415 spin_unlock_irqrestore(&phba->hbalock, iflags); 13416 workposted = true; 13417 break; 13418 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13419 if (phba->nvmet_support) { 13420 tgtp = phba->targetport->private; 13421 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13422 "6402 RQE Error x%x, posted %d err_cnt " 13423 "%d: %x %x %x\n", 13424 status, hrq->RQ_buf_posted, 13425 hrq->RQ_no_posted_buf, 13426 atomic_read(&tgtp->rcv_fcp_cmd_in), 13427 atomic_read(&tgtp->rcv_fcp_cmd_out), 13428 atomic_read(&tgtp->xmt_fcp_release)); 13429 } 13430 /* fallthrough */ 13431 13432 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13433 hrq->RQ_no_posted_buf++; 13434 /* Post more buffers if possible */ 13435 spin_lock_irqsave(&phba->hbalock, iflags); 13436 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13437 spin_unlock_irqrestore(&phba->hbalock, iflags); 13438 workposted = true; 13439 break; 13440 } 13441 out: 13442 return workposted; 13443 } 13444 13445 /** 13446 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13447 * @phba: Pointer to HBA context object. 13448 * @cq: Pointer to the completion queue. 13449 * @cqe: Pointer to a completion queue entry. 13450 * 13451 * This routine process a slow-path work-queue or receive queue completion queue 13452 * entry. 13453 * 13454 * Return: true if work posted to worker thread, otherwise false. 
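 *
 * Dispatch is keyed off the CQE code taken from a local, endian-corrected
 * copy of the entry:
 *	CQE_CODE_COMPL_WQE			- ELS work-queue completion handler
 *	CQE_CODE_RELEASE_WQE			- slow-path WQ release handler
 *	CQE_CODE_XRI_ABORTED			- XRI abort handler
 *	CQE_CODE_RECEIVE / CQE_CODE_RECEIVE_V1	- receive-queue handler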
13455 **/ 13456 static bool 13457 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13458 struct lpfc_cqe *cqe) 13459 { 13460 struct lpfc_cqe cqevt; 13461 bool workposted = false; 13462 13463 /* Copy the work queue CQE and convert endian order if needed */ 13464 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13465 13466 /* Check and process for different type of WCQE and dispatch */ 13467 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13468 case CQE_CODE_COMPL_WQE: 13469 /* Process the WQ/RQ complete event */ 13470 phba->last_completion_time = jiffies; 13471 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13472 (struct lpfc_wcqe_complete *)&cqevt); 13473 break; 13474 case CQE_CODE_RELEASE_WQE: 13475 /* Process the WQ release event */ 13476 lpfc_sli4_sp_handle_rel_wcqe(phba, 13477 (struct lpfc_wcqe_release *)&cqevt); 13478 break; 13479 case CQE_CODE_XRI_ABORTED: 13480 /* Process the WQ XRI abort event */ 13481 phba->last_completion_time = jiffies; 13482 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13483 (struct sli4_wcqe_xri_aborted *)&cqevt); 13484 break; 13485 case CQE_CODE_RECEIVE: 13486 case CQE_CODE_RECEIVE_V1: 13487 /* Process the RQ event */ 13488 phba->last_completion_time = jiffies; 13489 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13490 (struct lpfc_rcqe *)&cqevt); 13491 break; 13492 default: 13493 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13494 "0388 Not a valid WCQE code: x%x\n", 13495 bf_get(lpfc_cqe_code, &cqevt)); 13496 break; 13497 } 13498 return workposted; 13499 } 13500 13501 /** 13502 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13503 * @phba: Pointer to HBA context object. 13504 * @eqe: Pointer to fast-path event queue entry. 13505 * 13506 * This routine process a event queue entry from the slow-path event queue. 13507 * It will check the MajorCode and MinorCode to determine this is for a 13508 * completion event on a completion queue, if not, an error shall be logged 13509 * and just return. Otherwise, it will get to the corresponding completion 13510 * queue and process all the entries on that completion queue, rearm the 13511 * completion queue, and then return. 13512 * 13513 **/ 13514 static void 13515 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13516 struct lpfc_queue *speq) 13517 { 13518 struct lpfc_queue *cq = NULL, *childq; 13519 uint16_t cqid; 13520 13521 /* Get the reference to the corresponding CQ */ 13522 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13523 13524 list_for_each_entry(childq, &speq->child_list, list) { 13525 if (childq->queue_id == cqid) { 13526 cq = childq; 13527 break; 13528 } 13529 } 13530 if (unlikely(!cq)) { 13531 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13532 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13533 "0365 Slow-path CQ identifier " 13534 "(%d) does not exist\n", cqid); 13535 return; 13536 } 13537 13538 /* Save EQ associated with this CQ */ 13539 cq->assoc_qp = speq; 13540 13541 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) 13542 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13543 "0390 Cannot schedule soft IRQ " 13544 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13545 cqid, cq->queue_id, raw_smp_processor_id()); 13546 } 13547 13548 /** 13549 * __lpfc_sli4_process_cq - Process elements of a CQ 13550 * @phba: Pointer to HBA context object. 
13551 * @cq: Pointer to CQ to be processed 13552 * @handler: Routine to process each cqe 13553 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 13554 * 13555 * This routine processes completion queue entries in a CQ. While a valid 13556 * queue element is found, the handler is called. During processing checks 13557 * are made for periodic doorbell writes to let the hardware know of 13558 * element consumption. 13559 * 13560 * If the max limit on cqes to process is hit, or there are no more valid 13561 * entries, the loop stops. If we processed a sufficient number of elements, 13562 * meaning there is sufficient load, rather than rearming and generating 13563 * another interrupt, a cq rescheduling delay will be set. A delay of 0 13564 * indicates no rescheduling. 13565 * 13566 * Returns True if work scheduled, False otherwise. 13567 **/ 13568 static bool 13569 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 13570 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 13571 struct lpfc_cqe *), unsigned long *delay) 13572 { 13573 struct lpfc_cqe *cqe; 13574 bool workposted = false; 13575 int count = 0, consumed = 0; 13576 bool arm = true; 13577 13578 /* default - no reschedule */ 13579 *delay = 0; 13580 13581 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 13582 goto rearm_and_exit; 13583 13584 /* Process all the entries to the CQ */ 13585 cqe = lpfc_sli4_cq_get(cq); 13586 while (cqe) { 13587 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) && defined(BUILD_NVME) 13588 if (phba->ktime_on) 13589 cq->isr_timestamp = ktime_get_ns(); 13590 else 13591 cq->isr_timestamp = 0; 13592 #endif 13593 workposted |= handler(phba, cq, cqe); 13594 __lpfc_sli4_consume_cqe(phba, cq, cqe); 13595 13596 consumed++; 13597 if (!(++count % cq->max_proc_limit)) 13598 break; 13599 13600 if (!(count % cq->notify_interval)) { 13601 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13602 LPFC_QUEUE_NOARM); 13603 consumed = 0; 13604 } 13605 13606 cqe = lpfc_sli4_cq_get(cq); 13607 } 13608 if (count >= phba->cfg_cq_poll_threshold) { 13609 *delay = 1; 13610 arm = false; 13611 } 13612 13613 /* Track the max number of CQEs processed in 1 EQ */ 13614 if (count > cq->CQ_max_cqe) 13615 cq->CQ_max_cqe = count; 13616 13617 cq->assoc_qp->EQ_cqe_cnt += count; 13618 13619 /* Catch the no cq entry condition */ 13620 if (unlikely(count == 0)) 13621 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13622 "0369 No entry from completion queue " 13623 "qid=%d\n", cq->queue_id); 13624 13625 cq->queue_claimed = 0; 13626 13627 rearm_and_exit: 13628 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13629 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 13630 13631 return workposted; 13632 } 13633 13634 /** 13635 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13636 * @cq: pointer to CQ to process 13637 * 13638 * This routine calls the cq processing routine with a handler specific 13639 * to the type of queue bound to it. 13640 * 13641 * The CQ routine returns two values: the first is the calling status, 13642 * which indicates whether work was queued to the background discovery 13643 * thread. If true, the routine should wakeup the discovery thread; 13644 * the second is the delay parameter. If non-zero, rather than rearming 13645 * the CQ and yet another interrupt, the CQ handler should be queued so 13646 * that it is processed in a subsequent polling action. The value of 13647 * the delay indicates when to reschedule it. 
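 *
 * In practice that means: when a non-zero delay comes back, the CQ is
 * requeued on its bound CPU via queue_delayed_work_on() using
 * cq->sched_spwork, and when work was posted the worker thread is woken
 * with lpfc_worker_wake_up(), as the body below shows.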
13648 **/ 13649 static void 13650 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 13651 { 13652 struct lpfc_hba *phba = cq->phba; 13653 unsigned long delay; 13654 bool workposted = false; 13655 13656 /* Process and rearm the CQ */ 13657 switch (cq->type) { 13658 case LPFC_MCQ: 13659 workposted |= __lpfc_sli4_process_cq(phba, cq, 13660 lpfc_sli4_sp_handle_mcqe, 13661 &delay); 13662 break; 13663 case LPFC_WCQ: 13664 if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) 13665 workposted |= __lpfc_sli4_process_cq(phba, cq, 13666 lpfc_sli4_fp_handle_cqe, 13667 &delay); 13668 else 13669 workposted |= __lpfc_sli4_process_cq(phba, cq, 13670 lpfc_sli4_sp_handle_cqe, 13671 &delay); 13672 break; 13673 default: 13674 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13675 "0370 Invalid completion queue type (%d)\n", 13676 cq->type); 13677 return; 13678 } 13679 13680 if (delay) { 13681 if (!queue_delayed_work_on(cq->chann, phba->wq, 13682 &cq->sched_spwork, delay)) 13683 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13684 "0394 Cannot schedule soft IRQ " 13685 "for cqid=%d on CPU %d\n", 13686 cq->queue_id, cq->chann); 13687 } 13688 13689 /* wake up worker thread if there are works to be done */ 13690 if (workposted) 13691 lpfc_worker_wake_up(phba); 13692 } 13693 13694 /** 13695 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 13696 * interrupt 13697 * @work: pointer to work element 13698 * 13699 * translates from the work handler and calls the slow-path handler. 13700 **/ 13701 static void 13702 lpfc_sli4_sp_process_cq(struct work_struct *work) 13703 { 13704 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 13705 13706 __lpfc_sli4_sp_process_cq(cq); 13707 } 13708 13709 /** 13710 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 13711 * @work: pointer to work element 13712 * 13713 * translates from the work handler and calls the slow-path handler. 13714 **/ 13715 static void 13716 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 13717 { 13718 struct lpfc_queue *cq = container_of(to_delayed_work(work), 13719 struct lpfc_queue, sched_spwork); 13720 13721 __lpfc_sli4_sp_process_cq(cq); 13722 } 13723 13724 /** 13725 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13726 * @phba: Pointer to HBA context object. 13727 * @cq: Pointer to associated CQ 13728 * @wcqe: Pointer to work-queue completion queue entry. 13729 * 13730 * This routine process a fast-path work queue completion entry from fast-path 13731 * event queue for FCP command response completion. 13732 **/ 13733 static void 13734 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13735 struct lpfc_wcqe_complete *wcqe) 13736 { 13737 struct lpfc_sli_ring *pring = cq->pring; 13738 struct lpfc_iocbq *cmdiocbq; 13739 struct lpfc_iocbq irspiocbq; 13740 unsigned long iflags; 13741 13742 /* Check for response status */ 13743 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13744 /* If resource errors reported from HBA, reduce queue 13745 * depth of the SCSI device. 
13746 */ 13747 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13748 IOSTAT_LOCAL_REJECT)) && 13749 ((wcqe->parameter & IOERR_PARAM_MASK) == 13750 IOERR_NO_RESOURCES)) 13751 phba->lpfc_rampdown_queue_depth(phba); 13752 13753 /* Log the error status */ 13754 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13755 "0373 FCP CQE error: status=x%x: " 13756 "CQE: %08x %08x %08x %08x\n", 13757 bf_get(lpfc_wcqe_c_status, wcqe), 13758 wcqe->word0, wcqe->total_data_placed, 13759 wcqe->parameter, wcqe->word3); 13760 } 13761 13762 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13763 spin_lock_irqsave(&pring->ring_lock, iflags); 13764 pring->stats.iocb_event++; 13765 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13766 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13767 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13768 if (unlikely(!cmdiocbq)) { 13769 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13770 "0374 FCP complete with no corresponding " 13771 "cmdiocb: iotag (%d)\n", 13772 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13773 return; 13774 } 13775 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13776 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13777 #endif 13778 if (cmdiocbq->iocb_cmpl == NULL) { 13779 if (cmdiocbq->wqe_cmpl) { 13780 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13781 spin_lock_irqsave(&phba->hbalock, iflags); 13782 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13783 spin_unlock_irqrestore(&phba->hbalock, iflags); 13784 } 13785 13786 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13787 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13788 return; 13789 } 13790 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13791 "0375 FCP cmdiocb not callback function " 13792 "iotag: (%d)\n", 13793 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13794 return; 13795 } 13796 13797 /* Fake the irspiocb and copy necessary response information */ 13798 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13799 13800 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13801 spin_lock_irqsave(&phba->hbalock, iflags); 13802 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13803 spin_unlock_irqrestore(&phba->hbalock, iflags); 13804 } 13805 13806 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13807 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13808 } 13809 13810 /** 13811 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13812 * @phba: Pointer to HBA context object. 13813 * @cq: Pointer to completion queue. 13814 * @wcqe: Pointer to work-queue completion queue entry. 13815 * 13816 * This routine handles an fast-path WQ entry consumed event by invoking the 13817 * proper WQ release routine to the slow-path WQ. 
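* The WQ id carried in the release CQE is matched against the child WQs of
* this CQ; an unmatched id is only reported with a warning message.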
13818 **/ 13819 static void 13820 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13821 struct lpfc_wcqe_release *wcqe) 13822 { 13823 struct lpfc_queue *childwq; 13824 bool wqid_matched = false; 13825 uint16_t hba_wqid; 13826 13827 /* Check for fast-path FCP work queue release */ 13828 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13829 list_for_each_entry(childwq, &cq->child_list, list) { 13830 if (childwq->queue_id == hba_wqid) { 13831 lpfc_sli4_wq_release(childwq, 13832 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13833 if (childwq->q_flag & HBA_NVMET_WQFULL) 13834 lpfc_nvmet_wqfull_process(phba, childwq); 13835 wqid_matched = true; 13836 break; 13837 } 13838 } 13839 /* Report warning log message if no match found */ 13840 if (wqid_matched != true) 13841 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13842 "2580 Fast-path wqe consume event carries " 13843 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13844 } 13845 13846 /** 13847 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13848 * @phba: Pointer to HBA context object. 13849 * @rcqe: Pointer to receive-queue completion queue entry. 13850 * 13851 * This routine process a receive-queue completion queue entry. 13852 * 13853 * Return: true if work posted to worker thread, otherwise false. 13854 **/ 13855 static bool 13856 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13857 struct lpfc_rcqe *rcqe) 13858 { 13859 bool workposted = false; 13860 struct lpfc_queue *hrq; 13861 struct lpfc_queue *drq; 13862 struct rqb_dmabuf *dma_buf; 13863 struct fc_frame_header *fc_hdr; 13864 struct lpfc_nvmet_tgtport *tgtp; 13865 uint32_t status, rq_id; 13866 unsigned long iflags; 13867 uint32_t fctl, idx; 13868 13869 if ((phba->nvmet_support == 0) || 13870 (phba->sli4_hba.nvmet_cqset == NULL)) 13871 return workposted; 13872 13873 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13874 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13875 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13876 13877 /* sanity check on queue memory */ 13878 if (unlikely(!hrq) || unlikely(!drq)) 13879 return workposted; 13880 13881 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13882 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13883 else 13884 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13885 13886 if ((phba->nvmet_support == 0) || 13887 (rq_id != hrq->queue_id)) 13888 return workposted; 13889 13890 status = bf_get(lpfc_rcqe_status, rcqe); 13891 switch (status) { 13892 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13893 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13894 "6126 Receive Frame Truncated!!\n"); 13895 /* fall through */ 13896 case FC_STATUS_RQ_SUCCESS: 13897 spin_lock_irqsave(&phba->hbalock, iflags); 13898 lpfc_sli4_rq_release(hrq, drq); 13899 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13900 if (!dma_buf) { 13901 hrq->RQ_no_buf_found++; 13902 spin_unlock_irqrestore(&phba->hbalock, iflags); 13903 goto out; 13904 } 13905 spin_unlock_irqrestore(&phba->hbalock, iflags); 13906 hrq->RQ_rcv_buf++; 13907 hrq->RQ_buf_posted--; 13908 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13909 13910 /* Just some basic sanity checks on FCP Command frame */ 13911 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13912 fc_hdr->fh_f_ctl[1] << 8 | 13913 fc_hdr->fh_f_ctl[2]); 13914 if (((fctl & 13915 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13916 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13917 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13918 goto drop; 13919 13920 if (fc_hdr->fh_type == 
FC_TYPE_FCP) { 13921 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13922 lpfc_nvmet_unsol_fcp_event( 13923 phba, idx, dma_buf, 13924 cq->isr_timestamp); 13925 return false; 13926 } 13927 drop: 13928 lpfc_rq_buf_free(phba, &dma_buf->hbuf); 13929 break; 13930 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13931 if (phba->nvmet_support) { 13932 tgtp = phba->targetport->private; 13933 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13934 "6401 RQE Error x%x, posted %d err_cnt " 13935 "%d: %x %x %x\n", 13936 status, hrq->RQ_buf_posted, 13937 hrq->RQ_no_posted_buf, 13938 atomic_read(&tgtp->rcv_fcp_cmd_in), 13939 atomic_read(&tgtp->rcv_fcp_cmd_out), 13940 atomic_read(&tgtp->xmt_fcp_release)); 13941 } 13942 /* fallthrough */ 13943 13944 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13945 hrq->RQ_no_posted_buf++; 13946 /* Post more buffers if possible */ 13947 break; 13948 } 13949 out: 13950 return workposted; 13951 } 13952 13953 /** 13954 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13955 * @phba: adapter with cq 13956 * @cq: Pointer to the completion queue. 13957 * @eqe: Pointer to fast-path completion queue entry. 13958 * 13959 * This routine process a fast-path work queue completion entry from fast-path 13960 * event queue for FCP command response completion. 13961 * 13962 * Return: true if work posted to worker thread, otherwise false. 13963 **/ 13964 static bool 13965 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13966 struct lpfc_cqe *cqe) 13967 { 13968 struct lpfc_wcqe_release wcqe; 13969 bool workposted = false; 13970 13971 /* Copy the work queue CQE and convert endian order if needed */ 13972 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13973 13974 /* Check and process for different type of WCQE and dispatch */ 13975 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13976 case CQE_CODE_COMPL_WQE: 13977 case CQE_CODE_NVME_ERSP: 13978 cq->CQ_wq++; 13979 /* Process the WQ complete event */ 13980 phba->last_completion_time = jiffies; 13981 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 13982 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13983 (struct lpfc_wcqe_complete *)&wcqe); 13984 if (cq->subtype == LPFC_NVME_LS) 13985 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13986 (struct lpfc_wcqe_complete *)&wcqe); 13987 break; 13988 case CQE_CODE_RELEASE_WQE: 13989 cq->CQ_release_wqe++; 13990 /* Process the WQ release event */ 13991 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 13992 (struct lpfc_wcqe_release *)&wcqe); 13993 break; 13994 case CQE_CODE_XRI_ABORTED: 13995 cq->CQ_xri_aborted++; 13996 /* Process the WQ XRI abort event */ 13997 phba->last_completion_time = jiffies; 13998 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13999 (struct sli4_wcqe_xri_aborted *)&wcqe); 14000 break; 14001 case CQE_CODE_RECEIVE_V1: 14002 case CQE_CODE_RECEIVE: 14003 phba->last_completion_time = jiffies; 14004 if (cq->subtype == LPFC_NVMET) { 14005 workposted = lpfc_sli4_nvmet_handle_rcqe( 14006 phba, cq, (struct lpfc_rcqe *)&wcqe); 14007 } 14008 break; 14009 default: 14010 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14011 "0144 Not a valid CQE code: x%x\n", 14012 bf_get(lpfc_wcqe_c_code, &wcqe)); 14013 break; 14014 } 14015 return workposted; 14016 } 14017 14018 /** 14019 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 14020 * @phba: Pointer to HBA context object. 14021 * @eqe: Pointer to fast-path event queue entry. 14022 * 14023 * This routine process a event queue entry from the fast-path event queue. 
14024 * It will check the MajorCode and MinorCode to determine this is for a 14025 * completion event on a completion queue, if not, an error shall be logged 14026 * and just return. Otherwise, it will get to the corresponding completion 14027 * queue and process all the entries on the completion queue, rearm the 14028 * completion queue, and then return. 14029 **/ 14030 static void 14031 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 14032 struct lpfc_eqe *eqe) 14033 { 14034 struct lpfc_queue *cq = NULL; 14035 uint32_t qidx = eq->hdwq; 14036 uint16_t cqid, id; 14037 14038 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14040 "0366 Not a valid completion " 14041 "event: majorcode=x%x, minorcode=x%x\n", 14042 bf_get_le32(lpfc_eqe_major_code, eqe), 14043 bf_get_le32(lpfc_eqe_minor_code, eqe)); 14044 return; 14045 } 14046 14047 /* Get the reference to the corresponding CQ */ 14048 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14049 14050 /* Use the fast lookup method first */ 14051 if (cqid <= phba->sli4_hba.cq_max) { 14052 cq = phba->sli4_hba.cq_lookup[cqid]; 14053 if (cq) 14054 goto work_cq; 14055 } 14056 14057 /* Next check for NVMET completion */ 14058 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 14059 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 14060 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 14061 /* Process NVMET unsol rcv */ 14062 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 14063 goto process_cq; 14064 } 14065 } 14066 14067 if (phba->sli4_hba.nvmels_cq && 14068 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 14069 /* Process NVME unsol rcv */ 14070 cq = phba->sli4_hba.nvmels_cq; 14071 } 14072 14073 /* Otherwise this is a Slow path event */ 14074 if (cq == NULL) { 14075 lpfc_sli4_sp_handle_eqe(phba, eqe, 14076 phba->sli4_hba.hdwq[qidx].hba_eq); 14077 return; 14078 } 14079 14080 process_cq: 14081 if (unlikely(cqid != cq->queue_id)) { 14082 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14083 "0368 Miss-matched fast-path completion " 14084 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14085 cqid, cq->queue_id); 14086 return; 14087 } 14088 14089 work_cq: 14090 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) 14091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14092 "0363 Cannot schedule soft IRQ " 14093 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14094 cqid, cq->queue_id, raw_smp_processor_id()); 14095 } 14096 14097 /** 14098 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 14099 * @cq: Pointer to CQ to be processed 14100 * 14101 * This routine calls the cq processing routine with the handler for 14102 * fast path CQEs. 14103 * 14104 * The CQ routine returns two values: the first is the calling status, 14105 * which indicates whether work was queued to the background discovery 14106 * thread. If true, the routine should wakeup the discovery thread; 14107 * the second is the delay parameter. If non-zero, rather than rearming 14108 * the CQ and yet another interrupt, the CQ handler should be queued so 14109 * that it is processed in a subsequent polling action. The value of 14110 * the delay indicates when to reschedule it. 
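* Rescheduling is done on the CQ's assigned CPU (cq->chann) through the
* driver workqueue (phba->wq).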
14111 **/
14112 static void
14113 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14114 {
14115 struct lpfc_hba *phba = cq->phba;
14116 unsigned long delay;
14117 bool workposted = false;
14118 
14119 /* process and rearm the CQ */
14120 workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14121 &delay);
14122 
14123 if (delay) {
14124 if (!queue_delayed_work_on(cq->chann, phba->wq,
14125 &cq->sched_irqwork, delay))
14126 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14127 "0367 Cannot schedule soft IRQ "
14128 "for cqid=%d on CPU %d\n",
14129 cq->queue_id, cq->chann);
14130 }
14131 
14132 /* wake up worker thread if there are works to be done */
14133 if (workposted)
14134 lpfc_worker_wake_up(phba);
14135 }
14136 
14137 /**
14138 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14139 * interrupt
14140 * @work: pointer to work element
14141 *
14142 * translates from the work handler and calls the fast-path handler.
14143 **/
14144 static void
14145 lpfc_sli4_hba_process_cq(struct work_struct *work)
14146 {
14147 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14148 
14149 __lpfc_sli4_hba_process_cq(cq);
14150 }
14151 
14152 /**
14153 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14154 * @work: pointer to work element
14155 *
14156 * translates from the work handler and calls the fast-path handler.
14157 **/
14158 static void
14159 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14160 {
14161 struct lpfc_queue *cq = container_of(to_delayed_work(work),
14162 struct lpfc_queue, sched_irqwork);
14163 
14164 __lpfc_sli4_hba_process_cq(cq);
14165 }
14166 
14167 /**
14168 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14169 * @irq: Interrupt number.
14170 * @dev_id: The device context pointer.
14171 *
14172 * This function is directly called from the PCI layer as an interrupt
14173 * service routine when device with SLI-4 interface spec is enabled with
14174 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14175 * ring event in the HBA. However, when the device is enabled with either
14176 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14177 * device-level interrupt handler. When the PCI slot is in error recovery
14178 * or the HBA is undergoing initialization, the interrupt handler will not
14179 * process the interrupt. The SCSI FCP fast-path ring events are handled in
14180 * the interrupt context. This function is called without any lock held.
14181 * It gets the hbalock to access and update SLI data structures. Note that
14182 * the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
14183 * is equal to that of the FCP CQ index.
14184 *
14185 * The link attention and ELS ring attention events are handled
14186 * by the worker thread. The interrupt handler signals the worker thread
14187 * and returns for these events. This function is called without any lock
14188 * held. It gets the hbalock to access and update SLI data structures.
14189 *
14190 * This function returns IRQ_HANDLED when the interrupt is handled, else it
14191 * returns IRQ_NONE.
14192 **/ 14193 irqreturn_t 14194 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14195 { 14196 struct lpfc_hba *phba; 14197 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14198 struct lpfc_queue *fpeq; 14199 unsigned long iflag; 14200 int ecount = 0; 14201 int hba_eqidx; 14202 struct lpfc_eq_intr_info *eqi; 14203 uint32_t icnt; 14204 14205 /* Get the driver's phba structure from the dev_id */ 14206 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14207 phba = hba_eq_hdl->phba; 14208 hba_eqidx = hba_eq_hdl->idx; 14209 14210 if (unlikely(!phba)) 14211 return IRQ_NONE; 14212 if (unlikely(!phba->sli4_hba.hdwq)) 14213 return IRQ_NONE; 14214 14215 /* Get to the EQ struct associated with this vector */ 14216 fpeq = phba->sli4_hba.hdwq[hba_eqidx].hba_eq; 14217 if (unlikely(!fpeq)) 14218 return IRQ_NONE; 14219 14220 /* Check device state for handling interrupt */ 14221 if (unlikely(lpfc_intr_state_check(phba))) { 14222 /* Check again for link_state with lock held */ 14223 spin_lock_irqsave(&phba->hbalock, iflag); 14224 if (phba->link_state < LPFC_LINK_DOWN) 14225 /* Flush, clear interrupt, and rearm the EQ */ 14226 lpfc_sli4_eq_flush(phba, fpeq); 14227 spin_unlock_irqrestore(&phba->hbalock, iflag); 14228 return IRQ_NONE; 14229 } 14230 14231 eqi = phba->sli4_hba.eq_info; 14232 icnt = this_cpu_inc_return(eqi->icnt); 14233 fpeq->last_cpu = raw_smp_processor_id(); 14234 14235 if (icnt > LPFC_EQD_ISR_TRIGGER && 14236 phba->cfg_irq_chann == 1 && 14237 phba->cfg_auto_imax && 14238 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && 14239 phba->sli.sli_flag & LPFC_SLI_USE_EQDR) 14240 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); 14241 14242 /* process and rearm the EQ */ 14243 ecount = lpfc_sli4_process_eq(phba, fpeq); 14244 14245 if (unlikely(ecount == 0)) { 14246 fpeq->EQ_no_entry++; 14247 if (phba->intr_type == MSIX) 14248 /* MSI-X treated interrupt served as no EQ share INT */ 14249 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14250 "0358 MSI-X interrupt with no EQE\n"); 14251 else 14252 /* Non MSI-X treated on interrupt as EQ share INT */ 14253 return IRQ_NONE; 14254 } 14255 14256 return IRQ_HANDLED; 14257 } /* lpfc_sli4_fp_intr_handler */ 14258 14259 /** 14260 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14261 * @irq: Interrupt number. 14262 * @dev_id: The device context pointer. 14263 * 14264 * This function is the device-level interrupt handler to device with SLI-4 14265 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14266 * interrupt mode is enabled and there is an event in the HBA which requires 14267 * driver attention. This function invokes the slow-path interrupt attention 14268 * handling function and fast-path interrupt attention handling function in 14269 * turn to process the relevant HBA attention events. This function is called 14270 * without any lock held. It gets the hbalock to access and update SLI data 14271 * structures. 14272 * 14273 * This function returns IRQ_HANDLED when interrupt is handled, else it 14274 * returns IRQ_NONE. 14275 **/ 14276 irqreturn_t 14277 lpfc_sli4_intr_handler(int irq, void *dev_id) 14278 { 14279 struct lpfc_hba *phba; 14280 irqreturn_t hba_irq_rc; 14281 bool hba_handled = false; 14282 int qidx; 14283 14284 /* Get the driver's phba structure from the dev_id */ 14285 phba = (struct lpfc_hba *)dev_id; 14286 14287 if (unlikely(!phba)) 14288 return IRQ_NONE; 14289 14290 /* 14291 * Invoke fast-path host attention interrupt handling as appropriate. 
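* Each EQ handle is passed to lpfc_sli4_hba_intr_handler() in turn; the
* interrupt is reported as handled if any of the vectors found work.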
14292 */ 14293 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 14294 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 14295 &phba->sli4_hba.hba_eq_hdl[qidx]); 14296 if (hba_irq_rc == IRQ_HANDLED) 14297 hba_handled |= true; 14298 } 14299 14300 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 14301 } /* lpfc_sli4_intr_handler */ 14302 14303 /** 14304 * lpfc_sli4_queue_free - free a queue structure and associated memory 14305 * @queue: The queue structure to free. 14306 * 14307 * This function frees a queue structure and the DMAable memory used for 14308 * the host resident queue. This function must be called after destroying the 14309 * queue on the HBA. 14310 **/ 14311 void 14312 lpfc_sli4_queue_free(struct lpfc_queue *queue) 14313 { 14314 struct lpfc_dmabuf *dmabuf; 14315 14316 if (!queue) 14317 return; 14318 14319 if (!list_empty(&queue->wq_list)) 14320 list_del(&queue->wq_list); 14321 14322 while (!list_empty(&queue->page_list)) { 14323 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 14324 list); 14325 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 14326 dmabuf->virt, dmabuf->phys); 14327 kfree(dmabuf); 14328 } 14329 if (queue->rqbp) { 14330 lpfc_free_rq_buffer(queue->phba, queue); 14331 kfree(queue->rqbp); 14332 } 14333 14334 if (!list_empty(&queue->cpu_list)) 14335 list_del(&queue->cpu_list); 14336 14337 kfree(queue); 14338 return; 14339 } 14340 14341 /** 14342 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 14343 * @phba: The HBA that this queue is being created on. 14344 * @page_size: The size of a queue page 14345 * @entry_size: The size of each queue entry for this queue. 14346 * @entry count: The number of entries that this queue will handle. 14347 * @cpu: The cpu that will primarily utilize this queue. 14348 * 14349 * This function allocates a queue structure and the DMAable memory used for 14350 * the host resident queue. This function must be called before creating the 14351 * queue on the HBA. 14352 **/ 14353 struct lpfc_queue * 14354 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 14355 uint32_t entry_size, uint32_t entry_count, int cpu) 14356 { 14357 struct lpfc_queue *queue; 14358 struct lpfc_dmabuf *dmabuf; 14359 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14360 uint16_t x, pgcnt; 14361 14362 if (!phba->sli4_hba.pc_sli4_params.supported) 14363 hw_page_size = page_size; 14364 14365 pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size; 14366 14367 /* If needed, Adjust page count to match the max the adapter supports */ 14368 if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt) 14369 pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt; 14370 14371 queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt), 14372 GFP_KERNEL, cpu_to_node(cpu)); 14373 if (!queue) 14374 return NULL; 14375 14376 INIT_LIST_HEAD(&queue->list); 14377 INIT_LIST_HEAD(&queue->wq_list); 14378 INIT_LIST_HEAD(&queue->wqfull_list); 14379 INIT_LIST_HEAD(&queue->page_list); 14380 INIT_LIST_HEAD(&queue->child_list); 14381 INIT_LIST_HEAD(&queue->cpu_list); 14382 14383 /* Set queue parameters now. If the system cannot provide memory 14384 * resources, the free routine needs to know what was allocated. 
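* The q_pgs array of per-page pointers is carved out of the same allocation,
* immediately following the queue structure itself.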
14385 */ 14386 queue->page_count = pgcnt; 14387 queue->q_pgs = (void **)&queue[1]; 14388 queue->entry_cnt_per_pg = hw_page_size / entry_size; 14389 queue->entry_size = entry_size; 14390 queue->entry_count = entry_count; 14391 queue->page_size = hw_page_size; 14392 queue->phba = phba; 14393 14394 for (x = 0; x < queue->page_count; x++) { 14395 dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL, 14396 dev_to_node(&phba->pcidev->dev)); 14397 if (!dmabuf) 14398 goto out_fail; 14399 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 14400 hw_page_size, &dmabuf->phys, 14401 GFP_KERNEL); 14402 if (!dmabuf->virt) { 14403 kfree(dmabuf); 14404 goto out_fail; 14405 } 14406 dmabuf->buffer_tag = x; 14407 list_add_tail(&dmabuf->list, &queue->page_list); 14408 /* use lpfc_sli4_qe to index a paritcular entry in this page */ 14409 queue->q_pgs[x] = dmabuf->virt; 14410 } 14411 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); 14412 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); 14413 INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq); 14414 INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq); 14415 14416 /* notify_interval will be set during q creation */ 14417 14418 return queue; 14419 out_fail: 14420 lpfc_sli4_queue_free(queue); 14421 return NULL; 14422 } 14423 14424 /** 14425 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 14426 * @phba: HBA structure that indicates port to create a queue on. 14427 * @pci_barset: PCI BAR set flag. 14428 * 14429 * This function shall perform iomap of the specified PCI BAR address to host 14430 * memory address if not already done so and return it. The returned host 14431 * memory address can be NULL. 14432 */ 14433 static void __iomem * 14434 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 14435 { 14436 if (!phba->pcidev) 14437 return NULL; 14438 14439 switch (pci_barset) { 14440 case WQ_PCI_BAR_0_AND_1: 14441 return phba->pci_bar0_memmap_p; 14442 case WQ_PCI_BAR_2_AND_3: 14443 return phba->pci_bar2_memmap_p; 14444 case WQ_PCI_BAR_4_AND_5: 14445 return phba->pci_bar4_memmap_p; 14446 default: 14447 break; 14448 } 14449 return NULL; 14450 } 14451 14452 /** 14453 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs 14454 * @phba: HBA structure that EQs are on. 14455 * @startq: The starting EQ index to modify 14456 * @numq: The number of EQs (consecutive indexes) to modify 14457 * @usdelay: amount of delay 14458 * 14459 * This function revises the EQ delay on 1 or more EQs. The EQ delay 14460 * is set either by writing to a register (if supported by the SLI Port) 14461 * or by mailbox command. The mailbox command allows several EQs to be 14462 * updated at once. 14463 * 14464 * The @phba struct is used to send a mailbox command to HBA. The @startq 14465 * is used to get the starting EQ index to change. The @numq value is 14466 * used to specify how many consecutive EQ indexes, starting at EQ index, 14467 * are to be changed. This function is asynchronous and will wait for any 14468 * mailbox commands to finish before returning. 14469 * 14470 * On success this function will return a zero. If unable to allocate 14471 * enough memory this function will return -ENOMEM. If a mailbox command 14472 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may 14473 * have had their delay multipler changed. 
14474 **/ 14475 void 14476 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14477 uint32_t numq, uint32_t usdelay) 14478 { 14479 struct lpfc_mbx_modify_eq_delay *eq_delay; 14480 LPFC_MBOXQ_t *mbox; 14481 struct lpfc_queue *eq; 14482 int cnt = 0, rc, length; 14483 uint32_t shdr_status, shdr_add_status; 14484 uint32_t dmult; 14485 int qidx; 14486 union lpfc_sli4_cfg_shdr *shdr; 14487 14488 if (startq >= phba->cfg_irq_chann) 14489 return; 14490 14491 if (usdelay > 0xFFFF) { 14492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, 14493 "6429 usdelay %d too large. Scaled down to " 14494 "0xFFFF.\n", usdelay); 14495 usdelay = 0xFFFF; 14496 } 14497 14498 /* set values by EQ_DELAY register if supported */ 14499 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14500 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14501 eq = phba->sli4_hba.hdwq[qidx].hba_eq; 14502 if (!eq) 14503 continue; 14504 14505 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); 14506 14507 if (++cnt >= numq) 14508 break; 14509 } 14510 14511 return; 14512 } 14513 14514 /* Otherwise, set values by mailbox cmd */ 14515 14516 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14517 if (!mbox) { 14518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, 14519 "6428 Failed allocating mailbox cmd buffer." 14520 " EQ delay was not set.\n"); 14521 return; 14522 } 14523 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14524 sizeof(struct lpfc_sli4_cfg_mhdr)); 14525 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14526 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14527 length, LPFC_SLI4_MBX_EMBED); 14528 eq_delay = &mbox->u.mqe.un.eq_delay; 14529 14530 /* Calculate delay multiper from maximum interrupt per second */ 14531 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; 14532 if (dmult) 14533 dmult--; 14534 if (dmult > LPFC_DMULT_MAX) 14535 dmult = LPFC_DMULT_MAX; 14536 14537 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14538 eq = phba->sli4_hba.hdwq[qidx].hba_eq; 14539 if (!eq) 14540 continue; 14541 eq->q_mode = usdelay; 14542 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14543 eq_delay->u.request.eq[cnt].phase = 0; 14544 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14545 14546 if (++cnt >= numq) 14547 break; 14548 } 14549 eq_delay->u.request.num_eq = cnt; 14550 14551 mbox->vport = phba->pport; 14552 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14553 mbox->ctx_buf = NULL; 14554 mbox->ctx_ndlp = NULL; 14555 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14556 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14557 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14558 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14559 if (shdr_status || shdr_add_status || rc) { 14560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14561 "2512 MODIFY_EQ_DELAY mailbox failed with " 14562 "status x%x add_status x%x, mbx status x%x\n", 14563 shdr_status, shdr_add_status, rc); 14564 } 14565 mempool_free(mbox, phba->mbox_mem_pool); 14566 return; 14567 } 14568 14569 /** 14570 * lpfc_eq_create - Create an Event Queue on the HBA 14571 * @phba: HBA structure that indicates port to create a queue on. 14572 * @eq: The queue structure to use to create the event queue. 14573 * @imax: The maximum interrupt per second limit. 14574 * 14575 * This function creates an event queue, as detailed in @eq, on a port, 14576 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 
14577 * 14578 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14579 * is used to get the entry count and entry size that are necessary to 14580 * determine the number of pages to allocate and use for this queue. This 14581 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14582 * event queue. This function is asynchronous and will wait for the mailbox 14583 * command to finish before continuing. 14584 * 14585 * On success this function will return a zero. If unable to allocate enough 14586 * memory this function will return -ENOMEM. If the queue create mailbox command 14587 * fails this function will return -ENXIO. 14588 **/ 14589 int 14590 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14591 { 14592 struct lpfc_mbx_eq_create *eq_create; 14593 LPFC_MBOXQ_t *mbox; 14594 int rc, length, status = 0; 14595 struct lpfc_dmabuf *dmabuf; 14596 uint32_t shdr_status, shdr_add_status; 14597 union lpfc_sli4_cfg_shdr *shdr; 14598 uint16_t dmult; 14599 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14600 14601 /* sanity check on queue memory */ 14602 if (!eq) 14603 return -ENODEV; 14604 if (!phba->sli4_hba.pc_sli4_params.supported) 14605 hw_page_size = SLI4_PAGE_SIZE; 14606 14607 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14608 if (!mbox) 14609 return -ENOMEM; 14610 length = (sizeof(struct lpfc_mbx_eq_create) - 14611 sizeof(struct lpfc_sli4_cfg_mhdr)); 14612 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14613 LPFC_MBOX_OPCODE_EQ_CREATE, 14614 length, LPFC_SLI4_MBX_EMBED); 14615 eq_create = &mbox->u.mqe.un.eq_create; 14616 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14617 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14618 eq->page_count); 14619 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14620 LPFC_EQE_SIZE); 14621 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14622 14623 /* Use version 2 of CREATE_EQ if eqav is set */ 14624 if (phba->sli4_hba.pc_sli4_params.eqav) { 14625 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14626 LPFC_Q_CREATE_VERSION_2); 14627 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14628 phba->sli4_hba.pc_sli4_params.eqav); 14629 } 14630 14631 /* don't setup delay multiplier using EQ_CREATE */ 14632 dmult = 0; 14633 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14634 dmult); 14635 switch (eq->entry_count) { 14636 default: 14637 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14638 "0360 Unsupported EQ count. 
(%d)\n", 14639 eq->entry_count); 14640 if (eq->entry_count < 256) 14641 return -EINVAL; 14642 /* fall through - otherwise default to smallest count */ 14643 case 256: 14644 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14645 LPFC_EQ_CNT_256); 14646 break; 14647 case 512: 14648 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14649 LPFC_EQ_CNT_512); 14650 break; 14651 case 1024: 14652 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14653 LPFC_EQ_CNT_1024); 14654 break; 14655 case 2048: 14656 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14657 LPFC_EQ_CNT_2048); 14658 break; 14659 case 4096: 14660 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14661 LPFC_EQ_CNT_4096); 14662 break; 14663 } 14664 list_for_each_entry(dmabuf, &eq->page_list, list) { 14665 memset(dmabuf->virt, 0, hw_page_size); 14666 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14667 putPaddrLow(dmabuf->phys); 14668 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14669 putPaddrHigh(dmabuf->phys); 14670 } 14671 mbox->vport = phba->pport; 14672 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14673 mbox->ctx_buf = NULL; 14674 mbox->ctx_ndlp = NULL; 14675 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14676 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14677 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14678 if (shdr_status || shdr_add_status || rc) { 14679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14680 "2500 EQ_CREATE mailbox failed with " 14681 "status x%x add_status x%x, mbx status x%x\n", 14682 shdr_status, shdr_add_status, rc); 14683 status = -ENXIO; 14684 } 14685 eq->type = LPFC_EQ; 14686 eq->subtype = LPFC_NONE; 14687 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14688 if (eq->queue_id == 0xFFFF) 14689 status = -ENXIO; 14690 eq->host_index = 0; 14691 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 14692 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 14693 14694 mempool_free(mbox, phba->mbox_mem_pool); 14695 return status; 14696 } 14697 14698 /** 14699 * lpfc_cq_create - Create a Completion Queue on the HBA 14700 * @phba: HBA structure that indicates port to create a queue on. 14701 * @cq: The queue structure to use to create the completion queue. 14702 * @eq: The event queue to bind this completion queue to. 14703 * 14704 * This function creates a completion queue, as detailed in @wq, on a port, 14705 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14706 * 14707 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14708 * is used to get the entry count and entry size that are necessary to 14709 * determine the number of pages to allocate and use for this queue. The @eq 14710 * is used to indicate which event queue to bind this completion queue to. This 14711 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14712 * completion queue. This function is asynchronous and will wait for the mailbox 14713 * command to finish before continuing. 14714 * 14715 * On success this function will return a zero. If unable to allocate enough 14716 * memory this function will return -ENOMEM. If the queue create mailbox command 14717 * fails this function will return -ENXIO. 
14718 **/ 14719 int 14720 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14721 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14722 { 14723 struct lpfc_mbx_cq_create *cq_create; 14724 struct lpfc_dmabuf *dmabuf; 14725 LPFC_MBOXQ_t *mbox; 14726 int rc, length, status = 0; 14727 uint32_t shdr_status, shdr_add_status; 14728 union lpfc_sli4_cfg_shdr *shdr; 14729 14730 /* sanity check on queue memory */ 14731 if (!cq || !eq) 14732 return -ENODEV; 14733 14734 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14735 if (!mbox) 14736 return -ENOMEM; 14737 length = (sizeof(struct lpfc_mbx_cq_create) - 14738 sizeof(struct lpfc_sli4_cfg_mhdr)); 14739 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14740 LPFC_MBOX_OPCODE_CQ_CREATE, 14741 length, LPFC_SLI4_MBX_EMBED); 14742 cq_create = &mbox->u.mqe.un.cq_create; 14743 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14744 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14745 cq->page_count); 14746 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14747 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14748 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14749 phba->sli4_hba.pc_sli4_params.cqv); 14750 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14751 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14752 (cq->page_size / SLI4_PAGE_SIZE)); 14753 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14754 eq->queue_id); 14755 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14756 phba->sli4_hba.pc_sli4_params.cqav); 14757 } else { 14758 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14759 eq->queue_id); 14760 } 14761 switch (cq->entry_count) { 14762 case 2048: 14763 case 4096: 14764 if (phba->sli4_hba.pc_sli4_params.cqv == 14765 LPFC_Q_CREATE_VERSION_2) { 14766 cq_create->u.request.context.lpfc_cq_context_count = 14767 cq->entry_count; 14768 bf_set(lpfc_cq_context_count, 14769 &cq_create->u.request.context, 14770 LPFC_CQ_CNT_WORD7); 14771 break; 14772 } 14773 /* fall through */ 14774 default: 14775 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14776 "0361 Unsupported CQ count: " 14777 "entry cnt %d sz %d pg cnt %d\n", 14778 cq->entry_count, cq->entry_size, 14779 cq->page_count); 14780 if (cq->entry_count < 256) { 14781 status = -EINVAL; 14782 goto out; 14783 } 14784 /* fall through - otherwise default to smallest count */ 14785 case 256: 14786 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14787 LPFC_CQ_CNT_256); 14788 break; 14789 case 512: 14790 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14791 LPFC_CQ_CNT_512); 14792 break; 14793 case 1024: 14794 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14795 LPFC_CQ_CNT_1024); 14796 break; 14797 } 14798 list_for_each_entry(dmabuf, &cq->page_list, list) { 14799 memset(dmabuf->virt, 0, cq->page_size); 14800 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14801 putPaddrLow(dmabuf->phys); 14802 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14803 putPaddrHigh(dmabuf->phys); 14804 } 14805 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14806 14807 /* The IOCTL status is embedded in the mailbox subheader. 
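* Both the status and the additional status must be zero for the CQ_CREATE
* request to have been accepted by the port.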
*/
14808 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14809 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14810 if (shdr_status || shdr_add_status || rc) {
14811 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14812 "2501 CQ_CREATE mailbox failed with "
14813 "status x%x add_status x%x, mbx status x%x\n",
14814 shdr_status, shdr_add_status, rc);
14815 status = -ENXIO;
14816 goto out;
14817 }
14818 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14819 if (cq->queue_id == 0xFFFF) {
14820 status = -ENXIO;
14821 goto out;
14822 }
14823 /* link the cq onto the parent eq child list */
14824 list_add_tail(&cq->list, &eq->child_list);
14825 /* Set up completion queue's type and subtype */
14826 cq->type = type;
14827 cq->subtype = subtype;
14828 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14829 cq->assoc_qid = eq->queue_id;
14830 cq->assoc_qp = eq;
14831 cq->host_index = 0;
14832 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14833 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14834 
14835 if (cq->queue_id > phba->sli4_hba.cq_max)
14836 phba->sli4_hba.cq_max = cq->queue_id;
14837 out:
14838 mempool_free(mbox, phba->mbox_mem_pool);
14839 return status;
14840 }
14841 
14842 /**
14843 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14844 * @phba: HBA structure that indicates port to create a queue on.
14845 * @cqp: The queue structure array to use to create the completion queues.
14846 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
* @type: The completion queue type (e.g. LPFC_WCQ).
* @subtype: The completion queue subtype (e.g. LPFC_NVMET).
14847 *
14848 * This function creates a set of completion queues to support MRQ,
14849 * as detailed in @cqp, on a port
14850 * described by @phba, by sending a CREATE_CQ_SET mailbox command to the HBA.
14851 *
14852 * The @phba struct is used to send mailbox command to HBA. Each @cqp entry
14853 * is used to get the entry count and entry size that are necessary to
14854 * determine the number of pages to allocate and use for that queue. The EQ
14855 * in each @hdwq entry indicates which event queue to bind the corresponding
14856 * completion queue to. This function will send the CREATE_CQ_SET mailbox
14857 * command to the HBA to setup the completion queues. This function is
14858 * synchronous and will wait for the mailbox command to finish before continuing.
14859 *
14860 * On success this function will return a zero. If unable to allocate enough
14861 * memory this function will return -ENOMEM. If the queue create mailbox command
14862 * fails this function will return -ENXIO.
14863 **/ 14864 int 14865 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14866 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 14867 uint32_t subtype) 14868 { 14869 struct lpfc_queue *cq; 14870 struct lpfc_queue *eq; 14871 struct lpfc_mbx_cq_create_set *cq_set; 14872 struct lpfc_dmabuf *dmabuf; 14873 LPFC_MBOXQ_t *mbox; 14874 int rc, length, alloclen, status = 0; 14875 int cnt, idx, numcq, page_idx = 0; 14876 uint32_t shdr_status, shdr_add_status; 14877 union lpfc_sli4_cfg_shdr *shdr; 14878 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14879 14880 /* sanity check on queue memory */ 14881 numcq = phba->cfg_nvmet_mrq; 14882 if (!cqp || !hdwq || !numcq) 14883 return -ENODEV; 14884 14885 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14886 if (!mbox) 14887 return -ENOMEM; 14888 14889 length = sizeof(struct lpfc_mbx_cq_create_set); 14890 length += ((numcq * cqp[0]->page_count) * 14891 sizeof(struct dma_address)); 14892 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14893 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14894 LPFC_SLI4_MBX_NEMBED); 14895 if (alloclen < length) { 14896 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14897 "3098 Allocated DMA memory size (%d) is " 14898 "less than the requested DMA memory size " 14899 "(%d)\n", alloclen, length); 14900 status = -ENOMEM; 14901 goto out; 14902 } 14903 cq_set = mbox->sge_array->addr[0]; 14904 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14905 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14906 14907 for (idx = 0; idx < numcq; idx++) { 14908 cq = cqp[idx]; 14909 eq = hdwq[idx].hba_eq; 14910 if (!cq || !eq) { 14911 status = -ENOMEM; 14912 goto out; 14913 } 14914 if (!phba->sli4_hba.pc_sli4_params.supported) 14915 hw_page_size = cq->page_size; 14916 14917 switch (idx) { 14918 case 0: 14919 bf_set(lpfc_mbx_cq_create_set_page_size, 14920 &cq_set->u.request, 14921 (hw_page_size / SLI4_PAGE_SIZE)); 14922 bf_set(lpfc_mbx_cq_create_set_num_pages, 14923 &cq_set->u.request, cq->page_count); 14924 bf_set(lpfc_mbx_cq_create_set_evt, 14925 &cq_set->u.request, 1); 14926 bf_set(lpfc_mbx_cq_create_set_valid, 14927 &cq_set->u.request, 1); 14928 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14929 &cq_set->u.request, 0); 14930 bf_set(lpfc_mbx_cq_create_set_num_cq, 14931 &cq_set->u.request, numcq); 14932 bf_set(lpfc_mbx_cq_create_set_autovalid, 14933 &cq_set->u.request, 14934 phba->sli4_hba.pc_sli4_params.cqav); 14935 switch (cq->entry_count) { 14936 case 2048: 14937 case 4096: 14938 if (phba->sli4_hba.pc_sli4_params.cqv == 14939 LPFC_Q_CREATE_VERSION_2) { 14940 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14941 &cq_set->u.request, 14942 cq->entry_count); 14943 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14944 &cq_set->u.request, 14945 LPFC_CQ_CNT_WORD7); 14946 break; 14947 } 14948 /* fall through */ 14949 default: 14950 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14951 "3118 Bad CQ count. 
(%d)\n", 14952 cq->entry_count); 14953 if (cq->entry_count < 256) { 14954 status = -EINVAL; 14955 goto out; 14956 } 14957 /* fall through - otherwise default to smallest */ 14958 case 256: 14959 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14960 &cq_set->u.request, LPFC_CQ_CNT_256); 14961 break; 14962 case 512: 14963 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14964 &cq_set->u.request, LPFC_CQ_CNT_512); 14965 break; 14966 case 1024: 14967 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14968 &cq_set->u.request, LPFC_CQ_CNT_1024); 14969 break; 14970 } 14971 bf_set(lpfc_mbx_cq_create_set_eq_id0, 14972 &cq_set->u.request, eq->queue_id); 14973 break; 14974 case 1: 14975 bf_set(lpfc_mbx_cq_create_set_eq_id1, 14976 &cq_set->u.request, eq->queue_id); 14977 break; 14978 case 2: 14979 bf_set(lpfc_mbx_cq_create_set_eq_id2, 14980 &cq_set->u.request, eq->queue_id); 14981 break; 14982 case 3: 14983 bf_set(lpfc_mbx_cq_create_set_eq_id3, 14984 &cq_set->u.request, eq->queue_id); 14985 break; 14986 case 4: 14987 bf_set(lpfc_mbx_cq_create_set_eq_id4, 14988 &cq_set->u.request, eq->queue_id); 14989 break; 14990 case 5: 14991 bf_set(lpfc_mbx_cq_create_set_eq_id5, 14992 &cq_set->u.request, eq->queue_id); 14993 break; 14994 case 6: 14995 bf_set(lpfc_mbx_cq_create_set_eq_id6, 14996 &cq_set->u.request, eq->queue_id); 14997 break; 14998 case 7: 14999 bf_set(lpfc_mbx_cq_create_set_eq_id7, 15000 &cq_set->u.request, eq->queue_id); 15001 break; 15002 case 8: 15003 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15004 &cq_set->u.request, eq->queue_id); 15005 break; 15006 case 9: 15007 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15008 &cq_set->u.request, eq->queue_id); 15009 break; 15010 case 10: 15011 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15012 &cq_set->u.request, eq->queue_id); 15013 break; 15014 case 11: 15015 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15016 &cq_set->u.request, eq->queue_id); 15017 break; 15018 case 12: 15019 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15020 &cq_set->u.request, eq->queue_id); 15021 break; 15022 case 13: 15023 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15024 &cq_set->u.request, eq->queue_id); 15025 break; 15026 case 14: 15027 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15028 &cq_set->u.request, eq->queue_id); 15029 break; 15030 case 15: 15031 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15032 &cq_set->u.request, eq->queue_id); 15033 break; 15034 } 15035 15036 /* link the cq onto the parent eq child list */ 15037 list_add_tail(&cq->list, &eq->child_list); 15038 /* Set up completion queue's type and subtype */ 15039 cq->type = type; 15040 cq->subtype = subtype; 15041 cq->assoc_qid = eq->queue_id; 15042 cq->assoc_qp = eq; 15043 cq->host_index = 0; 15044 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 15045 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 15046 cq->entry_count); 15047 cq->chann = idx; 15048 15049 rc = 0; 15050 list_for_each_entry(dmabuf, &cq->page_list, list) { 15051 memset(dmabuf->virt, 0, hw_page_size); 15052 cnt = page_idx + dmabuf->buffer_tag; 15053 cq_set->u.request.page[cnt].addr_lo = 15054 putPaddrLow(dmabuf->phys); 15055 cq_set->u.request.page[cnt].addr_hi = 15056 putPaddrHigh(dmabuf->phys); 15057 rc++; 15058 } 15059 page_idx += rc; 15060 } 15061 15062 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15063 15064 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15065 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15066 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15067 if (shdr_status || shdr_add_status || rc) { 15068 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15069 "3119 CQ_CREATE_SET mailbox failed with " 15070 "status x%x add_status x%x, mbx status x%x\n", 15071 shdr_status, shdr_add_status, rc); 15072 status = -ENXIO; 15073 goto out; 15074 } 15075 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 15076 if (rc == 0xFFFF) { 15077 status = -ENXIO; 15078 goto out; 15079 } 15080 15081 for (idx = 0; idx < numcq; idx++) { 15082 cq = cqp[idx]; 15083 cq->queue_id = rc + idx; 15084 if (cq->queue_id > phba->sli4_hba.cq_max) 15085 phba->sli4_hba.cq_max = cq->queue_id; 15086 } 15087 15088 out: 15089 lpfc_sli4_mbox_cmd_free(phba, mbox); 15090 return status; 15091 } 15092 15093 /** 15094 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 15095 * @phba: HBA structure that indicates port to create a queue on. 15096 * @mq: The queue structure to use to create the mailbox queue. 15097 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 15098 * @cq: The completion queue to associate with this cq. 15099 * 15100 * This function provides failback (fb) functionality when the 15101 * mq_create_ext fails on older FW generations. It's purpose is identical 15102 * to mq_create_ext otherwise. 15103 * 15104 * This routine cannot fail as all attributes were previously accessed and 15105 * initialized in mq_create_ext. 15106 **/ 15107 static void 15108 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 15109 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 15110 { 15111 struct lpfc_mbx_mq_create *mq_create; 15112 struct lpfc_dmabuf *dmabuf; 15113 int length; 15114 15115 length = (sizeof(struct lpfc_mbx_mq_create) - 15116 sizeof(struct lpfc_sli4_cfg_mhdr)); 15117 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15118 LPFC_MBOX_OPCODE_MQ_CREATE, 15119 length, LPFC_SLI4_MBX_EMBED); 15120 mq_create = &mbox->u.mqe.un.mq_create; 15121 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 15122 mq->page_count); 15123 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 15124 cq->queue_id); 15125 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 15126 switch (mq->entry_count) { 15127 case 16: 15128 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15129 LPFC_MQ_RING_SIZE_16); 15130 break; 15131 case 32: 15132 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15133 LPFC_MQ_RING_SIZE_32); 15134 break; 15135 case 64: 15136 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15137 LPFC_MQ_RING_SIZE_64); 15138 break; 15139 case 128: 15140 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15141 LPFC_MQ_RING_SIZE_128); 15142 break; 15143 } 15144 list_for_each_entry(dmabuf, &mq->page_list, list) { 15145 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15146 putPaddrLow(dmabuf->phys); 15147 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15148 putPaddrHigh(dmabuf->phys); 15149 } 15150 } 15151 15152 /** 15153 * lpfc_mq_create - Create a mailbox Queue on the HBA 15154 * @phba: HBA structure that indicates port to create a queue on. 15155 * @mq: The queue structure to use to create the mailbox queue. 15156 * @cq: The completion queue to associate with this cq. 15157 * @subtype: The queue's subtype. 
15158 * 15159 * This function creates a mailbox queue, as detailed in @mq, on a port, 15160 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 15161 * 15162 * The @phba struct is used to send mailbox command to HBA. The @cq struct 15163 * is used to get the entry count and entry size that are necessary to 15164 * determine the number of pages to allocate and use for this queue. This 15165 * function will send the MQ_CREATE mailbox command to the HBA to setup the 15166 * mailbox queue. This function is asynchronous and will wait for the mailbox 15167 * command to finish before continuing. 15168 * 15169 * On success this function will return a zero. If unable to allocate enough 15170 * memory this function will return -ENOMEM. If the queue create mailbox command 15171 * fails this function will return -ENXIO. 15172 **/ 15173 int32_t 15174 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 15175 struct lpfc_queue *cq, uint32_t subtype) 15176 { 15177 struct lpfc_mbx_mq_create *mq_create; 15178 struct lpfc_mbx_mq_create_ext *mq_create_ext; 15179 struct lpfc_dmabuf *dmabuf; 15180 LPFC_MBOXQ_t *mbox; 15181 int rc, length, status = 0; 15182 uint32_t shdr_status, shdr_add_status; 15183 union lpfc_sli4_cfg_shdr *shdr; 15184 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15185 15186 /* sanity check on queue memory */ 15187 if (!mq || !cq) 15188 return -ENODEV; 15189 if (!phba->sli4_hba.pc_sli4_params.supported) 15190 hw_page_size = SLI4_PAGE_SIZE; 15191 15192 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15193 if (!mbox) 15194 return -ENOMEM; 15195 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 15196 sizeof(struct lpfc_sli4_cfg_mhdr)); 15197 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15198 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15199 length, LPFC_SLI4_MBX_EMBED); 15200 15201 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15202 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15203 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15204 &mq_create_ext->u.request, mq->page_count); 15205 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15206 &mq_create_ext->u.request, 1); 15207 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15208 &mq_create_ext->u.request, 1); 15209 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15210 &mq_create_ext->u.request, 1); 15211 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15212 &mq_create_ext->u.request, 1); 15213 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15214 &mq_create_ext->u.request, 1); 15215 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15216 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15217 phba->sli4_hba.pc_sli4_params.mqv); 15218 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15219 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15220 cq->queue_id); 15221 else 15222 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15223 cq->queue_id); 15224 switch (mq->entry_count) { 15225 default: 15226 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15227 "0362 Unsupported MQ count. 
(%d)\n", 15228 mq->entry_count); 15229 if (mq->entry_count < 16) { 15230 status = -EINVAL; 15231 goto out; 15232 } 15233 /* fall through - otherwise default to smallest count */ 15234 case 16: 15235 bf_set(lpfc_mq_context_ring_size, 15236 &mq_create_ext->u.request.context, 15237 LPFC_MQ_RING_SIZE_16); 15238 break; 15239 case 32: 15240 bf_set(lpfc_mq_context_ring_size, 15241 &mq_create_ext->u.request.context, 15242 LPFC_MQ_RING_SIZE_32); 15243 break; 15244 case 64: 15245 bf_set(lpfc_mq_context_ring_size, 15246 &mq_create_ext->u.request.context, 15247 LPFC_MQ_RING_SIZE_64); 15248 break; 15249 case 128: 15250 bf_set(lpfc_mq_context_ring_size, 15251 &mq_create_ext->u.request.context, 15252 LPFC_MQ_RING_SIZE_128); 15253 break; 15254 } 15255 list_for_each_entry(dmabuf, &mq->page_list, list) { 15256 memset(dmabuf->virt, 0, hw_page_size); 15257 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15258 putPaddrLow(dmabuf->phys); 15259 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15260 putPaddrHigh(dmabuf->phys); 15261 } 15262 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15263 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15264 &mq_create_ext->u.response); 15265 if (rc != MBX_SUCCESS) { 15266 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15267 "2795 MQ_CREATE_EXT failed with " 15268 "status x%x. Failback to MQ_CREATE.\n", 15269 rc); 15270 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15271 mq_create = &mbox->u.mqe.un.mq_create; 15272 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15273 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15274 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15275 &mq_create->u.response); 15276 } 15277 15278 /* The IOCTL status is embedded in the mailbox subheader. */ 15279 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15280 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15281 if (shdr_status || shdr_add_status || rc) { 15282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15283 "2502 MQ_CREATE mailbox failed with " 15284 "status x%x add_status x%x, mbx status x%x\n", 15285 shdr_status, shdr_add_status, rc); 15286 status = -ENXIO; 15287 goto out; 15288 } 15289 if (mq->queue_id == 0xFFFF) { 15290 status = -ENXIO; 15291 goto out; 15292 } 15293 mq->type = LPFC_MQ; 15294 mq->assoc_qid = cq->queue_id; 15295 mq->subtype = subtype; 15296 mq->host_index = 0; 15297 mq->hba_index = 0; 15298 15299 /* link the mq onto the parent cq child list */ 15300 list_add_tail(&mq->list, &cq->child_list); 15301 out: 15302 mempool_free(mbox, phba->mbox_mem_pool); 15303 return status; 15304 } 15305 15306 /** 15307 * lpfc_wq_create - Create a Work Queue on the HBA 15308 * @phba: HBA structure that indicates port to create a queue on. 15309 * @wq: The queue structure to use to create the work queue. 15310 * @cq: The completion queue to bind this work queue to. 15311 * @subtype: The subtype of the work queue indicating its functionality. 15312 * 15313 * This function creates a work queue, as detailed in @wq, on a port, described 15314 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15315 * 15316 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15317 * is used to get the entry count and entry size that are necessary to 15318 * determine the number of pages to allocate and use for this queue. The @cq 15319 * is used to indicate which completion queue to bind this work queue to. This 15320 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15321 * work queue. 
This function is asynchronous and will wait for the mailbox 15322 * command to finish before continuing. 15323 * 15324 * On success this function will return a zero. If unable to allocate enough 15325 * memory this function will return -ENOMEM. If the queue create mailbox command 15326 * fails this function will return -ENXIO. 15327 **/ 15328 int 15329 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 15330 struct lpfc_queue *cq, uint32_t subtype) 15331 { 15332 struct lpfc_mbx_wq_create *wq_create; 15333 struct lpfc_dmabuf *dmabuf; 15334 LPFC_MBOXQ_t *mbox; 15335 int rc, length, status = 0; 15336 uint32_t shdr_status, shdr_add_status; 15337 union lpfc_sli4_cfg_shdr *shdr; 15338 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15339 struct dma_address *page; 15340 void __iomem *bar_memmap_p; 15341 uint32_t db_offset; 15342 uint16_t pci_barset; 15343 uint8_t dpp_barset; 15344 uint32_t dpp_offset; 15345 unsigned long pg_addr; 15346 uint8_t wq_create_version; 15347 15348 /* sanity check on queue memory */ 15349 if (!wq || !cq) 15350 return -ENODEV; 15351 if (!phba->sli4_hba.pc_sli4_params.supported) 15352 hw_page_size = wq->page_size; 15353 15354 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15355 if (!mbox) 15356 return -ENOMEM; 15357 length = (sizeof(struct lpfc_mbx_wq_create) - 15358 sizeof(struct lpfc_sli4_cfg_mhdr)); 15359 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15360 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 15361 length, LPFC_SLI4_MBX_EMBED); 15362 wq_create = &mbox->u.mqe.un.wq_create; 15363 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 15364 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 15365 wq->page_count); 15366 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 15367 cq->queue_id); 15368 15369 /* wqv is the earliest version supported, NOT the latest */ 15370 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15371 phba->sli4_hba.pc_sli4_params.wqv); 15372 15373 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 15374 (wq->page_size > SLI4_PAGE_SIZE)) 15375 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15376 else 15377 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15378 15379 15380 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 15381 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15382 else 15383 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15384 15385 switch (wq_create_version) { 15386 case LPFC_Q_CREATE_VERSION_1: 15387 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 15388 wq->entry_count); 15389 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15390 LPFC_Q_CREATE_VERSION_1); 15391 15392 switch (wq->entry_size) { 15393 default: 15394 case 64: 15395 bf_set(lpfc_mbx_wq_create_wqe_size, 15396 &wq_create->u.request_1, 15397 LPFC_WQ_WQE_SIZE_64); 15398 break; 15399 case 128: 15400 bf_set(lpfc_mbx_wq_create_wqe_size, 15401 &wq_create->u.request_1, 15402 LPFC_WQ_WQE_SIZE_128); 15403 break; 15404 } 15405 /* Request DPP by default */ 15406 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 15407 bf_set(lpfc_mbx_wq_create_page_size, 15408 &wq_create->u.request_1, 15409 (wq->page_size / SLI4_PAGE_SIZE)); 15410 page = wq_create->u.request_1.page; 15411 break; 15412 default: 15413 page = wq_create->u.request.page; 15414 break; 15415 } 15416 15417 list_for_each_entry(dmabuf, &wq->page_list, list) { 15418 memset(dmabuf->virt, 0, hw_page_size); 15419 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 15420 page[dmabuf->buffer_tag].addr_hi = 
putPaddrHigh(dmabuf->phys); 15421 } 15422 15423 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15424 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15425 15426 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15427 /* The IOCTL status is embedded in the mailbox subheader. */ 15428 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15429 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15430 if (shdr_status || shdr_add_status || rc) { 15431 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15432 "2503 WQ_CREATE mailbox failed with " 15433 "status x%x add_status x%x, mbx status x%x\n", 15434 shdr_status, shdr_add_status, rc); 15435 status = -ENXIO; 15436 goto out; 15437 } 15438 15439 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15440 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15441 &wq_create->u.response); 15442 else 15443 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15444 &wq_create->u.response_1); 15445 15446 if (wq->queue_id == 0xFFFF) { 15447 status = -ENXIO; 15448 goto out; 15449 } 15450 15451 wq->db_format = LPFC_DB_LIST_FORMAT; 15452 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15453 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15454 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15455 &wq_create->u.response); 15456 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15457 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15458 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15459 "3265 WQ[%d] doorbell format " 15460 "not supported: x%x\n", 15461 wq->queue_id, wq->db_format); 15462 status = -EINVAL; 15463 goto out; 15464 } 15465 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15466 &wq_create->u.response); 15467 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15468 pci_barset); 15469 if (!bar_memmap_p) { 15470 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15471 "3263 WQ[%d] failed to memmap " 15472 "pci barset:x%x\n", 15473 wq->queue_id, pci_barset); 15474 status = -ENOMEM; 15475 goto out; 15476 } 15477 db_offset = wq_create->u.response.doorbell_offset; 15478 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15479 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15480 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15481 "3252 WQ[%d] doorbell offset " 15482 "not supported: x%x\n", 15483 wq->queue_id, db_offset); 15484 status = -EINVAL; 15485 goto out; 15486 } 15487 wq->db_regaddr = bar_memmap_p + db_offset; 15488 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15489 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15490 "format:x%x\n", wq->queue_id, 15491 pci_barset, db_offset, wq->db_format); 15492 } else 15493 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15494 } else { 15495 /* Check if DPP was honored by the firmware */ 15496 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15497 &wq_create->u.response_1); 15498 if (wq->dpp_enable) { 15499 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15500 &wq_create->u.response_1); 15501 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15502 pci_barset); 15503 if (!bar_memmap_p) { 15504 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15505 "3267 WQ[%d] failed to memmap " 15506 "pci barset:x%x\n", 15507 wq->queue_id, pci_barset); 15508 status = -ENOMEM; 15509 goto out; 15510 } 15511 db_offset = wq_create->u.response_1.doorbell_offset; 15512 wq->db_regaddr = bar_memmap_p + db_offset; 15513 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15514 &wq_create->u.response_1); 15515 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15516 &wq_create->u.response_1); 15517 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15518 dpp_barset); 15519 if 
(!bar_memmap_p) { 15520 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15521 "3268 WQ[%d] failed to memmap " 15522 "pci barset:x%x\n", 15523 wq->queue_id, dpp_barset); 15524 status = -ENOMEM; 15525 goto out; 15526 } 15527 dpp_offset = wq_create->u.response_1.dpp_offset; 15528 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15529 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15530 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15531 "dpp_id:x%x dpp_barset:x%x " 15532 "dpp_offset:x%x\n", 15533 wq->queue_id, pci_barset, db_offset, 15534 wq->dpp_id, dpp_barset, dpp_offset); 15535 15536 /* Enable combined writes for DPP aperture */ 15537 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15538 #ifdef CONFIG_X86 15539 rc = set_memory_wc(pg_addr, 1); 15540 if (rc) { 15541 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15542 "3272 Cannot setup Combined " 15543 "Write on WQ[%d] - disable DPP\n", 15544 wq->queue_id); 15545 phba->cfg_enable_dpp = 0; 15546 } 15547 #else 15548 phba->cfg_enable_dpp = 0; 15549 #endif 15550 } else 15551 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15552 } 15553 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15554 if (wq->pring == NULL) { 15555 status = -ENOMEM; 15556 goto out; 15557 } 15558 wq->type = LPFC_WQ; 15559 wq->assoc_qid = cq->queue_id; 15560 wq->subtype = subtype; 15561 wq->host_index = 0; 15562 wq->hba_index = 0; 15563 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; 15564 15565 /* link the wq onto the parent cq child list */ 15566 list_add_tail(&wq->list, &cq->child_list); 15567 out: 15568 mempool_free(mbox, phba->mbox_mem_pool); 15569 return status; 15570 } 15571 15572 /** 15573 * lpfc_rq_create - Create a Receive Queue on the HBA 15574 * @phba: HBA structure that indicates port to create a queue on. 15575 * @hrq: The queue structure to use to create the header receive queue. 15576 * @drq: The queue structure to use to create the data receive queue. 15577 * @cq: The completion queue to bind this work queue to. 15578 * 15579 * This function creates a receive buffer queue pair , as detailed in @hrq and 15580 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15581 * to the HBA. 15582 * 15583 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15584 * struct is used to get the entry count that is necessary to determine the 15585 * number of pages to use for this queue. The @cq is used to indicate which 15586 * completion queue to bind received buffers that are posted to these queues to. 15587 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15588 * receive queue pair. This function is asynchronous and will wait for the 15589 * mailbox command to finish before continuing. 15590 * 15591 * On success this function will return a zero. If unable to allocate enough 15592 * memory this function will return -ENOMEM. If the queue create mailbox command 15593 * fails this function will return -ENXIO. 
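 *
 * A minimal usage sketch; the hdr_rq/dat_rq/els_cq pointers and the
 * LPFC_USOL subtype are illustrative assumptions, not values taken from
 * this function:
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy_els_cq;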
15594 **/ 15595 int 15596 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15597 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15598 { 15599 struct lpfc_mbx_rq_create *rq_create; 15600 struct lpfc_dmabuf *dmabuf; 15601 LPFC_MBOXQ_t *mbox; 15602 int rc, length, status = 0; 15603 uint32_t shdr_status, shdr_add_status; 15604 union lpfc_sli4_cfg_shdr *shdr; 15605 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15606 void __iomem *bar_memmap_p; 15607 uint32_t db_offset; 15608 uint16_t pci_barset; 15609 15610 /* sanity check on queue memory */ 15611 if (!hrq || !drq || !cq) 15612 return -ENODEV; 15613 if (!phba->sli4_hba.pc_sli4_params.supported) 15614 hw_page_size = SLI4_PAGE_SIZE; 15615 15616 if (hrq->entry_count != drq->entry_count) 15617 return -EINVAL; 15618 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15619 if (!mbox) 15620 return -ENOMEM; 15621 length = (sizeof(struct lpfc_mbx_rq_create) - 15622 sizeof(struct lpfc_sli4_cfg_mhdr)); 15623 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15624 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15625 length, LPFC_SLI4_MBX_EMBED); 15626 rq_create = &mbox->u.mqe.un.rq_create; 15627 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15628 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15629 phba->sli4_hba.pc_sli4_params.rqv); 15630 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15631 bf_set(lpfc_rq_context_rqe_count_1, 15632 &rq_create->u.request.context, 15633 hrq->entry_count); 15634 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15635 bf_set(lpfc_rq_context_rqe_size, 15636 &rq_create->u.request.context, 15637 LPFC_RQE_SIZE_8); 15638 bf_set(lpfc_rq_context_page_size, 15639 &rq_create->u.request.context, 15640 LPFC_RQ_PAGE_SIZE_4096); 15641 } else { 15642 switch (hrq->entry_count) { 15643 default: 15644 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15645 "2535 Unsupported RQ count. (%d)\n", 15646 hrq->entry_count); 15647 if (hrq->entry_count < 512) { 15648 status = -EINVAL; 15649 goto out; 15650 } 15651 /* fall through - otherwise default to smallest count */ 15652 case 512: 15653 bf_set(lpfc_rq_context_rqe_count, 15654 &rq_create->u.request.context, 15655 LPFC_RQ_RING_SIZE_512); 15656 break; 15657 case 1024: 15658 bf_set(lpfc_rq_context_rqe_count, 15659 &rq_create->u.request.context, 15660 LPFC_RQ_RING_SIZE_1024); 15661 break; 15662 case 2048: 15663 bf_set(lpfc_rq_context_rqe_count, 15664 &rq_create->u.request.context, 15665 LPFC_RQ_RING_SIZE_2048); 15666 break; 15667 case 4096: 15668 bf_set(lpfc_rq_context_rqe_count, 15669 &rq_create->u.request.context, 15670 LPFC_RQ_RING_SIZE_4096); 15671 break; 15672 } 15673 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15674 LPFC_HDR_BUF_SIZE); 15675 } 15676 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15677 cq->queue_id); 15678 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15679 hrq->page_count); 15680 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15681 memset(dmabuf->virt, 0, hw_page_size); 15682 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15683 putPaddrLow(dmabuf->phys); 15684 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15685 putPaddrHigh(dmabuf->phys); 15686 } 15687 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15688 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15689 15690 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15691 /* The IOCTL status is embedded in the mailbox subheader. 
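 * Both shdr_status and shdr_add_status must be zero, and the polled
 * mailbox call itself must succeed, before the header RQ is treated as
 * created.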
*/ 15692 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15693 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15694 if (shdr_status || shdr_add_status || rc) { 15695 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15696 "2504 RQ_CREATE mailbox failed with " 15697 "status x%x add_status x%x, mbx status x%x\n", 15698 shdr_status, shdr_add_status, rc); 15699 status = -ENXIO; 15700 goto out; 15701 } 15702 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15703 if (hrq->queue_id == 0xFFFF) { 15704 status = -ENXIO; 15705 goto out; 15706 } 15707 15708 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15709 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15710 &rq_create->u.response); 15711 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15712 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15713 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15714 "3262 RQ [%d] doorbell format not " 15715 "supported: x%x\n", hrq->queue_id, 15716 hrq->db_format); 15717 status = -EINVAL; 15718 goto out; 15719 } 15720 15721 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15722 &rq_create->u.response); 15723 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15724 if (!bar_memmap_p) { 15725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15726 "3269 RQ[%d] failed to memmap pci " 15727 "barset:x%x\n", hrq->queue_id, 15728 pci_barset); 15729 status = -ENOMEM; 15730 goto out; 15731 } 15732 15733 db_offset = rq_create->u.response.doorbell_offset; 15734 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15735 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15736 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15737 "3270 RQ[%d] doorbell offset not " 15738 "supported: x%x\n", hrq->queue_id, 15739 db_offset); 15740 status = -EINVAL; 15741 goto out; 15742 } 15743 hrq->db_regaddr = bar_memmap_p + db_offset; 15744 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15745 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15746 "format:x%x\n", hrq->queue_id, pci_barset, 15747 db_offset, hrq->db_format); 15748 } else { 15749 hrq->db_format = LPFC_DB_RING_FORMAT; 15750 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15751 } 15752 hrq->type = LPFC_HRQ; 15753 hrq->assoc_qid = cq->queue_id; 15754 hrq->subtype = subtype; 15755 hrq->host_index = 0; 15756 hrq->hba_index = 0; 15757 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15758 15759 /* now create the data queue */ 15760 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15761 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15762 length, LPFC_SLI4_MBX_EMBED); 15763 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15764 phba->sli4_hba.pc_sli4_params.rqv); 15765 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15766 bf_set(lpfc_rq_context_rqe_count_1, 15767 &rq_create->u.request.context, hrq->entry_count); 15768 if (subtype == LPFC_NVMET) 15769 rq_create->u.request.context.buffer_size = 15770 LPFC_NVMET_DATA_BUF_SIZE; 15771 else 15772 rq_create->u.request.context.buffer_size = 15773 LPFC_DATA_BUF_SIZE; 15774 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15775 LPFC_RQE_SIZE_8); 15776 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15777 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15778 } else { 15779 switch (drq->entry_count) { 15780 default: 15781 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15782 "2536 Unsupported RQ count. 
(%d)\n", 15783 drq->entry_count); 15784 if (drq->entry_count < 512) { 15785 status = -EINVAL; 15786 goto out; 15787 } 15788 /* fall through - otherwise default to smallest count */ 15789 case 512: 15790 bf_set(lpfc_rq_context_rqe_count, 15791 &rq_create->u.request.context, 15792 LPFC_RQ_RING_SIZE_512); 15793 break; 15794 case 1024: 15795 bf_set(lpfc_rq_context_rqe_count, 15796 &rq_create->u.request.context, 15797 LPFC_RQ_RING_SIZE_1024); 15798 break; 15799 case 2048: 15800 bf_set(lpfc_rq_context_rqe_count, 15801 &rq_create->u.request.context, 15802 LPFC_RQ_RING_SIZE_2048); 15803 break; 15804 case 4096: 15805 bf_set(lpfc_rq_context_rqe_count, 15806 &rq_create->u.request.context, 15807 LPFC_RQ_RING_SIZE_4096); 15808 break; 15809 } 15810 if (subtype == LPFC_NVMET) 15811 bf_set(lpfc_rq_context_buf_size, 15812 &rq_create->u.request.context, 15813 LPFC_NVMET_DATA_BUF_SIZE); 15814 else 15815 bf_set(lpfc_rq_context_buf_size, 15816 &rq_create->u.request.context, 15817 LPFC_DATA_BUF_SIZE); 15818 } 15819 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15820 cq->queue_id); 15821 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15822 drq->page_count); 15823 list_for_each_entry(dmabuf, &drq->page_list, list) { 15824 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15825 putPaddrLow(dmabuf->phys); 15826 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15827 putPaddrHigh(dmabuf->phys); 15828 } 15829 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15830 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15831 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15832 /* The IOCTL status is embedded in the mailbox subheader. */ 15833 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15834 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15835 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15836 if (shdr_status || shdr_add_status || rc) { 15837 status = -ENXIO; 15838 goto out; 15839 } 15840 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15841 if (drq->queue_id == 0xFFFF) { 15842 status = -ENXIO; 15843 goto out; 15844 } 15845 drq->type = LPFC_DRQ; 15846 drq->assoc_qid = cq->queue_id; 15847 drq->subtype = subtype; 15848 drq->host_index = 0; 15849 drq->hba_index = 0; 15850 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15851 15852 /* link the header and data RQs onto the parent cq child list */ 15853 list_add_tail(&hrq->list, &cq->child_list); 15854 list_add_tail(&drq->list, &cq->child_list); 15855 15856 out: 15857 mempool_free(mbox, phba->mbox_mem_pool); 15858 return status; 15859 } 15860 15861 /** 15862 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15863 * @phba: HBA structure that indicates port to create a queue on. 15864 * @hrqp: The queue structure array to use to create the header receive queues. 15865 * @drqp: The queue structure array to use to create the data receive queues. 15866 * @cqp: The completion queue array to bind these receive queues to. 15867 * 15868 * This function creates a receive buffer queue pair , as detailed in @hrq and 15869 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15870 * to the HBA. 15871 * 15872 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15873 * struct is used to get the entry count that is necessary to determine the 15874 * number of pages to use for this queue. The @cq is used to indicate which 15875 * completion queue to bind received buffers that are posted to these queues to. 
15876 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15877 * receive queue pair. This function is asynchronous and will wait for the 15878 * mailbox command to finish before continuing. 15879 * 15880 * On success this function will return a zero. If unable to allocate enough 15881 * memory this function will return -ENOMEM. If the queue create mailbox command 15882 * fails this function will return -ENXIO. 15883 **/ 15884 int 15885 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15886 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15887 uint32_t subtype) 15888 { 15889 struct lpfc_queue *hrq, *drq, *cq; 15890 struct lpfc_mbx_rq_create_v2 *rq_create; 15891 struct lpfc_dmabuf *dmabuf; 15892 LPFC_MBOXQ_t *mbox; 15893 int rc, length, alloclen, status = 0; 15894 int cnt, idx, numrq, page_idx = 0; 15895 uint32_t shdr_status, shdr_add_status; 15896 union lpfc_sli4_cfg_shdr *shdr; 15897 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15898 15899 numrq = phba->cfg_nvmet_mrq; 15900 /* sanity check on array memory */ 15901 if (!hrqp || !drqp || !cqp || !numrq) 15902 return -ENODEV; 15903 if (!phba->sli4_hba.pc_sli4_params.supported) 15904 hw_page_size = SLI4_PAGE_SIZE; 15905 15906 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15907 if (!mbox) 15908 return -ENOMEM; 15909 15910 length = sizeof(struct lpfc_mbx_rq_create_v2); 15911 length += ((2 * numrq * hrqp[0]->page_count) * 15912 sizeof(struct dma_address)); 15913 15914 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15915 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15916 LPFC_SLI4_MBX_NEMBED); 15917 if (alloclen < length) { 15918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15919 "3099 Allocated DMA memory size (%d) is " 15920 "less than the requested DMA memory size " 15921 "(%d)\n", alloclen, length); 15922 status = -ENOMEM; 15923 goto out; 15924 } 15925 15926 15927 15928 rq_create = mbox->sge_array->addr[0]; 15929 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15930 15931 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15932 cnt = 0; 15933 15934 for (idx = 0; idx < numrq; idx++) { 15935 hrq = hrqp[idx]; 15936 drq = drqp[idx]; 15937 cq = cqp[idx]; 15938 15939 /* sanity check on queue memory */ 15940 if (!hrq || !drq || !cq) { 15941 status = -ENODEV; 15942 goto out; 15943 } 15944 15945 if (hrq->entry_count != drq->entry_count) { 15946 status = -EINVAL; 15947 goto out; 15948 } 15949 15950 if (idx == 0) { 15951 bf_set(lpfc_mbx_rq_create_num_pages, 15952 &rq_create->u.request, 15953 hrq->page_count); 15954 bf_set(lpfc_mbx_rq_create_rq_cnt, 15955 &rq_create->u.request, (numrq * 2)); 15956 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15957 1); 15958 bf_set(lpfc_rq_context_base_cq, 15959 &rq_create->u.request.context, 15960 cq->queue_id); 15961 bf_set(lpfc_rq_context_data_size, 15962 &rq_create->u.request.context, 15963 LPFC_NVMET_DATA_BUF_SIZE); 15964 bf_set(lpfc_rq_context_hdr_size, 15965 &rq_create->u.request.context, 15966 LPFC_HDR_BUF_SIZE); 15967 bf_set(lpfc_rq_context_rqe_count_1, 15968 &rq_create->u.request.context, 15969 hrq->entry_count); 15970 bf_set(lpfc_rq_context_rqe_size, 15971 &rq_create->u.request.context, 15972 LPFC_RQE_SIZE_8); 15973 bf_set(lpfc_rq_context_page_size, 15974 &rq_create->u.request.context, 15975 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15976 } 15977 rc = 0; 15978 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15979 memset(dmabuf->virt, 0, hw_page_size); 15980 cnt = page_idx + dmabuf->buffer_tag; 
15981 rq_create->u.request.page[cnt].addr_lo = 15982 putPaddrLow(dmabuf->phys); 15983 rq_create->u.request.page[cnt].addr_hi = 15984 putPaddrHigh(dmabuf->phys); 15985 rc++; 15986 } 15987 page_idx += rc; 15988 15989 rc = 0; 15990 list_for_each_entry(dmabuf, &drq->page_list, list) { 15991 memset(dmabuf->virt, 0, hw_page_size); 15992 cnt = page_idx + dmabuf->buffer_tag; 15993 rq_create->u.request.page[cnt].addr_lo = 15994 putPaddrLow(dmabuf->phys); 15995 rq_create->u.request.page[cnt].addr_hi = 15996 putPaddrHigh(dmabuf->phys); 15997 rc++; 15998 } 15999 page_idx += rc; 16000 16001 hrq->db_format = LPFC_DB_RING_FORMAT; 16002 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16003 hrq->type = LPFC_HRQ; 16004 hrq->assoc_qid = cq->queue_id; 16005 hrq->subtype = subtype; 16006 hrq->host_index = 0; 16007 hrq->hba_index = 0; 16008 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16009 16010 drq->db_format = LPFC_DB_RING_FORMAT; 16011 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16012 drq->type = LPFC_DRQ; 16013 drq->assoc_qid = cq->queue_id; 16014 drq->subtype = subtype; 16015 drq->host_index = 0; 16016 drq->hba_index = 0; 16017 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16018 16019 list_add_tail(&hrq->list, &cq->child_list); 16020 list_add_tail(&drq->list, &cq->child_list); 16021 } 16022 16023 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16024 /* The IOCTL status is embedded in the mailbox subheader. */ 16025 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16026 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16027 if (shdr_status || shdr_add_status || rc) { 16028 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16029 "3120 RQ_CREATE mailbox failed with " 16030 "status x%x add_status x%x, mbx status x%x\n", 16031 shdr_status, shdr_add_status, rc); 16032 status = -ENXIO; 16033 goto out; 16034 } 16035 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16036 if (rc == 0xFFFF) { 16037 status = -ENXIO; 16038 goto out; 16039 } 16040 16041 /* Initialize all RQs with associated queue id */ 16042 for (idx = 0; idx < numrq; idx++) { 16043 hrq = hrqp[idx]; 16044 hrq->queue_id = rc + (2 * idx); 16045 drq = drqp[idx]; 16046 drq->queue_id = rc + (2 * idx) + 1; 16047 } 16048 16049 out: 16050 lpfc_sli4_mbox_cmd_free(phba, mbox); 16051 return status; 16052 } 16053 16054 /** 16055 * lpfc_eq_destroy - Destroy an event Queue on the HBA 16056 * @eq: The queue structure associated with the queue to destroy. 16057 * 16058 * This function destroys a queue, as detailed in @eq by sending an mailbox 16059 * command, specific to the type of queue, to the HBA. 16060 * 16061 * The @eq struct is used to get the queue ID of the queue to destroy. 16062 * 16063 * On success this function will return a zero. If the queue destroy mailbox 16064 * command fails this function will return -ENXIO. 
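 *
 * Illustrative teardown call (the hba_eq pointer is an assumption for this
 * sketch, not a field referenced here):
 *
 *	(void)lpfc_eq_destroy(phba, hba_eq);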
16065 **/ 16066 int 16067 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 16068 { 16069 LPFC_MBOXQ_t *mbox; 16070 int rc, length, status = 0; 16071 uint32_t shdr_status, shdr_add_status; 16072 union lpfc_sli4_cfg_shdr *shdr; 16073 16074 /* sanity check on queue memory */ 16075 if (!eq) 16076 return -ENODEV; 16077 16078 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 16079 if (!mbox) 16080 return -ENOMEM; 16081 length = (sizeof(struct lpfc_mbx_eq_destroy) - 16082 sizeof(struct lpfc_sli4_cfg_mhdr)); 16083 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16084 LPFC_MBOX_OPCODE_EQ_DESTROY, 16085 length, LPFC_SLI4_MBX_EMBED); 16086 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 16087 eq->queue_id); 16088 mbox->vport = eq->phba->pport; 16089 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16090 16091 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 16092 /* The IOCTL status is embedded in the mailbox subheader. */ 16093 shdr = (union lpfc_sli4_cfg_shdr *) 16094 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 16095 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16096 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16097 if (shdr_status || shdr_add_status || rc) { 16098 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16099 "2505 EQ_DESTROY mailbox failed with " 16100 "status x%x add_status x%x, mbx status x%x\n", 16101 shdr_status, shdr_add_status, rc); 16102 status = -ENXIO; 16103 } 16104 16105 /* Remove eq from any list */ 16106 list_del_init(&eq->list); 16107 mempool_free(mbox, eq->phba->mbox_mem_pool); 16108 return status; 16109 } 16110 16111 /** 16112 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 16113 * @cq: The queue structure associated with the queue to destroy. 16114 * 16115 * This function destroys a queue, as detailed in @cq by sending an mailbox 16116 * command, specific to the type of queue, to the HBA. 16117 * 16118 * The @cq struct is used to get the queue ID of the queue to destroy. 16119 * 16120 * On success this function will return a zero. If the queue destroy mailbox 16121 * command fails this function will return -ENXIO. 16122 **/ 16123 int 16124 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 16125 { 16126 LPFC_MBOXQ_t *mbox; 16127 int rc, length, status = 0; 16128 uint32_t shdr_status, shdr_add_status; 16129 union lpfc_sli4_cfg_shdr *shdr; 16130 16131 /* sanity check on queue memory */ 16132 if (!cq) 16133 return -ENODEV; 16134 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 16135 if (!mbox) 16136 return -ENOMEM; 16137 length = (sizeof(struct lpfc_mbx_cq_destroy) - 16138 sizeof(struct lpfc_sli4_cfg_mhdr)); 16139 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16140 LPFC_MBOX_OPCODE_CQ_DESTROY, 16141 length, LPFC_SLI4_MBX_EMBED); 16142 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 16143 cq->queue_id); 16144 mbox->vport = cq->phba->pport; 16145 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16146 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 16147 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16148 shdr = (union lpfc_sli4_cfg_shdr *) 16149 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 16150 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16151 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16152 if (shdr_status || shdr_add_status || rc) { 16153 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16154 "2506 CQ_DESTROY mailbox failed with " 16155 "status x%x add_status x%x, mbx status x%x\n", 16156 shdr_status, shdr_add_status, rc); 16157 status = -ENXIO; 16158 } 16159 /* Remove cq from any list */ 16160 list_del_init(&cq->list); 16161 mempool_free(mbox, cq->phba->mbox_mem_pool); 16162 return status; 16163 } 16164 16165 /** 16166 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 16167 * @qm: The queue structure associated with the queue to destroy. 16168 * 16169 * This function destroys a queue, as detailed in @mq by sending an mailbox 16170 * command, specific to the type of queue, to the HBA. 16171 * 16172 * The @mq struct is used to get the queue ID of the queue to destroy. 16173 * 16174 * On success this function will return a zero. If the queue destroy mailbox 16175 * command fails this function will return -ENXIO. 16176 **/ 16177 int 16178 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 16179 { 16180 LPFC_MBOXQ_t *mbox; 16181 int rc, length, status = 0; 16182 uint32_t shdr_status, shdr_add_status; 16183 union lpfc_sli4_cfg_shdr *shdr; 16184 16185 /* sanity check on queue memory */ 16186 if (!mq) 16187 return -ENODEV; 16188 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 16189 if (!mbox) 16190 return -ENOMEM; 16191 length = (sizeof(struct lpfc_mbx_mq_destroy) - 16192 sizeof(struct lpfc_sli4_cfg_mhdr)); 16193 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16194 LPFC_MBOX_OPCODE_MQ_DESTROY, 16195 length, LPFC_SLI4_MBX_EMBED); 16196 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16197 mq->queue_id); 16198 mbox->vport = mq->phba->pport; 16199 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16200 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16201 /* The IOCTL status is embedded in the mailbox subheader. */ 16202 shdr = (union lpfc_sli4_cfg_shdr *) 16203 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16204 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16205 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16206 if (shdr_status || shdr_add_status || rc) { 16207 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16208 "2507 MQ_DESTROY mailbox failed with " 16209 "status x%x add_status x%x, mbx status x%x\n", 16210 shdr_status, shdr_add_status, rc); 16211 status = -ENXIO; 16212 } 16213 /* Remove mq from any list */ 16214 list_del_init(&mq->list); 16215 mempool_free(mbox, mq->phba->mbox_mem_pool); 16216 return status; 16217 } 16218 16219 /** 16220 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16221 * @wq: The queue structure associated with the queue to destroy. 16222 * 16223 * This function destroys a queue, as detailed in @wq by sending an mailbox 16224 * command, specific to the type of queue, to the HBA. 16225 * 16226 * The @wq struct is used to get the queue ID of the queue to destroy. 16227 * 16228 * On success this function will return a zero. If the queue destroy mailbox 16229 * command fails this function will return -ENXIO. 
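 *
 * Child queues are normally torn down before their parents; a hedged sketch
 * of that order (queue names are assumptions):
 *
 *	lpfc_wq_destroy(phba, els_wq);	also frees els_wq->pring
 *	lpfc_cq_destroy(phba, els_cq);
 *	lpfc_eq_destroy(phba, hba_eq);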
16230 **/ 16231 int 16232 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16233 { 16234 LPFC_MBOXQ_t *mbox; 16235 int rc, length, status = 0; 16236 uint32_t shdr_status, shdr_add_status; 16237 union lpfc_sli4_cfg_shdr *shdr; 16238 16239 /* sanity check on queue memory */ 16240 if (!wq) 16241 return -ENODEV; 16242 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16243 if (!mbox) 16244 return -ENOMEM; 16245 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16246 sizeof(struct lpfc_sli4_cfg_mhdr)); 16247 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16248 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16249 length, LPFC_SLI4_MBX_EMBED); 16250 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16251 wq->queue_id); 16252 mbox->vport = wq->phba->pport; 16253 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16254 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16255 shdr = (union lpfc_sli4_cfg_shdr *) 16256 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16257 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16258 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16259 if (shdr_status || shdr_add_status || rc) { 16260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16261 "2508 WQ_DESTROY mailbox failed with " 16262 "status x%x add_status x%x, mbx status x%x\n", 16263 shdr_status, shdr_add_status, rc); 16264 status = -ENXIO; 16265 } 16266 /* Remove wq from any list */ 16267 list_del_init(&wq->list); 16268 kfree(wq->pring); 16269 wq->pring = NULL; 16270 mempool_free(mbox, wq->phba->mbox_mem_pool); 16271 return status; 16272 } 16273 16274 /** 16275 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16276 * @rq: The queue structure associated with the queue to destroy. 16277 * 16278 * This function destroys a queue, as detailed in @rq by sending an mailbox 16279 * command, specific to the type of queue, to the HBA. 16280 * 16281 * The @rq struct is used to get the queue ID of the queue to destroy. 16282 * 16283 * On success this function will return a zero. If the queue destroy mailbox 16284 * command fails this function will return -ENXIO. 16285 **/ 16286 int 16287 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16288 struct lpfc_queue *drq) 16289 { 16290 LPFC_MBOXQ_t *mbox; 16291 int rc, length, status = 0; 16292 uint32_t shdr_status, shdr_add_status; 16293 union lpfc_sli4_cfg_shdr *shdr; 16294 16295 /* sanity check on queue memory */ 16296 if (!hrq || !drq) 16297 return -ENODEV; 16298 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16299 if (!mbox) 16300 return -ENOMEM; 16301 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16302 sizeof(struct lpfc_sli4_cfg_mhdr)); 16303 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16304 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16305 length, LPFC_SLI4_MBX_EMBED); 16306 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16307 hrq->queue_id); 16308 mbox->vport = hrq->phba->pport; 16309 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16310 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16311 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16312 shdr = (union lpfc_sli4_cfg_shdr *) 16313 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16314 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16315 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16316 if (shdr_status || shdr_add_status || rc) { 16317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16318 "2509 RQ_DESTROY mailbox failed with " 16319 "status x%x add_status x%x, mbx status x%x\n", 16320 shdr_status, shdr_add_status, rc); 16321 if (rc != MBX_TIMEOUT) 16322 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16323 return -ENXIO; 16324 } 16325 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16326 drq->queue_id); 16327 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 16328 shdr = (union lpfc_sli4_cfg_shdr *) 16329 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16330 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16331 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16332 if (shdr_status || shdr_add_status || rc) { 16333 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16334 "2510 RQ_DESTROY mailbox failed with " 16335 "status x%x add_status x%x, mbx status x%x\n", 16336 shdr_status, shdr_add_status, rc); 16337 status = -ENXIO; 16338 } 16339 list_del_init(&hrq->list); 16340 list_del_init(&drq->list); 16341 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16342 return status; 16343 } 16344 16345 /** 16346 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 16347 * @phba: The virtual port for which this call being executed. 16348 * @pdma_phys_addr0: Physical address of the 1st SGL page. 16349 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 16350 * @xritag: the xritag that ties this io to the SGL pages. 16351 * 16352 * This routine will post the sgl pages for the IO that has the xritag 16353 * that is in the iocbq structure. The xritag is assigned during iocbq 16354 * creation and persists for as long as the driver is loaded. 16355 * if the caller has fewer than 256 scatter gather segments to map then 16356 * pdma_phys_addr1 should be 0. 16357 * If the caller needs to map more than 256 scatter gather segment then 16358 * pdma_phys_addr1 should be a valid physical address. 16359 * physical address for SGLs must be 64 byte aligned. 16360 * If you are going to map 2 SGL's then the first one must have 256 entries 16361 * the second sgl can have between 1 and 256 entries. 
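 *
 * Single-page sketch (the sglq pointer is an assumption for the example);
 * the second page address is passed as 0 when 256 or fewer entries are
 * mapped:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);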
16362 * 16363 * Return codes: 16364 * 0 - Success 16365 * -ENXIO, -ENOMEM - Failure 16366 **/ 16367 int 16368 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 16369 dma_addr_t pdma_phys_addr0, 16370 dma_addr_t pdma_phys_addr1, 16371 uint16_t xritag) 16372 { 16373 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 16374 LPFC_MBOXQ_t *mbox; 16375 int rc; 16376 uint32_t shdr_status, shdr_add_status; 16377 uint32_t mbox_tmo; 16378 union lpfc_sli4_cfg_shdr *shdr; 16379 16380 if (xritag == NO_XRI) { 16381 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16382 "0364 Invalid param:\n"); 16383 return -EINVAL; 16384 } 16385 16386 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16387 if (!mbox) 16388 return -ENOMEM; 16389 16390 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16391 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16392 sizeof(struct lpfc_mbx_post_sgl_pages) - 16393 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16394 16395 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 16396 &mbox->u.mqe.un.post_sgl_pages; 16397 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 16398 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 16399 16400 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 16401 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 16402 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 16403 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 16404 16405 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 16406 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 16407 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 16408 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 16409 if (!phba->sli4_hba.intr_enable) 16410 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16411 else { 16412 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16413 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16414 } 16415 /* The IOCTL status is embedded in the mailbox subheader. */ 16416 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 16417 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16418 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16419 if (rc != MBX_TIMEOUT) 16420 mempool_free(mbox, phba->mbox_mem_pool); 16421 if (shdr_status || shdr_add_status || rc) { 16422 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16423 "2511 POST_SGL mailbox failed with " 16424 "status x%x add_status x%x, mbx status x%x\n", 16425 shdr_status, shdr_add_status, rc); 16426 } 16427 return 0; 16428 } 16429 16430 /** 16431 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 16432 * @phba: pointer to lpfc hba data structure. 16433 * 16434 * This routine is invoked to post rpi header templates to the 16435 * HBA consistent with the SLI-4 interface spec. This routine 16436 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 16437 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 16438 * 16439 * Returns 16440 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 16441 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 16442 **/ 16443 static uint16_t 16444 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 16445 { 16446 unsigned long xri; 16447 16448 /* 16449 * Fetch the next logical xri. Because this index is logical, 16450 * the driver starts at 0 each time. 
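	 * The search below walks phba->sli4_hba.xri_bmask under hbalock;
	 * the matching free path clears the bit under the same lock so the
	 * bitmap and the xri_used counter stay consistent.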
16451 */ 16452 spin_lock_irq(&phba->hbalock); 16453 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 16454 phba->sli4_hba.max_cfg_param.max_xri, 0); 16455 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 16456 spin_unlock_irq(&phba->hbalock); 16457 return NO_XRI; 16458 } else { 16459 set_bit(xri, phba->sli4_hba.xri_bmask); 16460 phba->sli4_hba.max_cfg_param.xri_used++; 16461 } 16462 spin_unlock_irq(&phba->hbalock); 16463 return xri; 16464 } 16465 16466 /** 16467 * lpfc_sli4_free_xri - Release an xri for reuse. 16468 * @phba: pointer to lpfc hba data structure. 16469 * 16470 * This routine is invoked to release an xri to the pool of 16471 * available rpis maintained by the driver. 16472 **/ 16473 static void 16474 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16475 { 16476 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 16477 phba->sli4_hba.max_cfg_param.xri_used--; 16478 } 16479 } 16480 16481 /** 16482 * lpfc_sli4_free_xri - Release an xri for reuse. 16483 * @phba: pointer to lpfc hba data structure. 16484 * 16485 * This routine is invoked to release an xri to the pool of 16486 * available rpis maintained by the driver. 16487 **/ 16488 void 16489 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16490 { 16491 spin_lock_irq(&phba->hbalock); 16492 __lpfc_sli4_free_xri(phba, xri); 16493 spin_unlock_irq(&phba->hbalock); 16494 } 16495 16496 /** 16497 * lpfc_sli4_next_xritag - Get an xritag for the io 16498 * @phba: Pointer to HBA context object. 16499 * 16500 * This function gets an xritag for the iocb. If there is no unused xritag 16501 * it will return 0xffff. 16502 * The function returns the allocated xritag if successful, else returns zero. 16503 * Zero is not a valid xritag. 16504 * The caller is not required to hold any lock. 16505 **/ 16506 uint16_t 16507 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 16508 { 16509 uint16_t xri_index; 16510 16511 xri_index = lpfc_sli4_alloc_xri(phba); 16512 if (xri_index == NO_XRI) 16513 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 16514 "2004 Failed to allocate XRI.last XRITAG is %d" 16515 " Max XRI is %d, Used XRI is %d\n", 16516 xri_index, 16517 phba->sli4_hba.max_cfg_param.max_xri, 16518 phba->sli4_hba.max_cfg_param.xri_used); 16519 return xri_index; 16520 } 16521 16522 /** 16523 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 16524 * @phba: pointer to lpfc hba data structure. 16525 * @post_sgl_list: pointer to els sgl entry list. 16526 * @count: number of els sgl entries on the list. 16527 * 16528 * This routine is invoked to post a block of driver's sgl pages to the 16529 * HBA using non-embedded mailbox command. No Lock is held. This routine 16530 * is only called when the driver is loading and after all IO has been 16531 * stopped. 
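 *
 * Illustrative call (the list head and count are assumptions for the
 * sketch):
 *
 *	rc = lpfc_sli4_post_sgl_list(phba, &els_sgl_list, els_xri_cnt);
 *	if (rc)
 *		return rc;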
16532 **/ 16533 static int 16534 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16535 struct list_head *post_sgl_list, 16536 int post_cnt) 16537 { 16538 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16539 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16540 struct sgl_page_pairs *sgl_pg_pairs; 16541 void *viraddr; 16542 LPFC_MBOXQ_t *mbox; 16543 uint32_t reqlen, alloclen, pg_pairs; 16544 uint32_t mbox_tmo; 16545 uint16_t xritag_start = 0; 16546 int rc = 0; 16547 uint32_t shdr_status, shdr_add_status; 16548 union lpfc_sli4_cfg_shdr *shdr; 16549 16550 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16551 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16552 if (reqlen > SLI4_PAGE_SIZE) { 16553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16554 "2559 Block sgl registration required DMA " 16555 "size (%d) great than a page\n", reqlen); 16556 return -ENOMEM; 16557 } 16558 16559 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16560 if (!mbox) 16561 return -ENOMEM; 16562 16563 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16564 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16565 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16566 LPFC_SLI4_MBX_NEMBED); 16567 16568 if (alloclen < reqlen) { 16569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16570 "0285 Allocated DMA memory size (%d) is " 16571 "less than the requested DMA memory " 16572 "size (%d)\n", alloclen, reqlen); 16573 lpfc_sli4_mbox_cmd_free(phba, mbox); 16574 return -ENOMEM; 16575 } 16576 /* Set up the SGL pages in the non-embedded DMA pages */ 16577 viraddr = mbox->sge_array->addr[0]; 16578 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16579 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16580 16581 pg_pairs = 0; 16582 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16583 /* Set up the sge entry */ 16584 sgl_pg_pairs->sgl_pg0_addr_lo = 16585 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16586 sgl_pg_pairs->sgl_pg0_addr_hi = 16587 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16588 sgl_pg_pairs->sgl_pg1_addr_lo = 16589 cpu_to_le32(putPaddrLow(0)); 16590 sgl_pg_pairs->sgl_pg1_addr_hi = 16591 cpu_to_le32(putPaddrHigh(0)); 16592 16593 /* Keep the first xritag on the list */ 16594 if (pg_pairs == 0) 16595 xritag_start = sglq_entry->sli4_xritag; 16596 sgl_pg_pairs++; 16597 pg_pairs++; 16598 } 16599 16600 /* Complete initialization and perform endian conversion. */ 16601 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16602 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16603 sgl->word0 = cpu_to_le32(sgl->word0); 16604 16605 if (!phba->sli4_hba.intr_enable) 16606 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16607 else { 16608 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16609 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16610 } 16611 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16612 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16613 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16614 if (rc != MBX_TIMEOUT) 16615 lpfc_sli4_mbox_cmd_free(phba, mbox); 16616 if (shdr_status || shdr_add_status || rc) { 16617 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16618 "2513 POST_SGL_BLOCK mailbox command failed " 16619 "status x%x add_status x%x mbx status x%x\n", 16620 shdr_status, shdr_add_status, rc); 16621 rc = -ENXIO; 16622 } 16623 return rc; 16624 } 16625 16626 /** 16627 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 16628 * @phba: pointer to lpfc hba data structure. 
16629 * @nblist: pointer to nvme buffer list. 16630 * @count: number of scsi buffers on the list. 16631 * 16632 * This routine is invoked to post a block of @count scsi sgl pages from a 16633 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 16634 * No Lock is held. 16635 * 16636 **/ 16637 static int 16638 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 16639 int count) 16640 { 16641 struct lpfc_io_buf *lpfc_ncmd; 16642 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16643 struct sgl_page_pairs *sgl_pg_pairs; 16644 void *viraddr; 16645 LPFC_MBOXQ_t *mbox; 16646 uint32_t reqlen, alloclen, pg_pairs; 16647 uint32_t mbox_tmo; 16648 uint16_t xritag_start = 0; 16649 int rc = 0; 16650 uint32_t shdr_status, shdr_add_status; 16651 dma_addr_t pdma_phys_bpl1; 16652 union lpfc_sli4_cfg_shdr *shdr; 16653 16654 /* Calculate the requested length of the dma memory */ 16655 reqlen = count * sizeof(struct sgl_page_pairs) + 16656 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16657 if (reqlen > SLI4_PAGE_SIZE) { 16658 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16659 "6118 Block sgl registration required DMA " 16660 "size (%d) great than a page\n", reqlen); 16661 return -ENOMEM; 16662 } 16663 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16664 if (!mbox) { 16665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16666 "6119 Failed to allocate mbox cmd memory\n"); 16667 return -ENOMEM; 16668 } 16669 16670 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16671 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16672 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16673 reqlen, LPFC_SLI4_MBX_NEMBED); 16674 16675 if (alloclen < reqlen) { 16676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16677 "6120 Allocated DMA memory size (%d) is " 16678 "less than the requested DMA memory " 16679 "size (%d)\n", alloclen, reqlen); 16680 lpfc_sli4_mbox_cmd_free(phba, mbox); 16681 return -ENOMEM; 16682 } 16683 16684 /* Get the first SGE entry from the non-embedded DMA memory */ 16685 viraddr = mbox->sge_array->addr[0]; 16686 16687 /* Set up the SGL pages in the non-embedded DMA pages */ 16688 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16689 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16690 16691 pg_pairs = 0; 16692 list_for_each_entry(lpfc_ncmd, nblist, list) { 16693 /* Set up the sge entry */ 16694 sgl_pg_pairs->sgl_pg0_addr_lo = 16695 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 16696 sgl_pg_pairs->sgl_pg0_addr_hi = 16697 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 16698 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16699 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 16700 SGL_PAGE_SIZE; 16701 else 16702 pdma_phys_bpl1 = 0; 16703 sgl_pg_pairs->sgl_pg1_addr_lo = 16704 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16705 sgl_pg_pairs->sgl_pg1_addr_hi = 16706 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16707 /* Keep the first xritag on the list */ 16708 if (pg_pairs == 0) 16709 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 16710 sgl_pg_pairs++; 16711 pg_pairs++; 16712 } 16713 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16714 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16715 /* Perform endian conversion if necessary */ 16716 sgl->word0 = cpu_to_le32(sgl->word0); 16717 16718 if (!phba->sli4_hba.intr_enable) { 16719 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16720 } else { 16721 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16722 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16723 } 16724 shdr = (union 
lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 16725 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16726 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16727 if (rc != MBX_TIMEOUT) 16728 lpfc_sli4_mbox_cmd_free(phba, mbox); 16729 if (shdr_status || shdr_add_status || rc) { 16730 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16731 "6125 POST_SGL_BLOCK mailbox command failed " 16732 "status x%x add_status x%x mbx status x%x\n", 16733 shdr_status, shdr_add_status, rc); 16734 rc = -ENXIO; 16735 } 16736 return rc; 16737 } 16738 16739 /** 16740 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 16741 * @phba: pointer to lpfc hba data structure. 16742 * @post_nblist: pointer to the nvme buffer list. 16743 * 16744 * This routine walks a list of nvme buffers that was passed in. It attempts 16745 * to construct blocks of nvme buffer sgls which contains contiguous xris and 16746 * uses the non-embedded SGL block post mailbox commands to post to the port. 16747 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 16748 * embedded SGL post mailbox command for posting. The @post_nblist passed in 16749 * must be local list, thus no lock is needed when manipulate the list. 16750 * 16751 * Returns: 0 = failure, non-zero number of successfully posted buffers. 16752 **/ 16753 int 16754 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 16755 struct list_head *post_nblist, int sb_count) 16756 { 16757 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 16758 int status, sgl_size; 16759 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 16760 dma_addr_t pdma_phys_sgl1; 16761 int last_xritag = NO_XRI; 16762 int cur_xritag; 16763 LIST_HEAD(prep_nblist); 16764 LIST_HEAD(blck_nblist); 16765 LIST_HEAD(nvme_nblist); 16766 16767 /* sanity check */ 16768 if (sb_count <= 0) 16769 return -EINVAL; 16770 16771 sgl_size = phba->cfg_sg_dma_buf_size; 16772 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 16773 list_del_init(&lpfc_ncmd->list); 16774 block_cnt++; 16775 if ((last_xritag != NO_XRI) && 16776 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 16777 /* a hole in xri block, form a sgl posting block */ 16778 list_splice_init(&prep_nblist, &blck_nblist); 16779 post_cnt = block_cnt - 1; 16780 /* prepare list for next posting block */ 16781 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16782 block_cnt = 1; 16783 } else { 16784 /* prepare list for next posting block */ 16785 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16786 /* enough sgls for non-embed sgl mbox command */ 16787 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 16788 list_splice_init(&prep_nblist, &blck_nblist); 16789 post_cnt = block_cnt; 16790 block_cnt = 0; 16791 } 16792 } 16793 num_posting++; 16794 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16795 16796 /* end of repost sgl list condition for NVME buffers */ 16797 if (num_posting == sb_count) { 16798 if (post_cnt == 0) { 16799 /* last sgl posting block */ 16800 list_splice_init(&prep_nblist, &blck_nblist); 16801 post_cnt = block_cnt; 16802 } else if (block_cnt == 1) { 16803 /* last single sgl with non-contiguous xri */ 16804 if (sgl_size > SGL_PAGE_SIZE) 16805 pdma_phys_sgl1 = 16806 lpfc_ncmd->dma_phys_sgl + 16807 SGL_PAGE_SIZE; 16808 else 16809 pdma_phys_sgl1 = 0; 16810 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16811 status = lpfc_sli4_post_sgl( 16812 phba, lpfc_ncmd->dma_phys_sgl, 16813 pdma_phys_sgl1, cur_xritag); 16814 if (status) { 16815 /* Post error. Buffer unavailable. 
*/ 16816 lpfc_ncmd->flags |= 16817 LPFC_SBUF_NOT_POSTED; 16818 } else { 16819 /* Post success. Bffer available. */ 16820 lpfc_ncmd->flags &= 16821 ~LPFC_SBUF_NOT_POSTED; 16822 lpfc_ncmd->status = IOSTAT_SUCCESS; 16823 num_posted++; 16824 } 16825 /* success, put on NVME buffer sgl list */ 16826 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16827 } 16828 } 16829 16830 /* continue until a nembed page worth of sgls */ 16831 if (post_cnt == 0) 16832 continue; 16833 16834 /* post block of NVME buffer list sgls */ 16835 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 16836 post_cnt); 16837 16838 /* don't reset xirtag due to hole in xri block */ 16839 if (block_cnt == 0) 16840 last_xritag = NO_XRI; 16841 16842 /* reset NVME buffer post count for next round of posting */ 16843 post_cnt = 0; 16844 16845 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 16846 while (!list_empty(&blck_nblist)) { 16847 list_remove_head(&blck_nblist, lpfc_ncmd, 16848 struct lpfc_io_buf, list); 16849 if (status) { 16850 /* Post error. Mark buffer unavailable. */ 16851 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 16852 } else { 16853 /* Post success, Mark buffer available. */ 16854 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 16855 lpfc_ncmd->status = IOSTAT_SUCCESS; 16856 num_posted++; 16857 } 16858 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16859 } 16860 } 16861 /* Push NVME buffers with sgl posted to the available list */ 16862 lpfc_io_buf_replenish(phba, &nvme_nblist); 16863 16864 return num_posted; 16865 } 16866 16867 /** 16868 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16869 * @phba: pointer to lpfc_hba struct that the frame was received on 16870 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16871 * 16872 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16873 * valid type of frame that the LPFC driver will handle. This function will 16874 * return a zero if the frame is a valid frame or a non zero value when the 16875 * frame does not pass the check. 
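 *
 * Typical use in the unsolicited receive path (a sketch; how the header is
 * obtained here is an assumption):
 *
 *	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
 *	if (lpfc_fc_frame_check(phba, fc_hdr))
 *		return;		(the frame is simply dropped)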
16876 **/ 16877 static int 16878 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16879 { 16880 /* make rctl_names static to save stack space */ 16881 struct fc_vft_header *fc_vft_hdr; 16882 uint32_t *header = (uint32_t *) fc_hdr; 16883 16884 switch (fc_hdr->fh_r_ctl) { 16885 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16886 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16887 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16888 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16889 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16890 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16891 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16892 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16893 case FC_RCTL_ELS_REQ: /* extended link services request */ 16894 case FC_RCTL_ELS_REP: /* extended link services reply */ 16895 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16896 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16897 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16898 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16899 case FC_RCTL_BA_RMC: /* remove connection */ 16900 case FC_RCTL_BA_ACC: /* basic accept */ 16901 case FC_RCTL_BA_RJT: /* basic reject */ 16902 case FC_RCTL_BA_PRMT: 16903 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16904 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16905 case FC_RCTL_P_RJT: /* port reject */ 16906 case FC_RCTL_F_RJT: /* fabric reject */ 16907 case FC_RCTL_P_BSY: /* port busy */ 16908 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16909 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16910 case FC_RCTL_LCR: /* link credit reset */ 16911 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16912 case FC_RCTL_END: /* end */ 16913 break; 16914 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16915 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16916 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16917 return lpfc_fc_frame_check(phba, fc_hdr); 16918 default: 16919 goto drop; 16920 } 16921 16922 switch (fc_hdr->fh_type) { 16923 case FC_TYPE_BLS: 16924 case FC_TYPE_ELS: 16925 case FC_TYPE_FCP: 16926 case FC_TYPE_CT: 16927 case FC_TYPE_NVME: 16928 break; 16929 case FC_TYPE_IP: 16930 case FC_TYPE_ILS: 16931 default: 16932 goto drop; 16933 } 16934 16935 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16936 "2538 Received frame rctl:x%x, type:x%x, " 16937 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16938 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16939 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16940 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16941 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16942 be32_to_cpu(header[6])); 16943 return 0; 16944 drop: 16945 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16946 "2539 Dropped frame rctl:x%x type:x%x\n", 16947 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16948 return 1; 16949 } 16950 16951 /** 16952 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16953 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16954 * 16955 * This function processes the FC header to retrieve the VFI from the VF 16956 * header, if one exists. This function will return the VFI if one exists 16957 * or 0 if no VSAN Header exists. 
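 * For example, lpfc_fc_frame_to_vport() compares the value returned here
 * against each vport's vfi when matching a received frame to a vport.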
16958 **/ 16959 static uint32_t 16960 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16961 { 16962 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16963 16964 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16965 return 0; 16966 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16967 } 16968 16969 /** 16970 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16971 * @phba: Pointer to the HBA structure to search for the vport on 16972 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16973 * @fcfi: The FC Fabric ID that the frame came from 16974 * 16975 * This function searches the @phba for a vport that matches the content of the 16976 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16977 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 16978 * returns the matching vport pointer or NULL if unable to match frame to a 16979 * vport. 16980 **/ 16981 static struct lpfc_vport * 16982 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 16983 uint16_t fcfi, uint32_t did) 16984 { 16985 struct lpfc_vport **vports; 16986 struct lpfc_vport *vport = NULL; 16987 int i; 16988 16989 if (did == Fabric_DID) 16990 return phba->pport; 16991 if ((phba->pport->fc_flag & FC_PT2PT) && 16992 !(phba->link_state == LPFC_HBA_READY)) 16993 return phba->pport; 16994 16995 vports = lpfc_create_vport_work_array(phba); 16996 if (vports != NULL) { 16997 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 16998 if (phba->fcf.fcfi == fcfi && 16999 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 17000 vports[i]->fc_myDID == did) { 17001 vport = vports[i]; 17002 break; 17003 } 17004 } 17005 } 17006 lpfc_destroy_vport_work_array(phba, vports); 17007 return vport; 17008 } 17009 17010 /** 17011 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 17012 * @vport: The vport to work on. 17013 * 17014 * This function updates the receive sequence time stamp for this vport. The 17015 * receive sequence time stamp indicates the time that the last frame of the 17016 * the sequence that has been idle for the longest amount of time was received. 17017 * the driver uses this time stamp to indicate if any received sequences have 17018 * timed out. 17019 **/ 17020 static void 17021 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 17022 { 17023 struct lpfc_dmabuf *h_buf; 17024 struct hbq_dmabuf *dmabuf = NULL; 17025 17026 /* get the oldest sequence on the rcv list */ 17027 h_buf = list_get_first(&vport->rcv_buffer_list, 17028 struct lpfc_dmabuf, list); 17029 if (!h_buf) 17030 return; 17031 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17032 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 17033 } 17034 17035 /** 17036 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 17037 * @vport: The vport that the received sequences were sent to. 17038 * 17039 * This function cleans up all outstanding received sequences. This is called 17040 * by the driver when a link event or user action invalidates all the received 17041 * sequences. 
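 * Every data buffer linked to a pending sequence is freed with
 * lpfc_in_buf_free(), followed by the sequence's header buffer. Unlike
 * lpfc_rcv_seq_check_edtov(), this routine frees all pending sequences
 * regardless of how long they have been idle.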
17042 **/ 17043 void 17044 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 17045 { 17046 struct lpfc_dmabuf *h_buf, *hnext; 17047 struct lpfc_dmabuf *d_buf, *dnext; 17048 struct hbq_dmabuf *dmabuf = NULL; 17049 17050 /* start with the oldest sequence on the rcv list */ 17051 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17052 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17053 list_del_init(&dmabuf->hbuf.list); 17054 list_for_each_entry_safe(d_buf, dnext, 17055 &dmabuf->dbuf.list, list) { 17056 list_del_init(&d_buf->list); 17057 lpfc_in_buf_free(vport->phba, d_buf); 17058 } 17059 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17060 } 17061 } 17062 17063 /** 17064 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 17065 * @vport: The vport that the received sequences were sent to. 17066 * 17067 * This function determines whether any received sequences have timed out by 17068 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 17069 * indicates that there is at least one timed out sequence this routine will 17070 * go through the received sequences one at a time from most inactive to most 17071 * active to determine which ones need to be cleaned up. Once it has determined 17072 * that a sequence needs to be cleaned up it will simply free up the resources 17073 * without sending an abort. 17074 **/ 17075 void 17076 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 17077 { 17078 struct lpfc_dmabuf *h_buf, *hnext; 17079 struct lpfc_dmabuf *d_buf, *dnext; 17080 struct hbq_dmabuf *dmabuf = NULL; 17081 unsigned long timeout; 17082 int abort_count = 0; 17083 17084 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17085 vport->rcv_buffer_time_stamp); 17086 if (list_empty(&vport->rcv_buffer_list) || 17087 time_before(jiffies, timeout)) 17088 return; 17089 /* start with the oldest sequence on the rcv list */ 17090 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17091 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17092 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17093 dmabuf->time_stamp); 17094 if (time_before(jiffies, timeout)) 17095 break; 17096 abort_count++; 17097 list_del_init(&dmabuf->hbuf.list); 17098 list_for_each_entry_safe(d_buf, dnext, 17099 &dmabuf->dbuf.list, list) { 17100 list_del_init(&d_buf->list); 17101 lpfc_in_buf_free(vport->phba, d_buf); 17102 } 17103 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17104 } 17105 if (abort_count) 17106 lpfc_update_rcv_time_stamp(vport); 17107 } 17108 17109 /** 17110 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 17111 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 17112 * 17113 * This function searches through the existing incomplete sequences that have 17114 * been sent to this @vport. If the frame matches one of the incomplete 17115 * sequences then the dbuf in the @dmabuf is added to the list of frames that 17116 * make up that sequence. If no sequence is found that matches this frame then 17117 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 17118 * This function returns a pointer to the first dmabuf in the sequence list that 17119 * the frame was linked to. 
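 * A frame is considered part of a pending sequence when its SEQ_ID, OX_ID
 * and S_ID all match the header buffer of that sequence. Frames within a
 * sequence are kept ordered by ascending SEQ_CNT, and the matched sequence
 * is moved to the tail of the rcv_buffer_list with a refreshed time stamp.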
17120 **/ 17121 static struct hbq_dmabuf * 17122 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17123 { 17124 struct fc_frame_header *new_hdr; 17125 struct fc_frame_header *temp_hdr; 17126 struct lpfc_dmabuf *d_buf; 17127 struct lpfc_dmabuf *h_buf; 17128 struct hbq_dmabuf *seq_dmabuf = NULL; 17129 struct hbq_dmabuf *temp_dmabuf = NULL; 17130 uint8_t found = 0; 17131 17132 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17133 dmabuf->time_stamp = jiffies; 17134 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17135 17136 /* Use the hdr_buf to find the sequence that this frame belongs to */ 17137 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17138 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17139 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17140 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17141 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17142 continue; 17143 /* found a pending sequence that matches this frame */ 17144 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17145 break; 17146 } 17147 if (!seq_dmabuf) { 17148 /* 17149 * This indicates first frame received for this sequence. 17150 * Queue the buffer on the vport's rcv_buffer_list. 17151 */ 17152 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 17153 lpfc_update_rcv_time_stamp(vport); 17154 return dmabuf; 17155 } 17156 temp_hdr = seq_dmabuf->hbuf.virt; 17157 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 17158 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 17159 list_del_init(&seq_dmabuf->hbuf.list); 17160 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 17161 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 17162 lpfc_update_rcv_time_stamp(vport); 17163 return dmabuf; 17164 } 17165 /* move this sequence to the tail to indicate a young sequence */ 17166 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 17167 seq_dmabuf->time_stamp = jiffies; 17168 lpfc_update_rcv_time_stamp(vport); 17169 if (list_empty(&seq_dmabuf->dbuf.list)) { 17170 temp_hdr = dmabuf->hbuf.virt; 17171 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 17172 return seq_dmabuf; 17173 } 17174 /* find the correct place in the sequence to insert this frame */ 17175 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 17176 while (!found) { 17177 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17178 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 17179 /* 17180 * If the frame's sequence count is greater than the frame on 17181 * the list then insert the frame right after this frame 17182 */ 17183 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 17184 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 17185 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 17186 found = 1; 17187 break; 17188 } 17189 17190 if (&d_buf->list == &seq_dmabuf->dbuf.list) 17191 break; 17192 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 17193 } 17194 17195 if (found) 17196 return seq_dmabuf; 17197 return NULL; 17198 } 17199 17200 /** 17201 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 17202 * @vport: pointer to a vitural port 17203 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17204 * 17205 * This function tries to abort from the partially assembed sequence, described 17206 * by the information from basic abbort @dmabuf. It checks to see whether such 17207 * partially assembled sequence held by the driver. If so, it shall free up all 17208 * the frames from the partially assembled sequence. 
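 * The ABTS frame's SEQ_ID, OX_ID and S_ID are compared against every header
 * buffer on the vport's rcv_buffer_list, i.e. the same match criteria used
 * by lpfc_fc_frame_add().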
17209 * 17210 * Return 17211 * true -- if there is matching partially assembled sequence present and all 17212 * the frames freed with the sequence; 17213 * false -- if there is no matching partially assembled sequence present so 17214 * nothing got aborted in the lower layer driver 17215 **/ 17216 static bool 17217 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 17218 struct hbq_dmabuf *dmabuf) 17219 { 17220 struct fc_frame_header *new_hdr; 17221 struct fc_frame_header *temp_hdr; 17222 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 17223 struct hbq_dmabuf *seq_dmabuf = NULL; 17224 17225 /* Use the hdr_buf to find the sequence that matches this frame */ 17226 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17227 INIT_LIST_HEAD(&dmabuf->hbuf.list); 17228 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17229 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17230 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17231 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17232 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17233 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17234 continue; 17235 /* found a pending sequence that matches this frame */ 17236 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17237 break; 17238 } 17239 17240 /* Free up all the frames from the partially assembled sequence */ 17241 if (seq_dmabuf) { 17242 list_for_each_entry_safe(d_buf, n_buf, 17243 &seq_dmabuf->dbuf.list, list) { 17244 list_del_init(&d_buf->list); 17245 lpfc_in_buf_free(vport->phba, d_buf); 17246 } 17247 return true; 17248 } 17249 return false; 17250 } 17251 17252 /** 17253 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 17254 * @vport: pointer to a vitural port 17255 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17256 * 17257 * This function tries to abort from the assembed sequence from upper level 17258 * protocol, described by the information from basic abbort @dmabuf. It 17259 * checks to see whether such pending context exists at upper level protocol. 17260 * If so, it shall clean up the pending context. 17261 * 17262 * Return 17263 * true -- if there is matching pending context of the sequence cleaned 17264 * at ulp; 17265 * false -- if there is no matching pending context of the sequence present 17266 * at ulp. 17267 **/ 17268 static bool 17269 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17270 { 17271 struct lpfc_hba *phba = vport->phba; 17272 int handled; 17273 17274 /* Accepting abort at ulp with SLI4 only */ 17275 if (phba->sli_rev < LPFC_SLI_REV4) 17276 return false; 17277 17278 /* Register all caring upper level protocols to attend abort */ 17279 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 17280 if (handled) 17281 return true; 17282 17283 return false; 17284 } 17285 17286 /** 17287 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 17288 * @phba: Pointer to HBA context object. 17289 * @cmd_iocbq: pointer to the command iocbq structure. 17290 * @rsp_iocbq: pointer to the response iocbq structure. 17291 * 17292 * This function handles the sequence abort response iocb command complete 17293 * event. It properly releases the memory allocated to the sequence abort 17294 * accept iocb. 
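 * The node reference taken when the BLS response was built in
 * lpfc_sli4_seq_abort_rsp() is dropped here and the command iocbq is
 * returned to the pool. A non-zero ulpStatus on the response only results
 * in a log message; the response is not retried.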
17295 **/ 17296 static void 17297 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 17298 struct lpfc_iocbq *cmd_iocbq, 17299 struct lpfc_iocbq *rsp_iocbq) 17300 { 17301 struct lpfc_nodelist *ndlp; 17302 17303 if (cmd_iocbq) { 17304 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 17305 lpfc_nlp_put(ndlp); 17306 lpfc_nlp_not_used(ndlp); 17307 lpfc_sli_release_iocbq(phba, cmd_iocbq); 17308 } 17309 17310 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17311 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17313 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17314 rsp_iocbq->iocb.ulpStatus, 17315 rsp_iocbq->iocb.un.ulpWord[4]); 17316 } 17317 17318 /** 17319 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17320 * @phba: Pointer to HBA context object. 17321 * @xri: xri id in transaction. 17322 * 17323 * This function validates the xri maps to the known range of XRIs allocated an 17324 * used by the driver. 17325 **/ 17326 uint16_t 17327 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17328 uint16_t xri) 17329 { 17330 uint16_t i; 17331 17332 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17333 if (xri == phba->sli4_hba.xri_ids[i]) 17334 return i; 17335 } 17336 return NO_XRI; 17337 } 17338 17339 /** 17340 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17341 * @phba: Pointer to HBA context object. 17342 * @fc_hdr: pointer to a FC frame header. 17343 * 17344 * This function sends a basic response to a previous unsol sequence abort 17345 * event after aborting the sequence handling. 17346 **/ 17347 void 17348 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17349 struct fc_frame_header *fc_hdr, bool aborted) 17350 { 17351 struct lpfc_hba *phba = vport->phba; 17352 struct lpfc_iocbq *ctiocb = NULL; 17353 struct lpfc_nodelist *ndlp; 17354 uint16_t oxid, rxid, xri, lxri; 17355 uint32_t sid, fctl; 17356 IOCB_t *icmd; 17357 int rc; 17358 17359 if (!lpfc_is_link_up(phba)) 17360 return; 17361 17362 sid = sli4_sid_from_fc_hdr(fc_hdr); 17363 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17364 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17365 17366 ndlp = lpfc_findnode_did(vport, sid); 17367 if (!ndlp) { 17368 ndlp = lpfc_nlp_init(vport, sid); 17369 if (!ndlp) { 17370 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17371 "1268 Failed to allocate ndlp for " 17372 "oxid:x%x SID:x%x\n", oxid, sid); 17373 return; 17374 } 17375 /* Put ndlp onto pport node list */ 17376 lpfc_enqueue_node(vport, ndlp); 17377 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17378 /* re-setup ndlp without removing from node list */ 17379 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17380 if (!ndlp) { 17381 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17382 "3275 Failed to active ndlp found " 17383 "for oxid:x%x SID:x%x\n", oxid, sid); 17384 return; 17385 } 17386 } 17387 17388 /* Allocate buffer for rsp iocb */ 17389 ctiocb = lpfc_sli_get_iocbq(phba); 17390 if (!ctiocb) 17391 return; 17392 17393 /* Extract the F_CTL field from FC_HDR */ 17394 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17395 17396 icmd = &ctiocb->iocb; 17397 icmd->un.xseq64.bdl.bdeSize = 0; 17398 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17399 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17400 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17401 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17402 17403 /* Fill in the rest of iocb fields */ 17404 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17405 icmd->ulpBdeCount = 0; 17406 icmd->ulpLe = 1; 17407 icmd->ulpClass = CLASS3; 17408 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17409 ctiocb->context1 = lpfc_nlp_get(ndlp); 17410 17411 ctiocb->iocb_cmpl = NULL; 17412 ctiocb->vport = phba->pport; 17413 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17414 ctiocb->sli4_lxritag = NO_XRI; 17415 ctiocb->sli4_xritag = NO_XRI; 17416 17417 if (fctl & FC_FC_EX_CTX) 17418 /* Exchange responder sent the abort so we 17419 * own the oxid. 17420 */ 17421 xri = oxid; 17422 else 17423 xri = rxid; 17424 lxri = lpfc_sli4_xri_inrange(phba, xri); 17425 if (lxri != NO_XRI) 17426 lpfc_set_rrq_active(phba, ndlp, lxri, 17427 (xri == oxid) ? rxid : oxid, 0); 17428 /* For BA_ABTS from exchange responder, if the logical xri with 17429 * the oxid maps to the FCP XRI range, the port no longer has 17430 * that exchange context, send a BLS_RJT. Override the IOCB for 17431 * a BA_RJT. 17432 */ 17433 if ((fctl & FC_FC_EX_CTX) && 17434 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17435 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17436 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17437 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17438 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17439 } 17440 17441 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17442 * the driver no longer has that exchange, send a BLS_RJT. Override 17443 * the IOCB for a BA_RJT. 17444 */ 17445 if (aborted == false) { 17446 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17447 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17448 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17449 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17450 } 17451 17452 if (fctl & FC_FC_EX_CTX) { 17453 /* ABTS sent by responder to CT exchange, construction 17454 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17455 * field and RX_ID from ABTS for RX_ID field. 17456 */ 17457 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17458 } else { 17459 /* ABTS sent by initiator to CT exchange, construction 17460 * of BA_ACC will need to allocate a new XRI as for the 17461 * XRI_TAG field. 17462 */ 17463 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17464 } 17465 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17466 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17467 17468 /* Xmit CT abts response on exchange <xid> */ 17469 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17470 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17471 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17472 17473 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17474 if (rc == IOCB_ERROR) { 17475 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17476 "2925 Failed to issue CT ABTS RSP x%x on " 17477 "xri x%x, Data x%x\n", 17478 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17479 phba->link_state); 17480 lpfc_nlp_put(ndlp); 17481 ctiocb->context1 = NULL; 17482 lpfc_sli_release_iocbq(phba, ctiocb); 17483 } 17484 } 17485 17486 /** 17487 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17488 * @vport: Pointer to the vport on which this sequence was received 17489 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17490 * 17491 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17492 * receive sequence is only partially assembed by the driver, it shall abort 17493 * the partially assembled frames for the sequence. 
Otherwise, if the 17494 * unsolicited receive sequence has been completely assembled and passed to 17495 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show 17496 * that the unsolicited sequence has been aborted. After that, it will issue 17497 * a basic accept to accept the abort. 17498 **/ 17499 static void 17500 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 17501 struct hbq_dmabuf *dmabuf) 17502 { 17503 struct lpfc_hba *phba = vport->phba; 17504 struct fc_frame_header fc_hdr; 17505 uint32_t fctl; 17506 bool aborted; 17507 17508 /* Make a copy of fc_hdr before the dmabuf being released */ 17509 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 17510 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 17511 17512 if (fctl & FC_FC_EX_CTX) { 17513 /* ABTS by responder to exchange, no cleanup needed */ 17514 aborted = true; 17515 } else { 17516 /* ABTS by initiator to exchange, need to do cleanup */ 17517 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 17518 if (aborted == false) 17519 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 17520 } 17521 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17522 17523 if (phba->nvmet_support) { 17524 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); 17525 return; 17526 } 17527 17528 /* Respond with BA_ACC or BA_RJT accordingly */ 17529 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 17530 } 17531 17532 /** 17533 * lpfc_seq_complete - Indicates if a sequence is complete 17534 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17535 * 17536 * This function checks the sequence, starting with the frame described by 17537 * @dmabuf, to see if all the frames associated with this sequence are present. 17538 * The frames associated with this sequence are linked to the @dmabuf using the 17539 * dbuf list. This function looks for three things: 1) that the first frame 17540 * has a sequence count of zero; 2) that there is a frame with the last frame 17541 * of sequence bit set; and 3) that there are no holes in the sequence count. 17542 * The function will return 1 when the sequence is complete, otherwise 0. 17543 **/ 17544 static int 17545 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 17546 { 17547 struct fc_frame_header *hdr; 17548 struct lpfc_dmabuf *d_buf; 17549 struct hbq_dmabuf *seq_dmabuf; 17550 uint32_t fctl; 17551 int seq_count = 0; 17552 17553 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17554 /* make sure first frame of sequence has a sequence count of zero */ 17555 if (hdr->fh_seq_cnt != seq_count) 17556 return 0; 17557 fctl = (hdr->fh_f_ctl[0] << 16 | 17558 hdr->fh_f_ctl[1] << 8 | 17559 hdr->fh_f_ctl[2]); 17560 /* If last frame of sequence we can return success. */ 17561 if (fctl & FC_FC_END_SEQ) 17562 return 1; 17563 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 17564 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17565 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17566 /* If there is a hole in the sequence count then fail. */ 17567 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 17568 return 0; 17569 fctl = (hdr->fh_f_ctl[0] << 16 | 17570 hdr->fh_f_ctl[1] << 8 | 17571 hdr->fh_f_ctl[2]); 17572 /* If last frame of sequence we can return success.
*/ 17573 if (fctl & FC_FC_END_SEQ) 17574 return 1; 17575 } 17576 return 0; 17577 } 17578 17579 /** 17580 * lpfc_prep_seq - Prep sequence for ULP processing 17581 * @vport: Pointer to the vport on which this sequence was received 17582 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17583 * 17584 * This function takes a sequence, described by a list of frames, and creates 17585 * a list of iocbq structures to describe the sequence. This iocbq list will be 17586 * used to issue to the generic unsolicited sequence handler. This routine 17587 * returns a pointer to the first iocbq in the list. If the function is unable 17588 * to allocate an iocbq then it throw out the received frames that were not 17589 * able to be described and return a pointer to the first iocbq. If unable to 17590 * allocate any iocbqs (including the first) this function will return NULL. 17591 **/ 17592 static struct lpfc_iocbq * 17593 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 17594 { 17595 struct hbq_dmabuf *hbq_buf; 17596 struct lpfc_dmabuf *d_buf, *n_buf; 17597 struct lpfc_iocbq *first_iocbq, *iocbq; 17598 struct fc_frame_header *fc_hdr; 17599 uint32_t sid; 17600 uint32_t len, tot_len; 17601 struct ulp_bde64 *pbde; 17602 17603 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17604 /* remove from receive buffer list */ 17605 list_del_init(&seq_dmabuf->hbuf.list); 17606 lpfc_update_rcv_time_stamp(vport); 17607 /* get the Remote Port's SID */ 17608 sid = sli4_sid_from_fc_hdr(fc_hdr); 17609 tot_len = 0; 17610 /* Get an iocbq struct to fill in. */ 17611 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 17612 if (first_iocbq) { 17613 /* Initialize the first IOCB. */ 17614 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 17615 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 17616 first_iocbq->vport = vport; 17617 17618 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 17619 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 17620 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 17621 first_iocbq->iocb.un.rcvels.parmRo = 17622 sli4_did_from_fc_hdr(fc_hdr); 17623 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 17624 } else 17625 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 17626 first_iocbq->iocb.ulpContext = NO_XRI; 17627 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17628 be16_to_cpu(fc_hdr->fh_ox_id); 17629 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 17630 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17631 vport->phba->vpi_ids[vport->vpi]; 17632 /* put the first buffer into the first IOCBq */ 17633 tot_len = bf_get(lpfc_rcqe_length, 17634 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17635 17636 first_iocbq->context2 = &seq_dmabuf->dbuf; 17637 first_iocbq->context3 = NULL; 17638 first_iocbq->iocb.ulpBdeCount = 1; 17639 if (tot_len > LPFC_DATA_BUF_SIZE) 17640 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17641 LPFC_DATA_BUF_SIZE; 17642 else 17643 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17644 17645 first_iocbq->iocb.un.rcvels.remoteID = sid; 17646 17647 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17648 } 17649 iocbq = first_iocbq; 17650 /* 17651 * Each IOCBq can have two Buffers assigned, so go through the list 17652 * of buffers for this sequence and save two buffers in each IOCBq 17653 */ 17654 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17655 if (!iocbq) { 17656 lpfc_in_buf_free(vport->phba, d_buf); 17657 continue; 17658 } 17659 if (!iocbq->context3) { 17660 iocbq->context3 = d_buf; 17661 iocbq->iocb.ulpBdeCount++; 17662 /* We need to get the size out of the right CQE */ 17663 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17664 len = bf_get(lpfc_rcqe_length, 17665 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17666 pbde = (struct ulp_bde64 *) 17667 &iocbq->iocb.unsli3.sli3Words[4]; 17668 if (len > LPFC_DATA_BUF_SIZE) 17669 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17670 else 17671 pbde->tus.f.bdeSize = len; 17672 17673 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17674 tot_len += len; 17675 } else { 17676 iocbq = lpfc_sli_get_iocbq(vport->phba); 17677 if (!iocbq) { 17678 if (first_iocbq) { 17679 first_iocbq->iocb.ulpStatus = 17680 IOSTAT_FCP_RSP_ERROR; 17681 first_iocbq->iocb.un.ulpWord[4] = 17682 IOERR_NO_RESOURCES; 17683 } 17684 lpfc_in_buf_free(vport->phba, d_buf); 17685 continue; 17686 } 17687 /* We need to get the size out of the right CQE */ 17688 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17689 len = bf_get(lpfc_rcqe_length, 17690 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17691 iocbq->context2 = d_buf; 17692 iocbq->context3 = NULL; 17693 iocbq->iocb.ulpBdeCount = 1; 17694 if (len > LPFC_DATA_BUF_SIZE) 17695 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17696 LPFC_DATA_BUF_SIZE; 17697 else 17698 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17699 17700 tot_len += len; 17701 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17702 17703 iocbq->iocb.un.rcvels.remoteID = sid; 17704 list_add_tail(&iocbq->list, &first_iocbq->list); 17705 } 17706 } 17707 return first_iocbq; 17708 } 17709 17710 static void 17711 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17712 struct hbq_dmabuf *seq_dmabuf) 17713 { 17714 struct fc_frame_header *fc_hdr; 17715 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17716 struct lpfc_hba *phba = vport->phba; 17717 17718 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17719 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17720 if (!iocbq) { 17721 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17722 "2707 Ring %d handler: Failed to allocate " 17723 "iocb Rctl x%x Type x%x received\n", 17724 LPFC_ELS_RING, 17725 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17726 return; 17727 } 17728 if (!lpfc_complete_unsol_iocb(phba, 17729 phba->sli4_hba.els_wq->pring, 17730 iocbq, fc_hdr->fh_r_ctl, 17731 fc_hdr->fh_type)) 17732 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17733 "2540 Ring %d handler: unexpected Rctl " 17734 "x%x Type x%x received\n", 17735 LPFC_ELS_RING, 17736 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17737 17738 /* Free iocb created in lpfc_prep_seq */ 17739 list_for_each_entry_safe(curr_iocb, next_iocb, 17740 &iocbq->list, list) { 17741 list_del_init(&curr_iocb->list); 17742 lpfc_sli_release_iocbq(phba, curr_iocb); 17743 } 17744 lpfc_sli_release_iocbq(phba, iocbq); 17745 } 17746 17747 static void 17748 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17749 struct lpfc_iocbq *rspiocb) 17750 { 17751 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17752 17753 if (pcmd && pcmd->virt) 17754 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17755 kfree(pcmd); 17756 lpfc_sli_release_iocbq(phba, cmdiocb); 17757 lpfc_drain_txq(phba); 17758 } 17759 17760 static void 17761 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17762 struct hbq_dmabuf *dmabuf) 17763 { 17764 struct fc_frame_header *fc_hdr; 17765 struct lpfc_hba *phba = vport->phba; 17766 struct lpfc_iocbq *iocbq = NULL; 17767 union lpfc_wqe *wqe; 17768 struct lpfc_dmabuf *pcmd = NULL; 17769 uint32_t frame_len; 17770 int rc; 17771 unsigned long iflags; 17772 17773 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17774 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17775 17776 /* Send the received frame back */ 17777 iocbq = lpfc_sli_get_iocbq(phba); 17778 if (!iocbq) { 17779 /* Queue cq event and wakeup worker thread to process it */ 17780 spin_lock_irqsave(&phba->hbalock, iflags); 17781 list_add_tail(&dmabuf->cq_event.list, 17782 &phba->sli4_hba.sp_queue_event); 17783 phba->hba_flag |= HBA_SP_QUEUE_EVT; 17784 spin_unlock_irqrestore(&phba->hbalock, iflags); 17785 lpfc_worker_wake_up(phba); 17786 return; 17787 } 17788 17789 /* Allocate buffer for command payload */ 17790 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17791 if (pcmd) 17792 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17793 &pcmd->phys); 17794 if (!pcmd || !pcmd->virt) 17795 goto exit; 17796 17797 INIT_LIST_HEAD(&pcmd->list); 17798 17799 /* copyin the payload */ 17800 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17801 17802 /* fill in BDE's for command */ 17803 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17804 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17805 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17806 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17807 17808 iocbq->context2 = pcmd; 17809 iocbq->vport = vport; 17810 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17811 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17812 17813 /* 17814 * Setup rest of the iocb as though it were a WQE 17815 * Build the SEND_FRAME WQE 17816 */ 17817 wqe = (union lpfc_wqe *)&iocbq->iocb; 17818 17819 wqe->send_frame.frame_len = frame_len; 17820 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17821 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17822 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17823 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17824 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17825 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17826 17827 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17828 iocbq->iocb.ulpLe = 1; 17829 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17830 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17831 if (rc == IOCB_ERROR) 17832 goto exit; 17833 17834 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17835 return; 17836 17837 exit: 17838 lpfc_printf_log(phba, 
KERN_WARNING, LOG_SLI, 17839 "2023 Unable to process MDS loopback frame\n"); 17840 if (pcmd && pcmd->virt) 17841 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17842 kfree(pcmd); 17843 if (iocbq) 17844 lpfc_sli_release_iocbq(phba, iocbq); 17845 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17846 } 17847 17848 /** 17849 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17850 * @phba: Pointer to HBA context object. 17851 * 17852 * This function is called with no lock held. This function processes all 17853 * the received buffers and gives it to upper layers when a received buffer 17854 * indicates that it is the final frame in the sequence. The interrupt 17855 * service routine processes received buffers at interrupt contexts. 17856 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17857 * appropriate receive function when the final frame in a sequence is received. 17858 **/ 17859 void 17860 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17861 struct hbq_dmabuf *dmabuf) 17862 { 17863 struct hbq_dmabuf *seq_dmabuf; 17864 struct fc_frame_header *fc_hdr; 17865 struct lpfc_vport *vport; 17866 uint32_t fcfi; 17867 uint32_t did; 17868 17869 /* Process each received buffer */ 17870 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17871 17872 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 17873 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 17874 vport = phba->pport; 17875 /* Handle MDS Loopback frames */ 17876 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17877 return; 17878 } 17879 17880 /* check to see if this a valid type of frame */ 17881 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17882 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17883 return; 17884 } 17885 17886 if ((bf_get(lpfc_cqe_code, 17887 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17888 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17889 &dmabuf->cq_event.cqe.rcqe_cmpl); 17890 else 17891 fcfi = bf_get(lpfc_rcqe_fcf_id, 17892 &dmabuf->cq_event.cqe.rcqe_cmpl); 17893 17894 /* d_id this frame is directed to */ 17895 did = sli4_did_from_fc_hdr(fc_hdr); 17896 17897 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17898 if (!vport) { 17899 /* throw out the frame */ 17900 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17901 return; 17902 } 17903 17904 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17905 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17906 (did != Fabric_DID)) { 17907 /* 17908 * Throw out the frame if we are not pt2pt. 17909 * The pt2pt protocol allows for discovery frames 17910 * to be received without a registered VPI. 17911 */ 17912 if (!(vport->fc_flag & FC_PT2PT) || 17913 (phba->link_state == LPFC_HBA_READY)) { 17914 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17915 return; 17916 } 17917 } 17918 17919 /* Handle the basic abort sequence (BA_ABTS) event */ 17920 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17921 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17922 return; 17923 } 17924 17925 /* Link this frame */ 17926 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17927 if (!seq_dmabuf) { 17928 /* unable to add frame to vport - throw it out */ 17929 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17930 return; 17931 } 17932 /* If not last frame in sequence continue processing frames. 
*/ 17933 if (!lpfc_seq_complete(seq_dmabuf)) 17934 return; 17935 17936 /* Send the complete sequence to the upper layer protocol */ 17937 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17938 } 17939 17940 /** 17941 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17942 * @phba: pointer to lpfc hba data structure. 17943 * 17944 * This routine is invoked to post rpi header templates to the 17945 * HBA consistent with the SLI-4 interface spec. This routine 17946 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17947 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17948 * 17949 * This routine does not require any locks. It's usage is expected 17950 * to be driver load or reset recovery when the driver is 17951 * sequential. 17952 * 17953 * Return codes 17954 * 0 - successful 17955 * -EIO - The mailbox failed to complete successfully. 17956 * When this error occurs, the driver is not guaranteed 17957 * to have any rpi regions posted to the device and 17958 * must either attempt to repost the regions or take a 17959 * fatal error. 17960 **/ 17961 int 17962 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17963 { 17964 struct lpfc_rpi_hdr *rpi_page; 17965 uint32_t rc = 0; 17966 uint16_t lrpi = 0; 17967 17968 /* SLI4 ports that support extents do not require RPI headers. */ 17969 if (!phba->sli4_hba.rpi_hdrs_in_use) 17970 goto exit; 17971 if (phba->sli4_hba.extents_in_use) 17972 return -EIO; 17973 17974 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17975 /* 17976 * Assign the rpi headers a physical rpi only if the driver 17977 * has not initialized those resources. A port reset only 17978 * needs the headers posted. 17979 */ 17980 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 17981 LPFC_RPI_RSRC_RDY) 17982 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17983 17984 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 17985 if (rc != MBX_SUCCESS) { 17986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17987 "2008 Error %d posting all rpi " 17988 "headers\n", rc); 17989 rc = -EIO; 17990 break; 17991 } 17992 } 17993 17994 exit: 17995 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 17996 LPFC_RPI_RSRC_RDY); 17997 return rc; 17998 } 17999 18000 /** 18001 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 18002 * @phba: pointer to lpfc hba data structure. 18003 * @rpi_page: pointer to the rpi memory region. 18004 * 18005 * This routine is invoked to post a single rpi header to the 18006 * HBA consistent with the SLI-4 interface spec. This memory region 18007 * maps up to 64 rpi context regions. 18008 * 18009 * Return codes 18010 * 0 - successful 18011 * -ENOMEM - No available memory 18012 * -EIO - The mailbox failed to complete successfully. 18013 **/ 18014 int 18015 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 18016 { 18017 LPFC_MBOXQ_t *mboxq; 18018 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 18019 uint32_t rc = 0; 18020 uint32_t shdr_status, shdr_add_status; 18021 union lpfc_sli4_cfg_shdr *shdr; 18022 18023 /* SLI4 ports that support extents do not require RPI headers. */ 18024 if (!phba->sli4_hba.rpi_hdrs_in_use) 18025 return rc; 18026 if (phba->sli4_hba.extents_in_use) 18027 return -EIO; 18028 18029 /* The port is notified of the header region via a mailbox command. 
*/ 18030 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18031 if (!mboxq) { 18032 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18033 "2001 Unable to allocate memory for issuing " 18034 "SLI_CONFIG_SPECIAL mailbox command\n"); 18035 return -ENOMEM; 18036 } 18037 18038 /* Post all rpi memory regions to the port. */ 18039 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 18040 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18041 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 18042 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 18043 sizeof(struct lpfc_sli4_cfg_mhdr), 18044 LPFC_SLI4_MBX_EMBED); 18045 18046 18047 /* Post the physical rpi to the port for this rpi header. */ 18048 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 18049 rpi_page->start_rpi); 18050 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 18051 hdr_tmpl, rpi_page->page_count); 18052 18053 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 18054 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 18055 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 18056 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 18057 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18058 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18059 if (rc != MBX_TIMEOUT) 18060 mempool_free(mboxq, phba->mbox_mem_pool); 18061 if (shdr_status || shdr_add_status || rc) { 18062 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18063 "2514 POST_RPI_HDR mailbox failed with " 18064 "status x%x add_status x%x, mbx status x%x\n", 18065 shdr_status, shdr_add_status, rc); 18066 rc = -ENXIO; 18067 } else { 18068 /* 18069 * The next_rpi stores the next logical module-64 rpi value used 18070 * to post physical rpis in subsequent rpi postings. 18071 */ 18072 spin_lock_irq(&phba->hbalock); 18073 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 18074 spin_unlock_irq(&phba->hbalock); 18075 } 18076 return rc; 18077 } 18078 18079 /** 18080 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 18081 * @phba: pointer to lpfc hba data structure. 18082 * 18083 * This routine is invoked to post rpi header templates to the 18084 * HBA consistent with the SLI-4 interface spec. This routine 18085 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 18086 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 18087 * 18088 * Returns 18089 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 18090 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 18091 **/ 18092 int 18093 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 18094 { 18095 unsigned long rpi; 18096 uint16_t max_rpi, rpi_limit; 18097 uint16_t rpi_remaining, lrpi = 0; 18098 struct lpfc_rpi_hdr *rpi_hdr; 18099 unsigned long iflag; 18100 18101 /* 18102 * Fetch the next logical rpi. Because this index is logical, 18103 * the driver starts at 0 each time. 18104 */ 18105 spin_lock_irqsave(&phba->hbalock, iflag); 18106 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 18107 rpi_limit = phba->sli4_hba.next_rpi; 18108 18109 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 18110 if (rpi >= rpi_limit) 18111 rpi = LPFC_RPI_ALLOC_ERROR; 18112 else { 18113 set_bit(rpi, phba->sli4_hba.rpi_bmask); 18114 phba->sli4_hba.max_cfg_param.rpi_used++; 18115 phba->sli4_hba.rpi_count++; 18116 } 18117 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 18118 "0001 rpi:%x max:%x lim:%x\n", 18119 (int) rpi, max_rpi, rpi_limit); 18120 18121 /* 18122 * Don't try to allocate more rpi header regions if the device limit 18123 * has been exhausted. 
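 * When that happens, LPFC_RPI_ALLOC_ERROR is returned to the caller as-is.
 * Otherwise, if fewer than LPFC_RPI_LOW_WATER_MARK rpis remain, another rpi
 * header page is created with lpfc_sli4_create_rpi_hdr() and posted before
 * the rpi is returned.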
18124 */ 18125 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 18126 (phba->sli4_hba.rpi_count >= max_rpi)) { 18127 spin_unlock_irqrestore(&phba->hbalock, iflag); 18128 return rpi; 18129 } 18130 18131 /* 18132 * RPI header postings are not required for SLI4 ports capable of 18133 * extents. 18134 */ 18135 if (!phba->sli4_hba.rpi_hdrs_in_use) { 18136 spin_unlock_irqrestore(&phba->hbalock, iflag); 18137 return rpi; 18138 } 18139 18140 /* 18141 * If the driver is running low on rpi resources, allocate another 18142 * page now. Note that the next_rpi value is used because 18143 * it represents how many are actually in use whereas max_rpi notes 18144 * how many are supported max by the device. 18145 */ 18146 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 18147 spin_unlock_irqrestore(&phba->hbalock, iflag); 18148 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 18149 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 18150 if (!rpi_hdr) { 18151 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18152 "2002 Error Could not grow rpi " 18153 "count\n"); 18154 } else { 18155 lrpi = rpi_hdr->start_rpi; 18156 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18157 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 18158 } 18159 } 18160 18161 return rpi; 18162 } 18163 18164 /** 18165 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18166 * @phba: pointer to lpfc hba data structure. 18167 * 18168 * This routine is invoked to release an rpi to the pool of 18169 * available rpis maintained by the driver. 18170 **/ 18171 static void 18172 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18173 { 18174 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 18175 phba->sli4_hba.rpi_count--; 18176 phba->sli4_hba.max_cfg_param.rpi_used--; 18177 } 18178 } 18179 18180 /** 18181 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18182 * @phba: pointer to lpfc hba data structure. 18183 * 18184 * This routine is invoked to release an rpi to the pool of 18185 * available rpis maintained by the driver. 18186 **/ 18187 void 18188 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18189 { 18190 spin_lock_irq(&phba->hbalock); 18191 __lpfc_sli4_free_rpi(phba, rpi); 18192 spin_unlock_irq(&phba->hbalock); 18193 } 18194 18195 /** 18196 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 18197 * @phba: pointer to lpfc hba data structure. 18198 * 18199 * This routine is invoked to remove the memory region that 18200 * provided rpi via a bitmask. 18201 **/ 18202 void 18203 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 18204 { 18205 kfree(phba->sli4_hba.rpi_bmask); 18206 kfree(phba->sli4_hba.rpi_ids); 18207 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 18208 } 18209 18210 /** 18211 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 18212 * @phba: pointer to lpfc hba data structure. 18213 * 18214 * This routine is invoked to remove the memory region that 18215 * provided rpi via a bitmask. 18216 **/ 18217 int 18218 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 18219 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 18220 { 18221 LPFC_MBOXQ_t *mboxq; 18222 struct lpfc_hba *phba = ndlp->phba; 18223 int rc; 18224 18225 /* The port is notified of the header region via a mailbox command. */ 18226 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18227 if (!mboxq) 18228 return -ENOMEM; 18229 18230 /* Post all rpi memory regions to the port. 
*/ 18231 lpfc_resume_rpi(mboxq, ndlp); 18232 if (cmpl) { 18233 mboxq->mbox_cmpl = cmpl; 18234 mboxq->ctx_buf = arg; 18235 mboxq->ctx_ndlp = ndlp; 18236 } else 18237 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 18238 mboxq->vport = ndlp->vport; 18239 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18240 if (rc == MBX_NOT_FINISHED) { 18241 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18242 "2010 Resume RPI Mailbox failed " 18243 "status %d, mbxStatus x%x\n", rc, 18244 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18245 mempool_free(mboxq, phba->mbox_mem_pool); 18246 return -EIO; 18247 } 18248 return 0; 18249 } 18250 18251 /** 18252 * lpfc_sli4_init_vpi - Initialize a vpi with the port 18253 * @vport: Pointer to the vport for which the vpi is being initialized 18254 * 18255 * This routine is invoked to activate a vpi with the port. 18256 * 18257 * Returns: 18258 * 0 success 18259 * -Evalue otherwise 18260 **/ 18261 int 18262 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 18263 { 18264 LPFC_MBOXQ_t *mboxq; 18265 int rc = 0; 18266 int retval = MBX_SUCCESS; 18267 uint32_t mbox_tmo; 18268 struct lpfc_hba *phba = vport->phba; 18269 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18270 if (!mboxq) 18271 return -ENOMEM; 18272 lpfc_init_vpi(phba, mboxq, vport->vpi); 18273 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18274 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18275 if (rc != MBX_SUCCESS) { 18276 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18277 "2022 INIT VPI Mailbox failed " 18278 "status %d, mbxStatus x%x\n", rc, 18279 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18280 retval = -EIO; 18281 } 18282 if (rc != MBX_TIMEOUT) 18283 mempool_free(mboxq, vport->phba->mbox_mem_pool); 18284 18285 return retval; 18286 } 18287 18288 /** 18289 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 18290 * @phba: pointer to lpfc hba data structure. 18291 * @mboxq: Pointer to mailbox object. 18292 * 18293 * This routine is invoked to manually add a single FCF record. The caller 18294 * must pass a completely initialized FCF_Record. This routine takes 18295 * care of the nonembedded mailbox operations. 18296 **/ 18297 static void 18298 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 18299 { 18300 void *virt_addr; 18301 union lpfc_sli4_cfg_shdr *shdr; 18302 uint32_t shdr_status, shdr_add_status; 18303 18304 virt_addr = mboxq->sge_array->addr[0]; 18305 /* The IOCTL status is embedded in the mailbox subheader. */ 18306 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18307 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18308 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18309 18310 if ((shdr_status || shdr_add_status) && 18311 (shdr_status != STATUS_FCF_IN_USE)) 18312 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18313 "2558 ADD_FCF_RECORD mailbox failed with " 18314 "status x%x add_status x%x\n", 18315 shdr_status, shdr_add_status); 18316 18317 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18318 } 18319 18320 /** 18321 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18322 * @phba: pointer to lpfc hba data structure. 18323 * @fcf_record: pointer to the initialized fcf record to add. 18324 * 18325 * This routine is invoked to manually add a single FCF record. The caller 18326 * must pass a completely initialized FCF_Record. This routine takes 18327 * care of the nonembedded mailbox operations. 
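 * The command is built as a non-embedded mailbox (LPFC_SLI4_MBX_NEMBED): the
 * fcf_index and the record are copied into the first SGE with
 * lpfc_sli_pcimem_bcopy() and the mailbox is issued with MBX_NOWAIT,
 * completing asynchronously in lpfc_mbx_cmpl_add_fcf_record().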
18328 **/ 18329 int 18330 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18331 { 18332 int rc = 0; 18333 LPFC_MBOXQ_t *mboxq; 18334 uint8_t *bytep; 18335 void *virt_addr; 18336 struct lpfc_mbx_sge sge; 18337 uint32_t alloc_len, req_len; 18338 uint32_t fcfindex; 18339 18340 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18341 if (!mboxq) { 18342 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18343 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18344 return -ENOMEM; 18345 } 18346 18347 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18348 sizeof(uint32_t); 18349 18350 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18351 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18352 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18353 req_len, LPFC_SLI4_MBX_NEMBED); 18354 if (alloc_len < req_len) { 18355 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18356 "2523 Allocated DMA memory size (x%x) is " 18357 "less than the requested DMA memory " 18358 "size (x%x)\n", alloc_len, req_len); 18359 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18360 return -ENOMEM; 18361 } 18362 18363 /* 18364 * Get the first SGE entry from the non-embedded DMA memory. This 18365 * routine only uses a single SGE. 18366 */ 18367 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18368 virt_addr = mboxq->sge_array->addr[0]; 18369 /* 18370 * Configure the FCF record for FCFI 0. This is the driver's 18371 * hardcoded default and gets used in nonFIP mode. 18372 */ 18373 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18374 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18375 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18376 18377 /* 18378 * Copy the fcf_index and the FCF Record Data. The data starts after 18379 * the FCoE header plus word10. The data copy needs to be endian 18380 * correct. 18381 */ 18382 bytep += sizeof(uint32_t); 18383 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18384 mboxq->vport = phba->pport; 18385 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18386 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18387 if (rc == MBX_NOT_FINISHED) { 18388 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18389 "2515 ADD_FCF_RECORD mailbox failed with " 18390 "status 0x%x\n", rc); 18391 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18392 rc = -EIO; 18393 } else 18394 rc = 0; 18395 18396 return rc; 18397 } 18398 18399 /** 18400 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18401 * @phba: pointer to lpfc hba data structure. 18402 * @fcf_record: pointer to the fcf record to write the default data. 18403 * @fcf_index: FCF table entry index. 18404 * 18405 * This routine is invoked to build the driver's default FCF record. The 18406 * values used are hardcoded. This routine handles memory initialization. 
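 * A minimal, illustrative call sequence for the non-FIP default record
 * (phba and fcf_index are assumed to be in scope at the call site):
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, fcf_index);
 *	rc = lpfc_sli4_add_fcf_record(phba, &fcf_record);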
18407 * 18408 **/ 18409 void 18410 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 18411 struct fcf_record *fcf_record, 18412 uint16_t fcf_index) 18413 { 18414 memset(fcf_record, 0, sizeof(struct fcf_record)); 18415 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 18416 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 18417 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 18418 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 18419 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 18420 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 18421 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 18422 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 18423 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 18424 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 18425 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 18426 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 18427 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 18428 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 18429 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 18430 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 18431 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 18432 /* Set the VLAN bit map */ 18433 if (phba->valid_vlan) { 18434 fcf_record->vlan_bitmap[phba->vlan_id / 8] 18435 = 1 << (phba->vlan_id % 8); 18436 } 18437 } 18438 18439 /** 18440 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 18441 * @phba: pointer to lpfc hba data structure. 18442 * @fcf_index: FCF table entry offset. 18443 * 18444 * This routine is invoked to scan the entire FCF table by reading FCF 18445 * record and processing it one at a time starting from the @fcf_index 18446 * for initial FCF discovery or fast FCF failover rediscovery. 18447 * 18448 * Return 0 if the mailbox command is submitted successfully, none 0 18449 * otherwise. 
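 * FCF_TS_INPROG is set in hba_flag under the hbalock before the mailbox is
 * issued and is cleared again if the scan cannot be started. When @fcf_index
 * is LPFC_FCOE_FCF_GET_FIRST the eligible FCF count is reset for a new scan.
 * Completion is handled by lpfc_mbx_cmpl_fcf_scan_read_fcf_rec().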
18450 **/ 18451 int 18452 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18453 { 18454 int rc = 0, error; 18455 LPFC_MBOXQ_t *mboxq; 18456 18457 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18458 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18459 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18460 if (!mboxq) { 18461 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18462 "2000 Failed to allocate mbox for " 18463 "READ_FCF cmd\n"); 18464 error = -ENOMEM; 18465 goto fail_fcf_scan; 18466 } 18467 /* Construct the read FCF record mailbox command */ 18468 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18469 if (rc) { 18470 error = -EINVAL; 18471 goto fail_fcf_scan; 18472 } 18473 /* Issue the mailbox command asynchronously */ 18474 mboxq->vport = phba->pport; 18475 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18476 18477 spin_lock_irq(&phba->hbalock); 18478 phba->hba_flag |= FCF_TS_INPROG; 18479 spin_unlock_irq(&phba->hbalock); 18480 18481 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18482 if (rc == MBX_NOT_FINISHED) 18483 error = -EIO; 18484 else { 18485 /* Reset eligible FCF count for new scan */ 18486 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18487 phba->fcf.eligible_fcf_cnt = 0; 18488 error = 0; 18489 } 18490 fail_fcf_scan: 18491 if (error) { 18492 if (mboxq) 18493 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18494 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18495 spin_lock_irq(&phba->hbalock); 18496 phba->hba_flag &= ~FCF_TS_INPROG; 18497 spin_unlock_irq(&phba->hbalock); 18498 } 18499 return error; 18500 } 18501 18502 /** 18503 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18504 * @phba: pointer to lpfc hba data structure. 18505 * @fcf_index: FCF table entry offset. 18506 * 18507 * This routine is invoked to read an FCF record indicated by @fcf_index 18508 * and to use it for FLOGI roundrobin FCF failover. 18509 * 18510 * Return 0 if the mailbox command is submitted successfully, none 0 18511 * otherwise. 18512 **/ 18513 int 18514 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18515 { 18516 int rc = 0, error; 18517 LPFC_MBOXQ_t *mboxq; 18518 18519 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18520 if (!mboxq) { 18521 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18522 "2763 Failed to allocate mbox for " 18523 "READ_FCF cmd\n"); 18524 error = -ENOMEM; 18525 goto fail_fcf_read; 18526 } 18527 /* Construct the read FCF record mailbox command */ 18528 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18529 if (rc) { 18530 error = -EINVAL; 18531 goto fail_fcf_read; 18532 } 18533 /* Issue the mailbox command asynchronously */ 18534 mboxq->vport = phba->pport; 18535 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18536 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18537 if (rc == MBX_NOT_FINISHED) 18538 error = -EIO; 18539 else 18540 error = 0; 18541 18542 fail_fcf_read: 18543 if (error && mboxq) 18544 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18545 return error; 18546 } 18547 18548 /** 18549 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18550 * @phba: pointer to lpfc hba data structure. 18551 * @fcf_index: FCF table entry offset. 18552 * 18553 * This routine is invoked to read an FCF record indicated by @fcf_index to 18554 * determine whether it's eligible for FLOGI roundrobin failover list. 18555 * 18556 * Return 0 if the mailbox command is submitted successfully, none 0 18557 * otherwise. 
18558 **/
18559 int
18560 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18561 {
18562 	int rc = 0, error;
18563 	LPFC_MBOXQ_t *mboxq;
18564
18565 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18566 	if (!mboxq) {
18567 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18568 				"2758 Failed to allocate mbox for "
18569 				"READ_FCF cmd\n");
18570 		error = -ENOMEM;
18571 		goto fail_fcf_read;
18572 	}
18573 	/* Construct the read FCF record mailbox command */
18574 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18575 	if (rc) {
18576 		error = -EINVAL;
18577 		goto fail_fcf_read;
18578 	}
18579 	/* Issue the mailbox command asynchronously */
18580 	mboxq->vport = phba->pport;
18581 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18582 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18583 	if (rc == MBX_NOT_FINISHED)
18584 		error = -EIO;
18585 	else
18586 		error = 0;
18587
18588 fail_fcf_read:
18589 	if (error && mboxq)
18590 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
18591 	return error;
18592 }
18593
18594 /**
18595  * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from the next FCF priority level
18596  * @phba: pointer to the lpfc_hba struct for this port.
18597  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18598  * routine when the rr_bmask is empty. The FCF indices are put into the
18599  * rr_bmask based on their priority level, starting from the highest priority
18600  * down to the lowest. The most likely FCF candidate will be in the highest
18601  * priority group. When this routine is called it searches the fcf_pri list for
18602  * the next lowest priority group and repopulates the rr_bmask with only those
18603  * fcf_indexes.
18604  * Returns:
18605  * 1 = success, 0 = failure.
18606 **/
18607 static int
18608 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18609 {
18610 	uint16_t next_fcf_pri;
18611 	uint16_t last_index;
18612 	struct lpfc_fcf_pri *fcf_pri;
18613 	int rc;
18614 	int ret = 0;
18615
18616 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18617 				    LPFC_SLI4_FCF_TBL_INDX_MAX);
18618 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18619 			"3060 Last IDX %d\n", last_index);
18620
18621 	/* Verify the priority list has 2 or more entries */
18622 	spin_lock_irq(&phba->hbalock);
18623 	if (list_empty(&phba->fcf.fcf_pri_list) ||
18624 	    list_is_singular(&phba->fcf.fcf_pri_list)) {
18625 		spin_unlock_irq(&phba->hbalock);
18626 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18627 				"3061 Last IDX %d\n", last_index);
18628 		return 0; /* Empty rr list */
18629 	}
18630 	spin_unlock_irq(&phba->hbalock);
18631
18632 	next_fcf_pri = 0;
18633 	/*
18634 	 * Clear the rr_bmask and set all of the bits that are at this
18635 	 * priority.
18636 	 */
18637 	memset(phba->fcf.fcf_rr_bmask, 0,
18638 	       sizeof(*phba->fcf.fcf_rr_bmask));
18639 	spin_lock_irq(&phba->hbalock);
18640 	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18641 		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18642 			continue;
18643 		/*
18644 		 * The first priority that has not failed FLOGI
18645 		 * will be the highest.
18646 		 */
18647 		if (!next_fcf_pri)
18648 			next_fcf_pri = fcf_pri->fcf_rec.priority;
18649 		spin_unlock_irq(&phba->hbalock);
18650 		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18651 			rc = lpfc_sli4_fcf_rr_index_set(phba,
18652 						fcf_pri->fcf_rec.fcf_index);
18653 			if (rc)
18654 				return 0;
18655 		}
18656 		spin_lock_irq(&phba->hbalock);
18657 	}
18658 	/*
18659 	 * If next_fcf_pri was not set above and the list is not empty then
18660 	 * we have failed FLOGI on all of them. So reset the FLOGI-failed flags
18661 	 * and start at the beginning.
18662 */ 18663 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18664 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18665 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18666 /* 18667 * the 1st priority that has not FLOGI failed 18668 * will be the highest. 18669 */ 18670 if (!next_fcf_pri) 18671 next_fcf_pri = fcf_pri->fcf_rec.priority; 18672 spin_unlock_irq(&phba->hbalock); 18673 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18674 rc = lpfc_sli4_fcf_rr_index_set(phba, 18675 fcf_pri->fcf_rec.fcf_index); 18676 if (rc) 18677 return 0; 18678 } 18679 spin_lock_irq(&phba->hbalock); 18680 } 18681 } else 18682 ret = 1; 18683 spin_unlock_irq(&phba->hbalock); 18684 18685 return ret; 18686 } 18687 /** 18688 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18689 * @phba: pointer to lpfc hba data structure. 18690 * 18691 * This routine is to get the next eligible FCF record index in a round 18692 * robin fashion. If the next eligible FCF record index equals to the 18693 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18694 * shall be returned, otherwise, the next eligible FCF record's index 18695 * shall be returned. 18696 **/ 18697 uint16_t 18698 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18699 { 18700 uint16_t next_fcf_index; 18701 18702 initial_priority: 18703 /* Search start from next bit of currently registered FCF index */ 18704 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18705 18706 next_priority: 18707 /* Determine the next fcf index to check */ 18708 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18709 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18710 LPFC_SLI4_FCF_TBL_INDX_MAX, 18711 next_fcf_index); 18712 18713 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18714 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18715 /* 18716 * If we have wrapped then we need to clear the bits that 18717 * have been tested so that we can detect when we should 18718 * change the priority level. 18719 */ 18720 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18721 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18722 } 18723 18724 18725 /* Check roundrobin failover list empty condition */ 18726 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18727 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18728 /* 18729 * If next fcf index is not found check if there are lower 18730 * Priority level fcf's in the fcf_priority list. 18731 * Set up the rr_bmask with all of the avaiable fcf bits 18732 * at that level and continue the selection process. 18733 */ 18734 if (lpfc_check_next_fcf_pri_level(phba)) 18735 goto initial_priority; 18736 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18737 "2844 No roundrobin failover FCF available\n"); 18738 18739 return LPFC_FCOE_FCF_NEXT_NONE; 18740 } 18741 18742 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18743 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18744 LPFC_FCF_FLOGI_FAILED) { 18745 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18746 return LPFC_FCOE_FCF_NEXT_NONE; 18747 18748 goto next_priority; 18749 } 18750 18751 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18752 "2845 Get next roundrobin failover FCF (x%x)\n", 18753 next_fcf_index); 18754 18755 return next_fcf_index; 18756 } 18757 18758 /** 18759 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18760 * @phba: pointer to lpfc hba data structure. 
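 * @fcf_index: the FCF table entry index to set in the roundrobin bmask.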
18761 * 18762 * This routine sets the FCF record index in to the eligible bmask for 18763 * roundrobin failover search. It checks to make sure that the index 18764 * does not go beyond the range of the driver allocated bmask dimension 18765 * before setting the bit. 18766 * 18767 * Returns 0 if the index bit successfully set, otherwise, it returns 18768 * -EINVAL. 18769 **/ 18770 int 18771 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18772 { 18773 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18774 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18775 "2610 FCF (x%x) reached driver's book " 18776 "keeping dimension:x%x\n", 18777 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18778 return -EINVAL; 18779 } 18780 /* Set the eligible FCF record index bmask */ 18781 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18782 18783 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18784 "2790 Set FCF (x%x) to roundrobin FCF failover " 18785 "bmask\n", fcf_index); 18786 18787 return 0; 18788 } 18789 18790 /** 18791 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18792 * @phba: pointer to lpfc hba data structure. 18793 * 18794 * This routine clears the FCF record index from the eligible bmask for 18795 * roundrobin failover search. It checks to make sure that the index 18796 * does not go beyond the range of the driver allocated bmask dimension 18797 * before clearing the bit. 18798 **/ 18799 void 18800 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18801 { 18802 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18803 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18804 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18805 "2762 FCF (x%x) reached driver's book " 18806 "keeping dimension:x%x\n", 18807 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18808 return; 18809 } 18810 /* Clear the eligible FCF record index bmask */ 18811 spin_lock_irq(&phba->hbalock); 18812 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18813 list) { 18814 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18815 list_del_init(&fcf_pri->list); 18816 break; 18817 } 18818 } 18819 spin_unlock_irq(&phba->hbalock); 18820 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18821 18822 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18823 "2791 Clear FCF (x%x) from roundrobin failover " 18824 "bmask\n", fcf_index); 18825 } 18826 18827 /** 18828 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18829 * @phba: pointer to lpfc hba data structure. 18830 * 18831 * This routine is the completion routine for the rediscover FCF table mailbox 18832 * command. If the mailbox command returned failure, it will try to stop the 18833 * FCF rediscover wait timer. 
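 * @mbox: pointer to the completed rediscover-FCF-table mailbox command.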
18834 **/ 18835 static void 18836 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18837 { 18838 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18839 uint32_t shdr_status, shdr_add_status; 18840 18841 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18842 18843 shdr_status = bf_get(lpfc_mbox_hdr_status, 18844 &redisc_fcf->header.cfg_shdr.response); 18845 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18846 &redisc_fcf->header.cfg_shdr.response); 18847 if (shdr_status || shdr_add_status) { 18848 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18849 "2746 Requesting for FCF rediscovery failed " 18850 "status x%x add_status x%x\n", 18851 shdr_status, shdr_add_status); 18852 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18853 spin_lock_irq(&phba->hbalock); 18854 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18855 spin_unlock_irq(&phba->hbalock); 18856 /* 18857 * CVL event triggered FCF rediscover request failed, 18858 * last resort to re-try current registered FCF entry. 18859 */ 18860 lpfc_retry_pport_discovery(phba); 18861 } else { 18862 spin_lock_irq(&phba->hbalock); 18863 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18864 spin_unlock_irq(&phba->hbalock); 18865 /* 18866 * DEAD FCF event triggered FCF rediscover request 18867 * failed, last resort to fail over as a link down 18868 * to FCF registration. 18869 */ 18870 lpfc_sli4_fcf_dead_failthrough(phba); 18871 } 18872 } else { 18873 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18874 "2775 Start FCF rediscover quiescent timer\n"); 18875 /* 18876 * Start FCF rediscovery wait timer for pending FCF 18877 * before rescan FCF record table. 18878 */ 18879 lpfc_fcf_redisc_wait_start_timer(phba); 18880 } 18881 18882 mempool_free(mbox, phba->mbox_mem_pool); 18883 } 18884 18885 /** 18886 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18887 * @phba: pointer to lpfc hba data structure. 18888 * 18889 * This routine is invoked to request for rediscovery of the entire FCF table 18890 * by the port. 18891 **/ 18892 int 18893 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18894 { 18895 LPFC_MBOXQ_t *mbox; 18896 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18897 int rc, length; 18898 18899 /* Cancel retry delay timers to all vports before FCF rediscover */ 18900 lpfc_cancel_all_vport_retry_delay_timer(phba); 18901 18902 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18903 if (!mbox) { 18904 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18905 "2745 Failed to allocate mbox for " 18906 "requesting FCF rediscover.\n"); 18907 return -ENOMEM; 18908 } 18909 18910 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18911 sizeof(struct lpfc_sli4_cfg_mhdr)); 18912 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18913 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18914 length, LPFC_SLI4_MBX_EMBED); 18915 18916 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18917 /* Set count to 0 for invalidating the entire FCF database */ 18918 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18919 18920 /* Issue the mailbox command asynchronously */ 18921 mbox->vport = phba->pport; 18922 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18923 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18924 18925 if (rc == MBX_NOT_FINISHED) { 18926 mempool_free(mbox, phba->mbox_mem_pool); 18927 return -EIO; 18928 } 18929 return 0; 18930 } 18931 18932 /** 18933 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18934 * @phba: pointer to lpfc hba data structure. 
18935 * 18936 * This function is the failover routine as a last resort to the FCF DEAD 18937 * event when driver failed to perform fast FCF failover. 18938 **/ 18939 void 18940 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18941 { 18942 uint32_t link_state; 18943 18944 /* 18945 * Last resort as FCF DEAD event failover will treat this as 18946 * a link down, but save the link state because we don't want 18947 * it to be changed to Link Down unless it is already down. 18948 */ 18949 link_state = phba->link_state; 18950 lpfc_linkdown(phba); 18951 phba->link_state = link_state; 18952 18953 /* Unregister FCF if no devices connected to it */ 18954 lpfc_unregister_unused_fcf(phba); 18955 } 18956 18957 /** 18958 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18959 * @phba: pointer to lpfc hba data structure. 18960 * @rgn23_data: pointer to configure region 23 data. 18961 * 18962 * This function gets SLI3 port configure region 23 data through memory dump 18963 * mailbox command. When it successfully retrieves data, the size of the data 18964 * will be returned, otherwise, 0 will be returned. 18965 **/ 18966 static uint32_t 18967 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18968 { 18969 LPFC_MBOXQ_t *pmb = NULL; 18970 MAILBOX_t *mb; 18971 uint32_t offset = 0; 18972 int rc; 18973 18974 if (!rgn23_data) 18975 return 0; 18976 18977 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18978 if (!pmb) { 18979 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18980 "2600 failed to allocate mailbox memory\n"); 18981 return 0; 18982 } 18983 mb = &pmb->u.mb; 18984 18985 do { 18986 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 18987 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 18988 18989 if (rc != MBX_SUCCESS) { 18990 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 18991 "2601 failed to read config " 18992 "region 23, rc 0x%x Status 0x%x\n", 18993 rc, mb->mbxStatus); 18994 mb->un.varDmp.word_cnt = 0; 18995 } 18996 /* 18997 * dump mem may return a zero when finished or we got a 18998 * mailbox error, either way we are done. 18999 */ 19000 if (mb->un.varDmp.word_cnt == 0) 19001 break; 19002 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19003 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19004 19005 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19006 rgn23_data + offset, 19007 mb->un.varDmp.word_cnt); 19008 offset += mb->un.varDmp.word_cnt; 19009 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19010 19011 mempool_free(pmb, phba->mbox_mem_pool); 19012 return offset; 19013 } 19014 19015 /** 19016 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 19017 * @phba: pointer to lpfc hba data structure. 19018 * @rgn23_data: pointer to configure region 23 data. 19019 * 19020 * This function gets SLI4 port configure region 23 data through memory dump 19021 * mailbox command. When it successfully retrieves data, the size of the data 19022 * will be returned, otherwise, 0 will be returned. 
19023 **/ 19024 static uint32_t 19025 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19026 { 19027 LPFC_MBOXQ_t *mboxq = NULL; 19028 struct lpfc_dmabuf *mp = NULL; 19029 struct lpfc_mqe *mqe; 19030 uint32_t data_length = 0; 19031 int rc; 19032 19033 if (!rgn23_data) 19034 return 0; 19035 19036 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19037 if (!mboxq) { 19038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19039 "3105 failed to allocate mailbox memory\n"); 19040 return 0; 19041 } 19042 19043 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19044 goto out; 19045 mqe = &mboxq->u.mqe; 19046 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 19047 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19048 if (rc) 19049 goto out; 19050 data_length = mqe->un.mb_words[5]; 19051 if (data_length == 0) 19052 goto out; 19053 if (data_length > DMP_RGN23_SIZE) { 19054 data_length = 0; 19055 goto out; 19056 } 19057 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 19058 out: 19059 mempool_free(mboxq, phba->mbox_mem_pool); 19060 if (mp) { 19061 lpfc_mbuf_free(phba, mp->virt, mp->phys); 19062 kfree(mp); 19063 } 19064 return data_length; 19065 } 19066 19067 /** 19068 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 19069 * @phba: pointer to lpfc hba data structure. 19070 * 19071 * This function read region 23 and parse TLV for port status to 19072 * decide if the user disaled the port. If the TLV indicates the 19073 * port is disabled, the hba_flag is set accordingly. 19074 **/ 19075 void 19076 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 19077 { 19078 uint8_t *rgn23_data = NULL; 19079 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 19080 uint32_t offset = 0; 19081 19082 /* Get adapter Region 23 data */ 19083 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 19084 if (!rgn23_data) 19085 goto out; 19086 19087 if (phba->sli_rev < LPFC_SLI_REV4) 19088 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 19089 else { 19090 if_type = bf_get(lpfc_sli_intf_if_type, 19091 &phba->sli4_hba.sli_intf); 19092 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 19093 goto out; 19094 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 19095 } 19096 19097 if (!data_size) 19098 goto out; 19099 19100 /* Check the region signature first */ 19101 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19102 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19103 "2619 Config region 23 has bad signature\n"); 19104 goto out; 19105 } 19106 offset += 4; 19107 19108 /* Check the data structure version */ 19109 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19110 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19111 "2620 Config region 23 has bad version\n"); 19112 goto out; 19113 } 19114 offset += 4; 19115 19116 /* Parse TLV entries in the region */ 19117 while (offset < data_size) { 19118 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 19119 break; 19120 /* 19121 * If the TLV is not driver specific TLV or driver id is 19122 * not linux driver id, skip the record. 19123 */ 19124 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 19125 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 19126 (rgn23_data[offset + 3] != 0)) { 19127 offset += rgn23_data[offset + 1] * 4 + 4; 19128 continue; 19129 } 19130 19131 /* Driver found a driver specific TLV in the config region */ 19132 sub_tlv_len = rgn23_data[offset + 1] * 4; 19133 offset += 4; 19134 tlv_offset = 0; 19135 19136 /* 19137 * Search for configured port state sub-TLV. 
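	 * Each sub-TLV starts with a type byte and a length byte (in words),
	 * followed by its data; for the PORT_STE_TYPE record, a zero state
	 * byte means the user has disabled the link.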
19138 */ 19139 while ((offset < data_size) && 19140 (tlv_offset < sub_tlv_len)) { 19141 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 19142 offset += 4; 19143 tlv_offset += 4; 19144 break; 19145 } 19146 if (rgn23_data[offset] != PORT_STE_TYPE) { 19147 offset += rgn23_data[offset + 1] * 4 + 4; 19148 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 19149 continue; 19150 } 19151 19152 /* This HBA contains PORT_STE configured */ 19153 if (!rgn23_data[offset + 2]) 19154 phba->hba_flag |= LINK_DISABLED; 19155 19156 goto out; 19157 } 19158 } 19159 19160 out: 19161 kfree(rgn23_data); 19162 return; 19163 } 19164 19165 /** 19166 * lpfc_wr_object - write an object to the firmware 19167 * @phba: HBA structure that indicates port to create a queue on. 19168 * @dmabuf_list: list of dmabufs to write to the port. 19169 * @size: the total byte value of the objects to write to the port. 19170 * @offset: the current offset to be used to start the transfer. 19171 * 19172 * This routine will create a wr_object mailbox command to send to the port. 19173 * the mailbox command will be constructed using the dma buffers described in 19174 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 19175 * BDEs that the imbedded mailbox can support. The @offset variable will be 19176 * used to indicate the starting offset of the transfer and will also return 19177 * the offset after the write object mailbox has completed. @size is used to 19178 * determine the end of the object and whether the eof bit should be set. 19179 * 19180 * Return 0 is successful and offset will contain the the new offset to use 19181 * for the next write. 19182 * Return negative value for error cases. 19183 **/ 19184 int 19185 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 19186 uint32_t size, uint32_t *offset) 19187 { 19188 struct lpfc_mbx_wr_object *wr_object; 19189 LPFC_MBOXQ_t *mbox; 19190 int rc = 0, i = 0; 19191 uint32_t shdr_status, shdr_add_status, shdr_change_status; 19192 uint32_t mbox_tmo; 19193 struct lpfc_dmabuf *dmabuf; 19194 uint32_t written = 0; 19195 bool check_change_status = false; 19196 19197 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19198 if (!mbox) 19199 return -ENOMEM; 19200 19201 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 19202 LPFC_MBOX_OPCODE_WRITE_OBJECT, 19203 sizeof(struct lpfc_mbx_wr_object) - 19204 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 19205 19206 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 19207 wr_object->u.request.write_offset = *offset; 19208 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 19209 wr_object->u.request.object_name[0] = 19210 cpu_to_le32(wr_object->u.request.object_name[0]); 19211 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 19212 list_for_each_entry(dmabuf, dmabuf_list, list) { 19213 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 19214 break; 19215 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 19216 wr_object->u.request.bde[i].addrHigh = 19217 putPaddrHigh(dmabuf->phys); 19218 if (written + SLI4_PAGE_SIZE >= size) { 19219 wr_object->u.request.bde[i].tus.f.bdeSize = 19220 (size - written); 19221 written += (size - written); 19222 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 19223 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 19224 check_change_status = true; 19225 } else { 19226 wr_object->u.request.bde[i].tus.f.bdeSize = 19227 SLI4_PAGE_SIZE; 19228 written += SLI4_PAGE_SIZE; 19229 } 19230 i++; 19231 } 19232 
wr_object->u.request.bde_count = i;
19233 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19234 	if (!phba->sli4_hba.intr_enable)
19235 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19236 	else {
19237 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19238 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19239 	}
19240 	/* The IOCTL status is embedded in the mailbox subheader. */
19241 	shdr_status = bf_get(lpfc_mbox_hdr_status,
19242 			     &wr_object->header.cfg_shdr.response);
19243 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19244 				 &wr_object->header.cfg_shdr.response);
19245 	if (check_change_status) {
19246 		shdr_change_status = bf_get(lpfc_wr_object_change_status,
19247 					    &wr_object->u.response);
19248 		switch (shdr_change_status) {
19249 		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19250 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19251 					"3198 Firmware write complete: System "
19252 					"reboot required to instantiate\n");
19253 			break;
19254 		case (LPFC_CHANGE_STATUS_FW_RESET):
19255 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19256 					"3199 Firmware write complete: Firmware"
19257 					" reset required to instantiate\n");
19258 			break;
19259 		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19260 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19261 					"3200 Firmware write complete: Port "
19262 					"Migration or PCI Reset required to "
19263 					"instantiate\n");
19264 			break;
19265 		case (LPFC_CHANGE_STATUS_PCI_RESET):
19266 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19267 					"3201 Firmware write complete: PCI "
19268 					"Reset required to instantiate\n");
19269 			break;
19270 		default:
19271 			break;
19272 		}
19273 	}
19274 	if (rc != MBX_TIMEOUT)
19275 		mempool_free(mbox, phba->mbox_mem_pool);
19276 	if (shdr_status || shdr_add_status || rc) {
19277 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19278 				"3025 Write Object mailbox failed with "
19279 				"status x%x add_status x%x, mbx status x%x\n",
19280 				shdr_status, shdr_add_status, rc);
19281 		rc = -ENXIO;
19282 		*offset = shdr_add_status;
19283 	} else
19284 		*offset += wr_object->u.response.actual_write_length;
19285 	return rc;
19286 }
19287
19288 /**
19289  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19290  * @vport: pointer to vport data structure.
19291  *
19292  * This function iterates through the mailboxq and cleans up all REG_LOGIN
19293  * and REG_VPI mailbox commands associated with the vport. This function
19294  * is called when the driver wants to restart discovery of the vport due to
19295  * a Clear Virtual Link event.
19296 **/ 19297 void 19298 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19299 { 19300 struct lpfc_hba *phba = vport->phba; 19301 LPFC_MBOXQ_t *mb, *nextmb; 19302 struct lpfc_dmabuf *mp; 19303 struct lpfc_nodelist *ndlp; 19304 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19305 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19306 LIST_HEAD(mbox_cmd_list); 19307 uint8_t restart_loop; 19308 19309 /* Clean up internally queued mailbox commands with the vport */ 19310 spin_lock_irq(&phba->hbalock); 19311 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19312 if (mb->vport != vport) 19313 continue; 19314 19315 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19316 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19317 continue; 19318 19319 list_del(&mb->list); 19320 list_add_tail(&mb->list, &mbox_cmd_list); 19321 } 19322 /* Clean up active mailbox command with the vport */ 19323 mb = phba->sli.mbox_active; 19324 if (mb && (mb->vport == vport)) { 19325 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19326 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19327 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19328 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19329 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19330 /* Put reference count for delayed processing */ 19331 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19332 /* Unregister the RPI when mailbox complete */ 19333 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19334 } 19335 } 19336 /* Cleanup any mailbox completions which are not yet processed */ 19337 do { 19338 restart_loop = 0; 19339 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19340 /* 19341 * If this mailox is already processed or it is 19342 * for another vport ignore it. 19343 */ 19344 if ((mb->vport != vport) || 19345 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19346 continue; 19347 19348 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19349 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19350 continue; 19351 19352 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19353 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19354 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19355 /* Unregister the RPI when mailbox complete */ 19356 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19357 restart_loop = 1; 19358 spin_unlock_irq(&phba->hbalock); 19359 spin_lock(shost->host_lock); 19360 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19361 spin_unlock(shost->host_lock); 19362 spin_lock_irq(&phba->hbalock); 19363 break; 19364 } 19365 } 19366 } while (restart_loop); 19367 19368 spin_unlock_irq(&phba->hbalock); 19369 19370 /* Release the cleaned-up mailbox commands */ 19371 while (!list_empty(&mbox_cmd_list)) { 19372 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19373 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19374 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 19375 if (mp) { 19376 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19377 kfree(mp); 19378 } 19379 mb->ctx_buf = NULL; 19380 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19381 mb->ctx_ndlp = NULL; 19382 if (ndlp) { 19383 spin_lock(shost->host_lock); 19384 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19385 spin_unlock(shost->host_lock); 19386 lpfc_nlp_put(ndlp); 19387 } 19388 } 19389 mempool_free(mb, phba->mbox_mem_pool); 19390 } 19391 19392 /* Release the ndlp with the cleaned-up active mailbox command */ 19393 if (act_mbx_ndlp) { 19394 spin_lock(shost->host_lock); 19395 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19396 spin_unlock(shost->host_lock); 19397 lpfc_nlp_put(act_mbx_ndlp); 19398 } 19399 } 19400 19401 /** 19402 * lpfc_drain_txq - Drain the txq 19403 * @phba: Pointer to 
HBA context object. 19404 * 19405 * This function attempt to submit IOCBs on the txq 19406 * to the adapter. For SLI4 adapters, the txq contains 19407 * ELS IOCBs that have been deferred because the there 19408 * are no SGLs. This congestion can occur with large 19409 * vport counts during node discovery. 19410 **/ 19411 19412 uint32_t 19413 lpfc_drain_txq(struct lpfc_hba *phba) 19414 { 19415 LIST_HEAD(completions); 19416 struct lpfc_sli_ring *pring; 19417 struct lpfc_iocbq *piocbq = NULL; 19418 unsigned long iflags = 0; 19419 char *fail_msg = NULL; 19420 struct lpfc_sglq *sglq; 19421 union lpfc_wqe128 wqe; 19422 uint32_t txq_cnt = 0; 19423 struct lpfc_queue *wq; 19424 19425 if (phba->link_flag & LS_MDS_LOOPBACK) { 19426 /* MDS WQE are posted only to first WQ*/ 19427 wq = phba->sli4_hba.hdwq[0].fcp_wq; 19428 if (unlikely(!wq)) 19429 return 0; 19430 pring = wq->pring; 19431 } else { 19432 wq = phba->sli4_hba.els_wq; 19433 if (unlikely(!wq)) 19434 return 0; 19435 pring = lpfc_phba_elsring(phba); 19436 } 19437 19438 if (unlikely(!pring) || list_empty(&pring->txq)) 19439 return 0; 19440 19441 spin_lock_irqsave(&pring->ring_lock, iflags); 19442 list_for_each_entry(piocbq, &pring->txq, list) { 19443 txq_cnt++; 19444 } 19445 19446 if (txq_cnt > pring->txq_max) 19447 pring->txq_max = txq_cnt; 19448 19449 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19450 19451 while (!list_empty(&pring->txq)) { 19452 spin_lock_irqsave(&pring->ring_lock, iflags); 19453 19454 piocbq = lpfc_sli_ringtx_get(phba, pring); 19455 if (!piocbq) { 19456 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19457 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19458 "2823 txq empty and txq_cnt is %d\n ", 19459 txq_cnt); 19460 break; 19461 } 19462 sglq = __lpfc_sli_get_els_sglq(phba, piocbq); 19463 if (!sglq) { 19464 __lpfc_sli_ringtx_put(phba, pring, piocbq); 19465 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19466 break; 19467 } 19468 txq_cnt--; 19469 19470 /* The xri and iocb resources secured, 19471 * attempt to issue request 19472 */ 19473 piocbq->sli4_lxritag = sglq->sli4_lxritag; 19474 piocbq->sli4_xritag = sglq->sli4_xritag; 19475 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 19476 fail_msg = "to convert bpl to sgl"; 19477 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19478 fail_msg = "to convert iocb to wqe"; 19479 else if (lpfc_sli4_wq_put(wq, &wqe)) 19480 fail_msg = " - Wq is full"; 19481 else 19482 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19483 19484 if (fail_msg) { 19485 /* Failed means we can't issue and need to cancel */ 19486 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19487 "2822 IOCB failed %s iotag 0x%x " 19488 "xri 0x%x\n", 19489 fail_msg, 19490 piocbq->iotag, piocbq->sli4_xritag); 19491 list_add_tail(&piocbq->list, &completions); 19492 } 19493 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19494 } 19495 19496 /* Cancel all the IOCBs that cannot be issued */ 19497 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 19498 IOERR_SLI_ABORTED); 19499 19500 return txq_cnt; 19501 } 19502 19503 /** 19504 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 19505 * @phba: Pointer to HBA context object. 19506 * @pwqe: Pointer to command WQE. 19507 * @sglq: Pointer to the scatter gather queue object. 19508 * 19509 * This routine converts the bpl or bde that is in the WQE 19510 * to a sgl list for the sli4 hardware. The physical address 19511 * of the bpl/bde is converted back to a virtual address. 19512 * If the WQE contains a BPL then the list of BDE's is 19513 * converted to sli4_sge's. 
If the WQE contains a single 19514 * BDE then it is converted to a single sli_sge. 19515 * The WQE is still in cpu endianness so the contents of 19516 * the bpl can be used without byte swapping. 19517 * 19518 * Returns valid XRI = Success, NO_XRI = Failure. 19519 */ 19520 static uint16_t 19521 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19522 struct lpfc_sglq *sglq) 19523 { 19524 uint16_t xritag = NO_XRI; 19525 struct ulp_bde64 *bpl = NULL; 19526 struct ulp_bde64 bde; 19527 struct sli4_sge *sgl = NULL; 19528 struct lpfc_dmabuf *dmabuf; 19529 union lpfc_wqe128 *wqe; 19530 int numBdes = 0; 19531 int i = 0; 19532 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19533 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19534 uint32_t cmd; 19535 19536 if (!pwqeq || !sglq) 19537 return xritag; 19538 19539 sgl = (struct sli4_sge *)sglq->sgl; 19540 wqe = &pwqeq->wqe; 19541 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19542 19543 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19544 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19545 return sglq->sli4_xritag; 19546 numBdes = pwqeq->rsvd2; 19547 if (numBdes) { 19548 /* The addrHigh and addrLow fields within the WQE 19549 * have not been byteswapped yet so there is no 19550 * need to swap them back. 19551 */ 19552 if (pwqeq->context3) 19553 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19554 else 19555 return xritag; 19556 19557 bpl = (struct ulp_bde64 *)dmabuf->virt; 19558 if (!bpl) 19559 return xritag; 19560 19561 for (i = 0; i < numBdes; i++) { 19562 /* Should already be byte swapped. */ 19563 sgl->addr_hi = bpl->addrHigh; 19564 sgl->addr_lo = bpl->addrLow; 19565 19566 sgl->word2 = le32_to_cpu(sgl->word2); 19567 if ((i+1) == numBdes) 19568 bf_set(lpfc_sli4_sge_last, sgl, 1); 19569 else 19570 bf_set(lpfc_sli4_sge_last, sgl, 0); 19571 /* swap the size field back to the cpu so we 19572 * can assign it to the sgl. 19573 */ 19574 bde.tus.w = le32_to_cpu(bpl->tus.w); 19575 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19576 /* The offsets in the sgl need to be accumulated 19577 * separately for the request and reply lists. 19578 * The request is always first, the reply follows. 19579 */ 19580 switch (cmd) { 19581 case CMD_GEN_REQUEST64_WQE: 19582 /* add up the reply sg entries */ 19583 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19584 inbound++; 19585 /* first inbound? reset the offset */ 19586 if (inbound == 1) 19587 offset = 0; 19588 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19589 bf_set(lpfc_sli4_sge_type, sgl, 19590 LPFC_SGE_TYPE_DATA); 19591 offset += bde.tus.f.bdeSize; 19592 break; 19593 case CMD_FCP_TRSP64_WQE: 19594 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19595 bf_set(lpfc_sli4_sge_type, sgl, 19596 LPFC_SGE_TYPE_DATA); 19597 break; 19598 case CMD_FCP_TSEND64_WQE: 19599 case CMD_FCP_TRECEIVE64_WQE: 19600 bf_set(lpfc_sli4_sge_type, sgl, 19601 bpl->tus.f.bdeFlags); 19602 if (i < 3) 19603 offset = 0; 19604 else 19605 offset += bde.tus.f.bdeSize; 19606 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19607 break; 19608 } 19609 sgl->word2 = cpu_to_le32(sgl->word2); 19610 bpl++; 19611 sgl++; 19612 } 19613 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19614 /* The addrHigh and addrLow fields of the BDE have not 19615 * been byteswapped yet so they need to be swapped 19616 * before putting them in the sgl. 
19617 */ 19618 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19619 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19620 sgl->word2 = le32_to_cpu(sgl->word2); 19621 bf_set(lpfc_sli4_sge_last, sgl, 1); 19622 sgl->word2 = cpu_to_le32(sgl->word2); 19623 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19624 } 19625 return sglq->sli4_xritag; 19626 } 19627 19628 /** 19629 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19630 * @phba: Pointer to HBA context object. 19631 * @ring_number: Base sli ring number 19632 * @pwqe: Pointer to command WQE. 19633 **/ 19634 int 19635 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19636 struct lpfc_iocbq *pwqe) 19637 { 19638 union lpfc_wqe128 *wqe = &pwqe->wqe; 19639 struct lpfc_nvmet_rcv_ctx *ctxp; 19640 struct lpfc_queue *wq; 19641 struct lpfc_sglq *sglq; 19642 struct lpfc_sli_ring *pring; 19643 unsigned long iflags; 19644 uint32_t ret = 0; 19645 19646 /* NVME_LS and NVME_LS ABTS requests. */ 19647 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19648 pring = phba->sli4_hba.nvmels_wq->pring; 19649 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19650 qp, wq_access); 19651 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19652 if (!sglq) { 19653 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19654 return WQE_BUSY; 19655 } 19656 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19657 pwqe->sli4_xritag = sglq->sli4_xritag; 19658 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19659 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19660 return WQE_ERROR; 19661 } 19662 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19663 pwqe->sli4_xritag); 19664 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19665 if (ret) { 19666 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19667 return ret; 19668 } 19669 19670 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19671 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19672 return 0; 19673 } 19674 19675 /* NVME_FCREQ and NVME_ABTS requests */ 19676 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19677 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19678 wq = qp->nvme_wq; 19679 pring = wq->pring; 19680 19681 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19682 19683 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19684 qp, wq_access); 19685 ret = lpfc_sli4_wq_put(wq, wqe); 19686 if (ret) { 19687 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19688 return ret; 19689 } 19690 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19691 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19692 return 0; 19693 } 19694 19695 /* NVMET requests */ 19696 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19697 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 19698 wq = qp->nvme_wq; 19699 pring = wq->pring; 19700 19701 ctxp = pwqe->context2; 19702 sglq = ctxp->ctxbuf->sglq; 19703 if (pwqe->sli4_xritag == NO_XRI) { 19704 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19705 pwqe->sli4_xritag = sglq->sli4_xritag; 19706 } 19707 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19708 pwqe->sli4_xritag); 19709 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); 19710 19711 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19712 qp, wq_access); 19713 ret = lpfc_sli4_wq_put(wq, wqe); 19714 if (ret) { 19715 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19716 return ret; 19717 } 19718 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19719 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19720 return 0; 19721 } 19722 return WQE_ERROR; 19723 } 19724 19725 #ifdef LPFC_MXP_STAT 19726 /** 19727 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 19728 * @phba: pointer to lpfc hba data structure. 19729 * @hwqid: belong to which HWQ. 19730 * 19731 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 19732 * 15 seconds after a test case is running. 19733 * 19734 * The user should call lpfc_debugfs_multixripools_write before running a test 19735 * case to clear stat_snapshot_taken. Then the user starts a test case. During 19736 * test case is running, stat_snapshot_taken is incremented by 1 every time when 19737 * this routine is called from heartbeat timer. When stat_snapshot_taken is 19738 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 19739 **/ 19740 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 19741 { 19742 struct lpfc_sli4_hdw_queue *qp; 19743 struct lpfc_multixri_pool *multixri_pool; 19744 struct lpfc_pvt_pool *pvt_pool; 19745 struct lpfc_pbl_pool *pbl_pool; 19746 u32 txcmplq_cnt; 19747 19748 qp = &phba->sli4_hba.hdwq[hwqid]; 19749 multixri_pool = qp->p_multixri_pool; 19750 if (!multixri_pool) 19751 return; 19752 19753 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 19754 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19755 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19756 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 19757 if (qp->nvme_wq) 19758 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 19759 19760 multixri_pool->stat_pbl_count = pbl_pool->count; 19761 multixri_pool->stat_pvt_count = pvt_pool->count; 19762 multixri_pool->stat_busy_count = txcmplq_cnt; 19763 } 19764 19765 multixri_pool->stat_snapshot_taken++; 19766 } 19767 #endif 19768 19769 /** 19770 * lpfc_adjust_pvt_pool_count - Adjust private pool count 19771 * @phba: pointer to lpfc hba data structure. 19772 * @hwqid: belong to which HWQ. 19773 * 19774 * This routine moves some XRIs from private to public pool when private pool 19775 * is not busy. 19776 **/ 19777 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 19778 { 19779 struct lpfc_multixri_pool *multixri_pool; 19780 u32 io_req_count; 19781 u32 prev_io_req_count; 19782 19783 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 19784 if (!multixri_pool) 19785 return; 19786 io_req_count = multixri_pool->io_req_count; 19787 prev_io_req_count = multixri_pool->prev_io_req_count; 19788 19789 if (prev_io_req_count != io_req_count) { 19790 /* Private pool is busy */ 19791 multixri_pool->prev_io_req_count = io_req_count; 19792 } else { 19793 /* Private pool is not busy. 19794 * Move XRIs from private to public pool. 
19795 		 */
19796 		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
19797 	}
19798 }
19799
19800 /**
19801  * lpfc_adjust_high_watermark - Adjust high watermark
19802  * @phba: pointer to lpfc hba data structure.
19803  * @hwqid: belong to which HWQ.
19804  *
19805  * This routine sets the high watermark to the number of outstanding XRIs,
19806  * but makes sure the new value stays between xri_limit/2 and xri_limit.
19807  **/
19808 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
19809 {
19810 	u32 new_watermark;
19811 	u32 watermark_max;
19812 	u32 watermark_min;
19813 	u32 xri_limit;
19814 	u32 txcmplq_cnt;
19815 	u32 abts_io_bufs;
19816 	struct lpfc_multixri_pool *multixri_pool;
19817 	struct lpfc_sli4_hdw_queue *qp;
19818
19819 	qp = &phba->sli4_hba.hdwq[hwqid];
19820 	multixri_pool = qp->p_multixri_pool;
19821 	if (!multixri_pool)
19822 		return;
19823 	xri_limit = multixri_pool->xri_limit;
19824
19825 	watermark_max = xri_limit;
19826 	watermark_min = xri_limit / 2;
19827
19828 	txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt;
19829 	abts_io_bufs = qp->abts_scsi_io_bufs;
19830 	if (qp->nvme_wq) {
19831 		txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt;
19832 		abts_io_bufs += qp->abts_nvme_io_bufs;
19833 	}
19834
19835 	new_watermark = txcmplq_cnt + abts_io_bufs;
19836 	new_watermark = min(watermark_max, new_watermark);
19837 	new_watermark = max(watermark_min, new_watermark);
19838 	multixri_pool->pvt_pool.high_watermark = new_watermark;
19839
19840 #ifdef LPFC_MXP_STAT
19841 	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
19842 					  new_watermark);
19843 #endif
19844 }
19845
19846 /**
19847  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
19848  * @phba: pointer to lpfc hba data structure.
19849  * @hwqid: belong to which HWQ.
19850  *
19851  * This routine is called from the heartbeat timer when pvt_pool is idle.
19852  * All free XRIs are moved from the private to the public pool on hwqid in two steps.
19853  * The first step moves (all - low_watermark) of the XRIs.
19854  * The second step moves the rest of the XRIs.
19855 **/ 19856 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 19857 { 19858 struct lpfc_pbl_pool *pbl_pool; 19859 struct lpfc_pvt_pool *pvt_pool; 19860 struct lpfc_sli4_hdw_queue *qp; 19861 struct lpfc_io_buf *lpfc_ncmd; 19862 struct lpfc_io_buf *lpfc_ncmd_next; 19863 unsigned long iflag; 19864 struct list_head tmp_list; 19865 u32 tmp_count; 19866 19867 qp = &phba->sli4_hba.hdwq[hwqid]; 19868 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19869 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19870 tmp_count = 0; 19871 19872 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 19873 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 19874 19875 if (pvt_pool->count > pvt_pool->low_watermark) { 19876 /* Step 1: move (all - low_watermark) from pvt_pool 19877 * to pbl_pool 19878 */ 19879 19880 /* Move low watermark of bufs from pvt_pool to tmp_list */ 19881 INIT_LIST_HEAD(&tmp_list); 19882 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 19883 &pvt_pool->list, list) { 19884 list_move_tail(&lpfc_ncmd->list, &tmp_list); 19885 tmp_count++; 19886 if (tmp_count >= pvt_pool->low_watermark) 19887 break; 19888 } 19889 19890 /* Move all bufs from pvt_pool to pbl_pool */ 19891 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19892 19893 /* Move all bufs from tmp_list to pvt_pool */ 19894 list_splice(&tmp_list, &pvt_pool->list); 19895 19896 pbl_pool->count += (pvt_pool->count - tmp_count); 19897 pvt_pool->count = tmp_count; 19898 } else { 19899 /* Step 2: move the rest from pvt_pool to pbl_pool */ 19900 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19901 pbl_pool->count += pvt_pool->count; 19902 pvt_pool->count = 0; 19903 } 19904 19905 spin_unlock(&pvt_pool->lock); 19906 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19907 } 19908 19909 /** 19910 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19911 * @phba: pointer to lpfc hba data structure 19912 * @pbl_pool: specified public free XRI pool 19913 * @pvt_pool: specified private free XRI pool 19914 * @count: number of XRIs to move 19915 * 19916 * This routine tries to move some free common bufs from the specified pbl_pool 19917 * to the specified pvt_pool. It might move less than count XRIs if there's not 19918 * enough in public pool. 
19919 * 19920 * Return: 19921 * true - if XRIs are successfully moved from the specified pbl_pool to the 19922 * specified pvt_pool 19923 * false - if the specified pbl_pool is empty or locked by someone else 19924 **/ 19925 static bool 19926 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19927 struct lpfc_pbl_pool *pbl_pool, 19928 struct lpfc_pvt_pool *pvt_pool, u32 count) 19929 { 19930 struct lpfc_io_buf *lpfc_ncmd; 19931 struct lpfc_io_buf *lpfc_ncmd_next; 19932 unsigned long iflag; 19933 int ret; 19934 19935 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 19936 if (ret) { 19937 if (pbl_pool->count) { 19938 /* Move a batch of XRIs from public to private pool */ 19939 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 19940 list_for_each_entry_safe(lpfc_ncmd, 19941 lpfc_ncmd_next, 19942 &pbl_pool->list, 19943 list) { 19944 list_move_tail(&lpfc_ncmd->list, 19945 &pvt_pool->list); 19946 pvt_pool->count++; 19947 pbl_pool->count--; 19948 count--; 19949 if (count == 0) 19950 break; 19951 } 19952 19953 spin_unlock(&pvt_pool->lock); 19954 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19955 return true; 19956 } 19957 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19958 } 19959 19960 return false; 19961 } 19962 19963 /** 19964 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19965 * @phba: pointer to lpfc hba data structure. 19966 * @hwqid: belong to which HWQ. 19967 * @count: number of XRIs to move 19968 * 19969 * This routine tries to find some free common bufs in one of public pools with 19970 * Round Robin method. The search always starts from local hwqid, then the next 19971 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 19972 * a batch of free common bufs are moved to private pool on hwqid. 19973 * It might move less than count XRIs if there's not enough in public pool. 
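 *
 * A typical call site requests one batch at a time, for example (see
 * lpfc_keep_pvt_pool_above_lowwm() below):
 *
 *	if (pvt_pool->count < pvt_pool->low_watermark)
 *		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);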
19974 **/ 19975 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) 19976 { 19977 struct lpfc_multixri_pool *multixri_pool; 19978 struct lpfc_multixri_pool *next_multixri_pool; 19979 struct lpfc_pvt_pool *pvt_pool; 19980 struct lpfc_pbl_pool *pbl_pool; 19981 struct lpfc_sli4_hdw_queue *qp; 19982 u32 next_hwqid; 19983 u32 hwq_count; 19984 int ret; 19985 19986 qp = &phba->sli4_hba.hdwq[hwqid]; 19987 multixri_pool = qp->p_multixri_pool; 19988 pvt_pool = &multixri_pool->pvt_pool; 19989 pbl_pool = &multixri_pool->pbl_pool; 19990 19991 /* Check if local pbl_pool is available */ 19992 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); 19993 if (ret) { 19994 #ifdef LPFC_MXP_STAT 19995 multixri_pool->local_pbl_hit_count++; 19996 #endif 19997 return; 19998 } 19999 20000 hwq_count = phba->cfg_hdw_queue; 20001 20002 /* Get the next hwqid which was found last time */ 20003 next_hwqid = multixri_pool->rrb_next_hwqid; 20004 20005 do { 20006 /* Go to next hwq */ 20007 next_hwqid = (next_hwqid + 1) % hwq_count; 20008 20009 next_multixri_pool = 20010 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; 20011 pbl_pool = &next_multixri_pool->pbl_pool; 20012 20013 /* Check if the public free xri pool is available */ 20014 ret = _lpfc_move_xri_pbl_to_pvt( 20015 phba, qp, pbl_pool, pvt_pool, count); 20016 20017 /* Exit while-loop if success or all hwqid are checked */ 20018 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); 20019 20020 /* Starting point for the next time */ 20021 multixri_pool->rrb_next_hwqid = next_hwqid; 20022 20023 if (!ret) { 20024 /* stats: all public pools are empty*/ 20025 multixri_pool->pbl_empty_count++; 20026 } 20027 20028 #ifdef LPFC_MXP_STAT 20029 if (ret) { 20030 if (next_hwqid == hwqid) 20031 multixri_pool->local_pbl_hit_count++; 20032 else 20033 multixri_pool->other_pbl_hit_count++; 20034 } 20035 #endif 20036 } 20037 20038 /** 20039 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark 20040 * @phba: pointer to lpfc hba data structure. 20041 * @qp: belong to which HWQ. 20042 * 20043 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than 20044 * low watermark. 20045 **/ 20046 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) 20047 { 20048 struct lpfc_multixri_pool *multixri_pool; 20049 struct lpfc_pvt_pool *pvt_pool; 20050 20051 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 20052 pvt_pool = &multixri_pool->pvt_pool; 20053 20054 if (pvt_pool->count < pvt_pool->low_watermark) 20055 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 20056 } 20057 20058 /** 20059 * lpfc_release_io_buf - Return one IO buf back to free pool 20060 * @phba: pointer to lpfc hba data structure. 20061 * @lpfc_ncmd: IO buf to be returned. 20062 * @qp: belong to which HWQ. 20063 * 20064 * This routine returns one IO buf back to free pool. If this is an urgent IO, 20065 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, 20066 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and 20067 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to 20068 * lpfc_io_buf_list_put. 
20069 **/ 20070 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 20071 struct lpfc_sli4_hdw_queue *qp) 20072 { 20073 unsigned long iflag; 20074 struct lpfc_pbl_pool *pbl_pool; 20075 struct lpfc_pvt_pool *pvt_pool; 20076 struct lpfc_epd_pool *epd_pool; 20077 u32 txcmplq_cnt; 20078 u32 xri_owned; 20079 u32 xri_limit; 20080 u32 abts_io_bufs; 20081 20082 /* MUST zero fields if buffer is reused by another protocol */ 20083 lpfc_ncmd->nvmeCmd = NULL; 20084 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; 20085 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; 20086 20087 if (phba->cfg_xri_rebalancing) { 20088 if (lpfc_ncmd->expedite) { 20089 /* Return to expedite pool */ 20090 epd_pool = &phba->epd_pool; 20091 spin_lock_irqsave(&epd_pool->lock, iflag); 20092 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 20093 epd_pool->count++; 20094 spin_unlock_irqrestore(&epd_pool->lock, iflag); 20095 return; 20096 } 20097 20098 /* Avoid invalid access if an IO sneaks in and is being rejected 20099 * just _after_ xri pools are destroyed in lpfc_offline. 20100 * Nothing much can be done at this point. 20101 */ 20102 if (!qp->p_multixri_pool) 20103 return; 20104 20105 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20106 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20107 20108 txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; 20109 abts_io_bufs = qp->abts_scsi_io_bufs; 20110 if (qp->nvme_wq) { 20111 txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; 20112 abts_io_bufs += qp->abts_nvme_io_bufs; 20113 } 20114 20115 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 20116 xri_limit = qp->p_multixri_pool->xri_limit; 20117 20118 #ifdef LPFC_MXP_STAT 20119 if (xri_owned <= xri_limit) 20120 qp->p_multixri_pool->below_limit_count++; 20121 else 20122 qp->p_multixri_pool->above_limit_count++; 20123 #endif 20124 20125 /* XRI goes to either public or private free xri pool 20126 * based on watermark and xri_limit 20127 */ 20128 if ((pvt_pool->count < pvt_pool->low_watermark) || 20129 (xri_owned < xri_limit && 20130 pvt_pool->count < pvt_pool->high_watermark)) { 20131 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 20132 qp, free_pvt_pool); 20133 list_add_tail(&lpfc_ncmd->list, 20134 &pvt_pool->list); 20135 pvt_pool->count++; 20136 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 20137 } else { 20138 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 20139 qp, free_pub_pool); 20140 list_add_tail(&lpfc_ncmd->list, 20141 &pbl_pool->list); 20142 pbl_pool->count++; 20143 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20144 } 20145 } else { 20146 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 20147 qp, free_xri); 20148 list_add_tail(&lpfc_ncmd->list, 20149 &qp->lpfc_io_buf_list_put); 20150 qp->put_io_bufs++; 20151 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 20152 iflag); 20153 } 20154 } 20155 20156 /** 20157 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 20158 * @phba: pointer to lpfc hba data structure. 20159 * @pvt_pool: pointer to private pool data structure. 20160 * @ndlp: pointer to lpfc nodelist data structure. 20161 * 20162 * This routine tries to get one free IO buf from private pool. 

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to the HWQ which owns @pvt_pool.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}
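
/*
 * Illustrative note (hypothetical scenario): if the first buffer on
 * pvt_pool->list still has an active RRQ for @ndlp, i.e.
 * lpfc_test_rrq_active() returns true for its sli4_lxritag, the scan in
 * lpfc_get_io_buf_from_private_pool() simply moves on to the next entry,
 * so an RRQ on one XRI never blocks allocation as long as any other XRI in
 * the private pool is usable.
 */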

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ to allocate from.
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If the private free xri pool is empty, move some XRIs from the public
 *    to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if one is available
 *   NULL - if no IO buf is available
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

/* Pull the first usable buffer off the HWQ get list, skipping entries with
 * an active RRQ for @ndlp or that were never posted to the port.  Called
 * with the HWQ's io_buf_list_get_lock held.
 */
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}
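
/*
 * Illustrative call (assumed context, e.g. an NVME submission path where
 * ndlp is not needed for RRQ checks):
 *
 *	lpfc_ncmd = lpfc_get_io_buf_from_multixri_pools(phba, NULL,
 *							hwqid, expedite);
 *
 * The routine first tops up pvt_pool from the public pools when it is
 * empty, then scans pvt_pool, and only falls back to the expedite pool
 * when @expedite is set and the private pool scan came back empty.
 */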

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ to allocate from.
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * it removes an IO buffer from the head of the HWQ's io_buf_list and returns
 * it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
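
/*
 * Illustrative pairing (a sketch, not lifted from an actual caller; the
 * error handling shown is hypothetical): a typical IO path obtains a
 * buffer and later returns it to the hardware queue recorded at
 * allocation time:
 *
 *	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!lpfc_cmd)
 *		return -ENOMEM;
 *	...
 *	lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 *
 * On the NVME side ndlp should be passed as NULL; it is only consulted for
 * RRQ testing on the SCSI side.
 */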