/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
        LPFC_UNKNOWN_IOCB,
        LPFC_UNSOL_IOCB,
        LPFC_SOL_IOCB,
        LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
        return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
        uint64_t *src = srcp;
        uint64_t *dest = destp;
        int i;

        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
                *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if the queue memory is invalid, or -EBUSY if no
 * entries are currently available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
        union lpfc_wqe *temp_wqe;
        struct lpfc_register doorbell;
        uint32_t host_index;
        uint32_t idx;
        uint32_t i = 0;
        uint8_t *tmp;
        u32 if_type;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_wqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        idx = ((q->host_index + 1) % q->entry_count);
        if (idx == q->hba_index) {
                q->WQ_overflow++;
                return -EBUSY;
        }
        q->WQ_posted++;
        /* set consumption flag every once in a while */
        if (!((q->host_index + 1) % q->notify_interval))
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
        else
                bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
        if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
                bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
        lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                /* write to DPP aperture taking advantage of Combined Writes */
                tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
                for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
                        __raw_writeq(*((uint64_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#else
                for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
                        __raw_writel(*((uint32_t *)(tmp + i)),
                                     q->dpp_regaddr + i);
#endif
        }
        /* ensure WQE bcopy and DPP flushed before doorbell write */
        wmb();

        /* Update the host index before invoking device */
        host_index = q->host_index;

        q->host_index = idx;

        /* Ring Doorbell */
        doorbell.word0 = 0;
        if (q->db_format == LPFC_DB_LIST_FORMAT) {
                if (q->dpp_enable && q->phba->cfg_enable_dpp) {
                        bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
                        bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                               q->dpp_id);
                        bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                               q->queue_id);
                } else {
                        bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
                        bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

                        /* Leave bits <23:16> clear for if_type 6 dpp */
                        if_type = bf_get(lpfc_sli_intf_if_type,
                                         &q->phba->sli4_hba.sli_intf);
                        if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                                       host_index);
                }
        } else if (q->db_format == LPFC_DB_RING_FORMAT) {
                bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
                bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
        } else {
                return -EINVAL;
        }
        writel(doorbell.word0, q->db_regaddr);

        return 0;
}
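/*
 * Illustrative sketch (not part of the driver): a caller posts a WQE with
 * the hbalock held and backs off when the queue is full. The locals wq,
 * wqe128 and iflags below are hypothetical.
 *
 *      spin_lock_irqsave(&phba->hbalock, iflags);
 *      rc = lpfc_sli4_wq_put(wq, &wqe128);
 *      spin_unlock_irqrestore(&phba->hbalock, iflags);
 *      if (rc == -EBUSY)
 *              ... queue the request on the txq and retry later ...
 */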
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
        uint32_t released = 0;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        if (q->hba_index == index)
                return 0;
        do {
                q->hba_index = ((q->hba_index + 1) % q->entry_count);
                released++;
        } while (q->hba_index != index);
        return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
        struct lpfc_mqe *temp_mqe;
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return -ENOMEM;
        temp_mqe = lpfc_sli4_qe(q, q->host_index);

        /* If the host has not yet processed the next entry then we are done */
        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
                return -ENOMEM;
        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
        /* Save off the mailbox pointer for completion */
        q->phba->mbox = (MAILBOX_t *)temp_mqe;

        /* Update the host index before invoking device */
        q->host_index = ((q->host_index + 1) % q->entry_count);

        /* Ring Doorbell */
        doorbell.word0 = 0;
        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
        return 0;
}
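/*
 * Worked example (illustrative only): for a WQ with entry_count = 256 and
 * hba_index = 250, a completion reporting index = 2 makes the loop in
 * lpfc_sli4_wq_release() above step through 251, 252, ... 255, 0, 1, 2 and
 * return 8 consumed entries. All of these queue routines share the same
 * modular host_index/hba_index arithmetic.
 */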
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
        /* sanity check on queue memory */
        if (unlikely(!q))
                return 0;

        /* Clear the mailbox pointer for completion */
        q->phba->mbox = NULL;
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
        struct lpfc_eqe *eqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        eqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next EQE is not valid then we are done */
        if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Speculative instructions were allowing a bcopy at the start
         * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
         * after our return, to copy data before the valid bit check above
         * was done. As such, some of the copied data was stale. The barrier
         * ensures the check is before any data is copied.
         */
        mb();
        return eqe;
}
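/*
 * Note on the valid-bit protocol (a summary of the logic above and in
 * __lpfc_sli4_consume_eqe() below, not new behavior): on queues with
 * autovalid support (eqav/cqav) the hardware stamps every entry with a
 * phase bit and q->qe_valid flips each time host_index wraps, so an entry
 * is "new" only while its phase bit matches q->qe_valid. On older queues
 * the driver instead clears the valid bit after consuming an entry.
 */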
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
        struct lpfc_register doorbell;

        doorbell.word0 = 0;
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm) {
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
                bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
        }
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
               (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}
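/*
 * Illustrative sketch (hypothetical locals, not part of the driver): an EQ
 * handler typically consumes entries and then releases them with a single
 * doorbell write, rearming on the final call. This mirrors the pattern in
 * lpfc_sli4_process_eq() further below:
 *
 *      while ((eqe = lpfc_sli4_eq_get(eq))) {
 *              ... handle and consume eqe, counting it in 'consumed' ...
 *      }
 *      lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
 */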
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
        /* PCI read to flush PCI pipeline on re-arming for INTx mode */
        if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
                readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
        if (!phba->sli4_hba.pc_sli4_params.eqav)
                bf_set_le32(lpfc_eqe_valid, eqe, 0);

        eq->host_index = ((eq->host_index + 1) % eq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
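/*
 * Worked example (illustrative only): with entry_count = 1024 and
 * qe_valid = 1, consuming the entry at host_index 1023 wraps host_index to
 * 0 and flips qe_valid to 0, so the next sweep of the ring treats phase-0
 * entries as the new ones.
 */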
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
        struct lpfc_eqe *eqe = NULL;
        u32 eq_count = 0, cq_count = 0;
        struct lpfc_cqe *cqe = NULL;
        struct lpfc_queue *cq = NULL, *childq = NULL;
        int cqid = 0;

        /* walk all the EQ entries and drop on the floor */
        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                /* Get the reference to the corresponding CQ */
                cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
                cq = NULL;

                list_for_each_entry(childq, &eq->child_list, list) {
                        if (childq->queue_id == cqid) {
                                cq = childq;
                                break;
                        }
                }
                /* If CQ is valid, iterate through it and drop all the CQEs */
                if (cq) {
                        cqe = lpfc_sli4_cq_get(cq);
                        while (cqe) {
                                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                                cq_count++;
                                cqe = lpfc_sli4_cq_get(cq);
                        }
                        /* Clear and re-arm the CQ */
                        phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                                        LPFC_QUEUE_REARM);
                        cq_count = 0;
                }
                __lpfc_sli4_consume_eqe(phba, eq, eqe);
                eq_count++;
                eqe = lpfc_sli4_eq_get(eq);
        }

        /* Clear and re-arm the EQ */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
        struct lpfc_eqe *eqe;
        int count = 0, consumed = 0;

        if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
                goto rearm_and_exit;

        eqe = lpfc_sli4_eq_get(eq);
        while (eqe) {
                lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
                __lpfc_sli4_consume_eqe(phba, eq, eqe);

                consumed++;
                if (!(++count % eq->max_proc_limit))
                        break;

                if (!(count % eq->notify_interval)) {
                        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                                        LPFC_QUEUE_NOARM);
                        consumed = 0;
                }

                eqe = lpfc_sli4_eq_get(eq);
        }
        eq->EQ_processed += count;

        /* Track the max number of EQEs processed in 1 intr */
        if (count > eq->EQ_max_eqe)
                eq->EQ_max_eqe = count;

        eq->queue_claimed = 0;

rearm_and_exit:
        /* Always clear the EQ. */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

        return count;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
        struct lpfc_cqe *cqe;

        /* sanity check on queue memory */
        if (unlikely(!q))
                return NULL;
        cqe = lpfc_sli4_qe(q, q->host_index);

        /* If the next CQE is not valid then we are done */
        if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
                return NULL;

        /*
         * insert barrier for instruction interlock : data from the hardware
         * must have the valid bit checked before it can be copied and acted
         * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
         * instructions allowing action on content before valid bit checked,
         * add barrier here as well. May not be needed as "content" is a
         * single 32-bit entity here (vs multi word structure for cq's).
         */
        mb();
        return cqe;
}
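/*
 * Note (a summary of the logic above, not new behavior): lpfc_sli4_process_eq()
 * bounds a single invocation to eq->max_proc_limit entries, and writes back
 * partially consumed counts every eq->notify_interval entries, so one busy
 * EQ can neither monopolize the CPU nor fill up while it is being polled.
 */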
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
        if (!phba->sli4_hba.pc_sli4_params.cqav)
                bf_set_le32(lpfc_cqe_valid, cqe, 0);

        cq->host_index = ((cq->host_index + 1) % cq->entry_count);

        /* if the index wrapped around, toggle the valid bit */
        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
               (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
        struct lpfc_register doorbell;

        /* sanity check on queue memory */
        if (unlikely(!q || (count == 0 && !arm)))
                return;

        /* ring doorbell for number popped */
        doorbell.word0 = 0;
        if (arm)
                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on the queues then this function
 * will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
        struct lpfc_rqe *temp_hrqe;
        struct lpfc_rqe *temp_drqe;
        struct lpfc_register doorbell;
        int hq_put_index;
        int dq_put_index;

        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return -ENOMEM;
        hq_put_index = hq->host_index;
        dq_put_index = dq->host_index;
        temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
        temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

        if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
                return -EINVAL;
        if (hq_put_index != dq_put_index)
                return -EINVAL;
        /* If the host has not yet processed the next entry then we are done */
        if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
                return -EBUSY;
        lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
        lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

        /* Update the host index to point to the next slot */
        hq->host_index = ((hq_put_index + 1) % hq->entry_count);
        dq->host_index = ((dq_put_index + 1) % dq->entry_count);
        hq->RQ_buf_posted++;

        /* Ring The Header Receive Queue Doorbell */
        if (!(hq->host_index % hq->notify_interval)) {
                doorbell.word0 = 0;
                if (hq->db_format == LPFC_DB_RING_FORMAT) {
                        bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
                } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
                        bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                               hq->notify_interval);
                        bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                               hq->host_index);
                        bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
                } else {
                        return -EINVAL;
                }
                writel(doorbell.word0, hq->db_regaddr);
        }
        return hq_put_index;
}
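/*
 * Illustrative sketch (hypothetical locals, not part of the driver):
 * receive buffers are always posted in header/data pairs so the two
 * queues stay in lockstep:
 *
 *      rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *      if (rc < 0)
 *              ... repost later; neither hrq nor drq index advanced ...
 */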
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
        /* sanity check on queue memory */
        if (unlikely(!hq) || unlikely(!dq))
                return 0;

        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
                return 0;
        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
        return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
        struct lpfc_iocbq *iocbq = NULL;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
        if (iocbq)
                phba->iocb_cnt++;
        if (phba->iocb_cnt > phba->iocb_max)
                phba->iocb_max = phba->iocb_cnt;
        return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
        return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
        struct lpfc_sglq *sglq;

        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
        return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
        struct lpfc_nodelist *ndlp = NULL;

        if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

        /* The target DID could have been swapped (cable swap)
         * we should use the ndlp from the findnode if it is
         * available.
         */
        if ((!ndlp) && rrq->ndlp)
                ndlp = rrq->ndlp;

        if (!ndlp)
                goto out;

        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
                rrq->send_rrq = 0;
                rrq->xritag = 0;
                rrq->rrq_stop_time = 0;
        }
out:
        mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function takes the hbalock and checks, for each active rrq, whether
 * stop_time (ratov from setting rrq active) has been reached; if it has and
 * the send_rrq flag is set then it will call lpfc_send_rrq. If the send_rrq
 * flag is not set then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long next_time;
        unsigned long iflags;
        LIST_HEAD(send_rrq);

        spin_lock_irqsave(&phba->hbalock, iflags);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        list_for_each_entry_safe(rrq, nextrrq,
                                 &phba->active_rrq_list, list) {
                if (time_after(jiffies, rrq->rrq_stop_time))
                        list_move(&rrq->list, &send_rrq);
                else if (time_before(rrq->rrq_stop_time, next_time))
                        next_time = rrq->rrq_stop_time;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if ((!list_empty(&phba->active_rrq_list)) &&
            (!(phba->pport->load_flag & FC_UNLOADING)))
                mod_timer(&phba->rrq_tmr, next_time);
        list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
                list_del(&rrq->list);
                if (!rrq->send_rrq) {
                        /* this call will free the rrq */
                        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
                } else if (lpfc_send_rrq(phba, rrq)) {
                        /* if we send the rrq then the completion handler
                         * will clear the bit in the xribitmap.
                         */
                        lpfc_clr_rrq_active(phba, rrq->xritag,
                                            rrq);
                }
        }
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;

        if (phba->sli_rev != LPFC_SLI_REV4)
                return NULL;
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
                if (rrq->vport == vport && rrq->xritag == xri &&
                    rrq->nlp_DID == did) {
                        list_del(&rrq->list);
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        return rrq;
                }
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_node_rrq *rrq;
        struct lpfc_node_rrq *nextrrq;
        unsigned long iflags;
        LIST_HEAD(rrq_list);

        if (phba->sli_rev != LPFC_SLI_REV4)
                return;
        if (!ndlp) {
                lpfc_sli4_vport_delete_els_xri_aborted(vport);
                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
        }
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
                if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
                        list_move(&rrq->list, &rrq_list);
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
                list_del(&rrq->list);
                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        }
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
        if (!ndlp)
                return 0;
        if (!ndlp->active_rrqs_xri_bitmap)
                return 0;
        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                return 1;
        else
                return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
        unsigned long iflags;
        struct lpfc_node_rrq *rrq;
        int empty;

        if (!ndlp)
                return -EINVAL;

        if (!phba->cfg_enable_rrq)
                return -EINVAL;

        spin_lock_irqsave(&phba->hbalock, iflags);
        if (phba->pport->load_flag & FC_UNLOADING) {
                phba->hba_flag &= ~HBA_RRQ_ACTIVE;
                goto out;
        }

        /*
         * set the active bit even if there is no mem available.
         */
        if (NLP_CHK_FREE_REQ(ndlp))
                goto out;

        if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
                goto out;

        if (!ndlp->active_rrqs_xri_bitmap)
                goto out;

        if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
                goto out;

        spin_unlock_irqrestore(&phba->hbalock, iflags);
        rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
        if (!rrq) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                                " DID:0x%x Send:%d\n",
                                xritag, rxid, ndlp->nlp_DID, send_rrq);
                return -EINVAL;
        }
        if (phba->cfg_enable_rrq == 1)
                rrq->send_rrq = send_rrq;
        else
                rrq->send_rrq = 0;
        rrq->xritag = xritag;
        rrq->rrq_stop_time = jiffies +
                             msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
        rrq->ndlp = ndlp;
        rrq->nlp_DID = ndlp->nlp_DID;
        rrq->vport = ndlp->vport;
        rrq->rxid = rxid;
        spin_lock_irqsave(&phba->hbalock, iflags);
        empty = list_empty(&phba->active_rrq_list);
        list_add_tail(&rrq->list, &phba->active_rrq_list);
        phba->hba_flag |= HBA_RRQ_ACTIVE;
        if (empty)
                lpfc_worker_wake_up(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return 0;
out:
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
}
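/*
 * Illustrative lifecycle sketch (not part of the driver): after an aborted
 * exchange completes, lpfc_set_rrq_active() marks the xri in
 * ndlp->active_rrqs_xri_bitmap. While the bit is set, the els sglq
 * allocator below skips any sglq whose xri is marked for the same DID.
 * Once RATOV expires, lpfc_handle_rrq_active() sends the RRQ ELS (if
 * requested) and lpfc_clr_rrq_active() clears the bit, making the xri
 * reusable.
 */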
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty it returns a pointer to the newly allocated sglq object, else it
 * returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
        struct lpfc_sglq *sglq = NULL;
        struct lpfc_sglq *start_sglq = NULL;
        struct lpfc_io_buf *lpfc_cmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_sli_ring *pring = NULL;
        int found = 0;

        if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
                pring = phba->sli4_hba.nvmels_wq->pring;
        else
                pring = lpfc_phba_elsring(phba);

        lockdep_assert_held(&pring->ring_lock);

        if (piocbq->iocb_flag & LPFC_IO_FCP) {
                lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
                ndlp = lpfc_cmd->rdata->pnode;
        } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
                   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                ndlp = piocbq->context_un.ndlp;
        } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
                if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
                        ndlp = NULL;
                else
                        ndlp = piocbq->context_un.ndlp;
        } else {
                ndlp = piocbq->context1;
        }

        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
        start_sglq = sglq;
        while (!found) {
                if (!sglq)
                        break;
                if (ndlp && ndlp->active_rrqs_xri_bitmap &&
                    test_bit(sglq->sli4_lxritag,
                             ndlp->active_rrqs_xri_bitmap)) {
                        /* This xri has an rrq outstanding for this DID.
                         * put it back in the list and get another xri.
                         */
                        list_add_tail(&sglq->list, lpfc_els_sgl_list);
                        sglq = NULL;
                        list_remove_head(lpfc_els_sgl_list, sglq,
                                         struct lpfc_sglq, list);
                        if (sglq == start_sglq) {
                                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                                sglq = NULL;
                                break;
                        } else
                                continue;
                }
                sglq->ndlp = ndlp;
                found = 1;
                phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
                sglq->state = SGL_ALLOCATED;
        }
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty it returns a pointer to the newly allocated sglq
 * object, else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
        struct list_head *lpfc_nvmet_sgl_list;
        struct lpfc_sglq *sglq = NULL;

        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
        if (!sglq)
                return NULL;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
        return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
        struct lpfc_iocbq *iocbq = NULL;
        unsigned long iflags;

        spin_lock_irqsave(&phba->hbalock, iflags);
        iocbq = __lpfc_sli_get_iocbq(phba);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        struct lpfc_sglq *sglq;
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
        unsigned long iflag = 0;
        struct lpfc_sli_ring *pring;

        lockdep_assert_held(&phba->hbalock);

        if (iocbq->sli4_xritag == NO_XRI)
                sglq = NULL;
        else
                sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


        if (sglq) {
                if (iocbq->iocb_flag & LPFC_IO_NVMET) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_nvmet_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                        goto out;
                }

                pring = phba->sli4_hba.els_wq->pring;
                if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
                    (sglq->state != SGL_XRI_ABORTED)) {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        list_add(&sglq->list,
                                 &phba->sli4_hba.lpfc_abts_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);
                } else {
                        spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                                          iflag);
                        sglq->state = SGL_FREED;
                        sglq->ndlp = NULL;
                        list_add_tail(&sglq->list,
                                      &phba->sli4_hba.lpfc_els_sgl_list);
                        spin_unlock_irqrestore(
                                &phba->sli4_hba.sgl_list_lock, iflag);

                        /* Check if TXQ queue needs to be serviced */
                        if (!list_empty(&pring->txq))
                                lpfc_worker_wake_up(phba);
                }
        }

out:
        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_lxritag = NO_XRI;
        iocbq->sli4_xritag = NO_XRI;
        iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                              LPFC_IO_NVME_LS);
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
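/*
 * Illustrative pairing sketch (hypothetical caller, not part of the
 * driver): every successful lpfc_sli_get_iocbq() must eventually be
 * matched by a release so the iotag and any attached sglq return to
 * their pools:
 *
 *      piocb = lpfc_sli_get_iocbq(phba);
 *      if (!piocb)
 *              return -ENOMEM;
 *      ... build and issue the command; on completion or failure ...
 *      lpfc_sli_release_iocbq(phba, piocb);
 */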
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

        lockdep_assert_held(&phba->hbalock);

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
        iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        lockdep_assert_held(&phba->hbalock);

        phba->__lpfc_sli_release_iocbq(phba, iocbq);
        phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
        unsigned long iflags;

        /*
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_sli_release_iocbq(phba, iocbq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB, with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
        struct lpfc_iocbq *piocb;

        while (!list_empty(iocblist)) {
                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
                if (!piocb->iocb_cmpl) {
                        if (piocb->iocb_flag & LPFC_IO_NVME)
                                lpfc_nvme_cancel_iocb(phba, piocb);
                        else
                                lpfc_sli_release_iocbq(phba, piocb);
                } else {
                        piocb->iocb.ulpStatus = ulpstatus;
                        piocb->iocb.un.ulpWord[4] = ulpWord4;
                        (piocb->iocb_cmpl) (phba, piocb, piocb);
                }
        }
        return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
                return LPFC_UNKNOWN_IOCB;

        switch (iocb_cmnd) {
        case CMD_XMIT_SEQUENCE_CR:
        case CMD_XMIT_SEQUENCE_CX:
        case CMD_XMIT_BCAST_CN:
        case CMD_XMIT_BCAST_CX:
        case CMD_ELS_REQUEST_CR:
        case CMD_ELS_REQUEST_CX:
        case CMD_CREATE_XRI_CR:
        case CMD_CREATE_XRI_CX:
        case CMD_GET_RPI_CN:
        case CMD_XMIT_ELS_RSP_CX:
        case CMD_GET_RPI_CR:
        case CMD_FCP_IWRITE_CR:
        case CMD_FCP_IWRITE_CX:
        case CMD_FCP_IREAD_CR:
        case CMD_FCP_IREAD_CX:
        case CMD_FCP_ICMND_CR:
        case CMD_FCP_ICMND_CX:
        case CMD_FCP_TSEND_CX:
        case CMD_FCP_TRSP_CX:
        case CMD_FCP_TRECEIVE_CX:
        case CMD_FCP_AUTO_TRSP_CX:
        case CMD_ADAPTER_MSG:
        case CMD_ADAPTER_DUMP:
        case CMD_XMIT_SEQUENCE64_CR:
        case CMD_XMIT_SEQUENCE64_CX:
        case CMD_XMIT_BCAST64_CN:
        case CMD_XMIT_BCAST64_CX:
        case CMD_ELS_REQUEST64_CR:
        case CMD_ELS_REQUEST64_CX:
        case CMD_FCP_IWRITE64_CR:
        case CMD_FCP_IWRITE64_CX:
        case CMD_FCP_IREAD64_CR:
        case CMD_FCP_IREAD64_CX:
        case CMD_FCP_ICMND64_CR:
        case CMD_FCP_ICMND64_CX:
        case CMD_FCP_TSEND64_CX:
        case CMD_FCP_TRSP64_CX:
        case CMD_FCP_TRECEIVE64_CX:
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
        case DSSCMD_IWRITE64_CR:
        case DSSCMD_IWRITE64_CX:
        case DSSCMD_IREAD64_CR:
        case DSSCMD_IREAD64_CX:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
        case CMD_ABORT_XRI_CX:
        case CMD_CLOSE_XRI_CN:
        case CMD_CLOSE_XRI_CX:
        case CMD_XRI_ABORTED_CX:
        case CMD_ABORT_MXRI64_CN:
        case CMD_XMIT_BLS_RSP64_CX:
                type = LPFC_ABORT_IOCB;
                break;
        case CMD_RCV_SEQUENCE_CX:
        case CMD_RCV_ELS_REQ_CX:
        case CMD_RCV_SEQUENCE64_CX:
        case CMD_RCV_ELS_REQ64_CX:
        case CMD_ASYNC_STATUS:
        case CMD_IOCB_RCV_SEQ64_CX:
        case CMD_IOCB_RCV_ELS64_CX:
        case CMD_IOCB_RCV_CONT64_CX:
        case CMD_IOCB_RET_XRI64_CX:
                type = LPFC_UNSOL_IOCB;
                break;
        case CMD_IOCB_XMIT_MSEQ64_CR:
        case CMD_IOCB_XMIT_MSEQ64_CX:
        case CMD_IOCB_RCV_SEQ_LIST64_CX:
        case CMD_IOCB_RCV_ELS_LIST64_CX:
        case CMD_IOCB_CLOSE_EXTENDED_CN:
        case CMD_IOCB_ABORT_EXTENDED_CN:
        case CMD_IOCB_RET_HBQE64_CN:
        case CMD_IOCB_FCP_IBIDIR64_CR:
        case CMD_IOCB_FCP_IBIDIR64_CX:
        case CMD_IOCB_FCP_ITASKMGT64_CX:
        case CMD_IOCB_LOGENTRY_CN:
        case CMD_IOCB_LOGENTRY_ASYNC_CN:
                printk("%s - Unhandled SLI-3 Command x%x\n",
                       __func__, iocb_cmnd);
                type = LPFC_UNKNOWN_IOCB;
                break;
        default:
                type = LPFC_UNKNOWN_IOCB;
                break;
        }

        return type;
}
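/*
 * Illustrative sketch (hypothetical locals, not part of the driver): a ring
 * event handler might key its disposition off the translated type, where
 * irsp is a hypothetical IOCB_t pointer for the response entry:
 *
 *      switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *      case LPFC_SOL_IOCB:
 *              ... match against the txcmplq and invoke iocb_cmpl ...
 *              break;
 *      case LPFC_UNSOL_IOCB:
 *              ... hand the sequence to the upper-layer protocol ...
 *              break;
 *      default:
 *              ... log and drop ...
 *      }
 */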
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        LPFC_MBOXQ_t *pmb;
        MAILBOX_t *pmbox;
        int i, rc, ret = 0;

        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
        pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "0446 Adapter failed to init (%d), "
                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                                        "ring %d\n",
                                        rc, pmbox->mbxCommand,
                                        pmbox->mbxStatus, i);
                        phba->link_state = LPFC_HBA_ERROR;
                        ret = -ENXIO;
                        break;
                }
        }
        mempool_free(pmb, phba->mbox_mem_pool);
        return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
        if (phba->sli_rev == LPFC_SLI_REV4)
                lockdep_assert_held(&pring->ring_lock);
        else
                lockdep_assert_held(&phba->hbalock);

        BUG_ON(!piocb);

        list_add_tail(&piocb->list, &pring->txcmplq);
        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
        pring->txcmplq_cnt++;

        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
            (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
            (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
                BUG_ON(!piocb->vport);
                if (!(piocb->vport->load_flag & FC_UNLOADING))
                        mod_timer(&piocb->vport->els_tmofunc,
                                  jiffies +
                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
        }

        return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_iocbq *cmd_iocb;

        lockdep_assert_held(&phba->hbalock);

        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
        return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
        uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

        lockdep_assert_held(&phba->hbalock);

        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
            (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
                pring->sli.sli3.next_cmdidx = 0;

        if (unlikely(pring->sli.sli3.local_getidx ==
                     pring->sli.sli3.next_cmdidx)) {

                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0315 Ring %d issue: portCmdGet %d "
                                        "is bigger than cmd ring %d\n",
                                        pring->ringno,
                                        pring->sli.sli3.local_getidx,
                                        max_cmd_idx);

                        phba->link_state = LPFC_HBA_ERROR;
                        /*
                         * All error attention handlers are posted to
                         * worker thread
                         */
                        phba->work_ha |= HA_ERATT;
                        phba->work_hs = HS_FFER3;

                        lpfc_worker_wake_up(phba);

                        return NULL;
                }

                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
                        return NULL;
        }

        return lpfc_cmd_iocb(phba, pring);
}
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG. last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with the hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb into the ring iocb
 * slot and updates the ring pointers. It adds the new iocb to the
 * txcmplq if there is a completion callback for this iocb; otherwise
 * it frees the iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	lockdep_assert_held(&phba->hbalock);
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * the firmware that there is pending work to be done for this ring and
 * to request an interrupt when space becomes available in the ring. This
 * function is called when the driver is unable to post more iocbs to the
 * ring due to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform the HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}

/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with the hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when the driver
 * detects available space in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with the hbalock held to get the next
 * available slot for the given HBQ. If a free slot is available
 * for the HBQ, it returns a pointer to the next available HBQ
 * entry; otherwise it returns NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}

/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an hbq buffer
 * to the firmware. If the function finds an empty slot in the HBQ, it
 * will post the buffer. The function returns zero if it successfully
 * posts the buffer; otherwise it returns an error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}

/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an hbq buffer to
 * the firmware. If the function finds an empty slot in the HBQ, it will
 * post the buffer and place it on the hbq_buffer_list. The function
 * returns zero if it successfully posts the buffer; otherwise it returns
 * an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}

/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}

/* HBQ for ELS and CT traffic.
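 *
 * In the table below, entry_count bounds how many buffers
 * lpfc_sli_hbqbuf_fill_hbqs() will keep posted, init_count is what
 * lpfc_sli_hbqbuf_init_hbqs() posts at bring-up on SLI3 ports (SLI4
 * posts the full entry_count), and add_count is the top-up size used
 * by lpfc_sli_hbqbuf_add_hbqs().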
 */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};

/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ
 * entries successfully posted.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
						 lpfc_hbq_defs[qno]->add_count);
}

/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from the SLI initialization code path with
 * no lock held to post initial HBQ buffers to the firmware. The
 * function returns the number of HBQ entries successfully posted.
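 *
 * Illustrative call from a setup path (a sketch, not code from this
 * file; treating a zero return as an allocation failure is the
 * caller's choice):
 *
 *   if (!lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ))
 *           return -ENOMEM;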
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}

/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}

/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the header receive queue.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}

/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}

/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. It gives the hbq
 * buffer back to the firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
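 *
 * Typical pattern (a sketch only): a consumer of unsolicited buffers
 * looks a buffer up by its tag and gives it back under the hbalock when
 * it is done with it:
 *
 *   hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
 *   ...
 *   spin_lock_irqsave(&phba->hbalock, flags);
 *   if (hbq_entry)
 *           lpfc_sli_free_hbq(phba, hbq_entry);
 *   spin_unlock_irqrestore(&phba->hbalock, flags);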
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the completion pointed to by
 * context3 of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}

static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}

/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to reclaim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the
	 * node is being re-discovered, the driver needs to clean up
	 * the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %px\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}
			if (vport->load_flag & FC_UNLOADING)
				lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not been done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is taken on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts that
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
			    &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"map:%x %px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				    NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes mailbox completion interrupts and
 * adds completed mailbox commands to the mboxq_cmpl queue and signals the
 * worker thread. The worker thread calls lpfc_sli_handle_mb_event, which
 * returns the completed mailbox commands in the mboxq_cmpl queue to the
 * upper layers. This function returns the mailbox commands to the upper
 * layer by calling the completion handler function of each mailbox.
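 *
 * Issuer-side sketch for reference (mirrors the lpfc_sli_def_mbox_cmpl
 * usage elsewhere in this file): a command queued with a completion
 * handler will have that handler invoked from here:
 *
 *   pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *   rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);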
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mailbox command
		 * completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi :
					LPFC_VPORT_UNKNOWN,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"(x%x/x%x) x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi :
						LPFC_VPORT_UNKNOWN,
						pmbox->mbxCommand,
						lpfc_sli_config_mbox_subsys_get(phba,
										pmb),
						lpfc_sli_config_mbox_opcode_get(phba,
										pmb),
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport ? pmb->vport->port_state :
						LPFC_VPORT_UNKNOWN);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT is
 * set in the tag, the buffer was posted for a particular exchange and
 * the function returns the buffer without replacing it. If the buffer is
 * for unsolicited ELS or CT traffic, this function returns the buffer
 * and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}

/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	switch (fch_type) {
	case FC_TYPE_NVME:
		lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
		return 1;
	default:
		break;
	}

	/* Unsolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
									saveq);
		return 1;
	}
	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}

/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object;
 * otherwise the upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}

/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. The function itself acquires the hbalock for SLI3 ports
 * or the ring lock for SLI4 ports, so the caller must not hold that lock.
 * This function returns the command iocb object if it finds the command
 * iocb, else it returns NULL.
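 *
 * Usage sketch (mirrors lpfc_sli_handle_fast_ring_event below): the
 * caller drops the hbalock before the lookup because this function
 * takes the appropriate lock itself:
 *
 *   spin_unlock_irqrestore(&phba->hbalock, iflag);
 *   cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq);
 *   spin_lock_irqsave(&phba->hbalock, iflag);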
3037 **/ 3038 static struct lpfc_iocbq * 3039 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 3040 struct lpfc_sli_ring *pring, 3041 struct lpfc_iocbq *prspiocb) 3042 { 3043 struct lpfc_iocbq *cmd_iocb = NULL; 3044 uint16_t iotag; 3045 spinlock_t *temp_lock = NULL; 3046 unsigned long iflag = 0; 3047 3048 if (phba->sli_rev == LPFC_SLI_REV4) 3049 temp_lock = &pring->ring_lock; 3050 else 3051 temp_lock = &phba->hbalock; 3052 3053 spin_lock_irqsave(temp_lock, iflag); 3054 iotag = prspiocb->iocb.ulpIoTag; 3055 3056 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3057 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3058 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3059 /* remove from txcmpl queue list */ 3060 list_del_init(&cmd_iocb->list); 3061 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3062 pring->txcmplq_cnt--; 3063 spin_unlock_irqrestore(temp_lock, iflag); 3064 return cmd_iocb; 3065 } 3066 } 3067 3068 spin_unlock_irqrestore(temp_lock, iflag); 3069 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3070 "0317 iotag x%x is out of " 3071 "range: max iotag x%x wd0 x%x\n", 3072 iotag, phba->sli.last_iotag, 3073 *(((uint32_t *) &prspiocb->iocb) + 7)); 3074 return NULL; 3075 } 3076 3077 /** 3078 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3079 * @phba: Pointer to HBA context object. 3080 * @pring: Pointer to driver SLI ring object. 3081 * @iotag: IOCB tag. 3082 * 3083 * This function looks up the iocb_lookup table to get the command iocb 3084 * corresponding to the given iotag. The driver calls this function with 3085 * the ring lock held because this function is an SLI4 port only helper. 3086 * This function returns the command iocb object if it finds the command 3087 * iocb else returns NULL. 3088 **/ 3089 static struct lpfc_iocbq * 3090 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3091 struct lpfc_sli_ring *pring, uint16_t iotag) 3092 { 3093 struct lpfc_iocbq *cmd_iocb = NULL; 3094 spinlock_t *temp_lock = NULL; 3095 unsigned long iflag = 0; 3096 3097 if (phba->sli_rev == LPFC_SLI_REV4) 3098 temp_lock = &pring->ring_lock; 3099 else 3100 temp_lock = &phba->hbalock; 3101 3102 spin_lock_irqsave(temp_lock, iflag); 3103 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3104 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3105 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3106 /* remove from txcmpl queue list */ 3107 list_del_init(&cmd_iocb->list); 3108 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3109 pring->txcmplq_cnt--; 3110 spin_unlock_irqrestore(temp_lock, iflag); 3111 return cmd_iocb; 3112 } 3113 } 3114 3115 spin_unlock_irqrestore(temp_lock, iflag); 3116 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3117 "0372 iotag x%x lookup error: max iotag (x%x) " 3118 "iocb_flag x%x\n", 3119 iotag, phba->sli.last_iotag, 3120 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3121 return NULL; 3122 } 3123 3124 /** 3125 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3126 * @phba: Pointer to HBA context object. 3127 * @pring: Pointer to driver SLI ring object. 3128 * @saveq: Pointer to the response iocb to be processed. 3129 * 3130 * This function is called by the ring event handler for non-fcp 3131 * rings when there is a new response iocb in the response ring. 3132 * The caller is not required to hold any locks. This function 3133 * gets the command iocb associated with the response iocb and 3134 * calls the completion handler for the command iocb. If there 3135 * is no completion handler, the function will free the resources 3136 * associated with command iocb. 
If the response iocb is for 3137 * an already aborted command iocb, the status of the completion 3138 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3139 * This function always returns 1. 3140 **/ 3141 static int 3142 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3143 struct lpfc_iocbq *saveq) 3144 { 3145 struct lpfc_iocbq *cmdiocbp; 3146 int rc = 1; 3147 unsigned long iflag; 3148 3149 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3150 if (cmdiocbp) { 3151 if (cmdiocbp->iocb_cmpl) { 3152 /* 3153 * If an ELS command failed send an event to mgmt 3154 * application. 3155 */ 3156 if (saveq->iocb.ulpStatus && 3157 (pring->ringno == LPFC_ELS_RING) && 3158 (cmdiocbp->iocb.ulpCommand == 3159 CMD_ELS_REQUEST64_CR)) 3160 lpfc_send_els_failure_event(phba, 3161 cmdiocbp, saveq); 3162 3163 /* 3164 * Post all ELS completions to the worker thread. 3165 * All other are passed to the completion callback. 3166 */ 3167 if (pring->ringno == LPFC_ELS_RING) { 3168 if ((phba->sli_rev < LPFC_SLI_REV4) && 3169 (cmdiocbp->iocb_flag & 3170 LPFC_DRIVER_ABORTED)) { 3171 spin_lock_irqsave(&phba->hbalock, 3172 iflag); 3173 cmdiocbp->iocb_flag &= 3174 ~LPFC_DRIVER_ABORTED; 3175 spin_unlock_irqrestore(&phba->hbalock, 3176 iflag); 3177 saveq->iocb.ulpStatus = 3178 IOSTAT_LOCAL_REJECT; 3179 saveq->iocb.un.ulpWord[4] = 3180 IOERR_SLI_ABORTED; 3181 3182 /* Firmware could still be in progress 3183 * of DMAing payload, so don't free data 3184 * buffer till after a hbeat. 3185 */ 3186 spin_lock_irqsave(&phba->hbalock, 3187 iflag); 3188 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3189 spin_unlock_irqrestore(&phba->hbalock, 3190 iflag); 3191 } 3192 if (phba->sli_rev == LPFC_SLI_REV4) { 3193 if (saveq->iocb_flag & 3194 LPFC_EXCHANGE_BUSY) { 3195 /* Set cmdiocb flag for the 3196 * exchange busy so sgl (xri) 3197 * will not be released until 3198 * the abort xri is received 3199 * from hba. 3200 */ 3201 spin_lock_irqsave( 3202 &phba->hbalock, iflag); 3203 cmdiocbp->iocb_flag |= 3204 LPFC_EXCHANGE_BUSY; 3205 spin_unlock_irqrestore( 3206 &phba->hbalock, iflag); 3207 } 3208 if (cmdiocbp->iocb_flag & 3209 LPFC_DRIVER_ABORTED) { 3210 /* 3211 * Clear LPFC_DRIVER_ABORTED 3212 * bit in case it was driver 3213 * initiated abort. 3214 */ 3215 spin_lock_irqsave( 3216 &phba->hbalock, iflag); 3217 cmdiocbp->iocb_flag &= 3218 ~LPFC_DRIVER_ABORTED; 3219 spin_unlock_irqrestore( 3220 &phba->hbalock, iflag); 3221 cmdiocbp->iocb.ulpStatus = 3222 IOSTAT_LOCAL_REJECT; 3223 cmdiocbp->iocb.un.ulpWord[4] = 3224 IOERR_ABORT_REQUESTED; 3225 /* 3226 * For SLI4, irsiocb contains 3227 * NO_XRI in sli_xritag, it 3228 * shall not affect releasing 3229 * sgl (xri) process. 3230 */ 3231 saveq->iocb.ulpStatus = 3232 IOSTAT_LOCAL_REJECT; 3233 saveq->iocb.un.ulpWord[4] = 3234 IOERR_SLI_ABORTED; 3235 spin_lock_irqsave( 3236 &phba->hbalock, iflag); 3237 saveq->iocb_flag |= 3238 LPFC_DELAY_MEM_FREE; 3239 spin_unlock_irqrestore( 3240 &phba->hbalock, iflag); 3241 } 3242 } 3243 } 3244 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3245 } else 3246 lpfc_sli_release_iocbq(phba, cmdiocbp); 3247 } else { 3248 /* 3249 * Unknown initiating command based on the response iotag. 3250 * This could be the case on the ELS ring because of 3251 * lpfc_els_abort(). 
3252 */ 3253 if (pring->ringno != LPFC_ELS_RING) { 3254 /* 3255 * Ring <ringno> handler: unexpected completion IoTag 3256 * <IoTag> 3257 */ 3258 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3259 "0322 Ring %d handler: " 3260 "unexpected completion IoTag x%x " 3261 "Data: x%x x%x x%x x%x\n", 3262 pring->ringno, 3263 saveq->iocb.ulpIoTag, 3264 saveq->iocb.ulpStatus, 3265 saveq->iocb.un.ulpWord[4], 3266 saveq->iocb.ulpCommand, 3267 saveq->iocb.ulpContext); 3268 } 3269 } 3270 3271 return rc; 3272 } 3273 3274 /** 3275 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3276 * @phba: Pointer to HBA context object. 3277 * @pring: Pointer to driver SLI ring object. 3278 * 3279 * This function is called from the iocb ring event handlers when 3280 * put pointer is ahead of the get pointer for a ring. This function signal 3281 * an error attention condition to the worker thread and the worker 3282 * thread will transition the HBA to offline state. 3283 **/ 3284 static void 3285 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3286 { 3287 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3288 /* 3289 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3290 * rsp ring <portRspMax> 3291 */ 3292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3293 "0312 Ring %d handler: portRspPut %d " 3294 "is bigger than rsp ring %d\n", 3295 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3296 pring->sli.sli3.numRiocb); 3297 3298 phba->link_state = LPFC_HBA_ERROR; 3299 3300 /* 3301 * All error attention handlers are posted to 3302 * worker thread 3303 */ 3304 phba->work_ha |= HA_ERATT; 3305 phba->work_hs = HS_FFER3; 3306 3307 lpfc_worker_wake_up(phba); 3308 3309 return; 3310 } 3311 3312 /** 3313 * lpfc_poll_eratt - Error attention polling timer timeout handler 3314 * @ptr: Pointer to address of HBA context object. 3315 * 3316 * This function is invoked by the Error Attention polling timer when the 3317 * timer times out. It will check the SLI Error Attention register for 3318 * possible attention events. If so, it will post an Error Attention event 3319 * and wake up worker thread to process it. Otherwise, it will set up the 3320 * Error Attention polling timer for the next poll. 3321 **/ 3322 void lpfc_poll_eratt(struct timer_list *t) 3323 { 3324 struct lpfc_hba *phba; 3325 uint32_t eratt = 0; 3326 uint64_t sli_intr, cnt; 3327 3328 phba = from_timer(phba, t, eratt_poll); 3329 3330 /* Here we will also keep track of interrupts per sec of the hba */ 3331 sli_intr = phba->sli.slistat.sli_intr; 3332 3333 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3334 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3335 sli_intr); 3336 else 3337 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3338 3339 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3340 do_div(cnt, phba->eratt_poll_interval); 3341 phba->sli.slistat.sli_ips = cnt; 3342 3343 phba->sli.slistat.sli_prev_intr = sli_intr; 3344 3345 /* Check chip HA register for error event */ 3346 eratt = lpfc_sli_check_eratt(phba); 3347 3348 if (eratt) 3349 /* Tell the worker thread there is work to do */ 3350 lpfc_worker_wake_up(phba); 3351 else 3352 /* Restart the timer for next eratt poll */ 3353 mod_timer(&phba->eratt_poll, 3354 jiffies + 3355 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3356 return; 3357 } 3358 3359 3360 /** 3361 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3362 * @phba: Pointer to HBA context object. 
3363 * @pring: Pointer to driver SLI ring object. 3364 * @mask: Host attention register mask for this ring. 3365 * 3366 * This function is called from the interrupt context when there is a ring 3367 * event for the fcp ring. The caller does not hold any lock. 3368 * The function processes each response iocb in the response ring until it 3369 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3370 * LE bit set. The function will call the completion handler of the command iocb 3371 * if the response iocb indicates a completion for a command iocb or it is 3372 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3373 * function if this is an unsolicited iocb. 3374 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3375 * to check it explicitly. 3376 */ 3377 int 3378 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3379 struct lpfc_sli_ring *pring, uint32_t mask) 3380 { 3381 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3382 IOCB_t *irsp = NULL; 3383 IOCB_t *entry = NULL; 3384 struct lpfc_iocbq *cmdiocbq = NULL; 3385 struct lpfc_iocbq rspiocbq; 3386 uint32_t status; 3387 uint32_t portRspPut, portRspMax; 3388 int rc = 1; 3389 lpfc_iocb_type type; 3390 unsigned long iflag; 3391 uint32_t rsp_cmpl = 0; 3392 3393 spin_lock_irqsave(&phba->hbalock, iflag); 3394 pring->stats.iocb_event++; 3395 3396 /* 3397 * The next available response entry should never exceed the maximum 3398 * entries. If it does, treat it as an adapter hardware error. 3399 */ 3400 portRspMax = pring->sli.sli3.numRiocb; 3401 portRspPut = le32_to_cpu(pgp->rspPutInx); 3402 if (unlikely(portRspPut >= portRspMax)) { 3403 lpfc_sli_rsp_pointers_error(phba, pring); 3404 spin_unlock_irqrestore(&phba->hbalock, iflag); 3405 return 1; 3406 } 3407 if (phba->fcp_ring_in_use) { 3408 spin_unlock_irqrestore(&phba->hbalock, iflag); 3409 return 1; 3410 } else 3411 phba->fcp_ring_in_use = 1; 3412 3413 rmb(); 3414 while (pring->sli.sli3.rspidx != portRspPut) { 3415 /* 3416 * Fetch an entry off the ring and copy it into a local data 3417 * structure. The copy involves a byte-swap since the 3418 * network byte order and pci byte orders are different. 3419 */ 3420 entry = lpfc_resp_iocb(phba, pring); 3421 phba->last_completion_time = jiffies; 3422 3423 if (++pring->sli.sli3.rspidx >= portRspMax) 3424 pring->sli.sli3.rspidx = 0; 3425 3426 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3427 (uint32_t *) &rspiocbq.iocb, 3428 phba->iocb_rsp_size); 3429 INIT_LIST_HEAD(&(rspiocbq.list)); 3430 irsp = &rspiocbq.iocb; 3431 3432 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3433 pring->stats.iocb_rsp++; 3434 rsp_cmpl++; 3435 3436 if (unlikely(irsp->ulpStatus)) { 3437 /* 3438 * If resource errors reported from HBA, reduce 3439 * queuedepths of the SCSI device. 
3440 */ 3441 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3442 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3443 IOERR_NO_RESOURCES)) { 3444 spin_unlock_irqrestore(&phba->hbalock, iflag); 3445 phba->lpfc_rampdown_queue_depth(phba); 3446 spin_lock_irqsave(&phba->hbalock, iflag); 3447 } 3448 3449 /* Rsp ring <ringno> error: IOCB */ 3450 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3451 "0336 Rsp Ring %d error: IOCB Data: " 3452 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3453 pring->ringno, 3454 irsp->un.ulpWord[0], 3455 irsp->un.ulpWord[1], 3456 irsp->un.ulpWord[2], 3457 irsp->un.ulpWord[3], 3458 irsp->un.ulpWord[4], 3459 irsp->un.ulpWord[5], 3460 *(uint32_t *)&irsp->un1, 3461 *((uint32_t *)&irsp->un1 + 1)); 3462 } 3463 3464 switch (type) { 3465 case LPFC_ABORT_IOCB: 3466 case LPFC_SOL_IOCB: 3467 /* 3468 * Idle exchange closed via ABTS from port. No iocb 3469 * resources need to be recovered. 3470 */ 3471 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3472 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3473 "0333 IOCB cmd 0x%x" 3474 " processed. Skipping" 3475 " completion\n", 3476 irsp->ulpCommand); 3477 break; 3478 } 3479 3480 spin_unlock_irqrestore(&phba->hbalock, iflag); 3481 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3482 &rspiocbq); 3483 spin_lock_irqsave(&phba->hbalock, iflag); 3484 if (unlikely(!cmdiocbq)) 3485 break; 3486 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3487 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3488 if (cmdiocbq->iocb_cmpl) { 3489 spin_unlock_irqrestore(&phba->hbalock, iflag); 3490 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3491 &rspiocbq); 3492 spin_lock_irqsave(&phba->hbalock, iflag); 3493 } 3494 break; 3495 case LPFC_UNSOL_IOCB: 3496 spin_unlock_irqrestore(&phba->hbalock, iflag); 3497 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3498 spin_lock_irqsave(&phba->hbalock, iflag); 3499 break; 3500 default: 3501 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3502 char adaptermsg[LPFC_MAX_ADPTMSG]; 3503 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3504 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3505 MAX_MSG_DATA); 3506 dev_warn(&((phba->pcidev)->dev), 3507 "lpfc%d: %s\n", 3508 phba->brd_no, adaptermsg); 3509 } else { 3510 /* Unknown IOCB command */ 3511 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3512 "0334 Unknown IOCB command " 3513 "Data: x%x, x%x x%x x%x x%x\n", 3514 type, irsp->ulpCommand, 3515 irsp->ulpStatus, 3516 irsp->ulpIoTag, 3517 irsp->ulpContext); 3518 } 3519 break; 3520 } 3521 3522 /* 3523 * The response IOCB has been processed. Update the ring 3524 * pointer in SLIM. If the port response put pointer has not 3525 * been updated, sync the pgp->rspPutInx and fetch the new port 3526 * response put pointer. 
		 */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}

/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors are reported from the HBA, reduce
		 * queue depths of the SCSI device.
3615 */ 3616 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3617 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3618 IOERR_NO_RESOURCES)) { 3619 spin_unlock_irqrestore(&phba->hbalock, iflag); 3620 phba->lpfc_rampdown_queue_depth(phba); 3621 spin_lock_irqsave(&phba->hbalock, iflag); 3622 } 3623 3624 if (irsp->ulpStatus) { 3625 /* Rsp ring <ringno> error: IOCB */ 3626 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3627 "0328 Rsp Ring %d error: " 3628 "IOCB Data: " 3629 "x%x x%x x%x x%x " 3630 "x%x x%x x%x x%x " 3631 "x%x x%x x%x x%x " 3632 "x%x x%x x%x x%x\n", 3633 pring->ringno, 3634 irsp->un.ulpWord[0], 3635 irsp->un.ulpWord[1], 3636 irsp->un.ulpWord[2], 3637 irsp->un.ulpWord[3], 3638 irsp->un.ulpWord[4], 3639 irsp->un.ulpWord[5], 3640 *(((uint32_t *) irsp) + 6), 3641 *(((uint32_t *) irsp) + 7), 3642 *(((uint32_t *) irsp) + 8), 3643 *(((uint32_t *) irsp) + 9), 3644 *(((uint32_t *) irsp) + 10), 3645 *(((uint32_t *) irsp) + 11), 3646 *(((uint32_t *) irsp) + 12), 3647 *(((uint32_t *) irsp) + 13), 3648 *(((uint32_t *) irsp) + 14), 3649 *(((uint32_t *) irsp) + 15)); 3650 } 3651 3652 /* 3653 * Fetch the IOCB command type and call the correct completion 3654 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3655 * get freed back to the lpfc_iocb_list by the discovery 3656 * kernel thread. 3657 */ 3658 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3659 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3660 switch (type) { 3661 case LPFC_SOL_IOCB: 3662 spin_unlock_irqrestore(&phba->hbalock, iflag); 3663 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3664 spin_lock_irqsave(&phba->hbalock, iflag); 3665 break; 3666 3667 case LPFC_UNSOL_IOCB: 3668 spin_unlock_irqrestore(&phba->hbalock, iflag); 3669 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3670 spin_lock_irqsave(&phba->hbalock, iflag); 3671 if (!rc) 3672 free_saveq = 0; 3673 break; 3674 3675 case LPFC_ABORT_IOCB: 3676 cmdiocbp = NULL; 3677 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { 3678 spin_unlock_irqrestore(&phba->hbalock, iflag); 3679 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3680 saveq); 3681 spin_lock_irqsave(&phba->hbalock, iflag); 3682 } 3683 if (cmdiocbp) { 3684 /* Call the specified completion routine */ 3685 if (cmdiocbp->iocb_cmpl) { 3686 spin_unlock_irqrestore(&phba->hbalock, 3687 iflag); 3688 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3689 saveq); 3690 spin_lock_irqsave(&phba->hbalock, 3691 iflag); 3692 } else 3693 __lpfc_sli_release_iocbq(phba, 3694 cmdiocbp); 3695 } 3696 break; 3697 3698 case LPFC_UNKNOWN_IOCB: 3699 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3700 char adaptermsg[LPFC_MAX_ADPTMSG]; 3701 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3702 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3703 MAX_MSG_DATA); 3704 dev_warn(&((phba->pcidev)->dev), 3705 "lpfc%d: %s\n", 3706 phba->brd_no, adaptermsg); 3707 } else { 3708 /* Unknown IOCB command */ 3709 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3710 "0335 Unknown IOCB " 3711 "command Data: x%x " 3712 "x%x x%x x%x\n", 3713 irsp->ulpCommand, 3714 irsp->ulpStatus, 3715 irsp->ulpIoTag, 3716 irsp->ulpContext); 3717 } 3718 break; 3719 } 3720 3721 if (free_saveq) { 3722 list_for_each_entry_safe(rspiocbp, next_iocb, 3723 &saveq->list, list) { 3724 list_del_init(&rspiocbp->list); 3725 __lpfc_sli_release_iocbq(phba, rspiocbp); 3726 } 3727 __lpfc_sli_release_iocbq(phba, saveq); 3728 } 3729 rspiocbp = NULL; 3730 } 3731 spin_unlock_irqrestore(&phba->hbalock, iflag); 3732 return rspiocbp; 3733 } 3734 3735 /** 3736 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path 
iocbs 3737 * @phba: Pointer to HBA context object. 3738 * @pring: Pointer to driver SLI ring object. 3739 * @mask: Host attention register mask for this ring. 3740 * 3741 * This routine wraps the actual slow_ring event process routine from the 3742 * API jump table function pointer from the lpfc_hba struct. 3743 **/ 3744 void 3745 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3746 struct lpfc_sli_ring *pring, uint32_t mask) 3747 { 3748 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3749 } 3750 3751 /** 3752 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3753 * @phba: Pointer to HBA context object. 3754 * @pring: Pointer to driver SLI ring object. 3755 * @mask: Host attention register mask for this ring. 3756 * 3757 * This function is called from the worker thread when there is a ring event 3758 * for non-fcp rings. The caller does not hold any lock. The function will 3759 * remove each response iocb in the response ring and calls the handle 3760 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3761 **/ 3762 static void 3763 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3764 struct lpfc_sli_ring *pring, uint32_t mask) 3765 { 3766 struct lpfc_pgp *pgp; 3767 IOCB_t *entry; 3768 IOCB_t *irsp = NULL; 3769 struct lpfc_iocbq *rspiocbp = NULL; 3770 uint32_t portRspPut, portRspMax; 3771 unsigned long iflag; 3772 uint32_t status; 3773 3774 pgp = &phba->port_gp[pring->ringno]; 3775 spin_lock_irqsave(&phba->hbalock, iflag); 3776 pring->stats.iocb_event++; 3777 3778 /* 3779 * The next available response entry should never exceed the maximum 3780 * entries. If it does, treat it as an adapter hardware error. 3781 */ 3782 portRspMax = pring->sli.sli3.numRiocb; 3783 portRspPut = le32_to_cpu(pgp->rspPutInx); 3784 if (portRspPut >= portRspMax) { 3785 /* 3786 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3787 * rsp ring <portRspMax> 3788 */ 3789 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3790 "0303 Ring %d handler: portRspPut %d " 3791 "is bigger than rsp ring %d\n", 3792 pring->ringno, portRspPut, portRspMax); 3793 3794 phba->link_state = LPFC_HBA_ERROR; 3795 spin_unlock_irqrestore(&phba->hbalock, iflag); 3796 3797 phba->work_hs = HS_FFER3; 3798 lpfc_handle_eratt(phba); 3799 3800 return; 3801 } 3802 3803 rmb(); 3804 while (pring->sli.sli3.rspidx != portRspPut) { 3805 /* 3806 * Build a completion list and call the appropriate handler. 3807 * The process is to get the next available response iocb, get 3808 * a free iocb from the list, copy the response data into the 3809 * free iocb, insert to the continuation list, and update the 3810 * next response index to slim. This process makes response 3811 * iocb's in the ring available to DMA as fast as possible but 3812 * pays a penalty for a copy operation. Since the iocb is 3813 * only 32 bytes, this penalty is considered small relative to 3814 * the PCI reads for register values and a slim write. When 3815 * the ulpLe field is set, the entire Command has been 3816 * received. 3817 */ 3818 entry = lpfc_resp_iocb(phba, pring); 3819 3820 phba->last_completion_time = jiffies; 3821 rspiocbp = __lpfc_sli_get_iocbq(phba); 3822 if (rspiocbp == NULL) { 3823 printk(KERN_ERR "%s: out of buffers! 
Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}

/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function removes each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
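 *
 * In sketch form, the bounded dequeue loop below is (illustrative only;
 * locking and the CQE-code dispatch are elided):
 *
 *   while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
 *           dequeue one lpfc_cq_event and dispatch on its CQE code;
 *           if (++count == 64)
 *                   break;      cap the batch to avoid soft lockups
 *   }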
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int count = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			count++;
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			count++;
			break;
		default:
			break;
		}

		/* Limit the number of events to 64 to avoid soft lockups */
		if (count == 64)
			break;
	}
}

/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	} else {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * This function aborts all iocbs in FCP rings and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_hdw_queue; i++) {
			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
	} else {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the IO ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked by EEH when the device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;
	struct lpfc_iocbq *piocb, *next_iocb;

	spin_lock_irq(&phba->hbalock);
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_hdw_queue; i++) {
			pring = phba->sli4_hba.hdwq[i].io_wq->pring;

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			list_for_each_entry_safe(piocb, next_iocb,
						 &pring->txcmplq, list)
				piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmpq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		pring = &psli->sli3_ring[LPFC_FCP_RING];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}

/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart; otherwise it
 * returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Check the status register every 10ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
	 * check every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}

/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to see if the HBA is
 * ready. This function will wait in a loop for the HBA to become ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to become
 * ready; otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}

/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
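 *
 * Illustrative sketch only (the assignment below happens once at API
 * table setup, not here; the mask value is just an example):
 *
 *   phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;     SLI-3 port
 *   ...
 *   rc = lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);  dispatches to _s3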
4212 **/ 4213 int 4214 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 4215 { 4216 return phba->lpfc_sli_brdready(phba, mask); 4217 } 4218 4219 #define BARRIER_TEST_PATTERN (0xdeadbeef) 4220 4221 /** 4222 * lpfc_reset_barrier - Make HBA ready for HBA reset 4223 * @phba: Pointer to HBA context object. 4224 * 4225 * This function is called before resetting an HBA. This function is called 4226 * with hbalock held and requests HBA to quiesce DMAs before a reset. 4227 **/ 4228 void lpfc_reset_barrier(struct lpfc_hba *phba) 4229 { 4230 uint32_t __iomem *resp_buf; 4231 uint32_t __iomem *mbox_buf; 4232 volatile uint32_t mbox; 4233 uint32_t hc_copy, ha_copy, resp_data; 4234 int i; 4235 uint8_t hdrtype; 4236 4237 lockdep_assert_held(&phba->hbalock); 4238 4239 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4240 if (hdrtype != 0x80 || 4241 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4242 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4243 return; 4244 4245 /* 4246 * Tell the other part of the chip to suspend temporarily all 4247 * its DMA activity. 4248 */ 4249 resp_buf = phba->MBslimaddr; 4250 4251 /* Disable the error attention */ 4252 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4253 return; 4254 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4255 readl(phba->HCregaddr); /* flush */ 4256 phba->link_flag |= LS_IGNORE_ERATT; 4257 4258 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4259 return; 4260 if (ha_copy & HA_ERATT) { 4261 /* Clear Chip error bit */ 4262 writel(HA_ERATT, phba->HAregaddr); 4263 phba->pport->stopped = 1; 4264 } 4265 4266 mbox = 0; 4267 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4268 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4269 4270 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4271 mbox_buf = phba->MBslimaddr; 4272 writel(mbox, mbox_buf); 4273 4274 for (i = 0; i < 50; i++) { 4275 if (lpfc_readl((resp_buf + 1), &resp_data)) 4276 return; 4277 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4278 mdelay(1); 4279 else 4280 break; 4281 } 4282 resp_data = 0; 4283 if (lpfc_readl((resp_buf + 1), &resp_data)) 4284 return; 4285 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4286 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4287 phba->pport->stopped) 4288 goto restore_hc; 4289 else 4290 goto clear_errat; 4291 } 4292 4293 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4294 resp_data = 0; 4295 for (i = 0; i < 500; i++) { 4296 if (lpfc_readl(resp_buf, &resp_data)) 4297 return; 4298 if (resp_data != mbox) 4299 mdelay(1); 4300 else 4301 break; 4302 } 4303 4304 clear_errat: 4305 4306 while (++i < 500) { 4307 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4308 return; 4309 if (!(ha_copy & HA_ERATT)) 4310 mdelay(1); 4311 else 4312 break; 4313 } 4314 4315 if (readl(phba->HAregaddr) & HA_ERATT) { 4316 writel(HA_ERATT, phba->HAregaddr); 4317 phba->pport->stopped = 1; 4318 } 4319 4320 restore_hc: 4321 phba->link_flag &= ~LS_IGNORE_ERATT; 4322 writel(hc_copy, phba->HCregaddr); 4323 readl(phba->HCregaddr); /* flush */ 4324 } 4325 4326 /** 4327 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4328 * @phba: Pointer to HBA context object. 4329 * 4330 * This function issues a kill_board mailbox command and waits for 4331 * the error attention interrupt. This function is called for stopping 4332 * the firmware processing. The caller is not required to hold any 4333 * locks. This function calls lpfc_hba_down_post function to free 4334 * any pending commands after the kill. The function will return 1 when it 4335 * fails to kill the board else will return 0. 
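 *
 * The post-kill wait below polls the host attention register for the
 * error attention bit; in sketch form (illustrative only):
 *
 *   for (i = 0; i < 30 && !(ha_copy & HA_ERATT); i++) {
 *           mdelay(100);                      30 x 100ms = 3 seconds
 *           if (lpfc_readl(phba->HAregaddr, &ha_copy))
 *                   return 1;                 register read failed
 *   }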
4336 **/ 4337 int 4338 lpfc_sli_brdkill(struct lpfc_hba *phba) 4339 { 4340 struct lpfc_sli *psli; 4341 LPFC_MBOXQ_t *pmb; 4342 uint32_t status; 4343 uint32_t ha_copy; 4344 int retval; 4345 int i = 0; 4346 4347 psli = &phba->sli; 4348 4349 /* Kill HBA */ 4350 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4351 "0329 Kill HBA Data: x%x x%x\n", 4352 phba->pport->port_state, psli->sli_flag); 4353 4354 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4355 if (!pmb) 4356 return 1; 4357 4358 /* Disable the error attention */ 4359 spin_lock_irq(&phba->hbalock); 4360 if (lpfc_readl(phba->HCregaddr, &status)) { 4361 spin_unlock_irq(&phba->hbalock); 4362 mempool_free(pmb, phba->mbox_mem_pool); 4363 return 1; 4364 } 4365 status &= ~HC_ERINT_ENA; 4366 writel(status, phba->HCregaddr); 4367 readl(phba->HCregaddr); /* flush */ 4368 phba->link_flag |= LS_IGNORE_ERATT; 4369 spin_unlock_irq(&phba->hbalock); 4370 4371 lpfc_kill_board(phba, pmb); 4372 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4373 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4374 4375 if (retval != MBX_SUCCESS) { 4376 if (retval != MBX_BUSY) 4377 mempool_free(pmb, phba->mbox_mem_pool); 4378 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4379 "2752 KILL_BOARD command failed retval %d\n", 4380 retval); 4381 spin_lock_irq(&phba->hbalock); 4382 phba->link_flag &= ~LS_IGNORE_ERATT; 4383 spin_unlock_irq(&phba->hbalock); 4384 return 1; 4385 } 4386 4387 spin_lock_irq(&phba->hbalock); 4388 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4389 spin_unlock_irq(&phba->hbalock); 4390 4391 mempool_free(pmb, phba->mbox_mem_pool); 4392 4393 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4394 * attention every 100ms for 3 seconds. If we don't get ERATT after 4395 * 3 seconds we still set HBA_ERROR state because the status of the 4396 * board is now undefined. 4397 */ 4398 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4399 return 1; 4400 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4401 mdelay(100); 4402 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4403 return 1; 4404 } 4405 4406 del_timer_sync(&psli->mbox_tmo); 4407 if (ha_copy & HA_ERATT) { 4408 writel(HA_ERATT, phba->HAregaddr); 4409 phba->pport->stopped = 1; 4410 } 4411 spin_lock_irq(&phba->hbalock); 4412 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4413 psli->mbox_active = NULL; 4414 phba->link_flag &= ~LS_IGNORE_ERATT; 4415 spin_unlock_irq(&phba->hbalock); 4416 4417 lpfc_hba_down_post(phba); 4418 phba->link_state = LPFC_HBA_ERROR; 4419 4420 return ha_copy & HA_ERATT ? 0 : 1; 4421 } 4422 4423 /** 4424 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4425 * @phba: Pointer to HBA context object. 4426 * 4427 * This function resets the HBA by writing HC_INITFF to the control 4428 * register. After the HBA resets, this function resets all the iocb ring 4429 * indices. This function disables PCI layer parity checking during 4430 * the reset. 4431 * This function returns 0 always. 4432 * The caller is not required to hold any locks. 4433 **/ 4434 int 4435 lpfc_sli_brdreset(struct lpfc_hba *phba) 4436 { 4437 struct lpfc_sli *psli; 4438 struct lpfc_sli_ring *pring; 4439 uint16_t cfg_value; 4440 int i; 4441 4442 psli = &phba->sli; 4443 4444 /* Reset HBA */ 4445 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4446 "0325 Reset HBA Data: x%x x%x\n", 4447 (phba->pport) ? 
phba->pport->port_state : 0,
			psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	if (phba->pport) {
		phba->pport->fc_myDID = 0;
		phba->pport->fc_prevDID = 0;
	}

	/* Turn off parity checking and serr during the physical reset */
	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
		return -EIO;

	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}

/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. It disables PCI-layer parity
 * checking while it resets the device. The caller is not required to
 * hold any locks.
 *
 * This function returns 0 on success, else a negative error code.
 **/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	int rc = 0;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x x%x\n",
			phba->pport->port_state, psli->sli_flag,
			phba->hba_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
	if (phba->hba_flag & HBA_FW_DUMP_OP) {
		phba->hba_flag &= ~HBA_FW_DUMP_OP;
		return rc;
	}

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Turn off parity checking and serr during the physical reset */
	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3205 PCI read Config failed\n");
		return -EIO;
	}

	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Perform FCoE PCI function reset before freeing queue memory */
	rc = lpfc_pci_function_reset(phba);

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return rc;
}

/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA.
The caller is not required to hold any lock. 4562 * This function writes MBX_RESTART mailbox command to the SLIM and 4563 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4564 * function to free any pending commands. The function enables 4565 * POST only during the first initialization. The function returns zero. 4566 * The function does not guarantee completion of MBX_RESTART mailbox 4567 * command before the return of this function. 4568 **/ 4569 static int 4570 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4571 { 4572 MAILBOX_t *mb; 4573 struct lpfc_sli *psli; 4574 volatile uint32_t word0; 4575 void __iomem *to_slim; 4576 uint32_t hba_aer_enabled; 4577 4578 spin_lock_irq(&phba->hbalock); 4579 4580 /* Take PCIe device Advanced Error Reporting (AER) state */ 4581 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4582 4583 psli = &phba->sli; 4584 4585 /* Restart HBA */ 4586 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4587 "0337 Restart HBA Data: x%x x%x\n", 4588 (phba->pport) ? phba->pport->port_state : 0, 4589 psli->sli_flag); 4590 4591 word0 = 0; 4592 mb = (MAILBOX_t *) &word0; 4593 mb->mbxCommand = MBX_RESTART; 4594 mb->mbxHc = 1; 4595 4596 lpfc_reset_barrier(phba); 4597 4598 to_slim = phba->MBslimaddr; 4599 writel(*(uint32_t *) mb, to_slim); 4600 readl(to_slim); /* flush */ 4601 4602 /* Only skip post after fc_ffinit is completed */ 4603 if (phba->pport && phba->pport->port_state) 4604 word0 = 1; /* This is really setting up word1 */ 4605 else 4606 word0 = 0; /* This is really setting up word1 */ 4607 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4608 writel(*(uint32_t *) mb, to_slim); 4609 readl(to_slim); /* flush */ 4610 4611 lpfc_sli_brdreset(phba); 4612 if (phba->pport) 4613 phba->pport->stopped = 0; 4614 phba->link_state = LPFC_INIT_START; 4615 phba->hba_flag = 0; 4616 spin_unlock_irq(&phba->hbalock); 4617 4618 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4619 psli->stats_start = ktime_get_seconds(); 4620 4621 /* Give the INITFF and Post time to settle. */ 4622 mdelay(100); 4623 4624 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4625 if (hba_aer_enabled) 4626 pci_disable_pcie_error_reporting(phba->pcidev); 4627 4628 lpfc_hba_down_post(phba); 4629 4630 return 0; 4631 } 4632 4633 /** 4634 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4635 * @phba: Pointer to HBA context object. 4636 * 4637 * This function is called in the SLI initialization code path to restart 4638 * a SLI4 HBA. The caller is not required to hold any lock. 4639 * At the end of the function, it calls lpfc_hba_down_post function to 4640 * free any pending commands. 
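 *
 * Returns zero on success. If lpfc_sli4_brdreset() fails, the link state
 * is set to LPFC_HBA_ERROR and that error code is returned; the down-post
 * and queue-destroy cleanup still runs in either case.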
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		goto hba_down_queue;
	}

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = ktime_get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

hba_down_queue:
	lpfc_hba_down_post(phba);
	lpfc_sli4_queue_destroy(phba);

	return rc;
}

/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}

/**
 * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * the HS_FFRDY and HS_MBRDY bits. If the HBA has not become ready after
 * 150 polling iterations (~60 seconds), the function will restart the
 * HBA again. The function returns zero if the HBA restarts successfully,
 * else a negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 more retries. The up-to-60-second wait
		 * before the board is ready is required for the Falcon FIPS
		 * zeroization to complete; any board reset in between would
		 * restart the zeroization and further delay board readiness.
4726 */ 4727 if (i++ >= 200) { 4728 /* Adapter failed to init, timeout, status reg 4729 <status> */ 4730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4731 "0436 Adapter failed to init, " 4732 "timeout, status reg x%x, " 4733 "FW Data: A8 x%x AC x%x\n", status, 4734 readl(phba->MBslimaddr + 0xa8), 4735 readl(phba->MBslimaddr + 0xac)); 4736 phba->link_state = LPFC_HBA_ERROR; 4737 return -ETIMEDOUT; 4738 } 4739 4740 /* Check to see if any errors occurred during init */ 4741 if (status & HS_FFERM) { 4742 /* ERROR: During chipset initialization */ 4743 /* Adapter failed to init, chipset, status reg 4744 <status> */ 4745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4746 "0437 Adapter failed to init, " 4747 "chipset, status reg x%x, " 4748 "FW Data: A8 x%x AC x%x\n", status, 4749 readl(phba->MBslimaddr + 0xa8), 4750 readl(phba->MBslimaddr + 0xac)); 4751 phba->link_state = LPFC_HBA_ERROR; 4752 return -EIO; 4753 } 4754 4755 if (i <= 10) 4756 msleep(10); 4757 else if (i <= 100) 4758 msleep(100); 4759 else 4760 msleep(1000); 4761 4762 if (i == 150) { 4763 /* Do post */ 4764 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4765 lpfc_sli_brdrestart(phba); 4766 } 4767 /* Read the HBA Host Status Register */ 4768 if (lpfc_readl(phba->HSregaddr, &status)) 4769 return -EIO; 4770 } 4771 4772 /* Check to see if any errors occurred during init */ 4773 if (status & HS_FFERM) { 4774 /* ERROR: During chipset initialization */ 4775 /* Adapter failed to init, chipset, status reg <status> */ 4776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4777 "0438 Adapter failed to init, chipset, " 4778 "status reg x%x, " 4779 "FW Data: A8 x%x AC x%x\n", status, 4780 readl(phba->MBslimaddr + 0xa8), 4781 readl(phba->MBslimaddr + 0xac)); 4782 phba->link_state = LPFC_HBA_ERROR; 4783 return -EIO; 4784 } 4785 4786 /* Clear all interrupt enable conditions */ 4787 writel(0, phba->HCregaddr); 4788 readl(phba->HCregaddr); /* flush */ 4789 4790 /* setup host attn register */ 4791 writel(0xffffffff, phba->HAregaddr); 4792 readl(phba->HAregaddr); /* flush */ 4793 return 0; 4794 } 4795 4796 /** 4797 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4798 * 4799 * This function calculates and returns the number of HBQs required to be 4800 * configured. 4801 **/ 4802 int 4803 lpfc_sli_hbq_count(void) 4804 { 4805 return ARRAY_SIZE(lpfc_hbq_defs); 4806 } 4807 4808 /** 4809 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4810 * 4811 * This function adds the number of hbq entries in every HBQ to get 4812 * the total number of hbq entries required for the HBA and returns 4813 * the total count. 4814 **/ 4815 static int 4816 lpfc_sli_hbq_entry_count(void) 4817 { 4818 int hbq_count = lpfc_sli_hbq_count(); 4819 int count = 0; 4820 int i; 4821 4822 for (i = 0; i < hbq_count; ++i) 4823 count += lpfc_hbq_defs[i]->entry_count; 4824 return count; 4825 } 4826 4827 /** 4828 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4829 * 4830 * This function calculates amount of memory required for all hbq entries 4831 * to be configured and returns the total memory required. 4832 **/ 4833 int 4834 lpfc_sli_hbq_size(void) 4835 { 4836 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4837 } 4838 4839 /** 4840 * lpfc_sli_hbq_setup - configure and initialize HBQs 4841 * @phba: Pointer to HBA context object. 4842 * 4843 * This function is called during the SLI initialization to configure 4844 * all the HBQs and post buffers to the HBQ. The caller is not 4845 * required to hold any locks. 
This function will return zero if successful,
 * else it will return a negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
				hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}

/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful,
 * else it will return a negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	/*
	 * Specific case when the MDS diagnostics are enabled and supported.
	 * The receive buffer count is truncated to manage the incoming
	 * traffic.
	 */
	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
		phba->hbqs[LPFC_ELS_HBQ].entry_count =
			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
	else
		phba->hbqs[LPFC_ELS_HBQ].entry_count =
			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
	phba->hbq_count = 1;
	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
	/* Initially populate or replenish the HBQs */
	return 0;
}

/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue the config_port mailbox command. This function restarts the
 * HBA firmware and issues the config_port mailbox command to configure
 * the SLI interface in the sli mode specified by the sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns a negative error
 * code.
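 *
 * Typical usage from lpfc_sli_hba_setup(), in sketch form (illustrative
 * only; the real caller also logs the failure):
 *
 *   rc = lpfc_sli_config_port(phba, 3);           try SLI-3 first
 *   if (rc)
 *           rc = lpfc_sli_config_port(phba, 2);   fall back to SLI-2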
4950 **/ 4951 int 4952 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4953 { 4954 LPFC_MBOXQ_t *pmb; 4955 uint32_t resetcount = 0, rc = 0, done = 0; 4956 4957 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4958 if (!pmb) { 4959 phba->link_state = LPFC_HBA_ERROR; 4960 return -ENOMEM; 4961 } 4962 4963 phba->sli_rev = sli_mode; 4964 while (resetcount < 2 && !done) { 4965 spin_lock_irq(&phba->hbalock); 4966 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4967 spin_unlock_irq(&phba->hbalock); 4968 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4969 lpfc_sli_brdrestart(phba); 4970 rc = lpfc_sli_chipset_init(phba); 4971 if (rc) 4972 break; 4973 4974 spin_lock_irq(&phba->hbalock); 4975 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4976 spin_unlock_irq(&phba->hbalock); 4977 resetcount++; 4978 4979 /* Call pre CONFIG_PORT mailbox command initialization. A 4980 * value of 0 means the call was successful. Any other 4981 * nonzero value is a failure, but if ERESTART is returned, 4982 * the driver may reset the HBA and try again. 4983 */ 4984 rc = lpfc_config_port_prep(phba); 4985 if (rc == -ERESTART) { 4986 phba->link_state = LPFC_LINK_UNKNOWN; 4987 continue; 4988 } else if (rc) 4989 break; 4990 4991 phba->link_state = LPFC_INIT_MBX_CMDS; 4992 lpfc_config_port(phba, pmb); 4993 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4994 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4995 LPFC_SLI3_HBQ_ENABLED | 4996 LPFC_SLI3_CRP_ENABLED | 4997 LPFC_SLI3_DSS_ENABLED); 4998 if (rc != MBX_SUCCESS) { 4999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5000 "0442 Adapter failed to init, mbxCmd x%x " 5001 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 5002 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 5003 spin_lock_irq(&phba->hbalock); 5004 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 5005 spin_unlock_irq(&phba->hbalock); 5006 rc = -ENXIO; 5007 } else { 5008 /* Allow asynchronous mailbox command to go through */ 5009 spin_lock_irq(&phba->hbalock); 5010 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 5011 spin_unlock_irq(&phba->hbalock); 5012 done = 1; 5013 5014 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 5015 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 5016 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 5017 "3110 Port did not grant ASABT\n"); 5018 } 5019 } 5020 if (!done) { 5021 rc = -EINVAL; 5022 goto do_prep_failed; 5023 } 5024 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 5025 if (!pmb->u.mb.un.varCfgPort.cMA) { 5026 rc = -ENXIO; 5027 goto do_prep_failed; 5028 } 5029 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5030 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5031 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5032 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5033 phba->max_vpi : phba->max_vports; 5034 5035 } else 5036 phba->max_vpi = 0; 5037 phba->fips_level = 0; 5038 phba->fips_spec_rev = 0; 5039 if (pmb->u.mb.un.varCfgPort.gdss) { 5040 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5041 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5042 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5043 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5044 "2850 Security Crypto Active. 
FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
				phba->cfg_enable_bg = 0;
				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
			}
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}


/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. The caller is not required to hold any
 * locks. This function issues the config_port mailbox command to configure
 * the SLI, sets up the iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue the init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return a negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3, i;
	int longs;

	switch (phba->cfg_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
					"1824 NPIV enabled: Override sli_mode "
					"parameter (%d) to auto (0).\n",
					phba->cfg_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized sli_mode parameter: %d.\n",
				phba->cfg_sli_mode);

		break;
	}
	phba->fcp_embed_io = 0;	/* SLI4 FC support only */

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && phba->cfg_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	else if (rc && mode == 2)
		rc = lpfc_sli_config_port(phba, 3);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kcalloc(longs,
						  sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
						sizeof(uint16_t),
						GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}

/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region, and populates the driver's
 * data structures.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 *	-EIO - the READ_REV mailbox command failed.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		   uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
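	 * The buffer's bus address is split into high and low 32-bit
	 * halves with putPaddrHigh()/putPaddrLow() below, because the
	 * READ_REV mailbox carries the VPD DMA address as two words.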
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
					  &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}

/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device controller attributes, such as
 * the link type, link number and BIOS version, for the port this PCI
 * function is attached to.
 *
 * Return codes
 *	0 - successful
 *	otherwise - failed to retrieve controller attributes
 **/
static int
lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	int rc;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}

	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);

	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
		sizeof(phba->BIOSVersion));

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no,
			phba->BIOSVersion);
out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}

/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the physical port name of the SLI4 device that
 * this PCI function is attached to.
 *
 * Return codes
 *	0 - successful
 *	otherwise - failed to retrieve physical port name
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_port_name *get_port_name;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	rc = lpfc_sli4_get_ctl_attr(phba);
	if (rc)
		goto out_free_mboxq;

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name =
		    bf_get(lpfc_mbx_get_port_name_name0,
			   &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				    &get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}

/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_queue *eq;

	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
	if (sli4_hba->nvmels_cq)
		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
					   LPFC_QUEUE_REARM);

	if (sli4_hba->hdwq) {
		/* Loop thru all Hardware Queues */
		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
			qp = &sli4_hba->hdwq[qidx];
			/* ARM the corresponding CQ */
			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
						   LPFC_QUEUE_REARM);
		}

		/* Loop thru all IRQ vectors */
		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
			eq = sli4_hba->hba_eq_hdl[qidx].eq;
			/* ARM the corresponding EQ */
			sli4_hba->sli4_write_eq_db(phba, eq,
						   0, LPFC_QUEUE_REARM);
		}
	}

	if (phba->nvmet_support) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
			sli4_hba->sli4_write_cq_db(phba,
				sli4_hba->nvmet_cqset[qidx], 0,
				LPFC_QUEUE_REARM);
		}
	}
}

/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
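 *
 * Usage sketch (as in lpfc_sli4_chk_avail_extnt_rsrc below; illustrative
 * only):
 *
 *	uint16_t cnt, size;
 *
 *	if (lpfc_sli4_get_avail_extnt_rsrc(phba, type, &cnt, &size))
 *		return -EIO;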
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *    1: Extent count or size has changed.
 *    0: No changes.
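 *
 * Typical use (see lpfc_sli4_alloc_resource_identifiers later in this
 * file): call once per resource type after a port reset and treat any
 * nonzero return as a cue to deallocate and reallocate all extents of
 * that type.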
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	/* Guard against an unsupported @type leaving the list unset */
	if (!rsrc_blk_list)
		return -EIO;

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}

/**
 * lpfc_sli4_cfg_post_extnts - Issue an extent allocation request
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: on return, LPFC_SLI4_MBX_EMBED or LPFC_SLI4_MBX_NEMBED depending
 *       on the mailbox format chosen.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
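	 *
	 * Illustrative sizing note (not in the original source): each
	 * extent id in the response is a uint16_t, so the request stays
	 * embedded only while extnt_cnt * sizeof(uint16_t) fits within
	 * emb_len computed above; otherwise the request is regrown by the
	 * config-header and extent-word overhead and sent non-embedded.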
5804 */ 5805 *emb = LPFC_SLI4_MBX_EMBED; 5806 if (req_len > emb_len) { 5807 req_len = extnt_cnt * sizeof(uint16_t) + 5808 sizeof(union lpfc_sli4_cfg_shdr) + 5809 sizeof(uint32_t); 5810 *emb = LPFC_SLI4_MBX_NEMBED; 5811 } 5812 5813 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5814 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5815 req_len, *emb); 5816 if (alloc_len < req_len) { 5817 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5818 "2982 Allocated DMA memory size (x%x) is " 5819 "less than the requested DMA memory " 5820 "size (x%x)\n", alloc_len, req_len); 5821 return -ENOMEM; 5822 } 5823 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5824 if (unlikely(rc)) 5825 return -EIO; 5826 5827 if (!phba->sli4_hba.intr_enable) 5828 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5829 else { 5830 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5831 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5832 } 5833 5834 if (unlikely(rc)) 5835 rc = -EIO; 5836 return rc; 5837 } 5838 5839 /** 5840 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5841 * @phba: Pointer to HBA context object. 5842 * @type: The resource extent type to allocate. 5843 * 5844 * This function allocates the number of elements for the specified 5845 * resource type. 5846 **/ 5847 static int 5848 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5849 { 5850 bool emb = false; 5851 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5852 uint16_t rsrc_id, rsrc_start, j, k; 5853 uint16_t *ids; 5854 int i, rc; 5855 unsigned long longs; 5856 unsigned long *bmask; 5857 struct lpfc_rsrc_blks *rsrc_blks; 5858 LPFC_MBOXQ_t *mbox; 5859 uint32_t length; 5860 struct lpfc_id_range *id_array = NULL; 5861 void *virtaddr = NULL; 5862 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5863 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5864 struct list_head *ext_blk_list; 5865 5866 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5867 &rsrc_cnt, 5868 &rsrc_size); 5869 if (unlikely(rc)) 5870 return -EIO; 5871 5872 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5873 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5874 "3009 No available Resource Extents " 5875 "for resource type 0x%x: Count: 0x%x, " 5876 "Size 0x%x\n", type, rsrc_cnt, 5877 rsrc_size); 5878 return -ENOMEM; 5879 } 5880 5881 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5882 "2903 Post resource extents type-0x%x: " 5883 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5884 5885 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5886 if (!mbox) 5887 return -ENOMEM; 5888 5889 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5890 if (unlikely(rc)) { 5891 rc = -EIO; 5892 goto err_exit; 5893 } 5894 5895 /* 5896 * Figure out where the response is located. Then get local pointers 5897 * to the response data. The port does not guarantee to respond to 5898 * all extents counts request so update the local variable with the 5899 * allocated count from the port. 
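	 *
	 * Concretely (descriptive note): an embedded response is read
	 * straight out of mbox->u.mqe.un.alloc_rsrc_extents, while a
	 * non-embedded response lives in the first external SGE,
	 * reachable through mbox->sge_array->addr[0].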
5900 */ 5901 if (emb == LPFC_SLI4_MBX_EMBED) { 5902 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5903 id_array = &rsrc_ext->u.rsp.id[0]; 5904 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5905 } else { 5906 virtaddr = mbox->sge_array->addr[0]; 5907 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5908 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5909 id_array = &n_rsrc->id; 5910 } 5911 5912 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5913 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5914 5915 /* 5916 * Based on the resource size and count, correct the base and max 5917 * resource values. 5918 */ 5919 length = sizeof(struct lpfc_rsrc_blks); 5920 switch (type) { 5921 case LPFC_RSC_TYPE_FCOE_RPI: 5922 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5923 sizeof(unsigned long), 5924 GFP_KERNEL); 5925 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5926 rc = -ENOMEM; 5927 goto err_exit; 5928 } 5929 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5930 sizeof(uint16_t), 5931 GFP_KERNEL); 5932 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5933 kfree(phba->sli4_hba.rpi_bmask); 5934 rc = -ENOMEM; 5935 goto err_exit; 5936 } 5937 5938 /* 5939 * The next_rpi was initialized with the maximum available 5940 * count but the port may allocate a smaller number. Catch 5941 * that case and update the next_rpi. 5942 */ 5943 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5944 5945 /* Initialize local ptrs for common extent processing later. */ 5946 bmask = phba->sli4_hba.rpi_bmask; 5947 ids = phba->sli4_hba.rpi_ids; 5948 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5949 break; 5950 case LPFC_RSC_TYPE_FCOE_VPI: 5951 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5952 GFP_KERNEL); 5953 if (unlikely(!phba->vpi_bmask)) { 5954 rc = -ENOMEM; 5955 goto err_exit; 5956 } 5957 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5958 GFP_KERNEL); 5959 if (unlikely(!phba->vpi_ids)) { 5960 kfree(phba->vpi_bmask); 5961 rc = -ENOMEM; 5962 goto err_exit; 5963 } 5964 5965 /* Initialize local ptrs for common extent processing later. */ 5966 bmask = phba->vpi_bmask; 5967 ids = phba->vpi_ids; 5968 ext_blk_list = &phba->lpfc_vpi_blk_list; 5969 break; 5970 case LPFC_RSC_TYPE_FCOE_XRI: 5971 phba->sli4_hba.xri_bmask = kcalloc(longs, 5972 sizeof(unsigned long), 5973 GFP_KERNEL); 5974 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5975 rc = -ENOMEM; 5976 goto err_exit; 5977 } 5978 phba->sli4_hba.max_cfg_param.xri_used = 0; 5979 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5980 sizeof(uint16_t), 5981 GFP_KERNEL); 5982 if (unlikely(!phba->sli4_hba.xri_ids)) { 5983 kfree(phba->sli4_hba.xri_bmask); 5984 rc = -ENOMEM; 5985 goto err_exit; 5986 } 5987 5988 /* Initialize local ptrs for common extent processing later. */ 5989 bmask = phba->sli4_hba.xri_bmask; 5990 ids = phba->sli4_hba.xri_ids; 5991 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5992 break; 5993 case LPFC_RSC_TYPE_FCOE_VFI: 5994 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5995 sizeof(unsigned long), 5996 GFP_KERNEL); 5997 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5998 rc = -ENOMEM; 5999 goto err_exit; 6000 } 6001 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 6002 sizeof(uint16_t), 6003 GFP_KERNEL); 6004 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6005 kfree(phba->sli4_hba.vfi_bmask); 6006 rc = -ENOMEM; 6007 goto err_exit; 6008 } 6009 6010 /* Initialize local ptrs for common extent processing later. 
 */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.io_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word. */
		if ((i % 2) == 1)
			k++;
	}
err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends the
	 * resource type. All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it.
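	 * The port releases every allocated extent of this type in one
	 * shot; freeing the driver-side bookkeeping below remains this
	 * function's job (see the function header above).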
*/ 6104 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6105 LPFC_SLI4_MBX_EMBED); 6106 if (unlikely(rc)) { 6107 rc = -EIO; 6108 goto out_free_mbox; 6109 } 6110 if (!phba->sli4_hba.intr_enable) 6111 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6112 else { 6113 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6114 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6115 } 6116 if (unlikely(rc)) { 6117 rc = -EIO; 6118 goto out_free_mbox; 6119 } 6120 6121 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6122 if (bf_get(lpfc_mbox_hdr_status, 6123 &dealloc_rsrc->header.cfg_shdr.response)) { 6124 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6125 "2919 Failed to release resource extents " 6126 "for type %d - Status 0x%x Add'l Status 0x%x. " 6127 "Resource memory not released.\n", 6128 type, 6129 bf_get(lpfc_mbox_hdr_status, 6130 &dealloc_rsrc->header.cfg_shdr.response), 6131 bf_get(lpfc_mbox_hdr_add_status, 6132 &dealloc_rsrc->header.cfg_shdr.response)); 6133 rc = -EIO; 6134 goto out_free_mbox; 6135 } 6136 6137 /* Release kernel memory resources for the specific type. */ 6138 switch (type) { 6139 case LPFC_RSC_TYPE_FCOE_VPI: 6140 kfree(phba->vpi_bmask); 6141 kfree(phba->vpi_ids); 6142 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6143 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6144 &phba->lpfc_vpi_blk_list, list) { 6145 list_del_init(&rsrc_blk->list); 6146 kfree(rsrc_blk); 6147 } 6148 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6149 break; 6150 case LPFC_RSC_TYPE_FCOE_XRI: 6151 kfree(phba->sli4_hba.xri_bmask); 6152 kfree(phba->sli4_hba.xri_ids); 6153 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6154 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6155 list_del_init(&rsrc_blk->list); 6156 kfree(rsrc_blk); 6157 } 6158 break; 6159 case LPFC_RSC_TYPE_FCOE_VFI: 6160 kfree(phba->sli4_hba.vfi_bmask); 6161 kfree(phba->sli4_hba.vfi_ids); 6162 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6163 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6164 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6165 list_del_init(&rsrc_blk->list); 6166 kfree(rsrc_blk); 6167 } 6168 break; 6169 case LPFC_RSC_TYPE_FCOE_RPI: 6170 /* RPI bitmask and physical id array are cleaned up earlier. 
*/ 6171 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6172 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6173 list_del_init(&rsrc_blk->list); 6174 kfree(rsrc_blk); 6175 } 6176 break; 6177 default: 6178 break; 6179 } 6180 6181 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6182 6183 out_free_mbox: 6184 mempool_free(mbox, phba->mbox_mem_pool); 6185 return rc; 6186 } 6187 6188 static void 6189 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6190 uint32_t feature) 6191 { 6192 uint32_t len; 6193 6194 len = sizeof(struct lpfc_mbx_set_feature) - 6195 sizeof(struct lpfc_sli4_cfg_mhdr); 6196 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6197 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6198 LPFC_SLI4_MBX_EMBED); 6199 6200 switch (feature) { 6201 case LPFC_SET_UE_RECOVERY: 6202 bf_set(lpfc_mbx_set_feature_UER, 6203 &mbox->u.mqe.un.set_feature, 1); 6204 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6205 mbox->u.mqe.un.set_feature.param_len = 8; 6206 break; 6207 case LPFC_SET_MDS_DIAGS: 6208 bf_set(lpfc_mbx_set_feature_mds, 6209 &mbox->u.mqe.un.set_feature, 1); 6210 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6211 &mbox->u.mqe.un.set_feature, 1); 6212 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6213 mbox->u.mqe.un.set_feature.param_len = 8; 6214 break; 6215 case LPFC_SET_DUAL_DUMP: 6216 bf_set(lpfc_mbx_set_feature_dd, 6217 &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); 6218 bf_set(lpfc_mbx_set_feature_ddquery, 6219 &mbox->u.mqe.un.set_feature, 0); 6220 mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; 6221 mbox->u.mqe.un.set_feature.param_len = 4; 6222 break; 6223 } 6224 6225 return; 6226 } 6227 6228 /** 6229 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter 6230 * @phba: Pointer to HBA context object. 6231 * 6232 * Disable FW logging into host memory on the adapter. To 6233 * be done before reading logs from the host memory. 6234 **/ 6235 void 6236 lpfc_ras_stop_fwlog(struct lpfc_hba *phba) 6237 { 6238 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6239 6240 spin_lock_irq(&phba->hbalock); 6241 ras_fwlog->state = INACTIVE; 6242 spin_unlock_irq(&phba->hbalock); 6243 6244 /* Disable FW logging to host memory */ 6245 writel(LPFC_CTL_PDEV_CTL_DDL_RAS, 6246 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 6247 6248 /* Wait 10ms for firmware to stop using DMA buffer */ 6249 usleep_range(10 * 1000, 20 * 1000); 6250 } 6251 6252 /** 6253 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6254 * @phba: Pointer to HBA context object. 6255 * 6256 * This function is called to free memory allocated for RAS FW logging 6257 * support in the driver. 
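 *
 * Summary of the body below (descriptive, not normative): each 64K
 * buffer on the fwlog_buff_list is unlinked and freed, the two-word
 * LWPD buffer is released, and the fwlog state is marked INACTIVE
 * under hbalock.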
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data
 * (LWPD) and for the buffers posted to the adapter for FW log updates.
 * The buffer count is calculated from the module parameter
 * ras_fwlog_buffsize; each buffer posted to the FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6185 LWPD Memory Alloc Failed\n");

		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

free_mem:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}

/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for the driver's RAS MBX command to the device.
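 *
 * Descriptive note: on MBX_SUCCESS the fwlog state is set to ACTIVE and
 * the mailbox is returned to the pool; on any failure the RAS DMA
 * buffers are freed and FW logging stays disabled.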
6362 **/ 6363 static void 6364 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6365 { 6366 MAILBOX_t *mb; 6367 union lpfc_sli4_cfg_shdr *shdr; 6368 uint32_t shdr_status, shdr_add_status; 6369 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6370 6371 mb = &pmb->u.mb; 6372 6373 shdr = (union lpfc_sli4_cfg_shdr *) 6374 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6375 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6376 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6377 6378 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6379 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6380 "6188 FW LOG mailbox " 6381 "completed with status x%x add_status x%x," 6382 " mbx status x%x\n", 6383 shdr_status, shdr_add_status, mb->mbxStatus); 6384 6385 ras_fwlog->ras_hwsupport = false; 6386 goto disable_ras; 6387 } 6388 6389 spin_lock_irq(&phba->hbalock); 6390 ras_fwlog->state = ACTIVE; 6391 spin_unlock_irq(&phba->hbalock); 6392 mempool_free(pmb, phba->mbox_mem_pool); 6393 6394 return; 6395 6396 disable_ras: 6397 /* Free RAS DMA memory */ 6398 lpfc_sli4_ras_dma_free(phba); 6399 mempool_free(pmb, phba->mbox_mem_pool); 6400 } 6401 6402 /** 6403 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6404 * @phba: pointer to lpfc hba data structure. 6405 * @fwlog_level: Logging verbosity level. 6406 * @fwlog_enable: Enable/Disable logging. 6407 * 6408 * Initialize memory and post mailbox command to enable FW logging in host 6409 * memory. 6410 **/ 6411 int 6412 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6413 uint32_t fwlog_level, 6414 uint32_t fwlog_enable) 6415 { 6416 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6417 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6418 struct lpfc_dmabuf *dmabuf; 6419 LPFC_MBOXQ_t *mbox; 6420 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6421 int rc = 0; 6422 6423 spin_lock_irq(&phba->hbalock); 6424 ras_fwlog->state = INACTIVE; 6425 spin_unlock_irq(&phba->hbalock); 6426 6427 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6428 phba->cfg_ras_fwlog_buffsize); 6429 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6430 6431 /* 6432 * If re-enabling FW logging support use earlier allocated 6433 * DMA buffers while posting MBX command. 
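	 *
	 * In other words (descriptive note): lwpd.virt is non-NULL only
	 * after a successful lpfc_sli4_ras_dma_alloc(), so the check below
	 * allocates on the first enable and reuses the same buffers on a
	 * re-enable.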
6434 **/ 6435 if (!ras_fwlog->lwpd.virt) { 6436 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6437 if (rc) { 6438 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6439 "6189 FW Log Memory Allocation Failed"); 6440 return rc; 6441 } 6442 } 6443 6444 /* Setup Mailbox command */ 6445 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6446 if (!mbox) { 6447 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6448 "6190 RAS MBX Alloc Failed"); 6449 rc = -ENOMEM; 6450 goto mem_free; 6451 } 6452 6453 ras_fwlog->fw_loglevel = fwlog_level; 6454 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6455 sizeof(struct lpfc_sli4_cfg_mhdr)); 6456 6457 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6458 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6459 len, LPFC_SLI4_MBX_EMBED); 6460 6461 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6462 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6463 fwlog_enable); 6464 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6465 ras_fwlog->fw_loglevel); 6466 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6467 ras_fwlog->fw_buffcount); 6468 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6469 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6470 6471 /* Update DMA buffer address */ 6472 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6473 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6474 6475 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6476 putPaddrLow(dmabuf->phys); 6477 6478 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6479 putPaddrHigh(dmabuf->phys); 6480 } 6481 6482 /* Update LPWD address */ 6483 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6484 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6485 6486 spin_lock_irq(&phba->hbalock); 6487 ras_fwlog->state = REG_INPROGRESS; 6488 spin_unlock_irq(&phba->hbalock); 6489 mbox->vport = phba->pport; 6490 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6491 6492 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6493 6494 if (rc == MBX_NOT_FINISHED) { 6495 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6496 "6191 FW-Log Mailbox failed. " 6497 "status %d mbxStatus : x%x", rc, 6498 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6499 mempool_free(mbox, phba->mbox_mem_pool); 6500 rc = -EIO; 6501 goto mem_free; 6502 } else 6503 rc = 0; 6504 mem_free: 6505 if (rc) 6506 lpfc_sli4_ras_dma_free(phba); 6507 6508 return rc; 6509 } 6510 6511 /** 6512 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6513 * @phba: Pointer to HBA context object. 6514 * 6515 * Check if RAS is supported on the adapter and initialize it. 6516 **/ 6517 void 6518 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6519 { 6520 /* Check RAS FW Log needs to be enabled or not */ 6521 if (lpfc_check_fwlog_support(phba)) 6522 return; 6523 6524 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6525 LPFC_RAS_ENABLE_LOGGING); 6526 } 6527 6528 /** 6529 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6530 * @phba: Pointer to HBA context object. 6531 * 6532 * This function allocates all SLI4 resource identifiers. 6533 **/ 6534 int 6535 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6536 { 6537 int i, rc, error = 0; 6538 uint16_t count, base; 6539 unsigned long longs; 6540 6541 if (!phba->sli4_hba.rpi_hdrs_in_use) 6542 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6543 if (phba->sli4_hba.extents_in_use) { 6544 /* 6545 * The port supports resource extents. 
The XRI, VPI, VFI, RPI 6546 * resource extent count must be read and allocated before 6547 * provisioning the resource id arrays. 6548 */ 6549 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6550 LPFC_IDX_RSRC_RDY) { 6551 /* 6552 * Extent-based resources are set - the driver could 6553 * be in a port reset. Figure out if any corrective 6554 * actions need to be taken. 6555 */ 6556 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6557 LPFC_RSC_TYPE_FCOE_VFI); 6558 if (rc != 0) 6559 error++; 6560 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6561 LPFC_RSC_TYPE_FCOE_VPI); 6562 if (rc != 0) 6563 error++; 6564 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6565 LPFC_RSC_TYPE_FCOE_XRI); 6566 if (rc != 0) 6567 error++; 6568 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6569 LPFC_RSC_TYPE_FCOE_RPI); 6570 if (rc != 0) 6571 error++; 6572 6573 /* 6574 * It's possible that the number of resources 6575 * provided to this port instance changed between 6576 * resets. Detect this condition and reallocate 6577 * resources. Otherwise, there is no action. 6578 */ 6579 if (error) { 6580 lpfc_printf_log(phba, KERN_INFO, 6581 LOG_MBOX | LOG_INIT, 6582 "2931 Detected extent resource " 6583 "change. Reallocating all " 6584 "extents.\n"); 6585 rc = lpfc_sli4_dealloc_extent(phba, 6586 LPFC_RSC_TYPE_FCOE_VFI); 6587 rc = lpfc_sli4_dealloc_extent(phba, 6588 LPFC_RSC_TYPE_FCOE_VPI); 6589 rc = lpfc_sli4_dealloc_extent(phba, 6590 LPFC_RSC_TYPE_FCOE_XRI); 6591 rc = lpfc_sli4_dealloc_extent(phba, 6592 LPFC_RSC_TYPE_FCOE_RPI); 6593 } else 6594 return 0; 6595 } 6596 6597 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6598 if (unlikely(rc)) 6599 goto err_exit; 6600 6601 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6602 if (unlikely(rc)) 6603 goto err_exit; 6604 6605 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6606 if (unlikely(rc)) 6607 goto err_exit; 6608 6609 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6610 if (unlikely(rc)) 6611 goto err_exit; 6612 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6613 LPFC_IDX_RSRC_RDY); 6614 return rc; 6615 } else { 6616 /* 6617 * The port does not support resource extents. The XRI, VPI, 6618 * VFI, RPI resource ids were determined from READ_CONFIG. 6619 * Just allocate the bitmasks and provision the resource id 6620 * arrays. If a port reset is active, the resources don't 6621 * need any action - just exit. 6622 */ 6623 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6624 LPFC_IDX_RSRC_RDY) { 6625 lpfc_sli4_dealloc_resource_identifiers(phba); 6626 lpfc_sli4_remove_rpis(phba); 6627 } 6628 /* RPIs. */ 6629 count = phba->sli4_hba.max_cfg_param.max_rpi; 6630 if (count <= 0) { 6631 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6632 "3279 Invalid provisioning of " 6633 "rpi:%d\n", count); 6634 rc = -EINVAL; 6635 goto err_exit; 6636 } 6637 base = phba->sli4_hba.max_cfg_param.rpi_base; 6638 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6639 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6640 sizeof(unsigned long), 6641 GFP_KERNEL); 6642 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6643 rc = -ENOMEM; 6644 goto err_exit; 6645 } 6646 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6647 GFP_KERNEL); 6648 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6649 rc = -ENOMEM; 6650 goto free_rpi_bmask; 6651 } 6652 6653 for (i = 0; i < count; i++) 6654 phba->sli4_hba.rpi_ids[i] = base + i; 6655 6656 /* VPIs. 
*/ 6657 count = phba->sli4_hba.max_cfg_param.max_vpi; 6658 if (count <= 0) { 6659 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6660 "3280 Invalid provisioning of " 6661 "vpi:%d\n", count); 6662 rc = -EINVAL; 6663 goto free_rpi_ids; 6664 } 6665 base = phba->sli4_hba.max_cfg_param.vpi_base; 6666 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6667 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6668 GFP_KERNEL); 6669 if (unlikely(!phba->vpi_bmask)) { 6670 rc = -ENOMEM; 6671 goto free_rpi_ids; 6672 } 6673 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6674 GFP_KERNEL); 6675 if (unlikely(!phba->vpi_ids)) { 6676 rc = -ENOMEM; 6677 goto free_vpi_bmask; 6678 } 6679 6680 for (i = 0; i < count; i++) 6681 phba->vpi_ids[i] = base + i; 6682 6683 /* XRIs. */ 6684 count = phba->sli4_hba.max_cfg_param.max_xri; 6685 if (count <= 0) { 6686 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6687 "3281 Invalid provisioning of " 6688 "xri:%d\n", count); 6689 rc = -EINVAL; 6690 goto free_vpi_ids; 6691 } 6692 base = phba->sli4_hba.max_cfg_param.xri_base; 6693 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6694 phba->sli4_hba.xri_bmask = kcalloc(longs, 6695 sizeof(unsigned long), 6696 GFP_KERNEL); 6697 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6698 rc = -ENOMEM; 6699 goto free_vpi_ids; 6700 } 6701 phba->sli4_hba.max_cfg_param.xri_used = 0; 6702 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6703 GFP_KERNEL); 6704 if (unlikely(!phba->sli4_hba.xri_ids)) { 6705 rc = -ENOMEM; 6706 goto free_xri_bmask; 6707 } 6708 6709 for (i = 0; i < count; i++) 6710 phba->sli4_hba.xri_ids[i] = base + i; 6711 6712 /* VFIs. */ 6713 count = phba->sli4_hba.max_cfg_param.max_vfi; 6714 if (count <= 0) { 6715 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6716 "3282 Invalid provisioning of " 6717 "vfi:%d\n", count); 6718 rc = -EINVAL; 6719 goto free_xri_ids; 6720 } 6721 base = phba->sli4_hba.max_cfg_param.vfi_base; 6722 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6723 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6724 sizeof(unsigned long), 6725 GFP_KERNEL); 6726 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6727 rc = -ENOMEM; 6728 goto free_xri_ids; 6729 } 6730 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6731 GFP_KERNEL); 6732 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6733 rc = -ENOMEM; 6734 goto free_vfi_bmask; 6735 } 6736 6737 for (i = 0; i < count; i++) 6738 phba->sli4_hba.vfi_ids[i] = base + i; 6739 6740 /* 6741 * Mark all resources ready. An HBA reset doesn't need 6742 * to reset the initialization. 6743 */ 6744 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6745 LPFC_IDX_RSRC_RDY); 6746 return 0; 6747 } 6748 6749 free_vfi_bmask: 6750 kfree(phba->sli4_hba.vfi_bmask); 6751 phba->sli4_hba.vfi_bmask = NULL; 6752 free_xri_ids: 6753 kfree(phba->sli4_hba.xri_ids); 6754 phba->sli4_hba.xri_ids = NULL; 6755 free_xri_bmask: 6756 kfree(phba->sli4_hba.xri_bmask); 6757 phba->sli4_hba.xri_bmask = NULL; 6758 free_vpi_ids: 6759 kfree(phba->vpi_ids); 6760 phba->vpi_ids = NULL; 6761 free_vpi_bmask: 6762 kfree(phba->vpi_bmask); 6763 phba->vpi_bmask = NULL; 6764 free_rpi_ids: 6765 kfree(phba->sli4_hba.rpi_ids); 6766 phba->sli4_hba.rpi_ids = NULL; 6767 free_rpi_bmask: 6768 kfree(phba->sli4_hba.rpi_bmask); 6769 phba->sli4_hba.rpi_bmask = NULL; 6770 err_exit: 6771 return rc; 6772 } 6773 6774 /** 6775 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6776 * @phba: Pointer to HBA context object. 
 *
 * This function deallocates all previously allocated SLI4 resource
 * identifiers for the port.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count. The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = emb_len;
	if (req_len * curr_blks > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2983 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For a single
 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
 * post mailbox command for posting.
 *
 * Returns: the number of XRIs actually posted on success, -EIO on failure.
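 *
 * Caller sketch (illustrative only; the list head and count are
 * placeholders for whichever sgl pool is being reposted):
 *
 *	cnt = lpfc_sli4_repost_sgl_list(phba, &some_sgl_list, some_cnt);
 *	if (cnt <= 0)
 *		(treat the pool as unusable and fail the setup)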
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls that failed to post */
7081 lpfc_free_sgl_list(phba, &free_sgl_list); 7082 7083 /* push sgls posted to the available list */ 7084 if (!list_empty(&post_sgl_list)) { 7085 spin_lock_irq(&phba->hbalock); 7086 spin_lock(&phba->sli4_hba.sgl_list_lock); 7087 list_splice_init(&post_sgl_list, sgl_list); 7088 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7089 spin_unlock_irq(&phba->hbalock); 7090 } else { 7091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7092 "3161 Failure to post sgl to port.\n"); 7093 return -EIO; 7094 } 7095 7096 /* return the number of XRIs actually posted */ 7097 return total_cnt; 7098 } 7099 7100 /** 7101 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7102 * @phba: pointer to lpfc hba data structure. 7103 * 7104 * This routine walks the list of nvme buffers that have been allocated and 7105 * repost them to the port by using SGL block post. This is needed after a 7106 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7107 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7108 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7109 * 7110 * Returns: 0 = success, non-zero failure. 7111 **/ 7112 static int 7113 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7114 { 7115 LIST_HEAD(post_nblist); 7116 int num_posted, rc = 0; 7117 7118 /* get all NVME buffers need to repost to a local list */ 7119 lpfc_io_buf_flush(phba, &post_nblist); 7120 7121 /* post the list of nvme buffer sgls to port if available */ 7122 if (!list_empty(&post_nblist)) { 7123 num_posted = lpfc_sli4_post_io_sgl_list( 7124 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7125 /* failed to post any nvme buffer, return error */ 7126 if (num_posted == 0) 7127 rc = -EIO; 7128 } 7129 return rc; 7130 } 7131 7132 static void 7133 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7134 { 7135 uint32_t len; 7136 7137 len = sizeof(struct lpfc_mbx_set_host_data) - 7138 sizeof(struct lpfc_sli4_cfg_mhdr); 7139 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7140 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7141 LPFC_SLI4_MBX_EMBED); 7142 7143 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7144 mbox->u.mqe.un.set_host_data.param_len = 7145 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7146 snprintf(mbox->u.mqe.un.set_host_data.data, 7147 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7148 "Linux %s v"LPFC_DRIVER_VERSION, 7149 (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); 7150 } 7151 7152 int 7153 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7154 struct lpfc_queue *drq, int count, int idx) 7155 { 7156 int rc, i; 7157 struct lpfc_rqe hrqe; 7158 struct lpfc_rqe drqe; 7159 struct lpfc_rqb *rqbp; 7160 unsigned long flags; 7161 struct rqb_dmabuf *rqb_buffer; 7162 LIST_HEAD(rqb_buf_list); 7163 7164 spin_lock_irqsave(&phba->hbalock, flags); 7165 rqbp = hrq->rqbp; 7166 for (i = 0; i < count; i++) { 7167 /* IF RQ is already full, don't bother */ 7168 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7169 break; 7170 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7171 if (!rqb_buffer) 7172 break; 7173 rqb_buffer->hrq = hrq; 7174 rqb_buffer->drq = drq; 7175 rqb_buffer->idx = idx; 7176 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7177 } 7178 while (!list_empty(&rqb_buf_list)) { 7179 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7180 hbuf.list); 7181 7182 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7183 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7184 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7185 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7186 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7187 if (rc < 0) { 7188 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7189 "6421 Cannot post to HRQ %d: %x %x %x " 7190 "DRQ %x %x\n", 7191 hrq->queue_id, 7192 hrq->host_index, 7193 hrq->hba_index, 7194 hrq->entry_count, 7195 drq->host_index, 7196 drq->hba_index); 7197 rqbp->rqb_free_buffer(phba, rqb_buffer); 7198 } else { 7199 list_add_tail(&rqb_buffer->hbuf.list, 7200 &rqbp->rqb_buffer_list); 7201 rqbp->buffer_count++; 7202 } 7203 } 7204 spin_unlock_irqrestore(&phba->hbalock, flags); 7205 return 1; 7206 } 7207 7208 /** 7209 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7210 * @phba: Pointer to HBA context object. 7211 * 7212 * This function is the main SLI4 device initialization PCI function. This 7213 * function is called by the HBA initialization code, HBA reset code and 7214 * HBA error attention handler code. Caller is not required to hold any 7215 * locks. 7216 **/ 7217 int 7218 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7219 { 7220 int rc, i, cnt, len, dd; 7221 LPFC_MBOXQ_t *mboxq; 7222 struct lpfc_mqe *mqe; 7223 uint8_t *vpd; 7224 uint32_t vpd_size; 7225 uint32_t ftr_rsp = 0; 7226 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7227 struct lpfc_vport *vport = phba->pport; 7228 struct lpfc_dmabuf *mp; 7229 struct lpfc_rqb *rqbp; 7230 7231 /* Perform a PCI function reset to start from clean */ 7232 rc = lpfc_pci_function_reset(phba); 7233 if (unlikely(rc)) 7234 return -ENODEV; 7235 7236 /* Check the HBA Host Status Register for readyness */ 7237 rc = lpfc_sli4_post_status_check(phba); 7238 if (unlikely(rc)) 7239 return -ENODEV; 7240 else { 7241 spin_lock_irq(&phba->hbalock); 7242 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7243 spin_unlock_irq(&phba->hbalock); 7244 } 7245 7246 /* 7247 * Allocate a single mailbox container for initializing the 7248 * port. 7249 */ 7250 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7251 if (!mboxq) 7252 return -ENOMEM; 7253 7254 /* Issue READ_REV to collect vpd and FW information. 
*/ 7255 vpd_size = SLI4_PAGE_SIZE; 7256 vpd = kzalloc(vpd_size, GFP_KERNEL); 7257 if (!vpd) { 7258 rc = -ENOMEM; 7259 goto out_free_mbox; 7260 } 7261 7262 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7263 if (unlikely(rc)) { 7264 kfree(vpd); 7265 goto out_free_mbox; 7266 } 7267 7268 mqe = &mboxq->u.mqe; 7269 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7270 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7271 phba->hba_flag |= HBA_FCOE_MODE; 7272 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7273 } else { 7274 phba->hba_flag &= ~HBA_FCOE_MODE; 7275 } 7276 7277 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7278 LPFC_DCBX_CEE_MODE) 7279 phba->hba_flag |= HBA_FIP_SUPPORT; 7280 else 7281 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7282 7283 phba->hba_flag &= ~HBA_IOQ_FLUSH; 7284 7285 if (phba->sli_rev != LPFC_SLI_REV4) { 7286 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7287 "0376 READ_REV Error. SLI Level %d " 7288 "FCoE enabled %d\n", 7289 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7290 rc = -EIO; 7291 kfree(vpd); 7292 goto out_free_mbox; 7293 } 7294 7295 /* 7296 * Continue initialization with default values even if the driver failed 7297 * to read FCoE param config regions; only read the parameters if the 7298 * board is FCoE 7299 */ 7300 if (phba->hba_flag & HBA_FCOE_MODE && 7301 lpfc_sli4_read_fcoe_params(phba)) 7302 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7303 "2570 Failed to read FCoE parameters\n"); 7304 7305 /* 7306 * Retrieve the sli4 device physical port name; failure to do so 7307 * is considered non-fatal. 7308 */ 7309 rc = lpfc_sli4_retrieve_pport_name(phba); 7310 if (!rc) 7311 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7312 "3080 Successful retrieving SLI4 device " 7313 "physical port name: %s.\n", phba->Port); 7314 7315 rc = lpfc_sli4_get_ctl_attr(phba); 7316 if (!rc) 7317 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7318 "8351 Successful retrieving SLI4 device " 7319 "CTL ATTR\n"); 7320 7321 /* 7322 * Evaluate the read rev and vpd data. Populate the driver 7323 * state with the results. If this routine fails, the failure 7324 * is not fatal as the driver will use generic values. 7325 */ 7326 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7327 if (unlikely(!rc)) { 7328 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7329 "0377 Error %d parsing vpd.
" 7330 "Using defaults.\n", rc); 7331 rc = 0; 7332 } 7333 kfree(vpd); 7334 7335 /* Save information as VPD data */ 7336 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7337 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7338 7339 /* 7340 * This is because first G7 ASIC doesn't support the standard 7341 * 0x5a NVME cmd descriptor type/subtype 7342 */ 7343 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7344 LPFC_SLI_INTF_IF_TYPE_6) && 7345 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7346 (phba->vpd.rev.smRev == 0) && 7347 (phba->cfg_nvme_embed_cmd == 1)) 7348 phba->cfg_nvme_embed_cmd = 0; 7349 7350 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7351 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7352 &mqe->un.read_rev); 7353 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7354 &mqe->un.read_rev); 7355 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7356 &mqe->un.read_rev); 7357 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7358 &mqe->un.read_rev); 7359 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7360 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7361 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7362 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7363 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7364 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7365 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7366 "(%d):0380 READ_REV Status x%x " 7367 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7368 mboxq->vport ? mboxq->vport->vpi : 0, 7369 bf_get(lpfc_mqe_status, mqe), 7370 phba->vpd.rev.opFwName, 7371 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7372 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7373 7374 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7375 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7376 if (phba->pport->cfg_lun_queue_depth > rc) { 7377 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7378 "3362 LUN queue depth changed from %d to %d\n", 7379 phba->pport->cfg_lun_queue_depth, rc); 7380 phba->pport->cfg_lun_queue_depth = rc; 7381 } 7382 7383 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7384 LPFC_SLI_INTF_IF_TYPE_0) { 7385 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7386 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7387 if (rc == MBX_SUCCESS) { 7388 phba->hba_flag |= HBA_RECOVERABLE_UE; 7389 /* Set 1Sec interval to detect UE */ 7390 phba->eratt_poll_interval = 1; 7391 phba->sli4_hba.ue_to_sr = bf_get( 7392 lpfc_mbx_set_feature_UESR, 7393 &mboxq->u.mqe.un.set_feature); 7394 phba->sli4_hba.ue_to_rp = bf_get( 7395 lpfc_mbx_set_feature_UERP, 7396 &mboxq->u.mqe.un.set_feature); 7397 } 7398 } 7399 7400 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7401 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7402 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7403 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7404 if (rc != MBX_SUCCESS) 7405 phba->mds_diags_support = 0; 7406 } 7407 7408 /* 7409 * Discover the port's supported feature set and match it against the 7410 * hosts requests. 7411 */ 7412 lpfc_request_features(phba, mboxq); 7413 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7414 if (unlikely(rc)) { 7415 rc = -EIO; 7416 goto out_free_mbox; 7417 } 7418 7419 /* 7420 * The port must support FCP initiator mode as this is the 7421 * only mode running in the host. 
7422 */ 7423 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7424 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7425 "0378 No support for fcpi mode.\n"); 7426 ftr_rsp++; 7427 } 7428 7429 /* Performance Hints are ONLY for FCoE */ 7430 if (phba->hba_flag & HBA_FCOE_MODE) { 7431 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7432 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7433 else 7434 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7435 } 7436 7437 /* 7438 * If the port cannot support the host's requested features 7439 * then turn off the global config parameters to disable the 7440 * feature in the driver. This is not a fatal error. 7441 */ 7442 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7443 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7444 phba->cfg_enable_bg = 0; 7445 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7446 ftr_rsp++; 7447 } 7448 } 7449 7450 if (phba->max_vpi && phba->cfg_enable_npiv && 7451 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7452 ftr_rsp++; 7453 7454 if (ftr_rsp) { 7455 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7456 "0379 Feature Mismatch Data: x%08x %08x " 7457 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7458 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7459 phba->cfg_enable_npiv, phba->max_vpi); 7460 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7461 phba->cfg_enable_bg = 0; 7462 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7463 phba->cfg_enable_npiv = 0; 7464 } 7465 7466 /* These SLI3 features are assumed in SLI4 */ 7467 spin_lock_irq(&phba->hbalock); 7468 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7469 spin_unlock_irq(&phba->hbalock); 7470 7471 /* Always try to enable dual dump feature if we can */ 7472 lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); 7473 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7474 dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); 7475 if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) 7476 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT, 7477 "6448 Dual Dump is enabled\n"); 7478 else 7479 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, 7480 "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " 7481 "rc:x%x dd:x%x\n", 7482 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7483 lpfc_sli_config_mbox_subsys_get( 7484 phba, mboxq), 7485 lpfc_sli_config_mbox_opcode_get( 7486 phba, mboxq), 7487 rc, dd); 7488 /* 7489 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7490 * calls depends on these resources to complete port setup. 7491 */ 7492 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7493 if (rc) { 7494 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7495 "2920 Failed to alloc Resource IDs " 7496 "rc = x%x\n", rc); 7497 goto out_free_mbox; 7498 } 7499 7500 lpfc_set_host_data(phba, mboxq); 7501 7502 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7503 if (rc) { 7504 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7505 "2134 Failed to set host os driver version %x", 7506 rc); 7507 } 7508 7509 /* Read the port's service parameters. 
*/ 7510 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7511 if (rc) { 7512 phba->link_state = LPFC_HBA_ERROR; 7513 rc = -ENOMEM; 7514 goto out_free_mbox; 7515 } 7516 7517 mboxq->vport = vport; 7518 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7519 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 7520 if (rc == MBX_SUCCESS) { 7521 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7522 rc = 0; 7523 } 7524 7525 /* 7526 * This memory was allocated by the lpfc_read_sparam routine. Release 7527 * it to the mbuf pool. 7528 */ 7529 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7530 kfree(mp); 7531 mboxq->ctx_buf = NULL; 7532 if (unlikely(rc)) { 7533 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7534 "0382 READ_SPARAM command failed " 7535 "status %d, mbxStatus x%x\n", 7536 rc, bf_get(lpfc_mqe_status, mqe)); 7537 phba->link_state = LPFC_HBA_ERROR; 7538 rc = -EIO; 7539 goto out_free_mbox; 7540 } 7541 7542 lpfc_update_vport_wwn(vport); 7543 7544 /* Update the fc_host data structures with new wwn. */ 7545 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7546 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7547 7548 /* Create all the SLI4 queues */ 7549 rc = lpfc_sli4_queue_create(phba); 7550 if (rc) { 7551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7552 "3089 Failed to allocate queues\n"); 7553 rc = -ENODEV; 7554 goto out_free_mbox; 7555 } 7556 /* Set up all the queues to the device */ 7557 rc = lpfc_sli4_queue_setup(phba); 7558 if (unlikely(rc)) { 7559 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7560 "0381 Error %d during queue setup.\n ", rc); 7561 goto out_stop_timers; 7562 } 7563 /* Initialize the driver internal SLI layer lists. */ 7564 lpfc_sli4_setup(phba); 7565 lpfc_sli4_queue_init(phba); 7566 7567 /* update host els xri-sgl sizes and mappings */ 7568 rc = lpfc_sli4_els_sgl_update(phba); 7569 if (unlikely(rc)) { 7570 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7571 "1400 Failed to update xri-sgl size and " 7572 "mapping: %d\n", rc); 7573 goto out_destroy_queue; 7574 } 7575 7576 /* register the els sgl pool to the port */ 7577 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7578 phba->sli4_hba.els_xri_cnt); 7579 if (unlikely(rc < 0)) { 7580 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7581 "0582 Error %d during els sgl post " 7582 "operation\n", rc); 7583 rc = -ENODEV; 7584 goto out_destroy_queue; 7585 } 7586 phba->sli4_hba.els_xri_cnt = rc; 7587 7588 if (phba->nvmet_support) { 7589 /* update host nvmet xri-sgl sizes and mappings */ 7590 rc = lpfc_sli4_nvmet_sgl_update(phba); 7591 if (unlikely(rc)) { 7592 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7593 "6308 Failed to update nvmet-sgl size " 7594 "and mapping: %d\n", rc); 7595 goto out_destroy_queue; 7596 } 7597 7598 /* register the nvmet sgl pool to the port */ 7599 rc = lpfc_sli4_repost_sgl_list( 7600 phba, 7601 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7602 phba->sli4_hba.nvmet_xri_cnt); 7603 if (unlikely(rc < 0)) { 7604 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7605 "3117 Error %d during nvmet " 7606 "sgl post\n", rc); 7607 rc = -ENODEV; 7608 goto out_destroy_queue; 7609 } 7610 phba->sli4_hba.nvmet_xri_cnt = rc; 7611 7612 /* We allocate an iocbq for every receive context SGL. 7613 * The additional allocation is for abort and ls handling. 
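 * For example, the count computed below is nvmet_xri_cnt receive
 * contexts plus max_cfg_param.max_xri extra iocbqs.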
7614 */ 7615 cnt = phba->sli4_hba.nvmet_xri_cnt + 7616 phba->sli4_hba.max_cfg_param.max_xri; 7617 } else { 7618 /* update host common xri-sgl sizes and mappings */ 7619 rc = lpfc_sli4_io_sgl_update(phba); 7620 if (unlikely(rc)) { 7621 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7622 "6082 Failed to update nvme-sgl size " 7623 "and mapping: %d\n", rc); 7624 goto out_destroy_queue; 7625 } 7626 7627 /* register the allocated common sgl pool to the port */ 7628 rc = lpfc_sli4_repost_io_sgl_list(phba); 7629 if (unlikely(rc)) { 7630 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7631 "6116 Error %d during nvme sgl post " 7632 "operation\n", rc); 7633 /* Some NVME buffers were moved to abort nvme list */ 7634 /* A pci function reset will repost them */ 7635 rc = -ENODEV; 7636 goto out_destroy_queue; 7637 } 7638 /* Each lpfc_io_buf job structure has an iocbq element. 7639 * This cnt provides for abort, els, ct and ls requests. 7640 */ 7641 cnt = phba->sli4_hba.max_cfg_param.max_xri; 7642 } 7643 7644 if (!phba->sli.iocbq_lookup) { 7645 /* Initialize and populate the iocb list per host */ 7646 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7647 "2821 initialize iocb list with %d entries\n", 7648 cnt); 7649 rc = lpfc_init_iocb_list(phba, cnt); 7650 if (rc) { 7651 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7652 "1413 Failed to init iocb list.\n"); 7653 goto out_destroy_queue; 7654 } 7655 } 7656 7657 if (phba->nvmet_support) 7658 lpfc_nvmet_create_targetport(phba); 7659 7660 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 7661 /* Post initial buffers to all RQs created */ 7662 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7663 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7664 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7665 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7666 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7667 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7668 rqbp->buffer_count = 0; 7669 7670 lpfc_post_rq_buffer( 7671 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7672 phba->sli4_hba.nvmet_mrq_data[i], 7673 phba->cfg_nvmet_mrq_post, i); 7674 } 7675 } 7676 7677 /* Post the rpi header region to the device. 
*/ 7678 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7679 if (unlikely(rc)) { 7680 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7681 "0393 Error %d during rpi post operation\n", 7682 rc); 7683 rc = -ENODEV; 7684 goto out_destroy_queue; 7685 } 7686 lpfc_sli4_node_prep(phba); 7687 7688 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7689 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7690 /* 7691 * The FC Port needs to register FCFI (index 0) 7692 */ 7693 lpfc_reg_fcfi(phba, mboxq); 7694 mboxq->vport = phba->pport; 7695 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7696 if (rc != MBX_SUCCESS) 7697 goto out_unset_queue; 7698 rc = 0; 7699 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7700 &mboxq->u.mqe.un.reg_fcfi); 7701 } else { 7702 /* We are a NVME Target mode with MRQ > 1 */ 7703 7704 /* First register the FCFI */ 7705 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7706 mboxq->vport = phba->pport; 7707 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7708 if (rc != MBX_SUCCESS) 7709 goto out_unset_queue; 7710 rc = 0; 7711 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7712 &mboxq->u.mqe.un.reg_fcfi_mrq); 7713 7714 /* Next register the MRQs */ 7715 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7716 mboxq->vport = phba->pport; 7717 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7718 if (rc != MBX_SUCCESS) 7719 goto out_unset_queue; 7720 rc = 0; 7721 } 7722 /* Check if the port is configured to be disabled */ 7723 lpfc_sli_read_link_ste(phba); 7724 } 7725 7726 /* Don't post more new bufs if repost already recovered 7727 * the nvme sgls. 7728 */ 7729 if (phba->nvmet_support == 0) { 7730 if (phba->sli4_hba.io_xri_cnt == 0) { 7731 len = lpfc_new_io_buf( 7732 phba, phba->sli4_hba.io_xri_max); 7733 if (len == 0) { 7734 rc = -ENOMEM; 7735 goto out_unset_queue; 7736 } 7737 7738 if (phba->cfg_xri_rebalancing) 7739 lpfc_create_multixri_pools(phba); 7740 } 7741 } else { 7742 phba->cfg_xri_rebalancing = 0; 7743 } 7744 7745 /* Allow asynchronous mailbox command to go through */ 7746 spin_lock_irq(&phba->hbalock); 7747 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7748 spin_unlock_irq(&phba->hbalock); 7749 7750 /* Post receive buffers to the device */ 7751 lpfc_sli4_rb_setup(phba); 7752 7753 /* Reset HBA FCF states after HBA reset */ 7754 phba->fcf.fcf_flag = 0; 7755 phba->fcf.current_rec.flag = 0; 7756 7757 /* Start the ELS watchdog timer */ 7758 mod_timer(&vport->els_tmofunc, 7759 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7760 7761 /* Start heart beat timer */ 7762 mod_timer(&phba->hb_tmofunc, 7763 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7764 phba->hb_outstanding = 0; 7765 phba->last_completion_time = jiffies; 7766 7767 /* start eq_delay heartbeat */ 7768 if (phba->cfg_auto_imax) 7769 queue_delayed_work(phba->wq, &phba->eq_delay_work, 7770 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 7771 7772 /* Start error attention (ERATT) polling timer */ 7773 mod_timer(&phba->eratt_poll, 7774 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7775 7776 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7777 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7778 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7779 if (!rc) { 7780 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7781 "2829 This device supports " 7782 "Advanced Error Reporting (AER)\n"); 7783 spin_lock_irq(&phba->hbalock); 7784 phba->hba_flag |= HBA_AER_ENABLED; 7785 spin_unlock_irq(&phba->hbalock); 7786 } else { 7787 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7788 "2830 This 
device does not support " 7789 "Advanced Error Reporting (AER)\n"); 7790 phba->cfg_aer_support = 0; 7791 } 7792 rc = 0; 7793 } 7794 7795 /* 7796 * The port is ready, set the host's link state to LINK_DOWN 7797 * in preparation for link interrupts. 7798 */ 7799 spin_lock_irq(&phba->hbalock); 7800 phba->link_state = LPFC_LINK_DOWN; 7801 7802 /* Check if physical ports are trunked */ 7803 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 7804 phba->trunk_link.link0.state = LPFC_LINK_DOWN; 7805 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 7806 phba->trunk_link.link1.state = LPFC_LINK_DOWN; 7807 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 7808 phba->trunk_link.link2.state = LPFC_LINK_DOWN; 7809 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 7810 phba->trunk_link.link3.state = LPFC_LINK_DOWN; 7811 spin_unlock_irq(&phba->hbalock); 7812 7813 /* Arm the CQs and then EQs on device */ 7814 lpfc_sli4_arm_cqeq_intr(phba); 7815 7816 /* Indicate device interrupt mode */ 7817 phba->sli4_hba.intr_enable = 1; 7818 7819 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7820 (phba->hba_flag & LINK_DISABLED)) { 7821 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7822 "3103 Adapter Link is disabled.\n"); 7823 lpfc_down_link(phba, mboxq); 7824 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7825 if (rc != MBX_SUCCESS) { 7826 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7827 "3104 Adapter failed to issue " 7828 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7829 goto out_io_buff_free; 7830 } 7831 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7832 /* don't perform init_link on SLI4 FC port loopback test */ 7833 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7834 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7835 if (rc) 7836 goto out_io_buff_free; 7837 } 7838 } 7839 mempool_free(mboxq, phba->mbox_mem_pool); 7840 return rc; 7841 out_io_buff_free: 7842 /* Free allocated IO Buffers */ 7843 lpfc_io_free(phba); 7844 out_unset_queue: 7845 /* Unset all the queues set up in this routine when erroring out */ 7846 lpfc_sli4_queue_unset(phba); 7847 out_destroy_queue: 7848 lpfc_free_iocb_list(phba); 7849 lpfc_sli4_queue_destroy(phba); 7850 out_stop_timers: 7851 lpfc_stop_hba_timers(phba); 7852 out_free_mbox: 7853 mempool_free(mboxq, phba->mbox_mem_pool); 7854 return rc; 7855 } 7856 7857 /** 7858 * lpfc_mbox_timeout - Timeout callback function for mbox timer 7859 * @t: pointer to the timer_list embedded in the hba structure. 7860 * 7861 * This is the callback function for the mailbox timer. The mailbox 7862 * timer is armed when a new mailbox command is issued and the timer 7863 * is deleted when the mailbox completes. The function is called by 7864 * the kernel timer code when a mailbox does not complete within the 7865 * expected time. This function wakes up the worker thread to 7866 * process the mailbox timeout and returns. All the processing is 7867 * done by the worker thread function lpfc_mbox_timeout_handler.
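 *
 * For reference, the timer is armed when a command is issued, in the form
 * used later in this file (illustrative):
 *
 *   mod_timer(&psli->mbox_tmo, jiffies +
 *             msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, pmbox)));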
7868 **/ 7869 void 7870 lpfc_mbox_timeout(struct timer_list *t) 7871 { 7872 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7873 unsigned long iflag; 7874 uint32_t tmo_posted; 7875 7876 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7877 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7878 if (!tmo_posted) 7879 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7880 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7881 7882 if (!tmo_posted) 7883 lpfc_worker_wake_up(phba); 7884 return; 7885 } 7886 7887 /** 7888 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7889 * are pending 7890 * @phba: Pointer to HBA context object. 7891 * 7892 * This function checks if any mailbox completions are present on the mailbox 7893 * completion queue. 7894 **/ 7895 static bool 7896 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7897 { 7898 7899 uint32_t idx; 7900 struct lpfc_queue *mcq; 7901 struct lpfc_mcqe *mcqe; 7902 bool pending_completions = false; 7903 uint8_t qe_valid; 7904 7905 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7906 return false; 7907 7908 /* Check for completions on mailbox completion queue */ 7909 7910 mcq = phba->sli4_hba.mbx_cq; 7911 idx = mcq->hba_index; 7912 qe_valid = mcq->qe_valid; 7913 while (bf_get_le32(lpfc_cqe_valid, 7914 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { 7915 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); 7916 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7917 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7918 pending_completions = true; 7919 break; 7920 } 7921 idx = (idx + 1) % mcq->entry_count; 7922 if (mcq->hba_index == idx) 7923 break; 7924 7925 /* if the index wrapped around, toggle the valid bit */ 7926 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7927 qe_valid = (qe_valid) ? 0 : 1; 7928 } 7929 return pending_completions; 7930 7931 } 7932 7933 /** 7934 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7935 * that were missed. 7936 * @phba: Pointer to HBA context object. 7937 * 7938 * For sli4, it is possible to miss an interrupt. As such, mbox completions 7939 * may be missed, causing erroneous mailbox timeouts to occur. This function 7940 * checks to see if mbox completions are on the mailbox completion queue 7941 * and will process all the completions associated with the eq for the 7942 * mailbox completion queue.
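 *
 * Typical use, as in lpfc_mbox_timeout_handler() below, where a pending
 * completion means the command did not really time out:
 *
 *   if (lpfc_sli4_process_missed_mbox_completions(phba))
 *           return;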
7943 **/ 7944 static bool 7945 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7946 { 7947 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7948 uint32_t eqidx; 7949 struct lpfc_queue *fpeq = NULL; 7950 struct lpfc_queue *eq; 7951 bool mbox_pending; 7952 7953 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7954 return false; 7955 7956 /* Find the EQ associated with the mbox CQ */ 7957 if (sli4_hba->hdwq) { 7958 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { 7959 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; 7960 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { 7961 fpeq = eq; 7962 break; 7963 } 7964 } 7965 } 7966 if (!fpeq) 7967 return false; 7968 7969 /* Turn off interrupts from this EQ */ 7970 7971 sli4_hba->sli4_eq_clr_intr(fpeq); 7972 7973 /* Check to see if a mbox completion is pending */ 7974 7975 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7976 7977 /* 7978 * If a mbox completion is pending, process all the events on EQ 7979 * associated with the mbox completion queue (this could include 7980 * mailbox commands, async events, els commands, receive queue data 7981 * and fcp commands) 7982 */ 7983 7984 if (mbox_pending) 7985 /* process and rearm the EQ */ 7986 lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); 7987 else 7988 /* Always clear and re-arm the EQ */ 7989 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 7990 7991 return mbox_pending; 7992 7993 } 7994 7995 /** 7996 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7997 * @phba: Pointer to HBA context object. 7998 * 7999 * This function is called from worker thread when a mailbox command times out. 8000 * The caller is not required to hold any locks. This function will reset the 8001 * HBA and recover all the pending commands. 8002 **/ 8003 void 8004 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 8005 { 8006 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 8007 MAILBOX_t *mb = NULL; 8008 8009 struct lpfc_sli *psli = &phba->sli; 8010 8011 /* If the mailbox completed, process the completion and return */ 8012 if (lpfc_sli4_process_missed_mbox_completions(phba)) 8013 return; 8014 8015 if (pmbox != NULL) 8016 mb = &pmbox->u.mb; 8017 /* Check the pmbox pointer first. There is a race condition 8018 * between the mbox timeout handler getting executed in the 8019 * worklist and the mailbox actually completing. When this 8020 * race condition occurs, the mbox_active will be NULL. 8021 */ 8022 spin_lock_irq(&phba->hbalock); 8023 if (pmbox == NULL) { 8024 lpfc_printf_log(phba, KERN_WARNING, 8025 LOG_MBOX | LOG_SLI, 8026 "0353 Active Mailbox cleared - mailbox timeout " 8027 "exiting\n"); 8028 spin_unlock_irq(&phba->hbalock); 8029 return; 8030 } 8031 8032 /* Mbox cmd <mbxCommand> timeout */ 8033 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8034 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", 8035 mb->mbxCommand, 8036 phba->pport->port_state, 8037 phba->sli.sli_flag, 8038 phba->sli.mbox_active); 8039 spin_unlock_irq(&phba->hbalock); 8040 8041 /* Setting state unknown so lpfc_sli_abort_iocb_ring 8042 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 8043 * it to fail all outstanding SCSI IO. 
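 * The WORKER_MBOX_TMO event is cleared first so this timeout is not
 * processed a second time.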
8044 */ 8045 spin_lock_irq(&phba->pport->work_port_lock); 8046 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 8047 spin_unlock_irq(&phba->pport->work_port_lock); 8048 spin_lock_irq(&phba->hbalock); 8049 phba->link_state = LPFC_LINK_UNKNOWN; 8050 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 8051 spin_unlock_irq(&phba->hbalock); 8052 8053 lpfc_sli_abort_fcp_rings(phba); 8054 8055 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8056 "0345 Resetting board due to mailbox timeout\n"); 8057 8058 /* Reset the HBA device */ 8059 lpfc_reset_hba(phba); 8060 } 8061 8062 /** 8063 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 8064 * @phba: Pointer to HBA context object. 8065 * @pmbox: Pointer to mailbox object. 8066 * @flag: Flag indicating how the mailbox needs to be processed. 8067 * 8068 * This function is called by discovery code and HBA management code 8069 * to submit a mailbox command to firmware with SLI-3 interface spec. This 8070 * function gets the hbalock to protect the data structures. 8071 * The mailbox command can be submitted in polling mode, in which case 8072 * this function will wait in a polling loop for the completion of the 8073 * mailbox. 8074 * If the mailbox is submitted in no_wait mode (not polling) the 8075 * function will submit the command and return immediately without waiting 8076 * for the mailbox completion. The no_wait mode is supported only when the 8077 * HBA is in SLI2/SLI3 mode, with interrupts enabled. 8078 * The SLI interface allows only one mailbox pending at a time. If the 8079 * mailbox is issued in polling mode and there is already a mailbox 8080 * pending, then the function will return an error. If the mailbox is issued 8081 * in NO_WAIT mode and there is a mailbox pending already, the function 8082 * will return MBX_BUSY after queuing the mailbox into the mailbox queue. 8083 * The sli layer owns the mailbox object until the completion of the mailbox 8084 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other 8085 * return codes the caller owns the mailbox command after the return of 8086 * the function. 8087 **/ 8088 static int 8089 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 8090 uint32_t flag) 8091 { 8092 MAILBOX_t *mbx; 8093 struct lpfc_sli *psli = &phba->sli; 8094 uint32_t status, evtctr; 8095 uint32_t ha_copy, hc_copy; 8096 int i; 8097 unsigned long timeout; 8098 unsigned long drvr_flag = 0; 8099 uint32_t word0, ldata; 8100 void __iomem *to_slim; 8101 int processing_queue = 0; 8102 8103 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8104 if (!pmbox) { 8105 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8106 /* processing mbox queue from intr_handler */ 8107 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8108 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8109 return MBX_SUCCESS; 8110 } 8111 processing_queue = 1; 8112 pmbox = lpfc_mbox_get(phba); 8113 if (!pmbox) { 8114 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8115 return MBX_SUCCESS; 8116 } 8117 } 8118 8119 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 8120 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 8121 if (!pmbox->vport) { 8122 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8123 lpfc_printf_log(phba, KERN_ERR, 8124 LOG_MBOX | LOG_VPORT, 8125 "1806 Mbox x%x failed. No vport\n", 8126 pmbox->u.mb.mbxCommand); 8127 dump_stack(); 8128 goto out_not_finished; 8129 } 8130 } 8131 8132 /* If the PCI channel is in offline state, do not post mbox.
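 * (pci_channel_offline() reports a channel undergoing error recovery,
 * e.g. EEH/AER handling, when MMIO to the device cannot be trusted.)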
*/ 8133 if (unlikely(pci_channel_offline(phba->pcidev))) { 8134 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8135 goto out_not_finished; 8136 } 8137 8138 /* If HBA has a deferred error attention, fail the iocb. */ 8139 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8140 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8141 goto out_not_finished; 8142 } 8143 8144 psli = &phba->sli; 8145 8146 mbx = &pmbox->u.mb; 8147 status = MBX_SUCCESS; 8148 8149 if (phba->link_state == LPFC_HBA_ERROR) { 8150 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8151 8152 /* Mbox command <mbxCommand> cannot issue */ 8153 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8154 "(%d):0311 Mailbox command x%x cannot " 8155 "issue Data: x%x x%x\n", 8156 pmbox->vport ? pmbox->vport->vpi : 0, 8157 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8158 goto out_not_finished; 8159 } 8160 8161 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 8162 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8163 !(hc_copy & HC_MBINT_ENA)) { 8164 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8165 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8166 "(%d):2528 Mailbox command x%x cannot " 8167 "issue Data: x%x x%x\n", 8168 pmbox->vport ? pmbox->vport->vpi : 0, 8169 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8170 goto out_not_finished; 8171 } 8172 } 8173 8174 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8175 /* Polling for a mbox command when another one is already active 8176 * is not allowed in SLI. Also, the driver must have established 8177 * SLI2 mode to queue and process multiple mbox commands. 8178 */ 8179 8180 if (flag & MBX_POLL) { 8181 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8182 8183 /* Mbox command <mbxCommand> cannot issue */ 8184 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8185 "(%d):2529 Mailbox command x%x " 8186 "cannot issue Data: x%x x%x\n", 8187 pmbox->vport ? pmbox->vport->vpi : 0, 8188 pmbox->u.mb.mbxCommand, 8189 psli->sli_flag, flag); 8190 goto out_not_finished; 8191 } 8192 8193 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8194 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8195 /* Mbox command <mbxCommand> cannot issue */ 8196 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8197 "(%d):2530 Mailbox command x%x " 8198 "cannot issue Data: x%x x%x\n", 8199 pmbox->vport ? pmbox->vport->vpi : 0, 8200 pmbox->u.mb.mbxCommand, 8201 psli->sli_flag, flag); 8202 goto out_not_finished; 8203 } 8204 8205 /* Another mailbox command is still being processed, queue this 8206 * command to be processed later. 8207 */ 8208 lpfc_mbox_put(phba, pmbox); 8209 8210 /* Mbox cmd issue - BUSY */ 8211 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8212 "(%d):0308 Mbox cmd issue - BUSY Data: " 8213 "x%x x%x x%x x%x\n", 8214 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8215 mbx->mbxCommand, 8216 phba->pport ? 
phba->pport->port_state : 0xff, 8217 psli->sli_flag, flag); 8218 8219 psli->slistat.mbox_busy++; 8220 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8221 8222 if (pmbox->vport) { 8223 lpfc_debugfs_disc_trc(pmbox->vport, 8224 LPFC_DISC_TRC_MBOX_VPORT, 8225 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8226 (uint32_t)mbx->mbxCommand, 8227 mbx->un.varWords[0], mbx->un.varWords[1]); 8228 } 8229 else { 8230 lpfc_debugfs_disc_trc(phba->pport, 8231 LPFC_DISC_TRC_MBOX, 8232 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8233 (uint32_t)mbx->mbxCommand, 8234 mbx->un.varWords[0], mbx->un.varWords[1]); 8235 } 8236 8237 return MBX_BUSY; 8238 } 8239 8240 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8241 8242 /* If we are not polling, we MUST be in SLI2 mode */ 8243 if (flag != MBX_POLL) { 8244 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8245 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8246 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8247 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8248 /* Mbox command <mbxCommand> cannot issue */ 8249 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8250 "(%d):2531 Mailbox command x%x " 8251 "cannot issue Data: x%x x%x\n", 8252 pmbox->vport ? pmbox->vport->vpi : 0, 8253 pmbox->u.mb.mbxCommand, 8254 psli->sli_flag, flag); 8255 goto out_not_finished; 8256 } 8257 /* timeout active mbox command */ 8258 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8259 1000); 8260 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8261 } 8262 8263 /* Mailbox cmd <cmd> issue */ 8264 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8265 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8266 "x%x\n", 8267 pmbox->vport ? pmbox->vport->vpi : 0, 8268 mbx->mbxCommand, 8269 phba->pport ? phba->pport->port_state : 0xff, 8270 psli->sli_flag, flag); 8271 8272 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8273 if (pmbox->vport) { 8274 lpfc_debugfs_disc_trc(pmbox->vport, 8275 LPFC_DISC_TRC_MBOX_VPORT, 8276 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8277 (uint32_t)mbx->mbxCommand, 8278 mbx->un.varWords[0], mbx->un.varWords[1]); 8279 } 8280 else { 8281 lpfc_debugfs_disc_trc(phba->pport, 8282 LPFC_DISC_TRC_MBOX, 8283 "MBOX Send: cmd:x%x mb:x%x x%x", 8284 (uint32_t)mbx->mbxCommand, 8285 mbx->un.varWords[0], mbx->un.varWords[1]); 8286 } 8287 } 8288 8289 psli->slistat.mbox_cmd++; 8290 evtctr = psli->slistat.mbox_event; 8291 8292 /* next set own bit for the adapter and copy over command word */ 8293 mbx->mbxOwner = OWN_CHIP; 8294 8295 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8296 /* Populate mbox extension offset word. */ 8297 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8298 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8299 = (uint8_t *)phba->mbox_ext 8300 - (uint8_t *)phba->mbox; 8301 } 8302 8303 /* Copy the mailbox extension data */ 8304 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { 8305 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, 8306 (uint8_t *)phba->mbox_ext, 8307 pmbox->in_ext_byte_len); 8308 } 8309 /* Copy command data to host SLIM area */ 8310 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8311 } else { 8312 /* Populate mbox extension offset word. 
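 * Here the extension area sits at the fixed MAILBOX_HBA_EXT_OFFSET in
 * SLIM, unlike the host-memory case above where the offset of mbox_ext
 * from mbox is computed.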
*/ 8313 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8314 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8315 = MAILBOX_HBA_EXT_OFFSET; 8316 8317 /* Copy the mailbox extension data */ 8318 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) 8319 lpfc_memcpy_to_slim(phba->MBslimaddr + 8320 MAILBOX_HBA_EXT_OFFSET, 8321 pmbox->ctx_buf, pmbox->in_ext_byte_len); 8322 8323 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8324 /* copy command data into host mbox for cmpl */ 8325 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8326 MAILBOX_CMD_SIZE); 8327 8328 /* First copy mbox command data to HBA SLIM, skip past first 8329 word */ 8330 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8331 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8332 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8333 8334 /* Next copy over first word, with mbxOwner set */ 8335 ldata = *((uint32_t *)mbx); 8336 to_slim = phba->MBslimaddr; 8337 writel(ldata, to_slim); 8338 readl(to_slim); /* flush */ 8339 8340 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8341 /* switch over to host mailbox */ 8342 psli->sli_flag |= LPFC_SLI_ACTIVE; 8343 } 8344 8345 wmb(); 8346 8347 switch (flag) { 8348 case MBX_NOWAIT: 8349 /* Set up reference to mailbox command */ 8350 psli->mbox_active = pmbox; 8351 /* Interrupt board to do it */ 8352 writel(CA_MBATT, phba->CAregaddr); 8353 readl(phba->CAregaddr); /* flush */ 8354 /* Don't wait for it to finish, just return */ 8355 break; 8356 8357 case MBX_POLL: 8358 /* Set up null reference to mailbox command */ 8359 psli->mbox_active = NULL; 8360 /* Interrupt board to do it */ 8361 writel(CA_MBATT, phba->CAregaddr); 8362 readl(phba->CAregaddr); /* flush */ 8363 8364 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8365 /* First read mbox status word */ 8366 word0 = *((uint32_t *)phba->mbox); 8367 word0 = le32_to_cpu(word0); 8368 } else { 8369 /* First read mbox status word */ 8370 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8371 spin_unlock_irqrestore(&phba->hbalock, 8372 drvr_flag); 8373 goto out_not_finished; 8374 } 8375 } 8376 8377 /* Read the HBA Host Attention Register */ 8378 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8379 spin_unlock_irqrestore(&phba->hbalock, 8380 drvr_flag); 8381 goto out_not_finished; 8382 } 8383 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8384 1000) + jiffies; 8385 i = 0; 8386 /* Wait for command to complete */ 8387 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8388 (!(ha_copy & HA_MBATT) && 8389 (phba->link_state > LPFC_WARM_START))) { 8390 if (time_after(jiffies, timeout)) { 8391 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8392 spin_unlock_irqrestore(&phba->hbalock, 8393 drvr_flag); 8394 goto out_not_finished; 8395 } 8396 8397 /* Check if we took a mbox interrupt while we were 8398 polling */ 8399 if (((word0 & OWN_CHIP) != OWN_CHIP) 8400 && (evtctr != psli->slistat.mbox_event)) 8401 break; 8402 8403 if (i++ > 10) { 8404 spin_unlock_irqrestore(&phba->hbalock, 8405 drvr_flag); 8406 msleep(1); 8407 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8408 } 8409 8410 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8411 /* First copy command data */ 8412 word0 = *((uint32_t *)phba->mbox); 8413 word0 = le32_to_cpu(word0); 8414 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8415 MAILBOX_t *slimmb; 8416 uint32_t slimword0; 8417 /* Check real SLIM for any errors */ 8418 slimword0 = readl(phba->MBslimaddr); 8419 slimmb = (MAILBOX_t *) & slimword0; 8420 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8421 && slimmb->mbxStatus) { 8422 psli->sli_flag &= 8423 ~LPFC_SLI_ACTIVE; 8424 word0 = slimword0; 8425 } 8426 } 8427 } else { 8428 
/* First copy command data */ 8429 word0 = readl(phba->MBslimaddr); 8430 } 8431 /* Read the HBA Host Attention Register */ 8432 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8433 spin_unlock_irqrestore(&phba->hbalock, 8434 drvr_flag); 8435 goto out_not_finished; 8436 } 8437 } 8438 8439 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8440 /* copy results back to user */ 8441 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8442 MAILBOX_CMD_SIZE); 8443 /* Copy the mailbox extension data */ 8444 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8445 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8446 pmbox->ctx_buf, 8447 pmbox->out_ext_byte_len); 8448 } 8449 } else { 8450 /* First copy command data */ 8451 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8452 MAILBOX_CMD_SIZE); 8453 /* Copy the mailbox extension data */ 8454 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8455 lpfc_memcpy_from_slim( 8456 pmbox->ctx_buf, 8457 phba->MBslimaddr + 8458 MAILBOX_HBA_EXT_OFFSET, 8459 pmbox->out_ext_byte_len); 8460 } 8461 } 8462 8463 writel(HA_MBATT, phba->HAregaddr); 8464 readl(phba->HAregaddr); /* flush */ 8465 8466 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8467 status = mbx->mbxStatus; 8468 } 8469 8470 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8471 return status; 8472 8473 out_not_finished: 8474 if (processing_queue) { 8475 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8476 lpfc_mbox_cmpl_put(phba, pmbox); 8477 } 8478 return MBX_NOT_FINISHED; 8479 } 8480 8481 /** 8482 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8483 * @phba: Pointer to HBA context object. 8484 * 8485 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8486 * the driver internal pending mailbox queue. It will then try to wait out the 8487 * possible outstanding mailbox command before returning. 8488 * 8489 * Returns: 8490 * 0 - the outstanding mailbox command completed; 1 - the wait for 8491 * the outstanding mailbox command timed out. 8492 **/ 8493 static int 8494 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8495 { 8496 struct lpfc_sli *psli = &phba->sli; 8497 int rc = 0; 8498 unsigned long timeout = 0; 8499 8500 /* Mark the asynchronous mailbox command posting as blocked */ 8501 spin_lock_irq(&phba->hbalock); 8502 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8503 /* Determine how long we might wait for the active mailbox 8504 * command to be gracefully completed by firmware. 8505 */ 8506 if (phba->sli.mbox_active) 8507 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 8508 phba->sli.mbox_active) * 8509 1000) + jiffies; 8510 spin_unlock_irq(&phba->hbalock); 8511 8512 /* Make sure the mailbox is really active */ 8513 if (timeout) 8514 lpfc_sli4_process_missed_mbox_completions(phba); 8515 8516 /* Wait for the outstanding mailbox command to complete */ 8517 while (phba->sli.mbox_active) { 8518 /* Check active mailbox complete status every 2ms */ 8519 msleep(2); 8520 if (time_after(jiffies, timeout)) { 8521 /* Timeout, mark the outstanding cmd not complete */ 8522 rc = 1; 8523 break; 8524 } 8525 } 8526 8527 /* Cannot cleanly block the async mailbox command, fail it */ 8528 if (rc) { 8529 spin_lock_irq(&phba->hbalock); 8530 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8531 spin_unlock_irq(&phba->hbalock); 8532 } 8533 return rc; 8534 } 8535 8536 /** 8537 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 8538 * @phba: Pointer to HBA context object.
8539 * 8540 * The function unblocks and resumes the posting of SLI4 asynchronous mailbox 8541 * commands from the driver internal pending mailbox queue. It makes sure 8542 * that there is no outstanding mailbox command before resuming posting 8543 * asynchronous mailbox commands. If, for any reason, there is an outstanding 8544 * mailbox command, it will try to wait it out before resuming asynchronous 8545 * mailbox command posting. 8546 **/ 8547 static void 8548 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 8549 { 8550 struct lpfc_sli *psli = &phba->sli; 8551 8552 spin_lock_irq(&phba->hbalock); 8553 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8554 /* Asynchronous mailbox posting is not blocked, do nothing */ 8555 spin_unlock_irq(&phba->hbalock); 8556 return; 8557 } 8558 8559 /* The outstanding synchronous mailbox command is guaranteed to be done, 8560 * whether it succeeded or timed out; after a timeout the outstanding 8561 * command is always removed, so just unblock posting of async mailbox 8562 * commands and resume 8563 */ 8564 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8565 spin_unlock_irq(&phba->hbalock); 8566 8567 /* wake up worker thread to post asynchronous mailbox command */ 8568 lpfc_worker_wake_up(phba); 8569 } 8570 8571 /** 8572 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 8573 * @phba: Pointer to HBA context object. 8574 * @mboxq: Pointer to mailbox object. 8575 * 8576 * The function waits for the bootstrap mailbox register ready bit from 8577 * the port for up to the regular mailbox command timeout value. 8578 * Returns: 8579 * 0 - no timeout on waiting for bootstrap mailbox register ready. 8580 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 8581 **/ 8582 static int 8583 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8584 { 8585 uint32_t db_ready; 8586 unsigned long timeout; 8587 struct lpfc_register bmbx_reg; 8588 8589 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 8590 * 1000) + jiffies; 8591 8592 do { 8593 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 8594 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 8595 if (!db_ready) 8596 mdelay(2); 8597 8598 if (time_after(jiffies, timeout)) 8599 return MBXERR_ERROR; 8600 } while (!db_ready); 8601 8602 return 0; 8603 } 8604 8605 /** 8606 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 8607 * @phba: Pointer to HBA context object. 8608 * @mboxq: Pointer to mailbox object. 8609 * 8610 * The function posts a mailbox to the port. The mailbox is expected 8611 * to be completely filled in and ready for the port to operate on it. 8612 * This routine executes a synchronous completion operation on the 8613 * mailbox by polling for its completion. 8614 * 8615 * The caller must not be holding any locks when calling this routine. 8616 * 8617 * Returns: 8618 * MBX_SUCCESS - mailbox posted successfully 8619 * Any of the MBX error values. 8620 **/ 8621 static int 8622 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8623 { 8624 int rc = MBX_SUCCESS; 8625 unsigned long iflag; 8626 uint32_t mcqe_status; 8627 uint32_t mbx_cmnd; 8628 struct lpfc_sli *psli = &phba->sli; 8629 struct lpfc_mqe *mb = &mboxq->u.mqe; 8630 struct lpfc_bmbx_create *mbox_rgn; 8631 struct dma_address *dma_address; 8632 8633 /* 8634 * Only one mailbox can be active to the bootstrap mailbox region 8635 * at a time and there is no queueing provided.
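 *
 * The post is a fixed sequence, carried out below: copy the MQE into the
 * bootstrap region, write the high then the low half of the region's DMA
 * address to BMBXregaddr (waiting for the ready bit between steps), then
 * read the MQE and MCQE back to harvest the completion status.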
8636 */ 8637 spin_lock_irqsave(&phba->hbalock, iflag); 8638 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8639 spin_unlock_irqrestore(&phba->hbalock, iflag); 8640 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8641 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8642 "cannot issue Data: x%x x%x\n", 8643 mboxq->vport ? mboxq->vport->vpi : 0, 8644 mboxq->u.mb.mbxCommand, 8645 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8646 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8647 psli->sli_flag, MBX_POLL); 8648 return MBXERR_ERROR; 8649 } 8650 /* The server grabs the token and owns it until release */ 8651 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8652 phba->sli.mbox_active = mboxq; 8653 spin_unlock_irqrestore(&phba->hbalock, iflag); 8654 8655 /* wait for bootstrap mbox register for readyness */ 8656 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8657 if (rc) 8658 goto exit; 8659 /* 8660 * Initialize the bootstrap memory region to avoid stale data areas 8661 * in the mailbox post. Then copy the caller's mailbox contents to 8662 * the bmbx mailbox region. 8663 */ 8664 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8665 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8666 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8667 sizeof(struct lpfc_mqe)); 8668 8669 /* Post the high mailbox dma address to the port and wait for ready. */ 8670 dma_address = &phba->sli4_hba.bmbx.dma_address; 8671 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 8672 8673 /* wait for bootstrap mbox register for hi-address write done */ 8674 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8675 if (rc) 8676 goto exit; 8677 8678 /* Post the low mailbox dma address to the port. */ 8679 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 8680 8681 /* wait for bootstrap mbox register for low address write done */ 8682 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8683 if (rc) 8684 goto exit; 8685 8686 /* 8687 * Read the CQ to ensure the mailbox has completed. 8688 * If so, update the mailbox status so that the upper layers 8689 * can complete the request normally. 8690 */ 8691 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8692 sizeof(struct lpfc_mqe)); 8693 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8694 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8695 sizeof(struct lpfc_mcqe)); 8696 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8697 /* 8698 * When the CQE status indicates a failure and the mailbox status 8699 * indicates success then copy the CQE status into the mailbox status 8700 * (and prefix it with x4000). 8701 */ 8702 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8703 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8704 bf_set(lpfc_mqe_status, mb, 8705 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8706 rc = MBXERR_ERROR; 8707 } else 8708 lpfc_sli4_swap_str(phba, mboxq); 8709 8710 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8711 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8712 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8713 " x%x x%x CQ: x%x x%x x%x x%x\n", 8714 mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, 8715 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8716 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8717 bf_get(lpfc_mqe_status, mb), 8718 mb->un.mb_words[0], mb->un.mb_words[1], 8719 mb->un.mb_words[2], mb->un.mb_words[3], 8720 mb->un.mb_words[4], mb->un.mb_words[5], 8721 mb->un.mb_words[6], mb->un.mb_words[7], 8722 mb->un.mb_words[8], mb->un.mb_words[9], 8723 mb->un.mb_words[10], mb->un.mb_words[11], 8724 mb->un.mb_words[12], mboxq->mcqe.word0, 8725 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8726 mboxq->mcqe.trailer); 8727 exit: 8728 /* We are holding the token; no lock is needed for the release */ 8729 spin_lock_irqsave(&phba->hbalock, iflag); 8730 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8731 phba->sli.mbox_active = NULL; 8732 spin_unlock_irqrestore(&phba->hbalock, iflag); 8733 return rc; 8734 } 8735 8736 /** 8737 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8738 * @phba: Pointer to HBA context object. 8739 * @pmbox: Pointer to mailbox object. 8740 * @flag: Flag indicating how the mailbox needs to be processed. 8741 * 8742 * This function is called by discovery code and HBA management code to submit 8743 * a mailbox command to firmware with SLI-4 interface spec. 8744 * 8745 * Return codes: the caller owns the mailbox command after the return of the 8746 * function. 8747 **/ 8748 static int 8749 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8750 uint32_t flag) 8751 { 8752 struct lpfc_sli *psli = &phba->sli; 8753 unsigned long iflags; 8754 int rc; 8755 8756 /* dump from issue mailbox command if setup */ 8757 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8758 8759 rc = lpfc_mbox_dev_check(phba); 8760 if (unlikely(rc)) { 8761 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8762 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8763 "cannot issue Data: x%x x%x\n", 8764 mboxq->vport ? mboxq->vport->vpi : 0, 8765 mboxq->u.mb.mbxCommand, 8766 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8767 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8768 psli->sli_flag, flag); 8769 goto out_not_finished; 8770 } 8771 8772 /* Detect polling mode and jump to a handler */ 8773 if (!phba->sli4_hba.intr_enable) { 8774 if (flag == MBX_POLL) 8775 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8776 else 8777 rc = -EIO; 8778 if (rc != MBX_SUCCESS) 8779 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8780 "(%d):2541 Mailbox command x%x " 8781 "(x%x/x%x) failure: " 8782 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8783 "Data: x%x x%x\n", 8784 mboxq->vport ? mboxq->vport->vpi : 0, 8785 mboxq->u.mb.mbxCommand, 8786 lpfc_sli_config_mbox_subsys_get(phba, 8787 mboxq), 8788 lpfc_sli_config_mbox_opcode_get(phba, 8789 mboxq), 8790 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8791 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8792 bf_get(lpfc_mcqe_ext_status, 8793 &mboxq->mcqe), 8794 psli->sli_flag, flag); 8795 return rc; 8796 } else if (flag == MBX_POLL) { 8797 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8798 "(%d):2542 Try to issue mailbox command " 8799 "x%x (x%x/x%x) synchronously ahead of async " 8800 "mailbox command queue: x%x x%x\n", 8801 mboxq->vport ?
mboxq->vport->vpi : 0, 8802 mboxq->u.mb.mbxCommand, 8803 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8804 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8805 psli->sli_flag, flag); 8806 /* Try to block the asynchronous mailbox posting */ 8807 rc = lpfc_sli4_async_mbox_block(phba); 8808 if (!rc) { 8809 /* Successfully blocked, now issue sync mbox cmd */ 8810 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8811 if (rc != MBX_SUCCESS) 8812 lpfc_printf_log(phba, KERN_WARNING, 8813 LOG_MBOX | LOG_SLI, 8814 "(%d):2597 Sync Mailbox command " 8815 "x%x (x%x/x%x) failure: " 8816 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8817 "Data: x%x x%x\n,", 8818 mboxq->vport ? mboxq->vport->vpi : 0, 8819 mboxq->u.mb.mbxCommand, 8820 lpfc_sli_config_mbox_subsys_get(phba, 8821 mboxq), 8822 lpfc_sli_config_mbox_opcode_get(phba, 8823 mboxq), 8824 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8825 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8826 bf_get(lpfc_mcqe_ext_status, 8827 &mboxq->mcqe), 8828 psli->sli_flag, flag); 8829 /* Unblock the async mailbox posting afterward */ 8830 lpfc_sli4_async_mbox_unblock(phba); 8831 } 8832 return rc; 8833 } 8834 8835 /* Now, interrupt mode asynchronous mailbox command */ 8836 rc = lpfc_mbox_cmd_check(phba, mboxq); 8837 if (rc) { 8838 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8839 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8840 "cannot issue Data: x%x x%x\n", 8841 mboxq->vport ? mboxq->vport->vpi : 0, 8842 mboxq->u.mb.mbxCommand, 8843 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8844 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8845 psli->sli_flag, flag); 8846 goto out_not_finished; 8847 } 8848 8849 /* Put the mailbox command to the driver internal FIFO */ 8850 psli->slistat.mbox_busy++; 8851 spin_lock_irqsave(&phba->hbalock, iflags); 8852 lpfc_mbox_put(phba, mboxq); 8853 spin_unlock_irqrestore(&phba->hbalock, iflags); 8854 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8855 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8856 "x%x (x%x/x%x) x%x x%x x%x\n", 8857 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8858 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8859 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8860 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8861 phba->pport->port_state, 8862 psli->sli_flag, MBX_NOWAIT); 8863 /* Wake up worker thread to transport mailbox command from head */ 8864 lpfc_worker_wake_up(phba); 8865 8866 return MBX_BUSY; 8867 8868 out_not_finished: 8869 return MBX_NOT_FINISHED; 8870 } 8871 8872 /** 8873 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8874 * @phba: Pointer to HBA context object. 8875 * 8876 * This function is called by worker thread to send a mailbox command to 8877 * SLI4 HBA firmware. 
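 *
 * Returns MBX_SUCCESS when a command is posted to the port (or none is
 * waiting), and MBX_NOT_FINISHED when posting is blocked or fails.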
8878 * 8879 **/ 8880 int 8881 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8882 { 8883 struct lpfc_sli *psli = &phba->sli; 8884 LPFC_MBOXQ_t *mboxq; 8885 int rc = MBX_SUCCESS; 8886 unsigned long iflags; 8887 struct lpfc_mqe *mqe; 8888 uint32_t mbx_cmnd; 8889 8890 /* Check interrupt mode before post async mailbox command */ 8891 if (unlikely(!phba->sli4_hba.intr_enable)) 8892 return MBX_NOT_FINISHED; 8893 8894 /* Check for mailbox command service token */ 8895 spin_lock_irqsave(&phba->hbalock, iflags); 8896 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8897 spin_unlock_irqrestore(&phba->hbalock, iflags); 8898 return MBX_NOT_FINISHED; 8899 } 8900 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8901 spin_unlock_irqrestore(&phba->hbalock, iflags); 8902 return MBX_NOT_FINISHED; 8903 } 8904 if (unlikely(phba->sli.mbox_active)) { 8905 spin_unlock_irqrestore(&phba->hbalock, iflags); 8906 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8907 "0384 There is pending active mailbox cmd\n"); 8908 return MBX_NOT_FINISHED; 8909 } 8910 /* Take the mailbox command service token */ 8911 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8912 8913 /* Get the next mailbox command from head of queue */ 8914 mboxq = lpfc_mbox_get(phba); 8915 8916 /* If no more mailbox command waiting for post, we're done */ 8917 if (!mboxq) { 8918 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8919 spin_unlock_irqrestore(&phba->hbalock, iflags); 8920 return MBX_SUCCESS; 8921 } 8922 phba->sli.mbox_active = mboxq; 8923 spin_unlock_irqrestore(&phba->hbalock, iflags); 8924 8925 /* Check device readiness for posting mailbox command */ 8926 rc = lpfc_mbox_dev_check(phba); 8927 if (unlikely(rc)) 8928 /* Driver clean routine will clean up pending mailbox */ 8929 goto out_not_finished; 8930 8931 /* Prepare the mbox command to be posted */ 8932 mqe = &mboxq->u.mqe; 8933 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8934 8935 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8936 mod_timer(&psli->mbox_tmo, (jiffies + 8937 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8938 8939 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8940 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8941 "x%x x%x\n", 8942 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8943 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8944 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8945 phba->pport->port_state, psli->sli_flag); 8946 8947 if (mbx_cmnd != MBX_HEARTBEAT) { 8948 if (mboxq->vport) { 8949 lpfc_debugfs_disc_trc(mboxq->vport, 8950 LPFC_DISC_TRC_MBOX_VPORT, 8951 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8952 mbx_cmnd, mqe->un.mb_words[0], 8953 mqe->un.mb_words[1]); 8954 } else { 8955 lpfc_debugfs_disc_trc(phba->pport, 8956 LPFC_DISC_TRC_MBOX, 8957 "MBOX Send: cmd:x%x mb:x%x x%x", 8958 mbx_cmnd, mqe->un.mb_words[0], 8959 mqe->un.mb_words[1]); 8960 } 8961 } 8962 psli->slistat.mbox_cmd++; 8963 8964 /* Post the mailbox command to the port */ 8965 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8966 if (rc != MBX_SUCCESS) { 8967 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8968 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8969 "cannot issue Data: x%x x%x\n", 8970 mboxq->vport ? 
mboxq->vport->vpi : 0,
8971 mboxq->u.mb.mbxCommand,
8972 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8973 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8974 psli->sli_flag, MBX_NOWAIT);
8975 goto out_not_finished;
8976 }
8977
8978 return rc;
8979
8980 out_not_finished:
8981 spin_lock_irqsave(&phba->hbalock, iflags);
8982 if (phba->sli.mbox_active) {
8983 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8984 __lpfc_mbox_cmpl_put(phba, mboxq);
8985 /* Release the token */
8986 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8987 phba->sli.mbox_active = NULL;
8988 }
8989 spin_unlock_irqrestore(&phba->hbalock, iflags);
8990
8991 return MBX_NOT_FINISHED;
8992 }
8993
8994 /**
8995 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8996 * @phba: Pointer to HBA context object.
8997 * @pmbox: Pointer to mailbox object.
8998 * @flag: Flag indicating how the mailbox needs to be processed.
8999 *
9000 * This routine invokes the SLI-3 or SLI-4 mailbox issuing routine through
9001 * the API jump table function pointer in the lpfc_hba struct.
9002 *
9003 * Return codes: the caller owns the mailbox command after this function
9004 * returns.
9005 **/
9006 int
9007 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
9008 {
9009 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
9010 }
9011
9012 /**
9013 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
9014 * @phba: The hba struct for which this call is being executed.
9015 * @dev_grp: The HBA PCI-Device group number.
9016 *
9017 * This routine sets up the mbox interface API function jump table in @phba
9018 * struct.
9019 * Returns: 0 - success, -ENODEV - failure.
9020 **/
9021 int
9022 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9023 {
9024
9025 switch (dev_grp) {
9026 case LPFC_PCI_DEV_LP:
9027 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
9028 phba->lpfc_sli_handle_slow_ring_event =
9029 lpfc_sli_handle_slow_ring_event_s3;
9030 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
9031 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
9032 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
9033 break;
9034 case LPFC_PCI_DEV_OC:
9035 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
9036 phba->lpfc_sli_handle_slow_ring_event =
9037 lpfc_sli_handle_slow_ring_event_s4;
9038 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
9039 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
9040 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
9041 break;
9042 default:
9043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9044 "1420 Invalid HBA PCI-device group: 0x%x\n",
9045 dev_grp);
9046 return -ENODEV;
9048 }
9049 return 0;
9050 }
9051
9052 /**
9053 * __lpfc_sli_ringtx_put - Add an iocb to the txq
9054 * @phba: Pointer to HBA context object.
9055 * @pring: Pointer to driver SLI ring object.
9056 * @piocb: Pointer to the command iocb to be queued.
9057 *
9058 * This function is called with hbalock held for SLI3 ports or
9059 * the ring lock held for SLI4 ports to add a command
9060 * iocb to the txq when the SLI layer cannot submit the command iocb
9061 * to the ring.
9062 **/
9063 void
9064 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9065 struct lpfc_iocbq *piocb)
9066 {
9067 if (phba->sli_rev == LPFC_SLI_REV4)
9068 lockdep_assert_held(&pring->ring_lock);
9069 else
9070 lockdep_assert_held(&phba->hbalock);
9071 /* Insert the caller's iocb in the txq tail for later processing.
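* The txq is drained by lpfc_sli_next_iocb() once the ring can accept
* new commands.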
*/
9072 list_add_tail(&piocb->list, &pring->txq);
9073 }
9074
9075 /**
9076 * lpfc_sli_next_iocb - Get the next iocb in the txq
9077 * @phba: Pointer to HBA context object.
9078 * @pring: Pointer to driver SLI ring object.
9079 * @piocb: Pointer to the address of the command iocb to submit.
9080 *
9081 * This function is called with hbalock held before a new
9082 * iocb is submitted to the firmware. It flushes any iocbs
9083 * pending in the txq to the firmware before submitting
9084 * new iocbs.
9085 * If there are iocbs in the txq which need to be submitted
9086 * to firmware, lpfc_sli_next_iocb returns the first element
9087 * of the txq after dequeuing it from txq.
9088 * If there is no iocb in the txq then the function will return
9089 * *piocb and *piocb is set to NULL. The caller needs to check
9090 * *piocb to find out if there are more commands in the txq.
9091 **/
9092 static struct lpfc_iocbq *
9093 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9094 struct lpfc_iocbq **piocb)
9095 {
9096 struct lpfc_iocbq *nextiocb;
9097
9098 lockdep_assert_held(&phba->hbalock);
9099
9100 nextiocb = lpfc_sli_ringtx_get(phba, pring);
9101 if (!nextiocb) {
9102 nextiocb = *piocb;
9103 *piocb = NULL;
9104 }
9105
9106 return nextiocb;
9107 }
9108
9109 /**
9110 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
9111 * @phba: Pointer to HBA context object.
9112 * @ring_number: SLI ring number to issue iocb on.
9113 * @piocb: Pointer to command iocb.
9114 * @flag: Flag indicating if this command can be put into txq.
9115 *
9116 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
9117 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
9118 * recovering from error state, if the HBA is resetting, or if the
9119 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
9120 * When the link is down, this function allows only iocbs for posting buffers.
9121 * This function finds the next available slot in the command ring, posts the
9122 * command to that slot, and writes the port attention register to request
9123 * that the HBA start processing new iocbs. If there is no slot available in
9124 * the ring and flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
9125 * txq, otherwise the function returns IOCB_BUSY.
9126 *
9127 * This function is called with hbalock held. The function will return success
9128 * after it successfully submits the iocb to the firmware or adds it to the
9129 * txq.
9130 **/
9131 static int
9132 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
9133 struct lpfc_iocbq *piocb, uint32_t flag)
9134 {
9135 struct lpfc_iocbq *nextiocb;
9136 IOCB_t *iocb;
9137 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
9138
9139 lockdep_assert_held(&phba->hbalock);
9140
9141 if (piocb->iocb_cmpl && (!piocb->vport) &&
9142 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
9143 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
9144 lpfc_printf_log(phba, KERN_ERR,
9145 LOG_SLI | LOG_VPORT,
9146 "1807 IOCB x%x failed. No vport\n",
9147 piocb->iocb.ulpCommand);
9148 dump_stack();
9149 return IOCB_ERROR;
9150 }
9151
9153 /* If the PCI channel is in offline state, do not post iocbs. */
9154 if (unlikely(pci_channel_offline(phba->pcidev)))
9155 return IOCB_ERROR;
9156
9157 /* If HBA has a deferred error attention, fail the iocb.
*/
9158 if (unlikely(phba->hba_flag & DEFER_ERATT))
9159 return IOCB_ERROR;
9160
9161 /*
9162 * We should never get an IOCB if we are in a < LINK_DOWN state
9163 */
9164 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
9165 return IOCB_ERROR;
9166
9167 /*
9168 * Check to see if we are blocking IOCB processing because of an
9169 * outstanding event.
9170 */
9171 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
9172 goto iocb_busy;
9173
9174 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
9175 /*
9176 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
9177 * can be issued if the link is not up.
9178 */
9179 switch (piocb->iocb.ulpCommand) {
9180 case CMD_GEN_REQUEST64_CR:
9181 case CMD_GEN_REQUEST64_CX:
9182 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
9183 (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
9184 FC_RCTL_DD_UNSOL_CMD) ||
9185 (piocb->iocb.un.genreq64.w5.hcsw.Type !=
9186 MENLO_TRANSPORT_TYPE))
9187
9188 goto iocb_busy;
9189 break;
9190 case CMD_QUE_RING_BUF_CN:
9191 case CMD_QUE_RING_BUF64_CN:
9192 /*
9193 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
9194 * completion, iocb_cmpl MUST be 0.
9195 */
9196 if (piocb->iocb_cmpl)
9197 piocb->iocb_cmpl = NULL;
9198 /*FALLTHROUGH*/
9199 case CMD_CREATE_XRI_CR:
9200 case CMD_CLOSE_XRI_CN:
9201 case CMD_CLOSE_XRI_CX:
9202 break;
9203 default:
9204 goto iocb_busy;
9205 }
9206
9207 /*
9208 * For FCP commands, we must be in a state where we can process link
9209 * attention events.
9210 */
9211 } else if (unlikely(pring->ringno == LPFC_FCP_RING &&
9212 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
9213 goto iocb_busy;
9214 }
9215
9216 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
9217 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
9218 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
9219
9220 if (iocb)
9221 lpfc_sli_update_ring(phba, pring);
9222 else
9223 lpfc_sli_update_full_ring(phba, pring);
9224
9225 if (!piocb)
9226 return IOCB_SUCCESS;
9227
9228 goto out_busy;
9229
9230 iocb_busy:
9231 pring->stats.iocb_cmd_delay++;
9232
9233 out_busy:
9234
9235 if (!(flag & SLI_IOCB_RET_IOCB)) {
9236 __lpfc_sli_ringtx_put(phba, pring, piocb);
9237 return IOCB_SUCCESS;
9238 }
9239
9240 return IOCB_BUSY;
9241 }
9242
9243 /**
9244 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
9245 * @phba: Pointer to HBA context object.
9246 * @piocbq: Pointer to command iocb.
9247 * @sglq: Pointer to the scatter gather queue object.
9248 *
9249 * This routine converts the bpl or bde that is in the IOCB
9250 * to a sgl list for the sli4 hardware. The physical address
9251 * of the bpl/bde is converted back to a virtual address.
9252 * If the IOCB contains a BPL then the list of BDEs is
9253 * converted to sli4_sge's. If the IOCB contains a single
9254 * BDE then it is converted to a single sli4_sge.
9255 * The IOCB is still in CPU endianness so the contents of
9256 * the bpl can be used without byte swapping.
9257 *
9258 * Returns valid XRI = Success, NO_XRI = Failure.
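*
* The per-entry mapping performed for a BPL is, in sketch form
* (illustrative only):
*
*   sgl->addr_hi = bpl->addrHigh;
*   sgl->addr_lo = bpl->addrLow;
*   sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
*   bf_set(lpfc_sli4_sge_last, sgl, i + 1 == numBdes);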
9259 **/
9260 static uint16_t
9261 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
9262 struct lpfc_sglq *sglq)
9263 {
9264 uint16_t xritag = NO_XRI;
9265 struct ulp_bde64 *bpl = NULL;
9266 struct ulp_bde64 bde;
9267 struct sli4_sge *sgl = NULL;
9268 struct lpfc_dmabuf *dmabuf;
9269 IOCB_t *icmd;
9270 int numBdes = 0;
9271 int i = 0;
9272 uint32_t offset = 0; /* accumulated offset in the sg request list */
9273 int inbound = 0; /* number of sg reply entries inbound from firmware */
9274
9275 if (!piocbq || !sglq)
9276 return xritag;
9277
9278 sgl = (struct sli4_sge *)sglq->sgl;
9279 icmd = &piocbq->iocb;
9280 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
9281 return sglq->sli4_xritag;
9282 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
9283 numBdes = icmd->un.genreq64.bdl.bdeSize /
9284 sizeof(struct ulp_bde64);
9285 /* The addrHigh and addrLow fields within the IOCB
9286 * have not been byteswapped yet so there is no
9287 * need to swap them back.
9288 */
9289 if (piocbq->context3)
9290 dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
9291 else
9292 return xritag;
9293
9294 bpl = (struct ulp_bde64 *)dmabuf->virt;
9295 if (!bpl)
9296 return xritag;
9297
9298 for (i = 0; i < numBdes; i++) {
9299 /* Should already be byte swapped. */
9300 sgl->addr_hi = bpl->addrHigh;
9301 sgl->addr_lo = bpl->addrLow;
9302
9303 sgl->word2 = le32_to_cpu(sgl->word2);
9304 if ((i+1) == numBdes)
9305 bf_set(lpfc_sli4_sge_last, sgl, 1);
9306 else
9307 bf_set(lpfc_sli4_sge_last, sgl, 0);
9308 /* swap the size field back to the cpu so we
9309 * can assign it to the sgl.
9310 */
9311 bde.tus.w = le32_to_cpu(bpl->tus.w);
9312 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
9313 /* The offsets in the sgl need to be accumulated
9314 * separately for the request and reply lists.
9315 * The request is always first, the reply follows.
9316 */
9317 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
9318 /* add up the reply sg entries */
9319 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
9320 inbound++;
9321 /* first inbound? reset the offset */
9322 if (inbound == 1)
9323 offset = 0;
9324 bf_set(lpfc_sli4_sge_offset, sgl, offset);
9325 bf_set(lpfc_sli4_sge_type, sgl,
9326 LPFC_SGE_TYPE_DATA);
9327 offset += bde.tus.f.bdeSize;
9328 }
9329 sgl->word2 = cpu_to_le32(sgl->word2);
9330 bpl++;
9331 sgl++;
9332 }
9333 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
9334 /* The addrHigh and addrLow fields of the BDE have not
9335 * been byteswapped yet so they need to be swapped
9336 * before putting them in the sgl.
9337 */
9338 sgl->addr_hi =
9339 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
9340 sgl->addr_lo =
9341 cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
9342 sgl->word2 = le32_to_cpu(sgl->word2);
9343 bf_set(lpfc_sli4_sge_last, sgl, 1);
9344 sgl->word2 = cpu_to_le32(sgl->word2);
9345 sgl->sge_len =
9346 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
9347 }
9348 return sglq->sli4_xritag;
9349 }
9350
9351 /**
9352 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
9353 * @phba: Pointer to HBA context object.
9354 * @iocbq: Pointer to command iocb.
9355 * @wqe: Pointer to the work queue entry.
9356 *
9357 * This routine converts the iocb command to its Work Queue Entry
9358 * equivalent. The wqe pointer should not have any fields set when
9359 * this routine is called because it will memcpy over them.
9360 * This routine does not set the CQ_ID or the WQEC bits in the
9361 * wqe.
9362 *
9363 * Returns: 0 = Success, IOCB_ERROR = Failure.
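*
* The typical issue-path pairing, as used by __lpfc_sli_issue_iocb_s4()
* below (sketch only):
*
*   if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
*           return IOCB_ERROR;
*   if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
*           return IOCB_ERROR;
*   if (lpfc_sli4_wq_put(wq, &wqe))
*           return IOCB_ERROR;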
9364 **/ 9365 static int 9366 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9367 union lpfc_wqe128 *wqe) 9368 { 9369 uint32_t xmit_len = 0, total_len = 0; 9370 uint8_t ct = 0; 9371 uint32_t fip; 9372 uint32_t abort_tag; 9373 uint8_t command_type = ELS_COMMAND_NON_FIP; 9374 uint8_t cmnd; 9375 uint16_t xritag; 9376 uint16_t abrt_iotag; 9377 struct lpfc_iocbq *abrtiocbq; 9378 struct ulp_bde64 *bpl = NULL; 9379 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9380 int numBdes, i; 9381 struct ulp_bde64 bde; 9382 struct lpfc_nodelist *ndlp; 9383 uint32_t *pcmd; 9384 uint32_t if_type; 9385 9386 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9387 /* The fcp commands will set command type */ 9388 if (iocbq->iocb_flag & LPFC_IO_FCP) 9389 command_type = FCP_COMMAND; 9390 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9391 command_type = ELS_COMMAND_FIP; 9392 else 9393 command_type = ELS_COMMAND_NON_FIP; 9394 9395 if (phba->fcp_embed_io) 9396 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9397 /* Some of the fields are in the right position already */ 9398 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9399 /* The ct field has moved so reset */ 9400 wqe->generic.wqe_com.word7 = 0; 9401 wqe->generic.wqe_com.word10 = 0; 9402 9403 abort_tag = (uint32_t) iocbq->iotag; 9404 xritag = iocbq->sli4_xritag; 9405 /* words0-2 bpl convert bde */ 9406 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9407 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9408 sizeof(struct ulp_bde64); 9409 bpl = (struct ulp_bde64 *) 9410 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9411 if (!bpl) 9412 return IOCB_ERROR; 9413 9414 /* Should already be byte swapped. */ 9415 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9416 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9417 /* swap the size field back to the cpu so we 9418 * can assign it to the sgl. 
9419 */ 9420 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9421 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9422 total_len = 0; 9423 for (i = 0; i < numBdes; i++) { 9424 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9425 total_len += bde.tus.f.bdeSize; 9426 } 9427 } else 9428 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9429 9430 iocbq->iocb.ulpIoTag = iocbq->iotag; 9431 cmnd = iocbq->iocb.ulpCommand; 9432 9433 switch (iocbq->iocb.ulpCommand) { 9434 case CMD_ELS_REQUEST64_CR: 9435 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9436 ndlp = iocbq->context_un.ndlp; 9437 else 9438 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9439 if (!iocbq->iocb.ulpLe) { 9440 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9441 "2007 Only Limited Edition cmd Format" 9442 " supported 0x%x\n", 9443 iocbq->iocb.ulpCommand); 9444 return IOCB_ERROR; 9445 } 9446 9447 wqe->els_req.payload_len = xmit_len; 9448 /* Els_reguest64 has a TMO */ 9449 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9450 iocbq->iocb.ulpTimeout); 9451 /* Need a VF for word 4 set the vf bit*/ 9452 bf_set(els_req64_vf, &wqe->els_req, 0); 9453 /* And a VFID for word 12 */ 9454 bf_set(els_req64_vfid, &wqe->els_req, 0); 9455 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9456 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9457 iocbq->iocb.ulpContext); 9458 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9459 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9460 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9461 if (command_type == ELS_COMMAND_FIP) 9462 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9463 >> LPFC_FIP_ELS_ID_SHIFT); 9464 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9465 iocbq->context2)->virt); 9466 if_type = bf_get(lpfc_sli_intf_if_type, 9467 &phba->sli4_hba.sli_intf); 9468 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9469 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9470 *pcmd == ELS_CMD_SCR || 9471 *pcmd == ELS_CMD_RSCN_XMT || 9472 *pcmd == ELS_CMD_FDISC || 9473 *pcmd == ELS_CMD_LOGO || 9474 *pcmd == ELS_CMD_PLOGI)) { 9475 bf_set(els_req64_sp, &wqe->els_req, 1); 9476 bf_set(els_req64_sid, &wqe->els_req, 9477 iocbq->vport->fc_myDID); 9478 if ((*pcmd == ELS_CMD_FLOGI) && 9479 !(phba->fc_topology == 9480 LPFC_TOPOLOGY_LOOP)) 9481 bf_set(els_req64_sid, &wqe->els_req, 0); 9482 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9483 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9484 phba->vpi_ids[iocbq->vport->vpi]); 9485 } else if (pcmd && iocbq->context1) { 9486 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9487 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9488 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9489 } 9490 } 9491 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9492 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9493 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9494 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9495 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9496 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9497 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9498 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9499 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9500 break; 9501 case CMD_XMIT_SEQUENCE64_CX: 9502 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9503 iocbq->iocb.un.ulpWord[3]); 9504 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9505 iocbq->iocb.unsli3.rcvsli3.ox_id); 9506 /* The entire sequence is transmitted for this IOCB */ 9507 xmit_len = total_len; 9508 cmnd = CMD_XMIT_SEQUENCE64_CR; 9509 if (phba->link_flag & LS_LOOPBACK_MODE) 9510 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9511 /* fall through */ 
9512 case CMD_XMIT_SEQUENCE64_CR: 9513 /* word3 iocb=io_tag32 wqe=reserved */ 9514 wqe->xmit_sequence.rsvd3 = 0; 9515 /* word4 relative_offset memcpy */ 9516 /* word5 r_ctl/df_ctl memcpy */ 9517 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9518 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9519 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9520 LPFC_WQE_IOD_WRITE); 9521 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9522 LPFC_WQE_LENLOC_WORD12); 9523 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9524 wqe->xmit_sequence.xmit_len = xmit_len; 9525 command_type = OTHER_COMMAND; 9526 break; 9527 case CMD_XMIT_BCAST64_CN: 9528 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9529 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9530 /* word4 iocb=rsvd wqe=rsvd */ 9531 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9532 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9533 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9534 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9535 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9536 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9537 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9538 LPFC_WQE_LENLOC_WORD3); 9539 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9540 break; 9541 case CMD_FCP_IWRITE64_CR: 9542 command_type = FCP_COMMAND_DATA_OUT; 9543 /* word3 iocb=iotag wqe=payload_offset_len */ 9544 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9545 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9546 xmit_len + sizeof(struct fcp_rsp)); 9547 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9548 0); 9549 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9550 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9551 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9552 iocbq->iocb.ulpFCP2Rcvy); 9553 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9554 /* Always open the exchange */ 9555 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9556 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9557 LPFC_WQE_LENLOC_WORD4); 9558 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9559 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9560 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9561 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9562 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9563 if (iocbq->priority) { 9564 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9565 (iocbq->priority << 1)); 9566 } else { 9567 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9568 (phba->cfg_XLanePriority << 1)); 9569 } 9570 } 9571 /* Note, word 10 is already initialized to 0 */ 9572 9573 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9574 if (phba->cfg_enable_pbde) 9575 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9576 else 9577 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9578 9579 if (phba->fcp_embed_io) { 9580 struct lpfc_io_buf *lpfc_cmd; 9581 struct sli4_sge *sgl; 9582 struct fcp_cmnd *fcp_cmnd; 9583 uint32_t *ptr; 9584 9585 /* 128 byte wqe support here */ 9586 9587 lpfc_cmd = iocbq->context1; 9588 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9589 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9590 9591 /* Word 0-2 - FCP_CMND */ 9592 wqe->generic.bde.tus.f.bdeFlags = 9593 BUFF_TYPE_BDE_IMMED; 9594 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9595 wqe->generic.bde.addrHigh = 0; 9596 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9597 9598 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9599 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9600 9601 /* Word 22-29 FCP CMND Payload */ 9602 ptr = &wqe->words[22]; 
9603 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9604 } 9605 break; 9606 case CMD_FCP_IREAD64_CR: 9607 /* word3 iocb=iotag wqe=payload_offset_len */ 9608 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9609 bf_set(payload_offset_len, &wqe->fcp_iread, 9610 xmit_len + sizeof(struct fcp_rsp)); 9611 bf_set(cmd_buff_len, &wqe->fcp_iread, 9612 0); 9613 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9614 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9615 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9616 iocbq->iocb.ulpFCP2Rcvy); 9617 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9618 /* Always open the exchange */ 9619 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9620 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9621 LPFC_WQE_LENLOC_WORD4); 9622 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9623 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9624 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9625 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9626 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9627 if (iocbq->priority) { 9628 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9629 (iocbq->priority << 1)); 9630 } else { 9631 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9632 (phba->cfg_XLanePriority << 1)); 9633 } 9634 } 9635 /* Note, word 10 is already initialized to 0 */ 9636 9637 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9638 if (phba->cfg_enable_pbde) 9639 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9640 else 9641 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9642 9643 if (phba->fcp_embed_io) { 9644 struct lpfc_io_buf *lpfc_cmd; 9645 struct sli4_sge *sgl; 9646 struct fcp_cmnd *fcp_cmnd; 9647 uint32_t *ptr; 9648 9649 /* 128 byte wqe support here */ 9650 9651 lpfc_cmd = iocbq->context1; 9652 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9653 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9654 9655 /* Word 0-2 - FCP_CMND */ 9656 wqe->generic.bde.tus.f.bdeFlags = 9657 BUFF_TYPE_BDE_IMMED; 9658 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9659 wqe->generic.bde.addrHigh = 0; 9660 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9661 9662 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9663 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9664 9665 /* Word 22-29 FCP CMND Payload */ 9666 ptr = &wqe->words[22]; 9667 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9668 } 9669 break; 9670 case CMD_FCP_ICMND64_CR: 9671 /* word3 iocb=iotag wqe=payload_offset_len */ 9672 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9673 bf_set(payload_offset_len, &wqe->fcp_icmd, 9674 xmit_len + sizeof(struct fcp_rsp)); 9675 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9676 0); 9677 /* word3 iocb=IO_TAG wqe=reserved */ 9678 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9679 /* Always open the exchange */ 9680 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9681 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9682 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9683 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9684 LPFC_WQE_LENLOC_NONE); 9685 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9686 iocbq->iocb.ulpFCP2Rcvy); 9687 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9688 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9689 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9690 if (iocbq->priority) { 9691 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9692 (iocbq->priority << 1)); 9693 } else { 9694 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9695 (phba->cfg_XLanePriority << 1)); 9696 } 9697 } 9698 /* Note, word 10 is already initialized to 0 */ 9699 9700 if (phba->fcp_embed_io) { 9701 struct lpfc_io_buf 
*lpfc_cmd;
9702 struct sli4_sge *sgl;
9703 struct fcp_cmnd *fcp_cmnd;
9704 uint32_t *ptr;
9705
9706 /* 128 byte wqe support here */
9707
9708 lpfc_cmd = iocbq->context1;
9709 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
9710 fcp_cmnd = lpfc_cmd->fcp_cmnd;
9711
9712 /* Word 0-2 - FCP_CMND */
9713 wqe->generic.bde.tus.f.bdeFlags =
9714 BUFF_TYPE_BDE_IMMED;
9715 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
9716 wqe->generic.bde.addrHigh = 0;
9717 wqe->generic.bde.addrLow = 88; /* Word 22 */
9718
9719 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
9720 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
9721
9722 /* Word 22-29 FCP CMND Payload */
9723 ptr = &wqe->words[22];
9724 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9725 }
9726 break;
9727 case CMD_GEN_REQUEST64_CR:
9728 /* For this command calculate the xmit length of the
9729 * request bde.
9730 */
9731 xmit_len = 0;
9732 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9733 sizeof(struct ulp_bde64);
9734 for (i = 0; i < numBdes; i++) {
9735 bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9736 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9737 break;
9738 xmit_len += bde.tus.f.bdeSize;
9739 }
9740 /* word3 iocb=IO_TAG wqe=request_payload_len */
9741 wqe->gen_req.request_payload_len = xmit_len;
9742 /* word4 iocb=parameter wqe=relative_offset memcpy */
9743 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9744 /* word6 context tag copied in memcpy */
9745 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
9746 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9747 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9748 "2015 Invalid CT %x command 0x%x\n",
9749 ct, iocbq->iocb.ulpCommand);
9750 return IOCB_ERROR;
9751 }
9752 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9753 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9754 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9755 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9756 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9757 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9758 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9759 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9760 wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9761 command_type = OTHER_COMMAND;
9762 break;
9763 case CMD_XMIT_ELS_RSP64_CX:
9764 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9765 /* words0-2 BDE memcpy */
9766 /* word3 iocb=iotag32 wqe=response_payload_len */
9767 wqe->xmit_els_rsp.response_payload_len = xmit_len;
9768 /* word4 */
9769 wqe->xmit_els_rsp.word4 = 0;
9770 /* word5 iocb=rsvd wqe=did */
9771 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9772 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9773
9774 if_type = bf_get(lpfc_sli_intf_if_type,
9775 &phba->sli4_hba.sli_intf);
9776 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
9777 if (iocbq->vport->fc_flag & FC_PT2PT) {
9778 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9779 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9780 iocbq->vport->fc_myDID);
9781 if (iocbq->vport->fc_myDID == Fabric_DID) {
9782 bf_set(wqe_els_did,
9783 &wqe->xmit_els_rsp.wqe_dest, 0);
9784 }
9785 }
9786 }
9787 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9788 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9789 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9790 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9791 iocbq->iocb.unsli3.rcvsli3.ox_id);
9792 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9793 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9794 phba->vpi_ids[iocbq->vport->vpi]);
9795 bf_set(wqe_dbde,
&wqe->xmit_els_rsp.wqe_com, 1);
9796 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9797 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9798 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9799 LPFC_WQE_LENLOC_WORD3);
9800 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9801 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9802 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9803 pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9804 iocbq->context2)->virt);
9805 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9806 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9807 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9808 iocbq->vport->fc_myDID);
9809 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9810 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9811 phba->vpi_ids[phba->pport->vpi]);
9812 }
9813 command_type = OTHER_COMMAND;
9814 break;
9815 case CMD_CLOSE_XRI_CN:
9816 case CMD_ABORT_XRI_CN:
9817 case CMD_ABORT_XRI_CX:
9818 /* words 0-2 memcpy should be 0 reserved */
9819 /* port will send abts */
9820 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9821 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9822 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9823 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9824 } else
9825 fip = 0;
9826
9827 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9828 /*
9829 * The link is down, or the command was ELS_FIP
9830 * so the fw does not need to send abts
9831 * on the wire.
9832 */
9833 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9834 else
9835 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9836 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9837 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9838 wqe->abort_cmd.rsrvd5 = 0;
9839 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9840 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9841 abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9842 /*
9843 * The abort handler will send us CMD_ABORT_XRI_CN or
9844 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9845 */
9846 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9847 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9848 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9849 LPFC_WQE_LENLOC_NONE);
9850 cmnd = CMD_ABORT_XRI_CX;
9851 command_type = OTHER_COMMAND;
9852 xritag = 0;
9853 break;
9854 case CMD_XMIT_BLS_RSP64_CX:
9855 ndlp = (struct lpfc_nodelist *)iocbq->context1;
9856 /* As BLS ABTS RSP WQE is very different from other WQEs,
9857 * we re-construct this WQE here based on information in
9858 * iocbq from scratch.
9859 */
9860 memset(wqe, 0, sizeof(*wqe));
9861 /* OX_ID is invariant regardless of who sent the ABTS to the CT exchange */
9862 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9863 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9864 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9865 LPFC_ABTS_UNSOL_INT) {
9866 /* ABTS sent by initiator to CT exchange, the
9867 * RX_ID field will be filled with the newly
9868 * allocated responder XRI.
9869 */
9870 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9871 iocbq->sli4_xritag);
9872 } else {
9873 /* ABTS sent by responder to CT exchange, the
9874 * RX_ID field will be filled with the responder
9875 * RX_ID from ABTS.
9876 */
9877 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9878 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9879 }
9880 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9881 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9882
9883 /* Use CT=VPI */
9884 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9885 ndlp->nlp_DID);
9886 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9887 iocbq->iocb.ulpContext);
9888 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9889 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9890 phba->vpi_ids[phba->pport->vpi]);
9891 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9892 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9893 LPFC_WQE_LENLOC_NONE);
9894 /* Overwrite the pre-set command type with OTHER_COMMAND */
9895 command_type = OTHER_COMMAND;
9896 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9897 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9898 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9899 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9900 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9901 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9902 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9903 }
9904
9905 break;
9906 case CMD_SEND_FRAME:
9907 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
9908 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
9909 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
9910 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
9911 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
9912 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
9913 bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
9914 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
9915 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9916 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9917 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9918 return 0;
9919 case CMD_XRI_ABORTED_CX:
9920 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9921 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9922 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9923 case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9924 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9925 default:
9926 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9927 "2014 Invalid command 0x%x\n",
9928 iocbq->iocb.ulpCommand);
9929 return IOCB_ERROR;
9931 }
9932
9933 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9934 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9935 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9936 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9937 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9938 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9939 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9940 LPFC_IO_DIF_INSERT);
9941 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9942 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9943 wqe->generic.wqe_com.abort_tag = abort_tag;
9944 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9945 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9946 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9947 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9948 return 0;
9949 }
9950
9951 /**
9952 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9953 * @phba: Pointer to HBA context object.
9954 * @ring_number: SLI ring number to issue iocb on.
9955 * @piocb: Pointer to command iocb.
9956 * @flag: Flag indicating if this command can be put into txq.
9957 *
9958 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9959 * an iocb command to an HBA with SLI-4 interface spec.
9960 *
9961 * This function is called with ring_lock held. The function will return
9962 * success after it successfully submits the iocb to the firmware or adds it
9963 * to the txq.
9964 **/
9965 static int
9966 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9967 struct lpfc_iocbq *piocb, uint32_t flag)
9968 {
9969 struct lpfc_sglq *sglq;
9970 union lpfc_wqe128 wqe;
9971 struct lpfc_queue *wq;
9972 struct lpfc_sli_ring *pring;
9973
9974 /* Get the WQ */
9975 if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9976 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9977 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
9978 } else {
9979 wq = phba->sli4_hba.els_wq;
9980 }
9981
9982 /* Get corresponding ring */
9983 pring = wq->pring;
9984
9985 /*
9986 * The WQE can be either 64 or 128 bytes.
9987 */
9988
9989 lockdep_assert_held(&pring->ring_lock);
9990
9991 if (piocb->sli4_xritag == NO_XRI) {
9992 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9993 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9994 sglq = NULL;
9995 else {
9996 if (!list_empty(&pring->txq)) {
9997 if (!(flag & SLI_IOCB_RET_IOCB)) {
9998 __lpfc_sli_ringtx_put(phba,
9999 pring, piocb);
10000 return IOCB_SUCCESS;
10001 } else {
10002 return IOCB_BUSY;
10003 }
10004 } else {
10005 sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10006 if (!sglq) {
10007 if (!(flag & SLI_IOCB_RET_IOCB)) {
10008 __lpfc_sli_ringtx_put(phba,
10009 pring,
10010 piocb);
10011 return IOCB_SUCCESS;
10012 } else
10013 return IOCB_BUSY;
10014 }
10015 }
10016 }
10017 } else if (piocb->iocb_flag & LPFC_IO_FCP)
10018 /* These IO's already have an XRI and a mapped sgl. */
10019 sglq = NULL;
10020 else {
10021 /*
10022 * This is a continuation of a command (CX), so the
10023 * sglq is already on the active list
10024 */
10025 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10026 if (!sglq)
10027 return IOCB_ERROR;
10028 }
10029
10030 if (sglq) {
10031 piocb->sli4_lxritag = sglq->sli4_lxritag;
10032 piocb->sli4_xritag = sglq->sli4_xritag;
10033 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
10034 return IOCB_ERROR;
10035 }
10036
10037 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
10038 return IOCB_ERROR;
10039
10040 if (lpfc_sli4_wq_put(wq, &wqe))
10041 return IOCB_ERROR;
10042 lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10043
10044 return 0;
10045 }
10046
10047 /**
10048 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
10049 *
10050 * This routine invokes the lockless IOCB issue routine through the
10051 * function pointer in the lpfc_hba struct.
10052 *
10053 * Return codes:
10054 * IOCB_ERROR - Error
10055 * IOCB_SUCCESS - Success
10056 * IOCB_BUSY - Busy
10057 **/
10058 int
10059 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10060 struct lpfc_iocbq *piocb, uint32_t flag)
10061 {
10062 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10063 }
10064
10065 /**
10066 * lpfc_sli_api_table_setup - Set up sli api function jump table
10067 * @phba: The hba struct for which this call is being executed.
10068 * @dev_grp: The HBA PCI-Device group number.
10069 *
10070 * This routine sets up the SLI interface API function jump table in @phba
10071 * struct.
10072 * Returns: 0 - success, -ENODEV - failure.
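*
* A call sketch from an attach path (illustrative only):
*
*   if (lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC))
*           return -ENODEV;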
10073 **/
10074 int
10075 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10076 {
10077
10078 switch (dev_grp) {
10079 case LPFC_PCI_DEV_LP:
10080 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
10081 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
10082 break;
10083 case LPFC_PCI_DEV_OC:
10084 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
10085 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
10086 break;
10087 default:
10088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10089 "1419 Invalid HBA PCI-device group: 0x%x\n",
10090 dev_grp);
10091 return -ENODEV;
10093 }
10094 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
10095 return 0;
10096 }
10097
10098 /**
10099 * lpfc_sli4_calc_ring - Calculates which ring to use
10100 * @phba: Pointer to HBA context object.
10101 * @piocb: Pointer to command iocb.
10102 *
10103 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
10104 * hba_wqidx, thus we need to calculate the corresponding ring.
10105 * Since ABORTS must go on the same WQ as the command they are
10106 * aborting, we use the command's hba_wqidx.
10107 */
10108 struct lpfc_sli_ring *
10109 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
10110 {
10111 struct lpfc_io_buf *lpfc_cmd;
10112
10113 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
10114 if (unlikely(!phba->sli4_hba.hdwq))
10115 return NULL;
10116 /*
10117 * for abort iocb hba_wqidx should already
10118 * be setup based on what work queue we used.
10119 */
10120 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
10121 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
10122 piocb->hba_wqidx = lpfc_cmd->hdwq_no;
10123 }
10124 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
10125 } else {
10126 if (unlikely(!phba->sli4_hba.els_wq))
10127 return NULL;
10128 piocb->hba_wqidx = 0;
10129 return phba->sli4_hba.els_wq->pring;
10130 }
10131 }
10132
10133 /**
10134 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
10135 * @phba: Pointer to HBA context object.
10136 * @ring_number: SLI ring number to issue iocb on.
10137 * @piocb: Pointer to command iocb.
10138 * @flag: Flag indicating if this command can be put into txq.
10139 *
10140 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb.
10141 * It acquires the appropriate lock (the ring_lock for SLI-4 ports,
10142 * the hbalock for SLI-3 ports), calls __lpfc_sli_issue_iocb, and
10143 * returns the error returned by __lpfc_sli_issue_iocb. This wrapper
10144 * is used by functions which do not already hold the lock.
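*
* A caller sketch (illustrative only; error handling elided):
*
*   rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
*                            SLI_IOCB_RET_IOCB);
*   if (rc == IOCB_ERROR)
*           lpfc_sli_release_iocbq(phba, piocb);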
10145 **/ 10146 int 10147 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 10148 struct lpfc_iocbq *piocb, uint32_t flag) 10149 { 10150 struct lpfc_sli_ring *pring; 10151 struct lpfc_queue *eq; 10152 unsigned long iflags; 10153 int rc; 10154 10155 if (phba->sli_rev == LPFC_SLI_REV4) { 10156 eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; 10157 10158 pring = lpfc_sli4_calc_ring(phba, piocb); 10159 if (unlikely(pring == NULL)) 10160 return IOCB_ERROR; 10161 10162 spin_lock_irqsave(&pring->ring_lock, iflags); 10163 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10164 spin_unlock_irqrestore(&pring->ring_lock, iflags); 10165 10166 lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH); 10167 } else { 10168 /* For now, SLI2/3 will still use hbalock */ 10169 spin_lock_irqsave(&phba->hbalock, iflags); 10170 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10171 spin_unlock_irqrestore(&phba->hbalock, iflags); 10172 } 10173 return rc; 10174 } 10175 10176 /** 10177 * lpfc_extra_ring_setup - Extra ring setup function 10178 * @phba: Pointer to HBA context object. 10179 * 10180 * This function is called while driver attaches with the 10181 * HBA to setup the extra ring. The extra ring is used 10182 * only when driver needs to support target mode functionality 10183 * or IP over FC functionalities. 10184 * 10185 * This function is called with no lock held. SLI3 only. 10186 **/ 10187 static int 10188 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10189 { 10190 struct lpfc_sli *psli; 10191 struct lpfc_sli_ring *pring; 10192 10193 psli = &phba->sli; 10194 10195 /* Adjust cmd/rsp ring iocb entries more evenly */ 10196 10197 /* Take some away from the FCP ring */ 10198 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10199 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10200 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10201 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10202 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10203 10204 /* and give them to the extra ring */ 10205 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10206 10207 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10208 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10209 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10210 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10211 10212 /* Setup default profile for this ring */ 10213 pring->iotag_max = 4096; 10214 pring->num_mask = 1; 10215 pring->prt[0].profile = 0; /* Mask 0 */ 10216 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10217 pring->prt[0].type = phba->cfg_multi_ring_type; 10218 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10219 return 0; 10220 } 10221 10222 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 10223 * @phba: Pointer to HBA context object. 10224 * @iocbq: Pointer to iocb object. 10225 * 10226 * The async_event handler calls this routine when it receives 10227 * an ASYNC_STATUS_CN event from the port. The port generates 10228 * this event when an Abort Sequence request to an rport fails 10229 * twice in succession. The abort could be originated by the 10230 * driver or by the port. The ABTS could have been for an ELS 10231 * or FCP IO. The port only generates this event when an ABTS 10232 * fails to complete after one retry. 
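* When the event decodes to IOSTAT_LOCAL_REJECT, the handler recovers the
* rport via lpfc_sli_abts_recover_port(); otherwise the event is logged
* and ignored.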
10233 */
10234 static void
10235 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
10236 struct lpfc_iocbq *iocbq)
10237 {
10238 struct lpfc_nodelist *ndlp = NULL;
10239 uint16_t rpi = 0, vpi = 0;
10240 struct lpfc_vport *vport = NULL;
10241
10242 /* The rpi in the ulpContext is vport-sensitive. */
10243 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
10244 rpi = iocbq->iocb.ulpContext;
10245
10246 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10247 "3092 Port generated ABTS async event "
10248 "on vpi %d rpi %d status 0x%x\n",
10249 vpi, rpi, iocbq->iocb.ulpStatus);
10250
10251 vport = lpfc_find_vport_by_vpid(phba, vpi);
10252 if (!vport)
10253 goto err_exit;
10254 ndlp = lpfc_findnode_rpi(vport, rpi);
10255 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
10256 goto err_exit;
10257
10258 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
10259 lpfc_sli_abts_recover_port(vport, ndlp);
10260 return;
10261
10262 err_exit:
10263 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10264 "3095 Event Context not found, no "
10265 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10266 vpi, rpi, iocbq->iocb.ulpStatus,
10267 iocbq->iocb.ulpContext);
10268 }
10269
10270 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10271 * @phba: pointer to HBA context object.
10272 * @ndlp: nodelist pointer for the impacted rport.
10273 * @axri: pointer to the wcqe containing the failed exchange.
10274 *
10275 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10276 * port. The port generates this event when an abort exchange request to an
10277 * rport fails twice in succession with no reply. The abort could be originated
10278 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10279 */
10280 void
10281 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
10282 struct lpfc_nodelist *ndlp,
10283 struct sli4_wcqe_xri_aborted *axri)
10284 {
10285 struct lpfc_vport *vport;
10286 uint32_t ext_status = 0;
10287
10288 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
10289 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10290 "3115 Node Context not found, driver "
10291 "ignoring abts err event\n");
10292 return;
10293 }
10294
10295 vport = ndlp->vport;
10296 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
10297 "3116 Port generated FCP XRI ABORT event on "
10298 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10299 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
10300 bf_get(lpfc_wcqe_xa_xri, axri),
10301 bf_get(lpfc_wcqe_xa_status, axri),
10302 axri->parameter);
10303
10304 /*
10305 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10306 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10307 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10308 */
10309 ext_status = axri->parameter & IOERR_PARAM_MASK;
10310 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
10311 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
10312 lpfc_sli_abts_recover_port(vport, ndlp);
10313 }
10314
10315 /**
10316 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10317 * @phba: Pointer to HBA context object.
10318 * @pring: Pointer to driver SLI ring object.
10319 * @iocbq: Pointer to iocb object.
10320 *
10321 * This function is called by the slow ring event handler
10322 * function when there is an ASYNC event iocb in the ring.
10323 * This function is called with no lock held.
10324 * Currently this function handles only temperature related
10325 * ASYNC events.
The function decodes the temperature sensor
10326 * event message and posts events for the management applications.
10327 **/
10328 static void
10329 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
10330 struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
10331 {
10332 IOCB_t *icmd;
10333 uint16_t evt_code;
10334 struct temp_event temp_event_data;
10335 struct Scsi_Host *shost;
10336 uint32_t *iocb_w;
10337
10338 icmd = &iocbq->iocb;
10339 evt_code = icmd->un.asyncstat.evt_code;
10340
10341 switch (evt_code) {
10342 case ASYNC_TEMP_WARN:
10343 case ASYNC_TEMP_SAFE:
10344 temp_event_data.data = (uint32_t) icmd->ulpContext;
10345 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
10346 if (evt_code == ASYNC_TEMP_WARN) {
10347 temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
10348 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10349 "0347 Adapter is very hot, please take "
10350 "corrective action. temperature : %d Celsius\n",
10351 (uint32_t) icmd->ulpContext);
10352 } else {
10353 temp_event_data.event_code = LPFC_NORMAL_TEMP;
10354 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
10355 "0340 Adapter temperature is OK now. "
10356 "temperature : %d Celsius\n",
10357 (uint32_t) icmd->ulpContext);
10358 }
10359
10360 /* Send temperature change event to applications */
10361 shost = lpfc_shost_from_vport(phba->pport);
10362 fc_host_post_vendor_event(shost, fc_get_event_number(),
10363 sizeof(temp_event_data), (char *) &temp_event_data,
10364 LPFC_NL_VENDOR_ID);
10365 break;
10366 case ASYNC_STATUS_CN:
10367 lpfc_sli_abts_err_handler(phba, iocbq);
10368 break;
10369 default:
10370 iocb_w = (uint32_t *) icmd;
10371 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
10372 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10373 " evt_code 0x%x\n"
10374 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10375 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10376 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10377 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10378 pring->ringno, icmd->un.asyncstat.evt_code,
10379 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
10380 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
10381 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
10382 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
10383
10384 break;
10385 }
10386 }
10387
10388
10389 /**
10390 * lpfc_sli4_setup - SLI ring setup function
10391 * @phba: Pointer to HBA context object.
10392 *
10393 * lpfc_sli4_setup sets up rings of the SLI interface with
10394 * number of iocbs per ring and iotags. This function is
10395 * called while the driver attaches to the HBA and before the
10396 * interrupts are enabled. So there is no need for locking.
10397 *
10398 * This function always returns 0.
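* The ELS ring is configured with four unsolicited-event masks: ELS
* requests, ELS replies, and unsolicited/solicited CT (NameServer)
* frames, routed to lpfc_els_unsol_event() or lpfc_ct_unsol_event().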
10399 **/
10400 int
10401 lpfc_sli4_setup(struct lpfc_hba *phba)
10402 {
10403 struct lpfc_sli_ring *pring;
10404
10405 pring = phba->sli4_hba.els_wq->pring;
10406 pring->num_mask = LPFC_MAX_RING_MASK;
10407 pring->prt[0].profile = 0; /* Mask 0 */
10408 pring->prt[0].rctl = FC_RCTL_ELS_REQ;
10409 pring->prt[0].type = FC_TYPE_ELS;
10410 pring->prt[0].lpfc_sli_rcv_unsol_event =
10411 lpfc_els_unsol_event;
10412 pring->prt[1].profile = 0; /* Mask 1 */
10413 pring->prt[1].rctl = FC_RCTL_ELS_REP;
10414 pring->prt[1].type = FC_TYPE_ELS;
10415 pring->prt[1].lpfc_sli_rcv_unsol_event =
10416 lpfc_els_unsol_event;
10417 pring->prt[2].profile = 0; /* Mask 2 */
10418 /* NameServer Inquiry */
10419 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
10420 /* NameServer */
10421 pring->prt[2].type = FC_TYPE_CT;
10422 pring->prt[2].lpfc_sli_rcv_unsol_event =
10423 lpfc_ct_unsol_event;
10424 pring->prt[3].profile = 0; /* Mask 3 */
10425 /* NameServer response */
10426 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
10427 /* NameServer */
10428 pring->prt[3].type = FC_TYPE_CT;
10429 pring->prt[3].lpfc_sli_rcv_unsol_event =
10430 lpfc_ct_unsol_event;
10431 return 0;
10432 }
10433
10434 /**
10435 * lpfc_sli_setup - SLI ring setup function
10436 * @phba: Pointer to HBA context object.
10437 *
10438 * lpfc_sli_setup sets up rings of the SLI interface with
10439 * number of iocbs per ring and iotags. This function is
10440 * called while the driver attaches to the HBA and before the
10441 * interrupts are enabled. So there is no need for locking.
10442 *
10443 * This function always returns 0. SLI3 only.
10444 **/
10445 int
10446 lpfc_sli_setup(struct lpfc_hba *phba)
10447 {
10448 int i, totiocbsize = 0;
10449 struct lpfc_sli *psli = &phba->sli;
10450 struct lpfc_sli_ring *pring;
10451
10452 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
10453 psli->sli_flag = 0;
10454
10455 psli->iocbq_lookup = NULL;
10456 psli->iocbq_lookup_len = 0;
10457 psli->last_iotag = 0;
10458
10459 for (i = 0; i < psli->num_rings; i++) {
10460 pring = &psli->sli3_ring[i];
10461 switch (i) {
10462 case LPFC_FCP_RING: /* ring 0 - FCP */
10463 /* numCiocb and numRiocb are used in config_port */
10464 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
10465 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
10466 pring->sli.sli3.numCiocb +=
10467 SLI2_IOCB_CMD_R1XTRA_ENTRIES;
10468 pring->sli.sli3.numRiocb +=
10469 SLI2_IOCB_RSP_R1XTRA_ENTRIES;
10470 pring->sli.sli3.numCiocb +=
10471 SLI2_IOCB_CMD_R3XTRA_ENTRIES;
10472 pring->sli.sli3.numRiocb +=
10473 SLI2_IOCB_RSP_R3XTRA_ENTRIES;
10474 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10475 SLI3_IOCB_CMD_SIZE :
10476 SLI2_IOCB_CMD_SIZE;
10477 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10478 SLI3_IOCB_RSP_SIZE :
10479 SLI2_IOCB_RSP_SIZE;
10480 pring->iotag_ctr = 0;
10481 pring->iotag_max =
10482 (phba->cfg_hba_queue_depth * 2);
10483 pring->fast_iotag = pring->iotag_max;
10484 pring->num_mask = 0;
10485 break;
10486 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */
10487 /* numCiocb and numRiocb are used in config_port */
10488 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
10489 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
10490 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
10491 SLI3_IOCB_CMD_SIZE :
10492 SLI2_IOCB_CMD_SIZE;
10493 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
10494 SLI3_IOCB_RSP_SIZE : 10495 SLI2_IOCB_RSP_SIZE; 10496 pring->iotag_max = phba->cfg_hba_queue_depth; 10497 pring->num_mask = 0; 10498 break; 10499 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10500 /* numCiocb and numRiocb are used in config_port */ 10501 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10502 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10503 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10504 SLI3_IOCB_CMD_SIZE : 10505 SLI2_IOCB_CMD_SIZE; 10506 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10507 SLI3_IOCB_RSP_SIZE : 10508 SLI2_IOCB_RSP_SIZE; 10509 pring->fast_iotag = 0; 10510 pring->iotag_ctr = 0; 10511 pring->iotag_max = 4096; 10512 pring->lpfc_sli_rcv_async_status = 10513 lpfc_sli_async_event_handler; 10514 pring->num_mask = LPFC_MAX_RING_MASK; 10515 pring->prt[0].profile = 0; /* Mask 0 */ 10516 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10517 pring->prt[0].type = FC_TYPE_ELS; 10518 pring->prt[0].lpfc_sli_rcv_unsol_event = 10519 lpfc_els_unsol_event; 10520 pring->prt[1].profile = 0; /* Mask 1 */ 10521 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10522 pring->prt[1].type = FC_TYPE_ELS; 10523 pring->prt[1].lpfc_sli_rcv_unsol_event = 10524 lpfc_els_unsol_event; 10525 pring->prt[2].profile = 0; /* Mask 2 */ 10526 /* NameServer Inquiry */ 10527 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10528 /* NameServer */ 10529 pring->prt[2].type = FC_TYPE_CT; 10530 pring->prt[2].lpfc_sli_rcv_unsol_event = 10531 lpfc_ct_unsol_event; 10532 pring->prt[3].profile = 0; /* Mask 3 */ 10533 /* NameServer response */ 10534 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10535 /* NameServer */ 10536 pring->prt[3].type = FC_TYPE_CT; 10537 pring->prt[3].lpfc_sli_rcv_unsol_event = 10538 lpfc_ct_unsol_event; 10539 break; 10540 } 10541 totiocbsize += (pring->sli.sli3.numCiocb * 10542 pring->sli.sli3.sizeCiocb) + 10543 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 10544 } 10545 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 10546 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 10547 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 10548 "SLI2 SLIM Data: x%x x%lx\n", 10549 phba->brd_no, totiocbsize, 10550 (unsigned long) MAX_SLIM_IOCB_SIZE); 10551 } 10552 if (phba->cfg_multi_ring_support == 2) 10553 lpfc_extra_ring_setup(phba); 10554 10555 return 0; 10556 } 10557 10558 /** 10559 * lpfc_sli4_queue_init - Queue initialization function 10560 * @phba: Pointer to HBA context object. 10561 * 10562 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each 10563 * ring. This function also initializes ring indices of each ring. 10564 * This function is called during the initialization of the SLI 10565 * interface of an HBA. 10566 * This function is called with no lock held and always returns 10567 * 1. 
10568 **/ 10569 void 10570 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10571 { 10572 struct lpfc_sli *psli; 10573 struct lpfc_sli_ring *pring; 10574 int i; 10575 10576 psli = &phba->sli; 10577 spin_lock_irq(&phba->hbalock); 10578 INIT_LIST_HEAD(&psli->mboxq); 10579 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10580 /* Initialize list headers for txq and txcmplq as double linked lists */ 10581 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10582 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 10583 pring->flag = 0; 10584 pring->ringno = LPFC_FCP_RING; 10585 pring->txcmplq_cnt = 0; 10586 INIT_LIST_HEAD(&pring->txq); 10587 INIT_LIST_HEAD(&pring->txcmplq); 10588 INIT_LIST_HEAD(&pring->iocb_continueq); 10589 spin_lock_init(&pring->ring_lock); 10590 } 10591 pring = phba->sli4_hba.els_wq->pring; 10592 pring->flag = 0; 10593 pring->ringno = LPFC_ELS_RING; 10594 pring->txcmplq_cnt = 0; 10595 INIT_LIST_HEAD(&pring->txq); 10596 INIT_LIST_HEAD(&pring->txcmplq); 10597 INIT_LIST_HEAD(&pring->iocb_continueq); 10598 spin_lock_init(&pring->ring_lock); 10599 10600 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10601 pring = phba->sli4_hba.nvmels_wq->pring; 10602 pring->flag = 0; 10603 pring->ringno = LPFC_ELS_RING; 10604 pring->txcmplq_cnt = 0; 10605 INIT_LIST_HEAD(&pring->txq); 10606 INIT_LIST_HEAD(&pring->txcmplq); 10607 INIT_LIST_HEAD(&pring->iocb_continueq); 10608 spin_lock_init(&pring->ring_lock); 10609 } 10610 10611 spin_unlock_irq(&phba->hbalock); 10612 } 10613 10614 /** 10615 * lpfc_sli_queue_init - Queue initialization function 10616 * @phba: Pointer to HBA context object. 10617 * 10618 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10619 * ring. This function also initializes ring indices of each ring. 10620 * This function is called during the initialization of the SLI 10621 * interface of an HBA. 10622 * This function is called with no lock held and always returns 10623 * 1. 10624 **/ 10625 void 10626 lpfc_sli_queue_init(struct lpfc_hba *phba) 10627 { 10628 struct lpfc_sli *psli; 10629 struct lpfc_sli_ring *pring; 10630 int i; 10631 10632 psli = &phba->sli; 10633 spin_lock_irq(&phba->hbalock); 10634 INIT_LIST_HEAD(&psli->mboxq); 10635 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10636 /* Initialize list headers for txq and txcmplq as double linked lists */ 10637 for (i = 0; i < psli->num_rings; i++) { 10638 pring = &psli->sli3_ring[i]; 10639 pring->ringno = i; 10640 pring->sli.sli3.next_cmdidx = 0; 10641 pring->sli.sli3.local_getidx = 0; 10642 pring->sli.sli3.cmdidx = 0; 10643 INIT_LIST_HEAD(&pring->iocb_continueq); 10644 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10645 INIT_LIST_HEAD(&pring->postbufq); 10646 pring->flag = 0; 10647 INIT_LIST_HEAD(&pring->txq); 10648 INIT_LIST_HEAD(&pring->txcmplq); 10649 spin_lock_init(&pring->ring_lock); 10650 } 10651 spin_unlock_irq(&phba->hbalock); 10652 } 10653 10654 /** 10655 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10656 * @phba: Pointer to HBA context object. 10657 * 10658 * This routine flushes the mailbox command subsystem. It will unconditionally 10659 * flush all the mailbox commands in the three possible stages in the mailbox 10660 * command sub-system: pending mailbox command queue; the outstanding mailbox 10661 * command; and completed mailbox command queue. It is caller's responsibility 10662 * to make sure that the driver is in the proper state to flush the mailbox 10663 * command sub-system. 
Namely, the posting of mailbox commands into the 10664 * pending mailbox command queue from the various clients must be stopped; 10665 * either the HBA is in a state that it will never works on the outstanding 10666 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10667 * mailbox command has been completed. 10668 **/ 10669 static void 10670 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10671 { 10672 LIST_HEAD(completions); 10673 struct lpfc_sli *psli = &phba->sli; 10674 LPFC_MBOXQ_t *pmb; 10675 unsigned long iflag; 10676 10677 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10678 local_bh_disable(); 10679 10680 /* Flush all the mailbox commands in the mbox system */ 10681 spin_lock_irqsave(&phba->hbalock, iflag); 10682 10683 /* The pending mailbox command queue */ 10684 list_splice_init(&phba->sli.mboxq, &completions); 10685 /* The outstanding active mailbox command */ 10686 if (psli->mbox_active) { 10687 list_add_tail(&psli->mbox_active->list, &completions); 10688 psli->mbox_active = NULL; 10689 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10690 } 10691 /* The completed mailbox command queue */ 10692 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10693 spin_unlock_irqrestore(&phba->hbalock, iflag); 10694 10695 /* Enable softirqs again, done with phba->hbalock */ 10696 local_bh_enable(); 10697 10698 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10699 while (!list_empty(&completions)) { 10700 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10701 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10702 if (pmb->mbox_cmpl) 10703 pmb->mbox_cmpl(phba, pmb); 10704 } 10705 } 10706 10707 /** 10708 * lpfc_sli_host_down - Vport cleanup function 10709 * @vport: Pointer to virtual port object. 10710 * 10711 * lpfc_sli_host_down is called to clean up the resources 10712 * associated with a vport before destroying virtual 10713 * port data structures. 10714 * This function does following operations: 10715 * - Free discovery resources associated with this virtual 10716 * port. 10717 * - Free iocbs associated with this virtual port in 10718 * the txq. 10719 * - Send abort for all iocb commands associated with this 10720 * vport in txcmplq. 10721 * 10722 * This function is called with no lock held and always returns 1. 10723 **/ 10724 int 10725 lpfc_sli_host_down(struct lpfc_vport *vport) 10726 { 10727 LIST_HEAD(completions); 10728 struct lpfc_hba *phba = vport->phba; 10729 struct lpfc_sli *psli = &phba->sli; 10730 struct lpfc_queue *qp = NULL; 10731 struct lpfc_sli_ring *pring; 10732 struct lpfc_iocbq *iocb, *next_iocb; 10733 int i; 10734 unsigned long flags = 0; 10735 uint16_t prev_pring_flag; 10736 10737 lpfc_cleanup_discovery_resources(vport); 10738 10739 spin_lock_irqsave(&phba->hbalock, flags); 10740 10741 /* 10742 * Error everything on the txq since these iocbs 10743 * have not been given to the FW yet. 
10744 * Also issue ABTS for everything on the txcmplq 10745 */ 10746 if (phba->sli_rev != LPFC_SLI_REV4) { 10747 for (i = 0; i < psli->num_rings; i++) { 10748 pring = &psli->sli3_ring[i]; 10749 prev_pring_flag = pring->flag; 10750 /* Only slow rings */ 10751 if (pring->ringno == LPFC_ELS_RING) { 10752 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10753 /* Set the lpfc data pending flag */ 10754 set_bit(LPFC_DATA_READY, &phba->data_flags); 10755 } 10756 list_for_each_entry_safe(iocb, next_iocb, 10757 &pring->txq, list) { 10758 if (iocb->vport != vport) 10759 continue; 10760 list_move_tail(&iocb->list, &completions); 10761 } 10762 list_for_each_entry_safe(iocb, next_iocb, 10763 &pring->txcmplq, list) { 10764 if (iocb->vport != vport) 10765 continue; 10766 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10767 } 10768 pring->flag = prev_pring_flag; 10769 } 10770 } else { 10771 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10772 pring = qp->pring; 10773 if (!pring) 10774 continue; 10775 if (pring == phba->sli4_hba.els_wq->pring) { 10776 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10777 /* Set the lpfc data pending flag */ 10778 set_bit(LPFC_DATA_READY, &phba->data_flags); 10779 } 10780 prev_pring_flag = pring->flag; 10781 spin_lock(&pring->ring_lock); 10782 list_for_each_entry_safe(iocb, next_iocb, 10783 &pring->txq, list) { 10784 if (iocb->vport != vport) 10785 continue; 10786 list_move_tail(&iocb->list, &completions); 10787 } 10788 spin_unlock(&pring->ring_lock); 10789 list_for_each_entry_safe(iocb, next_iocb, 10790 &pring->txcmplq, list) { 10791 if (iocb->vport != vport) 10792 continue; 10793 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10794 } 10795 pring->flag = prev_pring_flag; 10796 } 10797 } 10798 spin_unlock_irqrestore(&phba->hbalock, flags); 10799 10800 /* Cancel all the IOCBs from the completions list */ 10801 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10802 IOERR_SLI_DOWN); 10803 return 1; 10804 } 10805 10806 /** 10807 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10808 * @phba: Pointer to HBA context object. 10809 * 10810 * This function cleans up all iocb, buffers, mailbox commands 10811 * while shutting down the HBA. This function is called with no 10812 * lock held and always returns 1. 10813 * This function does the following to cleanup driver resources: 10814 * - Free discovery resources for each virtual port 10815 * - Cleanup any pending fabric iocbs 10816 * - Iterate through the iocb txq and free each entry 10817 * in the list. 10818 * - Free up any buffer posted to the HBA 10819 * - Free mailbox commands in the mailbox queue. 10820 **/ 10821 int 10822 lpfc_sli_hba_down(struct lpfc_hba *phba) 10823 { 10824 LIST_HEAD(completions); 10825 struct lpfc_sli *psli = &phba->sli; 10826 struct lpfc_queue *qp = NULL; 10827 struct lpfc_sli_ring *pring; 10828 struct lpfc_dmabuf *buf_ptr; 10829 unsigned long flags = 0; 10830 int i; 10831 10832 /* Shutdown the mailbox command sub-system */ 10833 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10834 10835 lpfc_hba_down_prep(phba); 10836 10837 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10838 local_bh_disable(); 10839 10840 lpfc_fabric_abort_hba(phba); 10841 10842 spin_lock_irqsave(&phba->hbalock, flags); 10843 10844 /* 10845 * Error everything on the txq since these iocbs 10846 * have not been given to the FW yet. 
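	 * SLI3 splices each ring's txq straight onto the local completions
	 * list; SLI4 walks the WQ list and splices each pring's txq under
	 * that ring's ring_lock, as below.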
10847 */ 10848 if (phba->sli_rev != LPFC_SLI_REV4) { 10849 for (i = 0; i < psli->num_rings; i++) { 10850 pring = &psli->sli3_ring[i]; 10851 /* Only slow rings */ 10852 if (pring->ringno == LPFC_ELS_RING) { 10853 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10854 /* Set the lpfc data pending flag */ 10855 set_bit(LPFC_DATA_READY, &phba->data_flags); 10856 } 10857 list_splice_init(&pring->txq, &completions); 10858 } 10859 } else { 10860 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10861 pring = qp->pring; 10862 if (!pring) 10863 continue; 10864 spin_lock(&pring->ring_lock); 10865 list_splice_init(&pring->txq, &completions); 10866 spin_unlock(&pring->ring_lock); 10867 if (pring == phba->sli4_hba.els_wq->pring) { 10868 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10869 /* Set the lpfc data pending flag */ 10870 set_bit(LPFC_DATA_READY, &phba->data_flags); 10871 } 10872 } 10873 } 10874 spin_unlock_irqrestore(&phba->hbalock, flags); 10875 10876 /* Cancel all the IOCBs from the completions list */ 10877 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10878 IOERR_SLI_DOWN); 10879 10880 spin_lock_irqsave(&phba->hbalock, flags); 10881 list_splice_init(&phba->elsbuf, &completions); 10882 phba->elsbuf_cnt = 0; 10883 phba->elsbuf_prev_cnt = 0; 10884 spin_unlock_irqrestore(&phba->hbalock, flags); 10885 10886 while (!list_empty(&completions)) { 10887 list_remove_head(&completions, buf_ptr, 10888 struct lpfc_dmabuf, list); 10889 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10890 kfree(buf_ptr); 10891 } 10892 10893 /* Enable softirqs again, done with phba->hbalock */ 10894 local_bh_enable(); 10895 10896 /* Return any active mbox cmds */ 10897 del_timer_sync(&psli->mbox_tmo); 10898 10899 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10900 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10901 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10902 10903 return 1; 10904 } 10905 10906 /** 10907 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10908 * @srcp: Source memory pointer. 10909 * @destp: Destination memory pointer. 10910 * @cnt: Number of words required to be copied. 10911 * 10912 * This function is used for copying data between driver memory 10913 * and the SLI memory. This function also changes the endianness 10914 * of each word if native endianness is different from SLI 10915 * endianness. This function can be called with or without 10916 * lock. 10917 **/ 10918 void 10919 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10920 { 10921 uint32_t *src = srcp; 10922 uint32_t *dest = destp; 10923 uint32_t ldata; 10924 int i; 10925 10926 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10927 ldata = *src; 10928 ldata = le32_to_cpu(ldata); 10929 *dest = ldata; 10930 src++; 10931 dest++; 10932 } 10933 } 10934 10935 10936 /** 10937 * lpfc_sli_bemem_bcopy - SLI memory copy function 10938 * @srcp: Source memory pointer. 10939 * @destp: Destination memory pointer. 10940 * @cnt: Number of words required to be copied. 10941 * 10942 * This function is used for copying data between a data structure 10943 * with big endian representation to local endianness. 10944 * This function can be called with or without lock. 
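 *
 * A minimal usage sketch (illustrative only). The loop below steps in
 * sizeof(uint32_t) increments, so cnt is effectively a byte count and
 * should be a multiple of sizeof(uint32_t):
 *
 *	uint32_t be_words[4], cpu_words[4];
 *
 *	lpfc_sli_bemem_bcopy(be_words, cpu_words, sizeof(be_words));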
10945  **/
10946 void
10947 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10948 {
10949 	uint32_t *src = srcp;
10950 	uint32_t *dest = destp;
10951 	uint32_t ldata;
10952 	int i;
10953 
10954 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10955 		ldata = *src;
10956 		ldata = be32_to_cpu(ldata);
10957 		*dest = ldata;
10958 		src++;
10959 		dest++;
10960 	}
10961 }
10962 
10963 /**
10964  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10965  * @phba: Pointer to HBA context object.
10966  * @pring: Pointer to driver SLI ring object.
10967  * @mp: Pointer to driver buffer object.
10968  *
10969  * This function is called with no lock held.
10970  * It always returns zero after adding the buffer to the postbufq
10971  * buffer list.
10972  **/
10973 int
10974 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10975 			 struct lpfc_dmabuf *mp)
10976 {
10977 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10978 	   later */
10979 	spin_lock_irq(&phba->hbalock);
10980 	list_add_tail(&mp->list, &pring->postbufq);
10981 	pring->postbufq_cnt++;
10982 	spin_unlock_irq(&phba->hbalock);
10983 	return 0;
10984 }
10985 
10986 /**
10987  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10988  * @phba: Pointer to HBA context object.
10989  *
10990  * When HBQ is enabled, buffers are searched based on tags. This function
10991  * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10992  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
10993  * does not conflict with tags of buffers posted for unsolicited events.
10994  * The function returns the allocated tag. The function is called with
10995  * no locks held.
10996  **/
10997 uint32_t
10998 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10999 {
11000 	spin_lock_irq(&phba->hbalock);
11001 	phba->buffer_tag_count++;
11002 	/*
11003 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
11004 	 * a tag assigned by the HBQ.
11005 	 */
11006 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
11007 	spin_unlock_irq(&phba->hbalock);
11008 	return phba->buffer_tag_count;
11009 }
11010 
11011 /**
11012  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
11013  * @phba: Pointer to HBA context object.
11014  * @pring: Pointer to driver SLI ring object.
11015  * @tag: Buffer tag.
11016  *
11017  * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
11018  * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
11019  * iocb is posted to the response ring with the tag of the buffer.
11020  * This function searches the pring->postbufq list using the tag
11021  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
11022  * iocb. If the buffer is found then the lpfc_dmabuf object of the
11023  * buffer is returned to the caller else NULL is returned.
11024  * This function is called with no lock held.
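 *
 * Posting and lookup pair up as in this sketch (hypothetical caller,
 * shown only to illustrate the tag flow):
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	... later, the CMD_IOCB_RET_XRI64_CX completion returns the tag ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);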
11025 **/ 11026 struct lpfc_dmabuf * 11027 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11028 uint32_t tag) 11029 { 11030 struct lpfc_dmabuf *mp, *next_mp; 11031 struct list_head *slp = &pring->postbufq; 11032 11033 /* Search postbufq, from the beginning, looking for a match on tag */ 11034 spin_lock_irq(&phba->hbalock); 11035 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 11036 if (mp->buffer_tag == tag) { 11037 list_del_init(&mp->list); 11038 pring->postbufq_cnt--; 11039 spin_unlock_irq(&phba->hbalock); 11040 return mp; 11041 } 11042 } 11043 11044 spin_unlock_irq(&phba->hbalock); 11045 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11046 "0402 Cannot find virtual addr for buffer tag on " 11047 "ring %d Data x%lx x%px x%px x%x\n", 11048 pring->ringno, (unsigned long) tag, 11049 slp->next, slp->prev, pring->postbufq_cnt); 11050 11051 return NULL; 11052 } 11053 11054 /** 11055 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 11056 * @phba: Pointer to HBA context object. 11057 * @pring: Pointer to driver SLI ring object. 11058 * @phys: DMA address of the buffer. 11059 * 11060 * This function searches the buffer list using the dma_address 11061 * of unsolicited event to find the driver's lpfc_dmabuf object 11062 * corresponding to the dma_address. The function returns the 11063 * lpfc_dmabuf object if a buffer is found else it returns NULL. 11064 * This function is called by the ct and els unsolicited event 11065 * handlers to get the buffer associated with the unsolicited 11066 * event. 11067 * 11068 * This function is called with no lock held. 11069 **/ 11070 struct lpfc_dmabuf * 11071 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11072 dma_addr_t phys) 11073 { 11074 struct lpfc_dmabuf *mp, *next_mp; 11075 struct list_head *slp = &pring->postbufq; 11076 11077 /* Search postbufq, from the beginning, looking for a match on phys */ 11078 spin_lock_irq(&phba->hbalock); 11079 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 11080 if (mp->phys == phys) { 11081 list_del_init(&mp->list); 11082 pring->postbufq_cnt--; 11083 spin_unlock_irq(&phba->hbalock); 11084 return mp; 11085 } 11086 } 11087 11088 spin_unlock_irq(&phba->hbalock); 11089 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11090 "0410 Cannot find virtual addr for mapped buf on " 11091 "ring %d Data x%llx x%px x%px x%x\n", 11092 pring->ringno, (unsigned long long)phys, 11093 slp->next, slp->prev, pring->postbufq_cnt); 11094 return NULL; 11095 } 11096 11097 /** 11098 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 11099 * @phba: Pointer to HBA context object. 11100 * @cmdiocb: Pointer to driver command iocb object. 11101 * @rspiocb: Pointer to driver response iocb object. 11102 * 11103 * This function is the completion handler for the abort iocbs for 11104 * ELS commands. This function is called from the ELS ring event 11105 * handler with no lock held. This function frees memory resources 11106 * associated with the abort iocb. 11107 **/ 11108 static void 11109 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11110 struct lpfc_iocbq *rspiocb) 11111 { 11112 IOCB_t *irsp = &rspiocb->iocb; 11113 uint16_t abort_iotag, abort_context; 11114 struct lpfc_iocbq *abort_iocb = NULL; 11115 11116 if (irsp->ulpStatus) { 11117 11118 /* 11119 * Assume that the port already completed and returned, or 11120 * will return the iocb. Just Log the message. 
11121 */ 11122 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 11123 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 11124 11125 spin_lock_irq(&phba->hbalock); 11126 if (phba->sli_rev < LPFC_SLI_REV4) { 11127 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 11128 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11129 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 11130 spin_unlock_irq(&phba->hbalock); 11131 goto release_iocb; 11132 } 11133 if (abort_iotag != 0 && 11134 abort_iotag <= phba->sli.last_iotag) 11135 abort_iocb = 11136 phba->sli.iocbq_lookup[abort_iotag]; 11137 } else 11138 /* For sli4 the abort_tag is the XRI, 11139 * so the abort routine puts the iotag of the iocb 11140 * being aborted in the context field of the abort 11141 * IOCB. 11142 */ 11143 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11144 11145 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11146 "0327 Cannot abort els iocb x%px " 11147 "with tag %x context %x, abort status %x, " 11148 "abort code %x\n", 11149 abort_iocb, abort_iotag, abort_context, 11150 irsp->ulpStatus, irsp->un.ulpWord[4]); 11151 11152 spin_unlock_irq(&phba->hbalock); 11153 } 11154 release_iocb: 11155 lpfc_sli_release_iocbq(phba, cmdiocb); 11156 return; 11157 } 11158 11159 /** 11160 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11161 * @phba: Pointer to HBA context object. 11162 * @cmdiocb: Pointer to driver command iocb object. 11163 * @rspiocb: Pointer to driver response iocb object. 11164 * 11165 * The function is called from SLI ring event handler with no 11166 * lock held. This function is the completion handler for ELS commands 11167 * which are aborted. The function frees memory resources used for 11168 * the aborted ELS commands. 11169 **/ 11170 static void 11171 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11172 struct lpfc_iocbq *rspiocb) 11173 { 11174 IOCB_t *irsp = &rspiocb->iocb; 11175 11176 /* ELS cmd tag <ulpIoTag> completes */ 11177 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11178 "0139 Ignoring ELS cmd tag x%x completion Data: " 11179 "x%x x%x x%x\n", 11180 irsp->ulpIoTag, irsp->ulpStatus, 11181 irsp->un.ulpWord[4], irsp->ulpTimeout); 11182 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11183 lpfc_ct_free_iocb(phba, cmdiocb); 11184 else 11185 lpfc_els_free_iocb(phba, cmdiocb); 11186 return; 11187 } 11188 11189 /** 11190 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11191 * @phba: Pointer to HBA context object. 11192 * @pring: Pointer to driver SLI ring object. 11193 * @cmdiocb: Pointer to driver command iocb object. 11194 * 11195 * This function issues an abort iocb for the provided command iocb down to 11196 * the port. Other than the case the outstanding command iocb is an abort 11197 * request, this function issues abort out unconditionally. This function is 11198 * called with hbalock held. The function returns 0 when it fails due to 11199 * memory allocation failure or when the command iocb is an abort request. 11200 **/ 11201 static int 11202 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11203 struct lpfc_iocbq *cmdiocb) 11204 { 11205 struct lpfc_vport *vport = cmdiocb->vport; 11206 struct lpfc_iocbq *abtsiocbp; 11207 IOCB_t *icmd = NULL; 11208 IOCB_t *iabt = NULL; 11209 int retval; 11210 unsigned long iflags; 11211 struct lpfc_nodelist *ndlp; 11212 11213 lockdep_assert_held(&phba->hbalock); 11214 11215 /* 11216 * There are certain command types we don't want to abort. 
And we
11217 	 * don't want to abort commands that are already in the process of
11218 	 * being aborted.
11219 	 */
11220 	icmd = &cmdiocb->iocb;
11221 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
11222 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
11223 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
11224 		return 0;
11225 
11226 	/* issue ABTS for this IOCB based on iotag */
11227 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
11228 	if (abtsiocbp == NULL)
11229 		return 0;
11230 
11231 	/* This signals the response to set the correct status
11232 	 * before calling the completion handler
11233 	 */
11234 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
11235 
11236 	iabt = &abtsiocbp->iocb;
11237 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
11238 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
11239 	if (phba->sli_rev == LPFC_SLI_REV4) {
11240 		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
11241 		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
11242 	} else {
11243 		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
11244 		if (pring->ringno == LPFC_ELS_RING) {
11245 			ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
11246 			iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
11247 		}
11248 	}
11249 	iabt->ulpLe = 1;
11250 	iabt->ulpClass = icmd->ulpClass;
11251 
11252 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
11253 	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
11254 	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
11255 		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
11256 	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
11257 		abtsiocbp->iocb_flag |= LPFC_IO_FOF;
11258 
11259 	if (phba->link_state >= LPFC_LINK_UP)
11260 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
11261 	else
11262 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
11263 
11264 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
11265 	abtsiocbp->vport = vport;
11266 
11267 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
11268 			 "0339 Abort xri x%x, original iotag x%x, "
11269 			 "abort cmd iotag x%x\n",
11270 			 iabt->un.acxri.abortIoTag,
11271 			 iabt->un.acxri.abortContextTag,
11272 			 abtsiocbp->iotag);
11273 
11274 	if (phba->sli_rev == LPFC_SLI_REV4) {
11275 		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
11276 		if (unlikely(pring == NULL))
11277 			return 0;
11278 		/* Note: both hbalock and ring_lock need to be set here */
11279 		spin_lock_irqsave(&pring->ring_lock, iflags);
11280 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11281 			abtsiocbp, 0);
11282 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
11283 	} else {
11284 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
11285 			abtsiocbp, 0);
11286 	}
11287 
11288 	if (retval)
11289 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
11290 
11291 	/*
11292 	 * The caller of this routine should check for IOCB_ERROR
11293 	 * and handle it properly. This routine no longer removes the
11294 	 * iocb from the txcmplq nor calls the completion handler in
11295 	 * the IOCB_ERROR case.
11296 	 */
11297 	return retval;
11298 }
11299 
11300 /**
11301  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11302  * @phba: Pointer to HBA context object.
11303  * @pring: Pointer to driver SLI ring object.
11304  * @cmdiocb: Pointer to driver command iocb object.
11305  *
11306  * This function issues an abort iocb for the provided command iocb. In case
11307  * of unloading, the abort iocb will not be issued to commands on the ELS
11308  * ring. Instead, the completion callback of those commands shall be changed
11309  * so that nothing happens when they finish. This function is called with
11310  * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
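 *
 * Callers already hold the hbalock, so a typical invocation follows
 * this sketch (lpfc_sli_host_down above is a real example):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);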
11311 **/ 11312 int 11313 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11314 struct lpfc_iocbq *cmdiocb) 11315 { 11316 struct lpfc_vport *vport = cmdiocb->vport; 11317 int retval = IOCB_ERROR; 11318 IOCB_t *icmd = NULL; 11319 11320 lockdep_assert_held(&phba->hbalock); 11321 11322 /* 11323 * There are certain command types we don't want to abort. And we 11324 * don't want to abort commands that are already in the process of 11325 * being aborted. 11326 */ 11327 icmd = &cmdiocb->iocb; 11328 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11329 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11330 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11331 return 0; 11332 11333 if (!pring) { 11334 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11335 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11336 else 11337 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11338 goto abort_iotag_exit; 11339 } 11340 11341 /* 11342 * If we're unloading, don't abort iocb on the ELS ring, but change 11343 * the callback so that nothing happens when it finishes. 11344 */ 11345 if ((vport->load_flag & FC_UNLOADING) && 11346 (pring->ringno == LPFC_ELS_RING)) { 11347 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11348 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11349 else 11350 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11351 goto abort_iotag_exit; 11352 } 11353 11354 /* Now, we try to issue the abort to the cmdiocb out */ 11355 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11356 11357 abort_iotag_exit: 11358 /* 11359 * Caller to this routine should check for IOCB_ERROR 11360 * and handle it properly. This routine no longer removes 11361 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11362 */ 11363 return retval; 11364 } 11365 11366 /** 11367 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11368 * @phba: pointer to lpfc HBA data structure. 11369 * 11370 * This routine will abort all pending and outstanding iocbs to an HBA. 11371 **/ 11372 void 11373 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11374 { 11375 struct lpfc_sli *psli = &phba->sli; 11376 struct lpfc_sli_ring *pring; 11377 struct lpfc_queue *qp = NULL; 11378 int i; 11379 11380 if (phba->sli_rev != LPFC_SLI_REV4) { 11381 for (i = 0; i < psli->num_rings; i++) { 11382 pring = &psli->sli3_ring[i]; 11383 lpfc_sli_abort_iocb_ring(phba, pring); 11384 } 11385 return; 11386 } 11387 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11388 pring = qp->pring; 11389 if (!pring) 11390 continue; 11391 lpfc_sli_abort_iocb_ring(phba, pring); 11392 } 11393 } 11394 11395 /** 11396 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11397 * @iocbq: Pointer to driver iocb object. 11398 * @vport: Pointer to driver virtual port object. 11399 * @tgt_id: SCSI ID of the target. 11400 * @lun_id: LUN ID of the scsi device. 11401 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11402 * 11403 * This function acts as an iocb filter for functions which abort or count 11404 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11405 * 0 if the filtering criteria is met for the given iocb and will return 11406 * 1 if the filtering criteria is not met. 11407 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11408 * given iocb is for the SCSI device specified by vport, tgt_id and 11409 * lun_id parameter. 11410 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11411 * given iocb is for the SCSI target specified by vport and tgt_id 11412 * parameters. 
11413 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11414 * given iocb is for the SCSI host associated with the given vport. 11415 * This function is called with no locks held. 11416 **/ 11417 static int 11418 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11419 uint16_t tgt_id, uint64_t lun_id, 11420 lpfc_ctx_cmd ctx_cmd) 11421 { 11422 struct lpfc_io_buf *lpfc_cmd; 11423 int rc = 1; 11424 11425 if (iocbq->vport != vport) 11426 return rc; 11427 11428 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11429 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11430 return rc; 11431 11432 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11433 11434 if (lpfc_cmd->pCmd == NULL) 11435 return rc; 11436 11437 switch (ctx_cmd) { 11438 case LPFC_CTX_LUN: 11439 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11440 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11441 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11442 rc = 0; 11443 break; 11444 case LPFC_CTX_TGT: 11445 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11446 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11447 rc = 0; 11448 break; 11449 case LPFC_CTX_HOST: 11450 rc = 0; 11451 break; 11452 default: 11453 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11454 __func__, ctx_cmd); 11455 break; 11456 } 11457 11458 return rc; 11459 } 11460 11461 /** 11462 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11463 * @vport: Pointer to virtual port. 11464 * @tgt_id: SCSI ID of the target. 11465 * @lun_id: LUN ID of the scsi device. 11466 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11467 * 11468 * This function returns number of FCP commands pending for the vport. 11469 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11470 * commands pending on the vport associated with SCSI device specified 11471 * by tgt_id and lun_id parameters. 11472 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11473 * commands pending on the vport associated with SCSI target specified 11474 * by tgt_id parameter. 11475 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11476 * commands pending on the vport. 11477 * This function returns the number of iocbs which satisfy the filter. 11478 * This function is called without any lock held. 11479 **/ 11480 int 11481 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11482 lpfc_ctx_cmd ctx_cmd) 11483 { 11484 struct lpfc_hba *phba = vport->phba; 11485 struct lpfc_iocbq *iocbq; 11486 int sum, i; 11487 11488 spin_lock_irq(&phba->hbalock); 11489 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11490 iocbq = phba->sli.iocbq_lookup[i]; 11491 11492 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11493 ctx_cmd) == 0) 11494 sum++; 11495 } 11496 spin_unlock_irq(&phba->hbalock); 11497 11498 return sum; 11499 } 11500 11501 /** 11502 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11503 * @phba: Pointer to HBA context object 11504 * @cmdiocb: Pointer to command iocb object. 11505 * @rspiocb: Pointer to response iocb object. 11506 * 11507 * This function is called when an aborted FCP iocb completes. This 11508 * function is called by the ring event handler with no lock held. 11509 * This function frees the iocb. 
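 *
 * This routine is installed as the completion handler of the ABTS
 * iocb before that iocb is issued, as in the abort loop below:
 *
 *	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;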
11510 **/ 11511 void 11512 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11513 struct lpfc_iocbq *rspiocb) 11514 { 11515 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11516 "3096 ABORT_XRI_CN completing on rpi x%x " 11517 "original iotag x%x, abort cmd iotag x%x " 11518 "status 0x%x, reason 0x%x\n", 11519 cmdiocb->iocb.un.acxri.abortContextTag, 11520 cmdiocb->iocb.un.acxri.abortIoTag, 11521 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11522 rspiocb->iocb.un.ulpWord[4]); 11523 lpfc_sli_release_iocbq(phba, cmdiocb); 11524 return; 11525 } 11526 11527 /** 11528 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11529 * @vport: Pointer to virtual port. 11530 * @pring: Pointer to driver SLI ring object. 11531 * @tgt_id: SCSI ID of the target. 11532 * @lun_id: LUN ID of the scsi device. 11533 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11534 * 11535 * This function sends an abort command for every SCSI command 11536 * associated with the given virtual port pending on the ring 11537 * filtered by lpfc_sli_validate_fcp_iocb function. 11538 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11539 * FCP iocbs associated with lun specified by tgt_id and lun_id 11540 * parameters 11541 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11542 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11543 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11544 * FCP iocbs associated with virtual port. 11545 * This function returns number of iocbs it failed to abort. 11546 * This function is called with no locks held. 11547 **/ 11548 int 11549 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11550 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11551 { 11552 struct lpfc_hba *phba = vport->phba; 11553 struct lpfc_iocbq *iocbq; 11554 struct lpfc_iocbq *abtsiocb; 11555 struct lpfc_sli_ring *pring_s4; 11556 IOCB_t *cmd = NULL; 11557 int errcnt = 0, ret_val = 0; 11558 int i; 11559 11560 /* all I/Os are in process of being flushed */ 11561 if (phba->hba_flag & HBA_IOQ_FLUSH) 11562 return errcnt; 11563 11564 for (i = 1; i <= phba->sli.last_iotag; i++) { 11565 iocbq = phba->sli.iocbq_lookup[i]; 11566 11567 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11568 abort_cmd) != 0) 11569 continue; 11570 11571 /* 11572 * If the iocbq is already being aborted, don't take a second 11573 * action, but do count it. 11574 */ 11575 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11576 continue; 11577 11578 /* issue ABTS for this IOCB based on iotag */ 11579 abtsiocb = lpfc_sli_get_iocbq(phba); 11580 if (abtsiocb == NULL) { 11581 errcnt++; 11582 continue; 11583 } 11584 11585 /* indicate the IO is being aborted by the driver. 
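		 * Setting LPFC_DRIVER_ABORTED also keeps this loop, and any
		 * other abort path, from issuing a second ABTS for the same
		 * IO (see the check above).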
 */
11586 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11587 
11588 		cmd = &iocbq->iocb;
11589 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11590 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
11591 		if (phba->sli_rev == LPFC_SLI_REV4)
11592 			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
11593 		else
11594 			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
11595 		abtsiocb->iocb.ulpLe = 1;
11596 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
11597 		abtsiocb->vport = vport;
11598 
11599 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
11600 		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
11601 		if (iocbq->iocb_flag & LPFC_IO_FCP)
11602 			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
11603 		if (iocbq->iocb_flag & LPFC_IO_FOF)
11604 			abtsiocb->iocb_flag |= LPFC_IO_FOF;
11605 
11606 		if (lpfc_is_link_up(phba))
11607 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11608 		else
11609 			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11610 
11611 		/* Setup callback routine and issue the command. */
11612 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11613 		if (phba->sli_rev == LPFC_SLI_REV4) {
11614 			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11615 			if (!pring_s4)
11616 				continue;
11617 			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11618 						      abtsiocb, 0);
11619 		} else
11620 			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11621 						      abtsiocb, 0);
11622 		if (ret_val == IOCB_ERROR) {
11623 			lpfc_sli_release_iocbq(phba, abtsiocb);
11624 			errcnt++;
11625 			continue;
11626 		}
11627 	}
11628 
11629 	return errcnt;
11630 }
11631 
11632 /**
11633  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11634  * @vport: Pointer to virtual port.
11635  * @pring: Pointer to driver SLI ring object.
11636  * @tgt_id: SCSI ID of the target.
11637  * @lun_id: LUN ID of the scsi device.
11638  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11639  *
11640  * This function sends an abort command for every SCSI command
11641  * associated with the given virtual port pending on the ring
11642  * filtered by the lpfc_sli_validate_fcp_iocb function.
11643  * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
11644  * FCP iocbs associated with the lun specified by the tgt_id and lun_id
11645  * parameters.
11646  * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
11647  * FCP iocbs associated with the SCSI target specified by the tgt_id parameter.
11648  * When cmd == LPFC_CTX_HOST, the function sends an abort to all
11649  * FCP iocbs associated with the virtual port.
11650  * This function returns the number of iocbs it aborted.
11651  * This function is called with no locks held right after a taskmgmt
11652  * command is sent.
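 *
 * A task management path might invoke it as in this sketch
 * (illustrative; the ctx filtering works as for lpfc_sli_abort_iocb):
 *
 *	aborted = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *					  LPFC_CTX_LUN);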
11653 **/ 11654 int 11655 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11656 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11657 { 11658 struct lpfc_hba *phba = vport->phba; 11659 struct lpfc_io_buf *lpfc_cmd; 11660 struct lpfc_iocbq *abtsiocbq; 11661 struct lpfc_nodelist *ndlp; 11662 struct lpfc_iocbq *iocbq; 11663 IOCB_t *icmd; 11664 int sum, i, ret_val; 11665 unsigned long iflags; 11666 struct lpfc_sli_ring *pring_s4 = NULL; 11667 11668 spin_lock_irqsave(&phba->hbalock, iflags); 11669 11670 /* all I/Os are in process of being flushed */ 11671 if (phba->hba_flag & HBA_IOQ_FLUSH) { 11672 spin_unlock_irqrestore(&phba->hbalock, iflags); 11673 return 0; 11674 } 11675 sum = 0; 11676 11677 for (i = 1; i <= phba->sli.last_iotag; i++) { 11678 iocbq = phba->sli.iocbq_lookup[i]; 11679 11680 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11681 cmd) != 0) 11682 continue; 11683 11684 /* Guard against IO completion being called at same time */ 11685 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11686 spin_lock(&lpfc_cmd->buf_lock); 11687 11688 if (!lpfc_cmd->pCmd) { 11689 spin_unlock(&lpfc_cmd->buf_lock); 11690 continue; 11691 } 11692 11693 if (phba->sli_rev == LPFC_SLI_REV4) { 11694 pring_s4 = 11695 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; 11696 if (!pring_s4) { 11697 spin_unlock(&lpfc_cmd->buf_lock); 11698 continue; 11699 } 11700 /* Note: both hbalock and ring_lock must be set here */ 11701 spin_lock(&pring_s4->ring_lock); 11702 } 11703 11704 /* 11705 * If the iocbq is already being aborted, don't take a second 11706 * action, but do count it. 11707 */ 11708 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 11709 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 11710 if (phba->sli_rev == LPFC_SLI_REV4) 11711 spin_unlock(&pring_s4->ring_lock); 11712 spin_unlock(&lpfc_cmd->buf_lock); 11713 continue; 11714 } 11715 11716 /* issue ABTS for this IOCB based on iotag */ 11717 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11718 if (!abtsiocbq) { 11719 if (phba->sli_rev == LPFC_SLI_REV4) 11720 spin_unlock(&pring_s4->ring_lock); 11721 spin_unlock(&lpfc_cmd->buf_lock); 11722 continue; 11723 } 11724 11725 icmd = &iocbq->iocb; 11726 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11727 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11728 if (phba->sli_rev == LPFC_SLI_REV4) 11729 abtsiocbq->iocb.un.acxri.abortIoTag = 11730 iocbq->sli4_xritag; 11731 else 11732 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11733 abtsiocbq->iocb.ulpLe = 1; 11734 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11735 abtsiocbq->vport = vport; 11736 11737 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11738 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11739 if (iocbq->iocb_flag & LPFC_IO_FCP) 11740 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11741 if (iocbq->iocb_flag & LPFC_IO_FOF) 11742 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11743 11744 ndlp = lpfc_cmd->rdata->pnode; 11745 11746 if (lpfc_is_link_up(phba) && 11747 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11748 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11749 else 11750 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11751 11752 /* Setup callback routine and issue the command. */ 11753 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11754 11755 /* 11756 * Indicate the IO is being aborted by the driver and set 11757 * the caller's flag into the aborted IO. 
11758 */ 11759 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11760 11761 if (phba->sli_rev == LPFC_SLI_REV4) { 11762 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11763 abtsiocbq, 0); 11764 spin_unlock(&pring_s4->ring_lock); 11765 } else { 11766 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11767 abtsiocbq, 0); 11768 } 11769 11770 spin_unlock(&lpfc_cmd->buf_lock); 11771 11772 if (ret_val == IOCB_ERROR) 11773 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11774 else 11775 sum++; 11776 } 11777 spin_unlock_irqrestore(&phba->hbalock, iflags); 11778 return sum; 11779 } 11780 11781 /** 11782 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11783 * @phba: Pointer to HBA context object. 11784 * @cmdiocbq: Pointer to command iocb. 11785 * @rspiocbq: Pointer to response iocb. 11786 * 11787 * This function is the completion handler for iocbs issued using 11788 * lpfc_sli_issue_iocb_wait function. This function is called by the 11789 * ring event handler function without any lock held. This function 11790 * can be called from both worker thread context and interrupt 11791 * context. This function also can be called from other thread which 11792 * cleans up the SLI layer objects. 11793 * This function copy the contents of the response iocb to the 11794 * response iocb memory object provided by the caller of 11795 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11796 * sleeps for the iocb completion. 11797 **/ 11798 static void 11799 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11800 struct lpfc_iocbq *cmdiocbq, 11801 struct lpfc_iocbq *rspiocbq) 11802 { 11803 wait_queue_head_t *pdone_q; 11804 unsigned long iflags; 11805 struct lpfc_io_buf *lpfc_cmd; 11806 11807 spin_lock_irqsave(&phba->hbalock, iflags); 11808 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11809 11810 /* 11811 * A time out has occurred for the iocb. If a time out 11812 * completion handler has been supplied, call it. Otherwise, 11813 * just free the iocbq. 11814 */ 11815 11816 spin_unlock_irqrestore(&phba->hbalock, iflags); 11817 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11818 cmdiocbq->wait_iocb_cmpl = NULL; 11819 if (cmdiocbq->iocb_cmpl) 11820 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11821 else 11822 lpfc_sli_release_iocbq(phba, cmdiocbq); 11823 return; 11824 } 11825 11826 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11827 if (cmdiocbq->context2 && rspiocbq) 11828 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11829 &rspiocbq->iocb, sizeof(IOCB_t)); 11830 11831 /* Set the exchange busy flag for task management commands */ 11832 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11833 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11834 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 11835 cur_iocbq); 11836 if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY)) 11837 lpfc_cmd->flags |= LPFC_SBUF_XBUSY; 11838 else 11839 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; 11840 } 11841 11842 pdone_q = cmdiocbq->context_un.wait_queue; 11843 if (pdone_q) 11844 wake_up(pdone_q); 11845 spin_unlock_irqrestore(&phba->hbalock, iflags); 11846 return; 11847 } 11848 11849 /** 11850 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11851 * @phba: Pointer to HBA context object.. 11852 * @piocbq: Pointer to command iocb. 11853 * @flag: Flag to test. 11854 * 11855 * This routine grabs the hbalock and then test the iocb_flag to 11856 * see if the passed in flag is set. 11857 * Returns: 11858 * 1 if flag is set. 11859 * 0 if flag is not set. 
11860  **/
11861 static int
11862 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11863 		  struct lpfc_iocbq *piocbq, uint32_t flag)
11864 {
11865 	unsigned long iflags;
11866 	int ret;
11867 
11868 	spin_lock_irqsave(&phba->hbalock, iflags);
11869 	ret = piocbq->iocb_flag & flag;
11870 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11871 	return ret;
11872 
11873 }
11874 
11875 /**
11876  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11877  * @phba: Pointer to HBA context object.
11878  * @ring_number: SLI ring number to issue the iocb on.
11879  * @piocb: Pointer to command iocb.
11880  * @prspiocbq: Pointer to response iocb.
11881  * @timeout: Timeout in number of seconds.
11882  *
11883  * This function issues the iocb to firmware and waits for the
11884  * iocb to complete. The iocb_cmpl field of the iocb shall be used
11885  * to handle iocbs which time out. If the field is NULL, the
11886  * function shall free the iocbq structure. If more clean up is
11887  * needed, the caller is expected to provide a completion function
11888  * that will provide the needed clean up. If the iocb command is
11889  * not completed within timeout seconds, the function will either
11890  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11891  * completion function set in the iocb_cmpl field and then return
11892  * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11893  * resources if this function returns IOCB_TIMEDOUT.
11894  * The function waits for the iocb completion using a
11895  * non-interruptible wait.
11896  * This function will sleep while waiting for iocb completion.
11897  * So, this function should not be called from any context which
11898  * does not allow sleeping. Due to the same reason, this function
11899  * cannot be called with interrupts disabled.
11900  * This function assumes that the iocb completions occur while
11901  * this function sleeps. So, this function cannot be called from
11902  * the thread which processes iocb completion for this ring.
11903  * This function clears the iocb_flag of the iocb object before
11904  * issuing the iocb and the iocb completion handler sets this
11905  * flag and wakes this thread when the iocb completes.
11906  * The contents of the response iocb will be copied to prspiocbq
11907  * by the completion handler when the command completes.
11908  * This function returns IOCB_SUCCESS on success.
11909  * This function is called with no lock held.
11910  **/
11911 int
11912 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11913 			 uint32_t ring_number,
11914 			 struct lpfc_iocbq *piocb,
11915 			 struct lpfc_iocbq *prspiocbq,
11916 			 uint32_t timeout)
11917 {
11918 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11919 	long timeleft, timeout_req = 0;
11920 	int retval = IOCB_SUCCESS;
11921 	uint32_t creg_val;
11922 	struct lpfc_iocbq *iocb;
11923 	int txq_cnt = 0;
11924 	int txcmplq_cnt = 0;
11925 	struct lpfc_sli_ring *pring;
11926 	unsigned long iflags;
11927 	bool iocb_completed = true;
11928 
11929 	if (phba->sli_rev >= LPFC_SLI_REV4)
11930 		pring = lpfc_sli4_calc_ring(phba, piocb);
11931 	else
11932 		pring = &phba->sli.sli3_ring[ring_number];
11933 	/*
11934 	 * If the caller has provided a response iocbq buffer, then context2
11935 	 * must be NULL; if it is not, that is an error.
11936 */ 11937 if (prspiocbq) { 11938 if (piocb->context2) 11939 return IOCB_ERROR; 11940 piocb->context2 = prspiocbq; 11941 } 11942 11943 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11944 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11945 piocb->context_un.wait_queue = &done_q; 11946 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11947 11948 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11949 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11950 return IOCB_ERROR; 11951 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11952 writel(creg_val, phba->HCregaddr); 11953 readl(phba->HCregaddr); /* flush */ 11954 } 11955 11956 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11957 SLI_IOCB_RET_IOCB); 11958 if (retval == IOCB_SUCCESS) { 11959 timeout_req = msecs_to_jiffies(timeout * 1000); 11960 timeleft = wait_event_timeout(done_q, 11961 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11962 timeout_req); 11963 spin_lock_irqsave(&phba->hbalock, iflags); 11964 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11965 11966 /* 11967 * IOCB timed out. Inform the wake iocb wait 11968 * completion function and set local status 11969 */ 11970 11971 iocb_completed = false; 11972 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11973 } 11974 spin_unlock_irqrestore(&phba->hbalock, iflags); 11975 if (iocb_completed) { 11976 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11977 "0331 IOCB wake signaled\n"); 11978 /* Note: we are not indicating if the IOCB has a success 11979 * status or not - that's for the caller to check. 11980 * IOCB_SUCCESS means just that the command was sent and 11981 * completed. Not that it completed successfully. 11982 * */ 11983 } else if (timeleft == 0) { 11984 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11985 "0338 IOCB wait timeout error - no " 11986 "wake response Data x%x\n", timeout); 11987 retval = IOCB_TIMEDOUT; 11988 } else { 11989 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11990 "0330 IOCB wake NOT set, " 11991 "Data x%x x%lx\n", 11992 timeout, (timeleft / jiffies)); 11993 retval = IOCB_TIMEDOUT; 11994 } 11995 } else if (retval == IOCB_BUSY) { 11996 if (phba->cfg_log_verbose & LOG_SLI) { 11997 list_for_each_entry(iocb, &pring->txq, list) { 11998 txq_cnt++; 11999 } 12000 list_for_each_entry(iocb, &pring->txcmplq, list) { 12001 txcmplq_cnt++; 12002 } 12003 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12004 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 12005 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 12006 } 12007 return retval; 12008 } else { 12009 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12010 "0332 IOCB wait issue failed, Data x%x\n", 12011 retval); 12012 retval = IOCB_ERROR; 12013 } 12014 12015 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 12016 if (lpfc_readl(phba->HCregaddr, &creg_val)) 12017 return IOCB_ERROR; 12018 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 12019 writel(creg_val, phba->HCregaddr); 12020 readl(phba->HCregaddr); /* flush */ 12021 } 12022 12023 if (prspiocbq) 12024 piocb->context2 = NULL; 12025 12026 piocb->context_un.wait_queue = NULL; 12027 piocb->iocb_cmpl = NULL; 12028 return retval; 12029 } 12030 12031 /** 12032 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 12033 * @phba: Pointer to HBA context object. 12034 * @pmboxq: Pointer to driver mailbox object. 12035 * @timeout: Timeout in number of seconds. 12036 * 12037 * This function issues the mailbox to firmware and waits for the 12038 * mailbox command to complete. If the mailbox command is not 12039 * completed within timeout seconds, it returns MBX_TIMEOUT. 
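 *
 * A typical synchronous caller follows this sketch (illustrative;
 * note the MBX_TIMEOUT rule described below):
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, timeout);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 *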
12040  * The function waits for the mailbox completion using an
12041  * interruptible wait. If the thread is woken up due to a
12042  * signal, MBX_TIMEOUT error is returned to the caller. The caller
12043  * should not free the mailbox resources if this function returns
12044  * MBX_TIMEOUT.
12045  * This function will sleep while waiting for mailbox completion.
12046  * So, this function should not be called from any context which
12047  * does not allow sleeping. Due to the same reason, this function
12048  * cannot be called with interrupts disabled.
12049  * This function assumes that the mailbox completion occurs while
12050  * this function sleeps. So, this function cannot be called from
12051  * the worker thread which processes mailbox completion.
12052  * This function is called in the context of HBA management
12053  * applications.
12054  * This function returns MBX_SUCCESS when successful.
12055  * This function is called with no lock held.
12056  **/
12057 int
12058 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
12059 			 uint32_t timeout)
12060 {
12061 	struct completion mbox_done;
12062 	int retval;
12063 	unsigned long flag;
12064 
12065 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12066 	/* setup the wake routine as the mailbox completion callback */
12067 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12068 
12069 	/* setup context3 field to pass wait_queue pointer to wake function */
12070 	init_completion(&mbox_done);
12071 	pmboxq->context3 = &mbox_done;
12072 	/* now issue the command */
12073 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12074 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12075 		wait_for_completion_timeout(&mbox_done,
12076 					    msecs_to_jiffies(timeout * 1000));
12077 
12078 		spin_lock_irqsave(&phba->hbalock, flag);
12079 		pmboxq->context3 = NULL;
12080 		/*
12081 		 * if the LPFC_MBX_WAKE flag is set the mailbox is completed;
12082 		 * otherwise do not free the resources.
12083 		 */
12084 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12085 			retval = MBX_SUCCESS;
12086 		} else {
12087 			retval = MBX_TIMEOUT;
12088 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12089 		}
12090 		spin_unlock_irqrestore(&phba->hbalock, flag);
12091 	}
12092 	return retval;
12093 }
12094 
12095 /**
12096  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12097  * @phba: Pointer to HBA context.
 * @mbx_action: LPFC_MBX_WAIT for a graceful shutdown, LPFC_MBX_NO_WAIT
 *              to flush immediately.
12098  *
12099  * This function is called to shut down the driver's mailbox sub-system.
12100  * It first marks the mailbox sub-system as blocked to prevent
12101  * asynchronous mailbox commands from being issued off the pending mailbox
12102  * command queue. If the mailbox command sub-system shutdown is due to
12103  * HBA error conditions such as EEH or ERATT, this routine shall invoke
12104  * the mailbox sub-system flush routine to forcefully bring down the
12105  * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12106  * as with offline or HBA function reset), this routine will wait for the
12107  * outstanding mailbox command to complete before invoking the mailbox
12108  * sub-system flush routine to gracefully bring down the mailbox sub-system.
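 *
 * Callers choose the action based on context, e.g. the HBA down path
 * uses a graceful wait (see lpfc_sli_hba_down above):
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);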
12109 **/
12110 void
12111 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12112 {
12113 	struct lpfc_sli *psli = &phba->sli;
12114 	unsigned long timeout;
12115 
12116 	if (mbx_action == LPFC_MBX_NO_WAIT) {
12117 		/* delay 100ms for port state */
12118 		msleep(100);
12119 		lpfc_sli_mbox_sys_flush(phba);
12120 		return;
12121 	}
12122 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12123 
12124 	/* Disable softirqs, including timers, from obtaining phba->hbalock */
12125 	local_bh_disable();
12126 
12127 	spin_lock_irq(&phba->hbalock);
12128 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12129 
12130 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12131 		/* Determine how long we might wait for the active mailbox
12132 		 * command to be gracefully completed by firmware.
12133 		 */
12134 		if (phba->sli.mbox_active)
12135 			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
12136 						phba->sli.mbox_active) *
12137 						1000) + jiffies;
12138 		spin_unlock_irq(&phba->hbalock);
12139 
12140 		/* Enable softirqs again, done with phba->hbalock */
12141 		local_bh_enable();
12142 
12143 		while (phba->sli.mbox_active) {
12144 			/* Check active mailbox complete status every 2ms */
12145 			msleep(2);
12146 			if (time_after(jiffies, timeout))
12147 				/* Timeout, let the mailbox flush routine
12148 				 * forcefully release the active mailbox command
12149 				 */
12150 				break;
12151 		}
12152 	} else {
12153 		spin_unlock_irq(&phba->hbalock);
12154 
12155 		/* Enable softirqs again, done with phba->hbalock */
12156 		local_bh_enable();
12157 	}
12158 
12159 	lpfc_sli_mbox_sys_flush(phba);
12160 }
12161 
12162 /**
12163 * lpfc_sli_eratt_read - read sli-3 error attention events
12164 * @phba: Pointer to HBA context.
12165 *
12166 * This function is called to read the SLI3 device error attention registers
12167 * for possible error attention events. The caller must hold the hostlock
12168 * with spin_lock_irq().
12169 *
12170 * This function returns 1 when there is Error Attention in the Host Attention
12171 * Register and returns 0 otherwise.
12172 **/
12173 static int
12174 lpfc_sli_eratt_read(struct lpfc_hba *phba)
12175 {
12176 	uint32_t ha_copy;
12177 
12178 	/* Read chip Host Attention (HA) register */
12179 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
12180 		goto unplug_err;
12181 
12182 	if (ha_copy & HA_ERATT) {
12183 		/* Read host status register to retrieve error event */
12184 		if (lpfc_sli_read_hs(phba))
12185 			goto unplug_err;
12186 
12187 		/* Check if a deferred error condition is active */
12188 		if ((HS_FFER1 & phba->work_hs) &&
12189 		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12190 		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
12191 			phba->hba_flag |= DEFER_ERATT;
12192 			/* Clear all interrupt enable conditions */
12193 			writel(0, phba->HCregaddr);
12194 			readl(phba->HCregaddr);
12195 		}
12196 
12197 		/* Set the driver HA work bitmap */
12198 		phba->work_ha |= HA_ERATT;
12199 		/* Indicate polling handles this ERATT */
12200 		phba->hba_flag |= HBA_ERATT_HANDLED;
12201 		return 1;
12202 	}
12203 	return 0;
12204 
12205 unplug_err:
12206 	/* Set the driver HS work bitmap */
12207 	phba->work_hs |= UNPLUG_ERR;
12208 	/* Set the driver HA work bitmap */
12209 	phba->work_ha |= HA_ERATT;
12210 	/* Indicate polling handles this ERATT */
12211 	phba->hba_flag |= HBA_ERATT_HANDLED;
12212 	return 1;
12213 }
12214 
12215 /**
12216 * lpfc_sli4_eratt_read - read sli-4 error attention events
12217 * @phba: Pointer to HBA context.
12218 *
12219 * This function is called to read the SLI4 device error attention registers
12220 * for possible error attention events.
The caller must hold the hostlock 12221 * with spin_lock_irq(). 12222 * 12223 * This function returns 1 when there is Error Attention in the Host Attention 12224 * Register and returns 0 otherwise. 12225 **/ 12226 static int 12227 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12228 { 12229 uint32_t uerr_sta_hi, uerr_sta_lo; 12230 uint32_t if_type, portsmphr; 12231 struct lpfc_register portstat_reg; 12232 12233 /* 12234 * For now, use the SLI4 device internal unrecoverable error 12235 * registers for error attention. This can be changed later. 12236 */ 12237 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12238 switch (if_type) { 12239 case LPFC_SLI_INTF_IF_TYPE_0: 12240 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12241 &uerr_sta_lo) || 12242 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12243 &uerr_sta_hi)) { 12244 phba->work_hs |= UNPLUG_ERR; 12245 phba->work_ha |= HA_ERATT; 12246 phba->hba_flag |= HBA_ERATT_HANDLED; 12247 return 1; 12248 } 12249 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12250 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12252 "1423 HBA Unrecoverable error: " 12253 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12254 "ue_mask_lo_reg=0x%x, " 12255 "ue_mask_hi_reg=0x%x\n", 12256 uerr_sta_lo, uerr_sta_hi, 12257 phba->sli4_hba.ue_mask_lo, 12258 phba->sli4_hba.ue_mask_hi); 12259 phba->work_status[0] = uerr_sta_lo; 12260 phba->work_status[1] = uerr_sta_hi; 12261 phba->work_ha |= HA_ERATT; 12262 phba->hba_flag |= HBA_ERATT_HANDLED; 12263 return 1; 12264 } 12265 break; 12266 case LPFC_SLI_INTF_IF_TYPE_2: 12267 case LPFC_SLI_INTF_IF_TYPE_6: 12268 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12269 &portstat_reg.word0) || 12270 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12271 &portsmphr)){ 12272 phba->work_hs |= UNPLUG_ERR; 12273 phba->work_ha |= HA_ERATT; 12274 phba->hba_flag |= HBA_ERATT_HANDLED; 12275 return 1; 12276 } 12277 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12278 phba->work_status[0] = 12279 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12280 phba->work_status[1] = 12281 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12282 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12283 "2885 Port Status Event: " 12284 "port status reg 0x%x, " 12285 "port smphr reg 0x%x, " 12286 "error 1=0x%x, error 2=0x%x\n", 12287 portstat_reg.word0, 12288 portsmphr, 12289 phba->work_status[0], 12290 phba->work_status[1]); 12291 phba->work_ha |= HA_ERATT; 12292 phba->hba_flag |= HBA_ERATT_HANDLED; 12293 return 1; 12294 } 12295 break; 12296 case LPFC_SLI_INTF_IF_TYPE_1: 12297 default: 12298 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12299 "2886 HBA Error Attention on unsupported " 12300 "if type %d.", if_type); 12301 return 1; 12302 } 12303 12304 return 0; 12305 } 12306 12307 /** 12308 * lpfc_sli_check_eratt - check error attention events 12309 * @phba: Pointer to HBA context. 12310 * 12311 * This function is called from timer soft interrupt context to check HBA's 12312 * error attention register bit for error attention events. 12313 * 12314 * This function returns 1 when there is Error Attention in the Host Attention 12315 * Register and returns 0 otherwise. 12316 **/ 12317 int 12318 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12319 { 12320 uint32_t ha_copy; 12321 12322 /* If somebody is waiting to handle an eratt, don't process it 12323 * here. The brdkill function will do this. 
12324 	 */
12325 	if (phba->link_flag & LS_IGNORE_ERATT)
12326 		return 0;
12327 
12328 	/* Check if interrupt handler handles this ERATT */
12329 	spin_lock_irq(&phba->hbalock);
12330 	if (phba->hba_flag & HBA_ERATT_HANDLED) {
12331 		/* Interrupt handler has handled ERATT */
12332 		spin_unlock_irq(&phba->hbalock);
12333 		return 0;
12334 	}
12335 
12336 	/*
12337 	 * If there is deferred error attention, do not check for error
12338 	 * attention
12339 	 */
12340 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12341 		spin_unlock_irq(&phba->hbalock);
12342 		return 0;
12343 	}
12344 
12345 	/* If PCI channel is offline, don't process it */
12346 	if (unlikely(pci_channel_offline(phba->pcidev))) {
12347 		spin_unlock_irq(&phba->hbalock);
12348 		return 0;
12349 	}
12350 
12351 	switch (phba->sli_rev) {
12352 	case LPFC_SLI_REV2:
12353 	case LPFC_SLI_REV3:
12354 		/* Read chip Host Attention (HA) register */
12355 		ha_copy = lpfc_sli_eratt_read(phba);
12356 		break;
12357 	case LPFC_SLI_REV4:
12358 		/* Read device Unrecoverable Error (UERR) registers */
12359 		ha_copy = lpfc_sli4_eratt_read(phba);
12360 		break;
12361 	default:
12362 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12363 				"0299 Invalid SLI revision (%d)\n",
12364 				phba->sli_rev);
12365 		ha_copy = 0;
12366 		break;
12367 	}
12368 	spin_unlock_irq(&phba->hbalock);
12369 
12370 	return ha_copy;
12371 }
12372 
12373 /**
12374 * lpfc_intr_state_check - Check device state for interrupt handling
12375 * @phba: Pointer to HBA context.
12376 *
12377 * This inline routine checks whether a device or its PCI slot is in a state
12378 * in which the interrupt should be handled.
12379 *
12380 * This function returns 0 if the device or the PCI slot is in a state in
12381 * which the interrupt should be handled, otherwise -EIO.
12382 */
12383 static inline int
12384 lpfc_intr_state_check(struct lpfc_hba *phba)
12385 {
12386 	/* If the pci channel is offline, ignore all the interrupts */
12387 	if (unlikely(pci_channel_offline(phba->pcidev)))
12388 		return -EIO;
12389 
12390 	/* Update device level interrupt statistics */
12391 	phba->sli.slistat.sli_intr++;
12392 
12393 	/* Ignore all interrupts during initialization. */
12394 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12395 		return -EIO;
12396 
12397 	return 0;
12398 }
12399 
12400 /**
12401 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12402 * @irq: Interrupt number.
12403 * @dev_id: The device context pointer.
12404 *
12405 * This function is directly called from the PCI layer as an interrupt
12406 * service routine when a device with the SLI-3 interface spec is enabled with
12407 * MSI-X multi-message interrupt mode and there are slow-path events in
12408 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12409 * interrupt mode, this function is called as part of the device-level
12410 * interrupt handler. When the PCI slot is in error recovery or the HBA
12411 * is undergoing initialization, the interrupt handler will not process
12412 * the interrupt. The link attention and ELS ring attention events are
12413 * handled by the worker thread. The interrupt handler signals the worker
12414 * thread and returns for these events. This function is called without
12415 * any lock held. It gets the hbalock to access and update SLI data
12416 * structures.
12417 *
12418 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12419 * returns IRQ_NONE.
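*
* For reference, in MSI-X mode this routine is wired up directly as a
* vector handler via request_irq() (an illustrative sketch; the vector
* variable and name string below are hypothetical, not the driver's
* exact registration code):
*
*	rc = request_irq(sp_vector, lpfc_sli_sp_intr_handler, 0,
*			 "lpfc-sp", phba);
*	if (rc)
*		return rc;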
12420 **/
12421 irqreturn_t
12422 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
12423 {
12424 	struct lpfc_hba *phba;
12425 	uint32_t ha_copy, hc_copy;
12426 	uint32_t work_ha_copy;
12427 	unsigned long status;
12428 	unsigned long iflag;
12429 	uint32_t control;
12430 
12431 	MAILBOX_t *mbox, *pmbox;
12432 	struct lpfc_vport *vport;
12433 	struct lpfc_nodelist *ndlp;
12434 	struct lpfc_dmabuf *mp;
12435 	LPFC_MBOXQ_t *pmb;
12436 	int rc;
12437 
12438 	/*
12439 	 * Get the driver's phba structure from the dev_id and
12440 	 * assume the HBA is not interrupting.
12441 	 */
12442 	phba = (struct lpfc_hba *)dev_id;
12443 
12444 	if (unlikely(!phba))
12445 		return IRQ_NONE;
12446 
12447 	/*
12448 	 * Extra care is needed when this function is invoked as an
12449 	 * individual interrupt handler in MSI-X multi-message interrupt mode
12450 	 */
12451 	if (phba->intr_type == MSIX) {
12452 		/* Check device state for handling interrupt */
12453 		if (lpfc_intr_state_check(phba))
12454 			return IRQ_NONE;
12455 		/* Need to read HA REG for slow-path events */
12456 		spin_lock_irqsave(&phba->hbalock, iflag);
12457 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
12458 			goto unplug_error;
12459 		/* If somebody is waiting to handle an eratt, don't process it
12460 		 * here. The brdkill function will do this.
12461 		 */
12462 		if (phba->link_flag & LS_IGNORE_ERATT)
12463 			ha_copy &= ~HA_ERATT;
12464 		/* Check the need for handling ERATT in interrupt handler */
12465 		if (ha_copy & HA_ERATT) {
12466 			if (phba->hba_flag & HBA_ERATT_HANDLED)
12467 				/* ERATT polling has handled ERATT */
12468 				ha_copy &= ~HA_ERATT;
12469 			else
12470 				/* Indicate interrupt handler handles ERATT */
12471 				phba->hba_flag |= HBA_ERATT_HANDLED;
12472 		}
12473 
12474 		/*
12475 		 * If there is deferred error attention, do not check for any
12476 		 * interrupt.
12477 		 */
12478 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12479 			spin_unlock_irqrestore(&phba->hbalock, iflag);
12480 			return IRQ_NONE;
12481 		}
12482 
12483 		/* Clear up only attention source related to slow-path */
12484 		if (lpfc_readl(phba->HCregaddr, &hc_copy))
12485 			goto unplug_error;
12486 
12487 		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
12488 			HC_LAINT_ENA | HC_ERINT_ENA),
12489 			phba->HCregaddr);
12490 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
12491 			phba->HAregaddr);
12492 		writel(hc_copy, phba->HCregaddr);
12493 		readl(phba->HAregaddr); /* flush */
12494 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12495 	} else
12496 		ha_copy = phba->ha_copy;
12497 
12498 	work_ha_copy = ha_copy & phba->work_ha_mask;
12499 
12500 	if (work_ha_copy) {
12501 		if (work_ha_copy & HA_LATT) {
12502 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
12503 				/*
12504 				 * Turn off Link Attention interrupts
12505 				 * until CLEAR_LA done
12506 				 */
12507 				spin_lock_irqsave(&phba->hbalock, iflag);
12508 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
12509 				if (lpfc_readl(phba->HCregaddr, &control))
12510 					goto unplug_error;
12511 				control &= ~HC_LAINT_ENA;
12512 				writel(control, phba->HCregaddr);
12513 				readl(phba->HCregaddr); /* flush */
12514 				spin_unlock_irqrestore(&phba->hbalock, iflag);
12515 			}
12516 			else
12517 				work_ha_copy &= ~HA_LATT;
12518 		}
12519 
12520 		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
12521 			/*
12522 			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
12523 			 * the only slow ring.
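			 * Each ring owns a 4-bit attention nibble in the HA
			 * register, so the receive-attention bits for ring N
			 * are isolated with
			 * (work_ha_copy & (HA_RXMASK << (4 * N))) >> (4 * N),
			 * which is exactly what the code below does with
			 * N == LPFC_ELS_RING.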
12524 			 */
12525 			status = (work_ha_copy &
12526 				 (HA_RXMASK << (4*LPFC_ELS_RING)));
12527 			status >>= (4*LPFC_ELS_RING);
12528 			if (status & HA_RXMASK) {
12529 				spin_lock_irqsave(&phba->hbalock, iflag);
12530 				if (lpfc_readl(phba->HCregaddr, &control))
12531 					goto unplug_error;
12532 
12533 				lpfc_debugfs_slow_ring_trc(phba,
12534 				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
12535 					control, status,
12536 					(uint32_t)phba->sli.slistat.sli_intr);
12537 
12538 				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
12539 					lpfc_debugfs_slow_ring_trc(phba,
12540 						"ISR Disable ring:"
12541 						"pwork:x%x hawork:x%x wait:x%x",
12542 						phba->work_ha, work_ha_copy,
12543 						(uint32_t)((unsigned long)
12544 						&phba->work_waitq));
12545 
12546 					control &=
12547 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
12548 					writel(control, phba->HCregaddr);
12549 					readl(phba->HCregaddr); /* flush */
12550 				}
12551 				else {
12552 					lpfc_debugfs_slow_ring_trc(phba,
12553 						"ISR slow ring: pwork:"
12554 						"x%x hawork:x%x wait:x%x",
12555 						phba->work_ha, work_ha_copy,
12556 						(uint32_t)((unsigned long)
12557 						&phba->work_waitq));
12558 				}
12559 				spin_unlock_irqrestore(&phba->hbalock, iflag);
12560 			}
12561 		}
12562 		spin_lock_irqsave(&phba->hbalock, iflag);
12563 		if (work_ha_copy & HA_ERATT) {
12564 			if (lpfc_sli_read_hs(phba))
12565 				goto unplug_error;
12566 			/*
12567 			 * Check if a deferred error condition
12568 			 * is active
12569 			 */
12570 			if ((HS_FFER1 & phba->work_hs) &&
12571 			    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
12572 			      HS_FFER6 | HS_FFER7 | HS_FFER8) &
12573 			     phba->work_hs)) {
12574 				phba->hba_flag |= DEFER_ERATT;
12575 				/* Clear all interrupt enable conditions */
12576 				writel(0, phba->HCregaddr);
12577 				readl(phba->HCregaddr);
12578 			}
12579 		}
12580 
12581 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
12582 			pmb = phba->sli.mbox_active;
12583 			pmbox = &pmb->u.mb;
12584 			mbox = phba->mbox;
12585 			vport = pmb->vport;
12586 
12587 			/* First check out the status word */
12588 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
12589 			if (pmbox->mbxOwner != OWN_HOST) {
12590 				spin_unlock_irqrestore(&phba->hbalock, iflag);
12591 				/*
12592 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
12593 				 * mbxStatus <status>
12594 				 */
12595 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12596 						LOG_SLI,
12597 						"(%d):0304 Stray Mailbox "
12598 						"Interrupt mbxCommand x%x "
12599 						"mbxStatus x%x\n",
12600 						(vport ? vport->vpi : 0),
12601 						pmbox->mbxCommand,
12602 						pmbox->mbxStatus);
12603 				/* clear mailbox attention bit */
12604 				work_ha_copy &= ~HA_MBATT;
12605 			} else {
12606 				phba->sli.mbox_active = NULL;
12607 				spin_unlock_irqrestore(&phba->hbalock, iflag);
12608 				phba->last_completion_time = jiffies;
12609 				del_timer(&phba->sli.mbox_tmo);
12610 				if (pmb->mbox_cmpl) {
12611 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
12612 							MAILBOX_CMD_SIZE);
12613 					if (pmb->out_ext_byte_len &&
12614 						pmb->ctx_buf)
12615 						lpfc_sli_pcimem_bcopy(
12616 						phba->mbox_ext,
12617 						pmb->ctx_buf,
12618 						pmb->out_ext_byte_len);
12619 				}
12620 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12621 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12622 
12623 					lpfc_debugfs_disc_trc(vport,
12624 						LPFC_DISC_TRC_MBOX_VPORT,
12625 						"MBOX dflt rpi: "
12626 						"status:x%x rpi:x%x",
12627 						(uint32_t)pmbox->mbxStatus,
12628 						pmbox->un.varWords[0], 0);
12629 
12630 					if (!pmbox->mbxStatus) {
12631 						mp = (struct lpfc_dmabuf *)
12632 							(pmb->ctx_buf);
12633 						ndlp = (struct lpfc_nodelist *)
12634 							pmb->ctx_ndlp;
12635 
12636 						/* Reg_LOGIN of dflt RPI was
12637 						 * successful. Now let's get
12638 						 * rid of the RPI using the
12639 						 * same mbox buffer.
12640 						 */
12641 						lpfc_unreg_login(phba,
12642 							vport->vpi,
12643 							pmbox->un.varWords[0],
12644 							pmb);
12645 						pmb->mbox_cmpl =
12646 							lpfc_mbx_cmpl_dflt_rpi;
12647 						pmb->ctx_buf = mp;
12648 						pmb->ctx_ndlp = ndlp;
12649 						pmb->vport = vport;
12650 						rc = lpfc_sli_issue_mbox(phba,
12651 								pmb,
12652 								MBX_NOWAIT);
12653 						if (rc != MBX_BUSY)
12654 							lpfc_printf_log(phba,
12655 							KERN_ERR,
12656 							LOG_MBOX | LOG_SLI,
12657 							"0350 rc should have "
12658 							"been MBX_BUSY\n");
12659 						if (rc != MBX_NOT_FINISHED)
12660 							goto send_current_mbox;
12661 					}
12662 				}
12663 				spin_lock_irqsave(
12664 						&phba->pport->work_port_lock,
12665 						iflag);
12666 				phba->pport->work_port_events &=
12667 					~WORKER_MBOX_TMO;
12668 				spin_unlock_irqrestore(
12669 						&phba->pport->work_port_lock,
12670 						iflag);
12671 				lpfc_mbox_cmpl_put(phba, pmb);
12672 			}
12673 		} else
12674 			spin_unlock_irqrestore(&phba->hbalock, iflag);
12675 
12676 		if ((work_ha_copy & HA_MBATT) &&
12677 		    (phba->sli.mbox_active == NULL)) {
12678 send_current_mbox:
12679 			/* Process next mailbox command if there is one */
12680 			do {
12681 				rc = lpfc_sli_issue_mbox(phba, NULL,
12682 							MBX_NOWAIT);
12683 			} while (rc == MBX_NOT_FINISHED);
12684 			if (rc != MBX_SUCCESS)
12685 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12686 						LOG_SLI, "0349 rc should be "
12687 						"MBX_SUCCESS\n");
12688 		}
12689 
12690 		spin_lock_irqsave(&phba->hbalock, iflag);
12691 		phba->work_ha |= work_ha_copy;
12692 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12693 		lpfc_worker_wake_up(phba);
12694 	}
12695 	return IRQ_HANDLED;
12696 unplug_error:
12697 	spin_unlock_irqrestore(&phba->hbalock, iflag);
12698 	return IRQ_HANDLED;
12699 
12700 } /* lpfc_sli_sp_intr_handler */
12701 
12702 /**
12703 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12704 * @irq: Interrupt number.
12705 * @dev_id: The device context pointer.
12706 *
12707 * This function is directly called from the PCI layer as an interrupt
12708 * service routine when a device with the SLI-3 interface spec is enabled with
12709 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12710 * ring event in the HBA. However, when the device is enabled with either
12711 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12712 * device-level interrupt handler. When the PCI slot is in error recovery
12713 * or the HBA is undergoing initialization, the interrupt handler will not
12714 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12715 * the interrupt context. This function is called without any lock held.
12716 * It gets the hbalock to access and update SLI data structures.
12717 *
12718 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12719 * returns IRQ_NONE.
12720 **/
12721 irqreturn_t
12722 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12723 {
12724 	struct lpfc_hba *phba;
12725 	uint32_t ha_copy;
12726 	unsigned long status;
12727 	unsigned long iflag;
12728 	struct lpfc_sli_ring *pring;
12729 
12730 	/* Get the driver's phba structure from the dev_id and
12731 	 * assume the HBA is not interrupting.
12732 	 */
12733 	phba = (struct lpfc_hba *) dev_id;
12734 
12735 	if (unlikely(!phba))
12736 		return IRQ_NONE;
12737 
12738 	/*
12739 	 * Extra care is needed when this function is invoked as an
12740 	 * individual interrupt handler in MSI-X multi-message interrupt mode
12741 	 */
12742 	if (phba->intr_type == MSIX) {
12743 		/* Check device state for handling interrupt */
12744 		if (lpfc_intr_state_check(phba))
12745 			return IRQ_NONE;
12746 		/* Need to read HA REG for FCP ring and other ring events */
12747 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
12748 			return IRQ_HANDLED;
12749 		/* Clear up only attention source related to fast-path */
12750 		spin_lock_irqsave(&phba->hbalock, iflag);
12751 		/*
12752 		 * If there is deferred error attention, do not check for
12753 		 * any interrupt.
12754 		 */
12755 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12756 			spin_unlock_irqrestore(&phba->hbalock, iflag);
12757 			return IRQ_NONE;
12758 		}
12759 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12760 			phba->HAregaddr);
12761 		readl(phba->HAregaddr); /* flush */
12762 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12763 	} else
12764 		ha_copy = phba->ha_copy;
12765 
12766 	/*
12767 	 * Process all events on FCP ring. Take the optimized path for FCP IO.
12768 	 */
12769 	ha_copy &= ~(phba->work_ha_mask);
12770 
12771 	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12772 	status >>= (4*LPFC_FCP_RING);
12773 	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12774 	if (status & HA_RXMASK)
12775 		lpfc_sli_handle_fast_ring_event(phba, pring, status);
12776 
12777 	if (phba->cfg_multi_ring_support == 2) {
12778 		/*
12779 		 * Process all events on extra ring. Take the optimized path
12780 		 * for extra ring IO.
12781 		 */
12782 		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12783 		status >>= (4*LPFC_EXTRA_RING);
12784 		if (status & HA_RXMASK) {
12785 			lpfc_sli_handle_fast_ring_event(phba,
12786 					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
12787 					status);
12788 		}
12789 	}
12790 	return IRQ_HANDLED;
12791 } /* lpfc_sli_fp_intr_handler */
12792 
12793 /**
12794 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12795 * @irq: Interrupt number.
12796 * @dev_id: The device context pointer.
12797 *
12798 * This function is the HBA device-level interrupt handler to a device with
12799 * the SLI-3 interface spec, called from the PCI layer when either MSI or
12800 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12801 * requires driver attention. This function invokes the slow-path interrupt
12802 * attention handling function and fast-path interrupt attention handling
12803 * function in turn to process the relevant HBA attention events. This
12804 * function is called without any lock held. It gets the hbalock to access
12805 * and update SLI data structures.
12806 *
12807 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12808 * returns IRQ_NONE.
12809 **/
12810 irqreturn_t
12811 lpfc_sli_intr_handler(int irq, void *dev_id)
12812 {
12813 	struct lpfc_hba *phba;
12814 	irqreturn_t sp_irq_rc, fp_irq_rc;
12815 	unsigned long status1, status2;
12816 	uint32_t hc_copy;
12817 
12818 	/*
12819 	 * Get the driver's phba structure from the dev_id and
12820 	 * assume the HBA is not interrupting.
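	 *
	 * Note: this is also the handler registered with request_irq() when
	 * the port runs in INTx or MSI mode, roughly as sketched below (a
	 * hedged illustration, not the driver's exact registration call):
	 *
	 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
	 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);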
12821 */ 12822 phba = (struct lpfc_hba *) dev_id; 12823 12824 if (unlikely(!phba)) 12825 return IRQ_NONE; 12826 12827 /* Check device state for handling interrupt */ 12828 if (lpfc_intr_state_check(phba)) 12829 return IRQ_NONE; 12830 12831 spin_lock(&phba->hbalock); 12832 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12833 spin_unlock(&phba->hbalock); 12834 return IRQ_HANDLED; 12835 } 12836 12837 if (unlikely(!phba->ha_copy)) { 12838 spin_unlock(&phba->hbalock); 12839 return IRQ_NONE; 12840 } else if (phba->ha_copy & HA_ERATT) { 12841 if (phba->hba_flag & HBA_ERATT_HANDLED) 12842 /* ERATT polling has handled ERATT */ 12843 phba->ha_copy &= ~HA_ERATT; 12844 else 12845 /* Indicate interrupt handler handles ERATT */ 12846 phba->hba_flag |= HBA_ERATT_HANDLED; 12847 } 12848 12849 /* 12850 * If there is deferred error attention, do not check for any interrupt. 12851 */ 12852 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12853 spin_unlock(&phba->hbalock); 12854 return IRQ_NONE; 12855 } 12856 12857 /* Clear attention sources except link and error attentions */ 12858 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12859 spin_unlock(&phba->hbalock); 12860 return IRQ_HANDLED; 12861 } 12862 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12863 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12864 phba->HCregaddr); 12865 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12866 writel(hc_copy, phba->HCregaddr); 12867 readl(phba->HAregaddr); /* flush */ 12868 spin_unlock(&phba->hbalock); 12869 12870 /* 12871 * Invokes slow-path host attention interrupt handling as appropriate. 12872 */ 12873 12874 /* status of events with mailbox and link attention */ 12875 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12876 12877 /* status of events with ELS ring */ 12878 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12879 status2 >>= (4*LPFC_ELS_RING); 12880 12881 if (status1 || (status2 & HA_RXMASK)) 12882 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12883 else 12884 sp_irq_rc = IRQ_NONE; 12885 12886 /* 12887 * Invoke fast-path host attention interrupt handling as appropriate. 12888 */ 12889 12890 /* status of events with FCP ring */ 12891 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12892 status1 >>= (4*LPFC_FCP_RING); 12893 12894 /* status of events with extra ring */ 12895 if (phba->cfg_multi_ring_support == 2) { 12896 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12897 status2 >>= (4*LPFC_EXTRA_RING); 12898 } else 12899 status2 = 0; 12900 12901 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12902 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12903 else 12904 fp_irq_rc = IRQ_NONE; 12905 12906 /* Return device-level interrupt handling status */ 12907 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12908 } /* lpfc_sli_intr_handler */ 12909 12910 /** 12911 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12912 * @phba: pointer to lpfc hba data structure. 12913 * 12914 * This routine is invoked by the worker thread to process all the pending 12915 * SLI4 els abort xri events. 
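*
* The producer side of this handoff is lpfc_sli4_sp_handle_abort_xri_wcqe()
* later in this file, which queues the event and raises the flag consumed
* here, roughly:
*
*	spin_lock_irqsave(&phba->hbalock, iflags);
*	list_add_tail(&cq_event->list,
*		      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
*	phba->hba_flag |= ELS_XRI_ABORT_EVENT;
*	spin_unlock_irqrestore(&phba->hbalock, iflags);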
12916 **/ 12917 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12918 { 12919 struct lpfc_cq_event *cq_event; 12920 12921 /* First, declare the els xri abort event has been handled */ 12922 spin_lock_irq(&phba->hbalock); 12923 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12924 spin_unlock_irq(&phba->hbalock); 12925 /* Now, handle all the els xri abort events */ 12926 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12927 /* Get the first event from the head of the event queue */ 12928 spin_lock_irq(&phba->hbalock); 12929 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12930 cq_event, struct lpfc_cq_event, list); 12931 spin_unlock_irq(&phba->hbalock); 12932 /* Notify aborted XRI for ELS work queue */ 12933 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12934 /* Free the event processed back to the free pool */ 12935 lpfc_sli4_cq_event_release(phba, cq_event); 12936 } 12937 } 12938 12939 /** 12940 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12941 * @phba: pointer to lpfc hba data structure 12942 * @pIocbIn: pointer to the rspiocbq 12943 * @pIocbOut: pointer to the cmdiocbq 12944 * @wcqe: pointer to the complete wcqe 12945 * 12946 * This routine transfers the fields of a command iocbq to a response iocbq 12947 * by copying all the IOCB fields from command iocbq and transferring the 12948 * completion status information from the complete wcqe. 12949 **/ 12950 static void 12951 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12952 struct lpfc_iocbq *pIocbIn, 12953 struct lpfc_iocbq *pIocbOut, 12954 struct lpfc_wcqe_complete *wcqe) 12955 { 12956 int numBdes, i; 12957 unsigned long iflags; 12958 uint32_t status, max_response; 12959 struct lpfc_dmabuf *dmabuf; 12960 struct ulp_bde64 *bpl, bde; 12961 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12962 12963 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12964 sizeof(struct lpfc_iocbq) - offset); 12965 /* Map WCQE parameters into irspiocb parameters */ 12966 status = bf_get(lpfc_wcqe_c_status, wcqe); 12967 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12968 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12969 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12970 pIocbIn->iocb.un.fcpi.fcpi_parm = 12971 pIocbOut->iocb.un.fcpi.fcpi_parm - 12972 wcqe->total_data_placed; 12973 else 12974 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12975 else { 12976 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12977 switch (pIocbOut->iocb.ulpCommand) { 12978 case CMD_ELS_REQUEST64_CR: 12979 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12980 bpl = (struct ulp_bde64 *)dmabuf->virt; 12981 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12982 max_response = bde.tus.f.bdeSize; 12983 break; 12984 case CMD_GEN_REQUEST64_CR: 12985 max_response = 0; 12986 if (!pIocbOut->context3) 12987 break; 12988 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12989 sizeof(struct ulp_bde64); 12990 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12991 bpl = (struct ulp_bde64 *)dmabuf->virt; 12992 for (i = 0; i < numBdes; i++) { 12993 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12994 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12995 max_response += bde.tus.f.bdeSize; 12996 } 12997 break; 12998 default: 12999 max_response = wcqe->total_data_placed; 13000 break; 13001 } 13002 if (max_response < wcqe->total_data_placed) 13003 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 13004 else 13005 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 13006 wcqe->total_data_placed; 13007 } 
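	/*
	 * At this point un.genreq64.bdl.bdeSize holds
	 * min(max_response, total_data_placed): the byte count reported
	 * by the HBA, clamped so it never exceeds what the response
	 * BDEs can actually hold.
	 */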
13008 
13009 	/* Convert BG errors for completion status */
13010 	if (status == CQE_STATUS_DI_ERROR) {
13011 		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
13012 
13013 		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
13014 			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
13015 		else
13016 			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
13017 
13018 		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
13019 		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
13020 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13021 				BGS_GUARD_ERR_MASK;
13022 		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
13023 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13024 				BGS_APPTAG_ERR_MASK;
13025 		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
13026 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13027 				BGS_REFTAG_ERR_MASK;
13028 
13029 		/* Check to see if there was any good data before the error */
13030 		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
13031 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13032 				BGS_HI_WATER_MARK_PRESENT_MASK;
13033 			pIocbIn->iocb.unsli3.sli3_bg.bghm =
13034 				wcqe->total_data_placed;
13035 		}
13036 
13037 		/*
13038 		 * Set ALL the error bits to indicate we don't know what
13039 		 * type of error it is.
13040 		 */
13041 		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13042 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13043 				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13044 				BGS_GUARD_ERR_MASK);
13045 	}
13046 
13047 	/* Pick up HBA exchange busy condition */
13048 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13049 		spin_lock_irqsave(&phba->hbalock, iflags);
13050 		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13051 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13052 	}
13053 }
13054 
13055 /**
13056 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13057 * @phba: Pointer to HBA context object.
13058 * @irspiocbq: Pointer to the response IOCBQ carrying the ELS WCQE.
13059 *
13060 * This routine handles an ELS work-queue completion event and constructs
13061 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13062 * discovery engine to handle.
13063 *
13064 * Return: Pointer to the receive IOCBQ, NULL otherwise.
13065 **/ 13066 static struct lpfc_iocbq * 13067 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 13068 struct lpfc_iocbq *irspiocbq) 13069 { 13070 struct lpfc_sli_ring *pring; 13071 struct lpfc_iocbq *cmdiocbq; 13072 struct lpfc_wcqe_complete *wcqe; 13073 unsigned long iflags; 13074 13075 pring = lpfc_phba_elsring(phba); 13076 if (unlikely(!pring)) 13077 return NULL; 13078 13079 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 13080 pring->stats.iocb_event++; 13081 /* Look up the ELS command IOCB and create pseudo response IOCB */ 13082 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13083 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13084 if (unlikely(!cmdiocbq)) { 13085 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13086 "0386 ELS complete with no corresponding " 13087 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 13088 wcqe->word0, wcqe->total_data_placed, 13089 wcqe->parameter, wcqe->word3); 13090 lpfc_sli_release_iocbq(phba, irspiocbq); 13091 return NULL; 13092 } 13093 13094 spin_lock_irqsave(&pring->ring_lock, iflags); 13095 /* Put the iocb back on the txcmplq */ 13096 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 13097 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13098 13099 /* Fake the irspiocbq and copy necessary response information */ 13100 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 13101 13102 return irspiocbq; 13103 } 13104 13105 inline struct lpfc_cq_event * 13106 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 13107 { 13108 struct lpfc_cq_event *cq_event; 13109 13110 /* Allocate a new internal CQ_EVENT entry */ 13111 cq_event = lpfc_sli4_cq_event_alloc(phba); 13112 if (!cq_event) { 13113 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13114 "0602 Failed to alloc CQ_EVENT entry\n"); 13115 return NULL; 13116 } 13117 13118 /* Move the CQE into the event */ 13119 memcpy(&cq_event->cqe, entry, size); 13120 return cq_event; 13121 } 13122 13123 /** 13124 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event 13125 * @phba: Pointer to HBA context object. 13126 * @cqe: Pointer to mailbox completion queue entry. 13127 * 13128 * This routine process a mailbox completion queue entry with asynchronous 13129 * event. 13130 * 13131 * Return: true if work posted to worker thread, otherwise false. 13132 **/ 13133 static bool 13134 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13135 { 13136 struct lpfc_cq_event *cq_event; 13137 unsigned long iflags; 13138 13139 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13140 "0392 Async Event: word0:x%x, word1:x%x, " 13141 "word2:x%x, word3:x%x\n", mcqe->word0, 13142 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 13143 13144 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 13145 if (!cq_event) 13146 return false; 13147 spin_lock_irqsave(&phba->hbalock, iflags); 13148 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 13149 /* Set the async event flag */ 13150 phba->hba_flag |= ASYNC_EVENT; 13151 spin_unlock_irqrestore(&phba->hbalock, iflags); 13152 13153 return true; 13154 } 13155 13156 /** 13157 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 13158 * @phba: Pointer to HBA context object. 13159 * @cqe: Pointer to mailbox completion queue entry. 13160 * 13161 * This routine process a mailbox completion queue entry with mailbox 13162 * completion event. 13163 * 13164 * Return: true if work posted to worker thread, otherwise false. 
13165 **/
13166 static bool
13167 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13168 {
13169 	uint32_t mcqe_status;
13170 	MAILBOX_t *mbox, *pmbox;
13171 	struct lpfc_mqe *mqe;
13172 	struct lpfc_vport *vport;
13173 	struct lpfc_nodelist *ndlp;
13174 	struct lpfc_dmabuf *mp;
13175 	unsigned long iflags;
13176 	LPFC_MBOXQ_t *pmb;
13177 	bool workposted = false;
13178 	int rc;
13179 
13180 	/* If not a mailbox-complete MCQE, just handle mailbox consume and bail */
13181 	if (!bf_get(lpfc_trailer_completed, mcqe))
13182 		goto out_no_mqe_complete;
13183 
13184 	/* Get the reference to the active mbox command */
13185 	spin_lock_irqsave(&phba->hbalock, iflags);
13186 	pmb = phba->sli.mbox_active;
13187 	if (unlikely(!pmb)) {
13188 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13189 				"1832 No pending MBOX command to handle\n");
13190 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13191 		goto out_no_mqe_complete;
13192 	}
13193 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13194 	mqe = &pmb->u.mqe;
13195 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
13196 	mbox = phba->mbox;
13197 	vport = pmb->vport;
13198 
13199 	/* Reset heartbeat timer */
13200 	phba->last_completion_time = jiffies;
13201 	del_timer(&phba->sli.mbox_tmo);
13202 
13203 	/* Move mbox data to caller's mailbox region, do endian swapping */
13204 	if (pmb->mbox_cmpl && mbox)
13205 		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13206 
13207 	/*
13208 	 * For mcqe errors, conditionally move a modified error code to
13209 	 * the mbox so that the error will not be missed.
13210 	 */
13211 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13212 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13213 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13214 			bf_set(lpfc_mqe_status, mqe,
13215 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
13216 	}
13217 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13218 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13219 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13220 				      "MBOX dflt rpi: status:x%x rpi:x%x",
13221 				      mcqe_status,
13222 				      pmbox->un.varWords[0], 0);
13223 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13224 			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
13225 			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
13226 			/* Reg_LOGIN of dflt RPI was successful. Now let's get
13227 			 * rid of the RPI using the same mbox buffer.
13228 */ 13229 lpfc_unreg_login(phba, vport->vpi, 13230 pmbox->un.varWords[0], pmb); 13231 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13232 pmb->ctx_buf = mp; 13233 pmb->ctx_ndlp = ndlp; 13234 pmb->vport = vport; 13235 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13236 if (rc != MBX_BUSY) 13237 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13238 LOG_SLI, "0385 rc should " 13239 "have been MBX_BUSY\n"); 13240 if (rc != MBX_NOT_FINISHED) 13241 goto send_current_mbox; 13242 } 13243 } 13244 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13245 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13246 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13247 13248 /* There is mailbox completion work to do */ 13249 spin_lock_irqsave(&phba->hbalock, iflags); 13250 __lpfc_mbox_cmpl_put(phba, pmb); 13251 phba->work_ha |= HA_MBATT; 13252 spin_unlock_irqrestore(&phba->hbalock, iflags); 13253 workposted = true; 13254 13255 send_current_mbox: 13256 spin_lock_irqsave(&phba->hbalock, iflags); 13257 /* Release the mailbox command posting token */ 13258 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13259 /* Setting active mailbox pointer need to be in sync to flag clear */ 13260 phba->sli.mbox_active = NULL; 13261 if (bf_get(lpfc_trailer_consumed, mcqe)) 13262 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13263 spin_unlock_irqrestore(&phba->hbalock, iflags); 13264 /* Wake up worker thread to post the next pending mailbox command */ 13265 lpfc_worker_wake_up(phba); 13266 return workposted; 13267 13268 out_no_mqe_complete: 13269 spin_lock_irqsave(&phba->hbalock, iflags); 13270 if (bf_get(lpfc_trailer_consumed, mcqe)) 13271 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13272 spin_unlock_irqrestore(&phba->hbalock, iflags); 13273 return false; 13274 } 13275 13276 /** 13277 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13278 * @phba: Pointer to HBA context object. 13279 * @cqe: Pointer to mailbox completion queue entry. 13280 * 13281 * This routine process a mailbox completion queue entry, it invokes the 13282 * proper mailbox complete handling or asynchronous event handling routine 13283 * according to the MCQE's async bit. 13284 * 13285 * Return: true if work posted to worker thread, otherwise false. 13286 **/ 13287 static bool 13288 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13289 struct lpfc_cqe *cqe) 13290 { 13291 struct lpfc_mcqe mcqe; 13292 bool workposted; 13293 13294 cq->CQ_mbox++; 13295 13296 /* Copy the mailbox MCQE and convert endian order as needed */ 13297 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13298 13299 /* Invoke the proper event handling routine */ 13300 if (!bf_get(lpfc_trailer_async, &mcqe)) 13301 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13302 else 13303 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13304 return workposted; 13305 } 13306 13307 /** 13308 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13309 * @phba: Pointer to HBA context object. 13310 * @cq: Pointer to associated CQ 13311 * @wcqe: Pointer to work-queue completion queue entry. 13312 * 13313 * This routine handles an ELS work-queue completion event. 13314 * 13315 * Return: true if work posted to worker thread, otherwise false. 
13316 **/ 13317 static bool 13318 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13319 struct lpfc_wcqe_complete *wcqe) 13320 { 13321 struct lpfc_iocbq *irspiocbq; 13322 unsigned long iflags; 13323 struct lpfc_sli_ring *pring = cq->pring; 13324 int txq_cnt = 0; 13325 int txcmplq_cnt = 0; 13326 13327 /* Check for response status */ 13328 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13329 /* Log the error status */ 13330 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13331 "0357 ELS CQE error: status=x%x: " 13332 "CQE: %08x %08x %08x %08x\n", 13333 bf_get(lpfc_wcqe_c_status, wcqe), 13334 wcqe->word0, wcqe->total_data_placed, 13335 wcqe->parameter, wcqe->word3); 13336 } 13337 13338 /* Get an irspiocbq for later ELS response processing use */ 13339 irspiocbq = lpfc_sli_get_iocbq(phba); 13340 if (!irspiocbq) { 13341 if (!list_empty(&pring->txq)) 13342 txq_cnt++; 13343 if (!list_empty(&pring->txcmplq)) 13344 txcmplq_cnt++; 13345 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13346 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13347 "els_txcmplq_cnt=%d\n", 13348 txq_cnt, phba->iocb_cnt, 13349 txcmplq_cnt); 13350 return false; 13351 } 13352 13353 /* Save off the slow-path queue event for work thread to process */ 13354 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13355 spin_lock_irqsave(&phba->hbalock, iflags); 13356 list_add_tail(&irspiocbq->cq_event.list, 13357 &phba->sli4_hba.sp_queue_event); 13358 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13359 spin_unlock_irqrestore(&phba->hbalock, iflags); 13360 13361 return true; 13362 } 13363 13364 /** 13365 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13366 * @phba: Pointer to HBA context object. 13367 * @wcqe: Pointer to work-queue completion queue entry. 13368 * 13369 * This routine handles slow-path WQ entry consumed event by invoking the 13370 * proper WQ release routine to the slow-path WQ. 13371 **/ 13372 static void 13373 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13374 struct lpfc_wcqe_release *wcqe) 13375 { 13376 /* sanity check on queue memory */ 13377 if (unlikely(!phba->sli4_hba.els_wq)) 13378 return; 13379 /* Check for the slow-path ELS work queue */ 13380 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13381 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13382 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13383 else 13384 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13385 "2579 Slow-path wqe consume event carries " 13386 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13387 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13388 phba->sli4_hba.els_wq->queue_id); 13389 } 13390 13391 /** 13392 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13393 * @phba: Pointer to HBA context object. 13394 * @cq: Pointer to a WQ completion queue. 13395 * @wcqe: Pointer to work-queue completion queue entry. 13396 * 13397 * This routine handles an XRI abort event. 13398 * 13399 * Return: true if work posted to worker thread, otherwise false. 
13400 **/ 13401 static bool 13402 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13403 struct lpfc_queue *cq, 13404 struct sli4_wcqe_xri_aborted *wcqe) 13405 { 13406 bool workposted = false; 13407 struct lpfc_cq_event *cq_event; 13408 unsigned long iflags; 13409 13410 switch (cq->subtype) { 13411 case LPFC_IO: 13412 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); 13413 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13414 /* Notify aborted XRI for NVME work queue */ 13415 if (phba->nvmet_support) 13416 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13417 } 13418 workposted = false; 13419 break; 13420 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13421 case LPFC_ELS: 13422 cq_event = lpfc_cq_event_setup( 13423 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13424 if (!cq_event) 13425 return false; 13426 cq_event->hdwq = cq->hdwq; 13427 spin_lock_irqsave(&phba->hbalock, iflags); 13428 list_add_tail(&cq_event->list, 13429 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13430 /* Set the els xri abort event flag */ 13431 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13432 spin_unlock_irqrestore(&phba->hbalock, iflags); 13433 workposted = true; 13434 break; 13435 default: 13436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13437 "0603 Invalid CQ subtype %d: " 13438 "%08x %08x %08x %08x\n", 13439 cq->subtype, wcqe->word0, wcqe->parameter, 13440 wcqe->word2, wcqe->word3); 13441 workposted = false; 13442 break; 13443 } 13444 return workposted; 13445 } 13446 13447 #define FC_RCTL_MDS_DIAGS 0xF4 13448 13449 /** 13450 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13451 * @phba: Pointer to HBA context object. 13452 * @rcqe: Pointer to receive-queue completion queue entry. 13453 * 13454 * This routine process a receive-queue completion queue entry. 13455 * 13456 * Return: true if work posted to worker thread, otherwise false. 
13457 **/ 13458 static bool 13459 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13460 { 13461 bool workposted = false; 13462 struct fc_frame_header *fc_hdr; 13463 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13464 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13465 struct lpfc_nvmet_tgtport *tgtp; 13466 struct hbq_dmabuf *dma_buf; 13467 uint32_t status, rq_id; 13468 unsigned long iflags; 13469 13470 /* sanity check on queue memory */ 13471 if (unlikely(!hrq) || unlikely(!drq)) 13472 return workposted; 13473 13474 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13475 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13476 else 13477 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13478 if (rq_id != hrq->queue_id) 13479 goto out; 13480 13481 status = bf_get(lpfc_rcqe_status, rcqe); 13482 switch (status) { 13483 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13484 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13485 "2537 Receive Frame Truncated!!\n"); 13486 /* fall through */ 13487 case FC_STATUS_RQ_SUCCESS: 13488 spin_lock_irqsave(&phba->hbalock, iflags); 13489 lpfc_sli4_rq_release(hrq, drq); 13490 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13491 if (!dma_buf) { 13492 hrq->RQ_no_buf_found++; 13493 spin_unlock_irqrestore(&phba->hbalock, iflags); 13494 goto out; 13495 } 13496 hrq->RQ_rcv_buf++; 13497 hrq->RQ_buf_posted--; 13498 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13499 13500 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13501 13502 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 13503 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 13504 spin_unlock_irqrestore(&phba->hbalock, iflags); 13505 /* Handle MDS Loopback frames */ 13506 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); 13507 break; 13508 } 13509 13510 /* save off the frame for the work thread to process */ 13511 list_add_tail(&dma_buf->cq_event.list, 13512 &phba->sli4_hba.sp_queue_event); 13513 /* Frame received */ 13514 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13515 spin_unlock_irqrestore(&phba->hbalock, iflags); 13516 workposted = true; 13517 break; 13518 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13519 if (phba->nvmet_support) { 13520 tgtp = phba->targetport->private; 13521 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13522 "6402 RQE Error x%x, posted %d err_cnt " 13523 "%d: %x %x %x\n", 13524 status, hrq->RQ_buf_posted, 13525 hrq->RQ_no_posted_buf, 13526 atomic_read(&tgtp->rcv_fcp_cmd_in), 13527 atomic_read(&tgtp->rcv_fcp_cmd_out), 13528 atomic_read(&tgtp->xmt_fcp_release)); 13529 } 13530 /* fallthrough */ 13531 13532 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13533 hrq->RQ_no_posted_buf++; 13534 /* Post more buffers if possible */ 13535 spin_lock_irqsave(&phba->hbalock, iflags); 13536 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13537 spin_unlock_irqrestore(&phba->hbalock, iflags); 13538 workposted = true; 13539 break; 13540 } 13541 out: 13542 return workposted; 13543 } 13544 13545 /** 13546 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13547 * @phba: Pointer to HBA context object. 13548 * @cq: Pointer to the completion queue. 13549 * @cqe: Pointer to a completion queue entry. 13550 * 13551 * This routine process a slow-path work-queue or receive queue completion queue 13552 * entry. 13553 * 13554 * Return: true if work posted to worker thread, otherwise false. 
13555 **/ 13556 static bool 13557 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13558 struct lpfc_cqe *cqe) 13559 { 13560 struct lpfc_cqe cqevt; 13561 bool workposted = false; 13562 13563 /* Copy the work queue CQE and convert endian order if needed */ 13564 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13565 13566 /* Check and process for different type of WCQE and dispatch */ 13567 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13568 case CQE_CODE_COMPL_WQE: 13569 /* Process the WQ/RQ complete event */ 13570 phba->last_completion_time = jiffies; 13571 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13572 (struct lpfc_wcqe_complete *)&cqevt); 13573 break; 13574 case CQE_CODE_RELEASE_WQE: 13575 /* Process the WQ release event */ 13576 lpfc_sli4_sp_handle_rel_wcqe(phba, 13577 (struct lpfc_wcqe_release *)&cqevt); 13578 break; 13579 case CQE_CODE_XRI_ABORTED: 13580 /* Process the WQ XRI abort event */ 13581 phba->last_completion_time = jiffies; 13582 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13583 (struct sli4_wcqe_xri_aborted *)&cqevt); 13584 break; 13585 case CQE_CODE_RECEIVE: 13586 case CQE_CODE_RECEIVE_V1: 13587 /* Process the RQ event */ 13588 phba->last_completion_time = jiffies; 13589 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13590 (struct lpfc_rcqe *)&cqevt); 13591 break; 13592 default: 13593 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13594 "0388 Not a valid WCQE code: x%x\n", 13595 bf_get(lpfc_cqe_code, &cqevt)); 13596 break; 13597 } 13598 return workposted; 13599 } 13600 13601 /** 13602 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13603 * @phba: Pointer to HBA context object. 13604 * @eqe: Pointer to fast-path event queue entry. 13605 * 13606 * This routine process a event queue entry from the slow-path event queue. 13607 * It will check the MajorCode and MinorCode to determine this is for a 13608 * completion event on a completion queue, if not, an error shall be logged 13609 * and just return. Otherwise, it will get to the corresponding completion 13610 * queue and process all the entries on that completion queue, rearm the 13611 * completion queue, and then return. 13612 * 13613 **/ 13614 static void 13615 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13616 struct lpfc_queue *speq) 13617 { 13618 struct lpfc_queue *cq = NULL, *childq; 13619 uint16_t cqid; 13620 13621 /* Get the reference to the corresponding CQ */ 13622 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13623 13624 list_for_each_entry(childq, &speq->child_list, list) { 13625 if (childq->queue_id == cqid) { 13626 cq = childq; 13627 break; 13628 } 13629 } 13630 if (unlikely(!cq)) { 13631 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13632 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13633 "0365 Slow-path CQ identifier " 13634 "(%d) does not exist\n", cqid); 13635 return; 13636 } 13637 13638 /* Save EQ associated with this CQ */ 13639 cq->assoc_qp = speq; 13640 13641 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) 13642 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13643 "0390 Cannot schedule soft IRQ " 13644 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13645 cqid, cq->queue_id, raw_smp_processor_id()); 13646 } 13647 13648 /** 13649 * __lpfc_sli4_process_cq - Process elements of a CQ 13650 * @phba: Pointer to HBA context object. 
13651 * @cq: Pointer to CQ to be processed 13652 * @handler: Routine to process each cqe 13653 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 13654 * 13655 * This routine processes completion queue entries in a CQ. While a valid 13656 * queue element is found, the handler is called. During processing checks 13657 * are made for periodic doorbell writes to let the hardware know of 13658 * element consumption. 13659 * 13660 * If the max limit on cqes to process is hit, or there are no more valid 13661 * entries, the loop stops. If we processed a sufficient number of elements, 13662 * meaning there is sufficient load, rather than rearming and generating 13663 * another interrupt, a cq rescheduling delay will be set. A delay of 0 13664 * indicates no rescheduling. 13665 * 13666 * Returns True if work scheduled, False otherwise. 13667 **/ 13668 static bool 13669 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 13670 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 13671 struct lpfc_cqe *), unsigned long *delay) 13672 { 13673 struct lpfc_cqe *cqe; 13674 bool workposted = false; 13675 int count = 0, consumed = 0; 13676 bool arm = true; 13677 13678 /* default - no reschedule */ 13679 *delay = 0; 13680 13681 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 13682 goto rearm_and_exit; 13683 13684 /* Process all the entries to the CQ */ 13685 cq->q_flag = 0; 13686 cqe = lpfc_sli4_cq_get(cq); 13687 while (cqe) { 13688 workposted |= handler(phba, cq, cqe); 13689 __lpfc_sli4_consume_cqe(phba, cq, cqe); 13690 13691 consumed++; 13692 if (!(++count % cq->max_proc_limit)) 13693 break; 13694 13695 if (!(count % cq->notify_interval)) { 13696 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13697 LPFC_QUEUE_NOARM); 13698 consumed = 0; 13699 cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; 13700 } 13701 13702 if (count == LPFC_NVMET_CQ_NOTIFY) 13703 cq->q_flag |= HBA_NVMET_CQ_NOTIFY; 13704 13705 cqe = lpfc_sli4_cq_get(cq); 13706 } 13707 if (count >= phba->cfg_cq_poll_threshold) { 13708 *delay = 1; 13709 arm = false; 13710 } 13711 13712 /* Track the max number of CQEs processed in 1 EQ */ 13713 if (count > cq->CQ_max_cqe) 13714 cq->CQ_max_cqe = count; 13715 13716 cq->assoc_qp->EQ_cqe_cnt += count; 13717 13718 /* Catch the no cq entry condition */ 13719 if (unlikely(count == 0)) 13720 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13721 "0369 No entry from completion queue " 13722 "qid=%d\n", cq->queue_id); 13723 13724 cq->queue_claimed = 0; 13725 13726 rearm_and_exit: 13727 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13728 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 13729 13730 return workposted; 13731 } 13732 13733 /** 13734 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13735 * @cq: pointer to CQ to process 13736 * 13737 * This routine calls the cq processing routine with a handler specific 13738 * to the type of queue bound to it. 13739 * 13740 * The CQ routine returns two values: the first is the calling status, 13741 * which indicates whether work was queued to the background discovery 13742 * thread. If true, the routine should wakeup the discovery thread; 13743 * the second is the delay parameter. If non-zero, rather than rearming 13744 * the CQ and yet another interrupt, the CQ handler should be queued so 13745 * that it is processed in a subsequent polling action. The value of 13746 * the delay indicates when to reschedule it. 
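*
* The resulting calling pattern (mirrored by the function body below) is:
*
*	workposted = __lpfc_sli4_process_cq(phba, cq, handler, &delay);
*	if (delay)
*		queue_delayed_work_on(cq->chann, phba->wq,
*				      &cq->sched_spwork, delay);
*	if (workposted)
*		lpfc_worker_wake_up(phba);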
13747 **/ 13748 static void 13749 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 13750 { 13751 struct lpfc_hba *phba = cq->phba; 13752 unsigned long delay; 13753 bool workposted = false; 13754 13755 /* Process and rearm the CQ */ 13756 switch (cq->type) { 13757 case LPFC_MCQ: 13758 workposted |= __lpfc_sli4_process_cq(phba, cq, 13759 lpfc_sli4_sp_handle_mcqe, 13760 &delay); 13761 break; 13762 case LPFC_WCQ: 13763 if (cq->subtype == LPFC_IO) 13764 workposted |= __lpfc_sli4_process_cq(phba, cq, 13765 lpfc_sli4_fp_handle_cqe, 13766 &delay); 13767 else 13768 workposted |= __lpfc_sli4_process_cq(phba, cq, 13769 lpfc_sli4_sp_handle_cqe, 13770 &delay); 13771 break; 13772 default: 13773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13774 "0370 Invalid completion queue type (%d)\n", 13775 cq->type); 13776 return; 13777 } 13778 13779 if (delay) { 13780 if (!queue_delayed_work_on(cq->chann, phba->wq, 13781 &cq->sched_spwork, delay)) 13782 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13783 "0394 Cannot schedule soft IRQ " 13784 "for cqid=%d on CPU %d\n", 13785 cq->queue_id, cq->chann); 13786 } 13787 13788 /* wake up worker thread if there are works to be done */ 13789 if (workposted) 13790 lpfc_worker_wake_up(phba); 13791 } 13792 13793 /** 13794 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 13795 * interrupt 13796 * @work: pointer to work element 13797 * 13798 * translates from the work handler and calls the slow-path handler. 13799 **/ 13800 static void 13801 lpfc_sli4_sp_process_cq(struct work_struct *work) 13802 { 13803 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 13804 13805 __lpfc_sli4_sp_process_cq(cq); 13806 } 13807 13808 /** 13809 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 13810 * @work: pointer to work element 13811 * 13812 * translates from the work handler and calls the slow-path handler. 13813 **/ 13814 static void 13815 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 13816 { 13817 struct lpfc_queue *cq = container_of(to_delayed_work(work), 13818 struct lpfc_queue, sched_spwork); 13819 13820 __lpfc_sli4_sp_process_cq(cq); 13821 } 13822 13823 /** 13824 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13825 * @phba: Pointer to HBA context object. 13826 * @cq: Pointer to associated CQ 13827 * @wcqe: Pointer to work-queue completion queue entry. 13828 * 13829 * This routine process a fast-path work queue completion entry from fast-path 13830 * event queue for FCP command response completion. 13831 **/ 13832 static void 13833 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13834 struct lpfc_wcqe_complete *wcqe) 13835 { 13836 struct lpfc_sli_ring *pring = cq->pring; 13837 struct lpfc_iocbq *cmdiocbq; 13838 struct lpfc_iocbq irspiocbq; 13839 unsigned long iflags; 13840 13841 /* Check for response status */ 13842 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13843 /* If resource errors reported from HBA, reduce queue 13844 * depth of the SCSI device. 
                 */
                if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
                     IOSTAT_LOCAL_REJECT)) &&
                    ((wcqe->parameter & IOERR_PARAM_MASK) ==
                     IOERR_NO_RESOURCES))
                        phba->lpfc_rampdown_queue_depth(phba);

                /* Log the error status */
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "0373 FCP CQE error: status=x%x: "
                                "CQE: %08x %08x %08x %08x\n",
                                bf_get(lpfc_wcqe_c_status, wcqe),
                                wcqe->word0, wcqe->total_data_placed,
                                wcqe->parameter, wcqe->word3);
        }

        /* Look up the FCP command IOCB and create pseudo response IOCB */
        spin_lock_irqsave(&pring->ring_lock, iflags);
        pring->stats.iocb_event++;
        spin_unlock_irqrestore(&pring->ring_lock, iflags);
        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
        if (unlikely(!cmdiocbq)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0374 FCP complete with no corresponding "
                                "cmdiocb: iotag (%d)\n",
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
                return;
        }
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
        if (cmdiocbq->iocb_cmpl == NULL) {
                if (cmdiocbq->wqe_cmpl) {
                        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
                                spin_lock_irqsave(&phba->hbalock, iflags);
                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
                                spin_unlock_irqrestore(&phba->hbalock, iflags);
                        }

                        /* Pass the cmd_iocb and the wcqe to the upper layer */
                        (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
                        return;
                }
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0375 FCP cmdiocb not callback function "
                                "iotag: (%d)\n",
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
                return;
        }

        /* Fake the irspiocb and copy necessary response information */
        lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
                spin_lock_irqsave(&phba->hbalock, iflags);
                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }

        /* Pass the cmd_iocb and the rsp state to the upper layer */
        (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the matching work queue.
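 *
 * Illustrative sketch only: the lookup pattern used below, matching the
 * WQ id carried in the release CQE against the CQ's child work queues:
 *
 *	uint16_t wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
 *
 *	list_for_each_entry(childwq, &cq->child_list, list)
 *		if (childwq->queue_id == wqid)
 *			lpfc_sli4_wq_release(childwq,
 *				bf_get(lpfc_wcqe_r_wqe_index, wcqe));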
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                             struct lpfc_wcqe_release *wcqe)
{
        struct lpfc_queue *childwq;
        bool wqid_matched = false;
        uint16_t hba_wqid;

        /* Check for fast-path FCP work queue release */
        hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
        list_for_each_entry(childwq, &cq->child_list, list) {
                if (childwq->queue_id == hba_wqid) {
                        lpfc_sli4_wq_release(childwq,
                                        bf_get(lpfc_wcqe_r_wqe_index, wcqe));
                        if (childwq->q_flag & HBA_NVMET_WQFULL)
                                lpfc_nvmet_wqfull_process(phba, childwq);
                        wqid_matched = true;
                        break;
                }
        }
        /* Report warning log message if no match found */
        if (wqid_matched != true)
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "2580 Fast-path wqe consume event carries "
                                "miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}

/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                            struct lpfc_rcqe *rcqe)
{
        bool workposted = false;
        struct lpfc_queue *hrq;
        struct lpfc_queue *drq;
        struct rqb_dmabuf *dma_buf;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, rq_id;
        unsigned long iflags;
        uint32_t fctl, idx;

        if ((phba->nvmet_support == 0) ||
            (phba->sli4_hba.nvmet_cqset == NULL))
                return workposted;

        idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
        hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
        drq = phba->sli4_hba.nvmet_mrq_data[idx];

        /* sanity check on queue memory */
        if (unlikely(!hrq) || unlikely(!drq))
                return workposted;

        if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
                rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
        else
                rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

        if ((phba->nvmet_support == 0) ||
            (rq_id != hrq->queue_id))
                return workposted;

        status = bf_get(lpfc_rcqe_status, rcqe);
        switch (status) {
        case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "6126 Receive Frame Truncated!!\n");
                /* fall through */
        case FC_STATUS_RQ_SUCCESS:
                spin_lock_irqsave(&phba->hbalock, iflags);
                lpfc_sli4_rq_release(hrq, drq);
                dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
                if (!dma_buf) {
                        hrq->RQ_no_buf_found++;
                        spin_unlock_irqrestore(&phba->hbalock, iflags);
                        goto out;
                }
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                hrq->RQ_rcv_buf++;
                hrq->RQ_buf_posted--;
                fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

                /* Just some basic sanity checks on FCP Command frame */
                fctl = (fc_hdr->fh_f_ctl[0] << 16 |
                        fc_hdr->fh_f_ctl[1] << 8 |
                        fc_hdr->fh_f_ctl[2]);
                if (((fctl &
                    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
                    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
                    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
                        goto drop;

                if (fc_hdr->fh_type ==
FC_TYPE_FCP) {
                        dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
                        lpfc_nvmet_unsol_fcp_event(
                                phba, idx, dma_buf, cq->isr_timestamp,
                                cq->q_flag & HBA_NVMET_CQ_NOTIFY);
                        return false;
                }
drop:
                lpfc_rq_buf_free(phba, &dma_buf->hbuf);
                break;
        case FC_STATUS_INSUFF_BUF_FRM_DISC:
                if (phba->nvmet_support) {
                        tgtp = phba->targetport->private;
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
                                        "6401 RQE Error x%x, posted %d err_cnt "
                                        "%d: %x %x %x\n",
                                        status, hrq->RQ_buf_posted,
                                        hrq->RQ_no_posted_buf,
                                        atomic_read(&tgtp->rcv_fcp_cmd_in),
                                        atomic_read(&tgtp->rcv_fcp_cmd_out),
                                        atomic_read(&tgtp->xmt_fcp_release));
                }
                /* fallthrough */

        case FC_STATUS_INSUFF_BUF_NEED_BUF:
                hrq->RQ_no_posted_buf++;
                /* Post more buffers if possible */
                break;
        }
out:
        return workposted;
}

/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                         struct lpfc_cqe *cqe)
{
        struct lpfc_wcqe_release wcqe;
        bool workposted = false;

        /* Copy the work queue CQE and convert endian order if needed */
        lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

        /* Check and process for different type of WCQE and dispatch */
        switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
        case CQE_CODE_COMPL_WQE:
        case CQE_CODE_NVME_ERSP:
                cq->CQ_wq++;
                /* Process the WQ complete event */
                phba->last_completion_time = jiffies;
                if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
                        lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
                                (struct lpfc_wcqe_complete *)&wcqe);
                break;
        case CQE_CODE_RELEASE_WQE:
                cq->CQ_release_wqe++;
                /* Process the WQ release event */
                lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
                                (struct lpfc_wcqe_release *)&wcqe);
                break;
        case CQE_CODE_XRI_ABORTED:
                cq->CQ_xri_aborted++;
                /* Process the WQ XRI abort event */
                phba->last_completion_time = jiffies;
                workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
                                (struct sli4_wcqe_xri_aborted *)&wcqe);
                break;
        case CQE_CODE_RECEIVE_V1:
        case CQE_CODE_RECEIVE:
                phba->last_completion_time = jiffies;
                if (cq->subtype == LPFC_NVMET) {
                        workposted = lpfc_sli4_nvmet_handle_rcqe(
                                phba, cq, (struct lpfc_rcqe *)&wcqe);
                }
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0144 Not a valid CQE code: x%x\n",
                                bf_get(lpfc_wcqe_c_code, &wcqe));
                break;
        }
        return workposted;
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is a
 * completion event on a completion queue; if not, an error shall be logged
 * and the routine will just return. Otherwise, it will get to the
 * corresponding completion queue and process all the entries on the
 * completion queue, rearm the completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                         struct lpfc_eqe *eqe)
{
        struct lpfc_queue *cq = NULL;
        uint32_t qidx = eq->hdwq;
        uint16_t cqid, id;

        if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0366 Not a valid completion "
                                "event: majorcode=x%x, minorcode=x%x\n",
                                bf_get_le32(lpfc_eqe_major_code, eqe),
                                bf_get_le32(lpfc_eqe_minor_code, eqe));
                return;
        }

        /* Get the reference to the corresponding CQ */
        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

        /* Use the fast lookup method first */
        if (cqid <= phba->sli4_hba.cq_max) {
                cq = phba->sli4_hba.cq_lookup[cqid];
                if (cq)
                        goto work_cq;
        }

        /* Next check for NVMET completion */
        if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
                id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
                if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
                        /* Process NVMET unsol rcv */
                        cq = phba->sli4_hba.nvmet_cqset[cqid - id];
                        goto process_cq;
                }
        }

        if (phba->sli4_hba.nvmels_cq &&
            (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
                /* Process NVME unsol rcv */
                cq = phba->sli4_hba.nvmels_cq;
        }

        /* Otherwise this is a Slow path event */
        if (cq == NULL) {
                lpfc_sli4_sp_handle_eqe(phba, eqe,
                                        phba->sli4_hba.hdwq[qidx].hba_eq);
                return;
        }

process_cq:
        if (unlikely(cqid != cq->queue_id)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0368 Miss-matched fast-path completion "
                                "queue identifier: eqcqid=%d, fcpcqid=%d\n",
                                cqid, cq->queue_id);
                return;
        }

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
        if (phba->ktime_on)
                cq->isr_timestamp = ktime_get_ns();
        else
                cq->isr_timestamp = 0;
#endif
        if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0363 Cannot schedule soft IRQ "
                                "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
                                cqid, cq->queue_id, raw_smp_processor_id());
}

/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
 * @cq: Pointer to CQ to be processed
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wakeup the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
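 *
 * Illustrative sketch only: the work items that drive this routine are
 * wired up at queue allocation time (see lpfc_sli4_queue_alloc() later in
 * this file):
 *
 *	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
 *	INIT_DELAYED_WORK(&queue->sched_irqwork,
 *			  lpfc_sli4_dly_hba_process_cq);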
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
        struct lpfc_hba *phba = cq->phba;
        unsigned long delay;
        bool workposted = false;

        /* process and rearm the CQ */
        workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
                                             &delay);

        if (delay) {
                if (!queue_delayed_work_on(cq->chann, phba->wq,
                                           &cq->sched_irqwork, delay))
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0367 Cannot schedule soft IRQ "
                                "for cqid=%d on CPU %d\n",
                                cq->queue_id, cq->chann);
        }

        /* wake up worker thread if there are works to be done */
        if (workposted)
                lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * interrupt
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
        struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

        __lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * Translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
        struct lpfc_queue *cq = container_of(to_delayed_work(work),
                                        struct lpfc_queue, sched_irqwork);

        __lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-4 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQ to FCP CQ mapping is one-to-one, such that the FCP EQ index is
 * equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
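 *
 * Illustrative sketch only: the registration shape for one MSI-X vector;
 * the actual request_irq() call lives in the driver's init code, not here:
 *
 *	rc = request_irq(pci_irq_vector(phba->pcidev, idx),
 *			 &lpfc_sli4_hba_intr_handler, 0,
 *			 name, &phba->sli4_hba.hba_eq_hdl[idx]);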
14294 **/ 14295 irqreturn_t 14296 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14297 { 14298 struct lpfc_hba *phba; 14299 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14300 struct lpfc_queue *fpeq; 14301 unsigned long iflag; 14302 int ecount = 0; 14303 int hba_eqidx; 14304 struct lpfc_eq_intr_info *eqi; 14305 uint32_t icnt; 14306 14307 /* Get the driver's phba structure from the dev_id */ 14308 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14309 phba = hba_eq_hdl->phba; 14310 hba_eqidx = hba_eq_hdl->idx; 14311 14312 if (unlikely(!phba)) 14313 return IRQ_NONE; 14314 if (unlikely(!phba->sli4_hba.hdwq)) 14315 return IRQ_NONE; 14316 14317 /* Get to the EQ struct associated with this vector */ 14318 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; 14319 if (unlikely(!fpeq)) 14320 return IRQ_NONE; 14321 14322 /* Check device state for handling interrupt */ 14323 if (unlikely(lpfc_intr_state_check(phba))) { 14324 /* Check again for link_state with lock held */ 14325 spin_lock_irqsave(&phba->hbalock, iflag); 14326 if (phba->link_state < LPFC_LINK_DOWN) 14327 /* Flush, clear interrupt, and rearm the EQ */ 14328 lpfc_sli4_eqcq_flush(phba, fpeq); 14329 spin_unlock_irqrestore(&phba->hbalock, iflag); 14330 return IRQ_NONE; 14331 } 14332 14333 eqi = phba->sli4_hba.eq_info; 14334 icnt = this_cpu_inc_return(eqi->icnt); 14335 fpeq->last_cpu = raw_smp_processor_id(); 14336 14337 if (icnt > LPFC_EQD_ISR_TRIGGER && 14338 fpeq->q_flag & HBA_EQ_DELAY_CHK && 14339 phba->cfg_auto_imax && 14340 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && 14341 phba->sli.sli_flag & LPFC_SLI_USE_EQDR) 14342 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); 14343 14344 /* process and rearm the EQ */ 14345 ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); 14346 14347 if (unlikely(ecount == 0)) { 14348 fpeq->EQ_no_entry++; 14349 if (phba->intr_type == MSIX) 14350 /* MSI-X treated interrupt served as no EQ share INT */ 14351 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14352 "0358 MSI-X interrupt with no EQE\n"); 14353 else 14354 /* Non MSI-X treated on interrupt as EQ share INT */ 14355 return IRQ_NONE; 14356 } 14357 14358 return IRQ_HANDLED; 14359 } /* lpfc_sli4_fp_intr_handler */ 14360 14361 /** 14362 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14363 * @irq: Interrupt number. 14364 * @dev_id: The device context pointer. 14365 * 14366 * This function is the device-level interrupt handler to device with SLI-4 14367 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14368 * interrupt mode is enabled and there is an event in the HBA which requires 14369 * driver attention. This function invokes the slow-path interrupt attention 14370 * handling function and fast-path interrupt attention handling function in 14371 * turn to process the relevant HBA attention events. This function is called 14372 * without any lock held. It gets the hbalock to access and update SLI data 14373 * structures. 14374 * 14375 * This function returns IRQ_HANDLED when interrupt is handled, else it 14376 * returns IRQ_NONE. 14377 **/ 14378 irqreturn_t 14379 lpfc_sli4_intr_handler(int irq, void *dev_id) 14380 { 14381 struct lpfc_hba *phba; 14382 irqreturn_t hba_irq_rc; 14383 bool hba_handled = false; 14384 int qidx; 14385 14386 /* Get the driver's phba structure from the dev_id */ 14387 phba = (struct lpfc_hba *)dev_id; 14388 14389 if (unlikely(!phba)) 14390 return IRQ_NONE; 14391 14392 /* 14393 * Invoke fast-path host attention interrupt handling as appropriate. 
 */
        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
                hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
                                        &phba->sli4_hba.hba_eq_hdl[qidx]);
                if (hba_irq_rc == IRQ_HANDLED)
                        hba_handled |= true;
        }

        return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
        struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
        struct lpfc_queue *eq;
        int i = 0;

        rcu_read_lock();

        list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
                i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
        if (!list_empty(&phba->poll_list))
                mod_timer(&phba->cpuhp_poll_timer,
                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));

        rcu_read_unlock();
}

inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
        struct lpfc_hba *phba = eq->phba;
        int i = 0;

        /*
         * Unlocking an irq is one of the entry points to check
         * for re-schedule, but we are good for io submission
         * path as midlayer does a get_cpu to glue us in. Flush
         * out the invalidate queue so we can see the updated
         * value for flag.
         */
        smp_rmb();

        if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
                /* We will not likely get the completion for the caller
                 * during this iteration but I guess that's fine.
                 * Future io's coming on this eq should be able to
                 * pick it up. As for the case of single io's, they
                 * will be handled through a sched from polling timer
                 * function which is currently triggered every 1msec.
                 */
                i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);

        return i;
}

static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
        struct lpfc_hba *phba = eq->phba;

        if (list_empty(&phba->poll_list)) {
                timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
                /* kickstart slowpath processing for this eq */
                mod_timer(&phba->cpuhp_poll_timer,
                          jiffies + msecs_to_jiffies(LPFC_POLL_HB));
        }

        list_add_rcu(&eq->_poll_list, &phba->poll_list);
        synchronize_rcu();
}

static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
        struct lpfc_hba *phba = eq->phba;

        /* Disable slowpath processing for this eq. Kick start the eq
         * by RE-ARMING the eq's ASAP
         */
        list_del_rcu(&eq->_poll_list);
        synchronize_rcu();

        if (list_empty(&phba->poll_list))
                del_timer_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
        struct lpfc_queue *eq, *next;

        list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
                list_del(&eq->_poll_list);

        INIT_LIST_HEAD(&phba->poll_list);
        synchronize_rcu();
}

static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
        if (mode == eq->mode)
                return;
        /*
         * currently this function is only called during a hotplug
         * event and the cpu on which this function is executing
         * is going offline. By now the hotplug has instructed
         * the scheduler to remove this cpu from cpu active mask.
         * So we don't need to worry about being put aside by the
         * scheduler for a high priority process.
         * Yes, interrupts could come but they are known to retire ASAP.
         */

        /* Disable polling in the fastpath */
        WRITE_ONCE(eq->mode, mode);
        /* flush out the store buffer */
        smp_wmb();

        /*
         * Add this eq to the polling list and start polling. For
         * a grace period both interrupt handler and poller will
         * try to process the eq _but_ that's fine. We have a
         * synchronization mechanism in place (queue_claimed) to
         * deal with it. This is just a draining phase for the
         * interrupt handler (not eq's) as we have guaranteed through
         * barrier that all the CPUs have seen the new CQ_POLLED
         * state, which will effectively disable the REARMING of
         * the EQ. The whole idea is eq's die off eventually as
         * we are not rearming EQ's anymore.
         */
        mode ? lpfc_sli4_add_to_poll_list(eq) :
               lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
        struct lpfc_hba *phba = eq->phba;

        __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

        /* Kick start for the pending io's in h/w.
         * Once we switch back to interrupt processing on an eq
         * the io path completion will only arm eq's when it
         * receives a completion. But since eq's are in disarmed
         * state it doesn't receive a completion. This creates
         * a deadlock scenario.
         */
        phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
        struct lpfc_dmabuf *dmabuf;

        if (!queue)
                return;

        if (!list_empty(&queue->wq_list))
                list_del(&queue->wq_list);

        while (!list_empty(&queue->page_list)) {
                list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
                                 list);
                dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
                                  dmabuf->virt, dmabuf->phys);
                kfree(dmabuf);
        }
        if (queue->rqbp) {
                lpfc_free_rq_buffer(queue->phba, queue);
                kfree(queue->rqbp);
        }

        if (!list_empty(&queue->cpu_list))
                list_del(&queue->cpu_list);

        kfree(queue);
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
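 *
 * Illustrative sketch only: a typical allocate/create/destroy/free life
 * cycle for an EQ ("imax" and "cpu" are supplied by the caller; the page
 * size and EQ sizing fields are the ones used elsewhere in this driver):
 *
 *	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				      phba->sli4_hba.eq_esize,
 *				      phba->sli4_hba.eq_ecount, cpu);
 *	if (!qdesc)
 *		return -ENOMEM;
 *	rc = lpfc_eq_create(phba, qdesc, imax);
 *	...
 *	lpfc_eq_destroy(phba, qdesc);
 *	lpfc_sli4_queue_free(qdesc);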
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
                      uint32_t entry_size, uint32_t entry_count, int cpu)
{
        struct lpfc_queue *queue;
        struct lpfc_dmabuf *dmabuf;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
        uint16_t x, pgcnt;

        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = page_size;

        pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

        /* If needed, adjust page count to match the max the adapter supports */
        if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
                pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

        queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
                             GFP_KERNEL, cpu_to_node(cpu));
        if (!queue)
                return NULL;

        INIT_LIST_HEAD(&queue->list);
        INIT_LIST_HEAD(&queue->_poll_list);
        INIT_LIST_HEAD(&queue->wq_list);
        INIT_LIST_HEAD(&queue->wqfull_list);
        INIT_LIST_HEAD(&queue->page_list);
        INIT_LIST_HEAD(&queue->child_list);
        INIT_LIST_HEAD(&queue->cpu_list);

        /* Set queue parameters now. If the system cannot provide memory
         * resources, the free routine needs to know what was allocated.
         */
        queue->page_count = pgcnt;
        queue->q_pgs = (void **)&queue[1];
        queue->entry_cnt_per_pg = hw_page_size / entry_size;
        queue->entry_size = entry_size;
        queue->entry_count = entry_count;
        queue->page_size = hw_page_size;
        queue->phba = phba;

        for (x = 0; x < queue->page_count; x++) {
                dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
                                      dev_to_node(&phba->pcidev->dev));
                if (!dmabuf)
                        goto out_fail;
                dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
                                                  hw_page_size, &dmabuf->phys,
                                                  GFP_KERNEL);
                if (!dmabuf->virt) {
                        kfree(dmabuf);
                        goto out_fail;
                }
                dmabuf->buffer_tag = x;
                list_add_tail(&dmabuf->list, &queue->page_list);
                /* use lpfc_sli4_qe to index a particular entry in this page */
                queue->q_pgs[x] = dmabuf->virt;
        }
        INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
        INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
        INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
        INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

        /* notify_interval will be set during q creation */

        return queue;
out_fail:
        lpfc_sli4_queue_free(queue);
        return NULL;
}

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
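 *
 * Illustrative sketch only: typical use when computing a queue's doorbell
 * address, with the BAR set and offset taken from a queue-create mailbox
 * response:
 *
 *	bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
 *	if (!bar_memmap_p)
 *		return -ENOMEM;
 *	wq->db_regaddr = bar_memmap_p + db_offset;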
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
        if (!phba->pcidev)
                return NULL;

        switch (pci_barset) {
        case WQ_PCI_BAR_0_AND_1:
                return phba->pci_bar0_memmap_p;
        case WQ_PCI_BAR_2_AND_3:
                return phba->pci_bar2_memmap_p;
        case WQ_PCI_BAR_4_AND_5:
                return phba->pci_bar4_memmap_p;
        default:
                break;
        }
        return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay, in microseconds
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to HBA. The @startq
 * is used to get the starting EQ index to change. The @numq value is
 * used to specify how many consecutive EQ indexes, starting at EQ index,
 * are to be changed. This function is synchronous and will wait for any
 * mailbox command to finish before returning.
 *
 * This function returns no value. Note: if the mailbox command fails,
 * some EQs may already have had their delay multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
                         uint32_t numq, uint32_t usdelay)
{
        struct lpfc_mbx_modify_eq_delay *eq_delay;
        LPFC_MBOXQ_t *mbox;
        struct lpfc_queue *eq;
        int cnt = 0, rc, length;
        uint32_t shdr_status, shdr_add_status;
        uint32_t dmult;
        int qidx;
        union lpfc_sli4_cfg_shdr *shdr;

        if (startq >= phba->cfg_irq_chann)
                return;

        if (usdelay > 0xFFFF) {
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
                                "6429 usdelay %d too large. Scaled down to "
                                "0xFFFF.\n", usdelay);
                usdelay = 0xFFFF;
        }

        /* set values by EQ_DELAY register if supported */
        if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
                for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
                        eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
                        if (!eq)
                                continue;

                        lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

                        if (++cnt >= numq)
                                break;
                }
                return;
        }

        /* Otherwise, set values by mailbox cmd */

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
                                "6428 Failed allocating mailbox cmd buffer."
14763 " EQ delay was not set.\n"); 14764 return; 14765 } 14766 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14767 sizeof(struct lpfc_sli4_cfg_mhdr)); 14768 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14769 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14770 length, LPFC_SLI4_MBX_EMBED); 14771 eq_delay = &mbox->u.mqe.un.eq_delay; 14772 14773 /* Calculate delay multiper from maximum interrupt per second */ 14774 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; 14775 if (dmult) 14776 dmult--; 14777 if (dmult > LPFC_DMULT_MAX) 14778 dmult = LPFC_DMULT_MAX; 14779 14780 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14781 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 14782 if (!eq) 14783 continue; 14784 eq->q_mode = usdelay; 14785 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14786 eq_delay->u.request.eq[cnt].phase = 0; 14787 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14788 14789 if (++cnt >= numq) 14790 break; 14791 } 14792 eq_delay->u.request.num_eq = cnt; 14793 14794 mbox->vport = phba->pport; 14795 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14796 mbox->ctx_buf = NULL; 14797 mbox->ctx_ndlp = NULL; 14798 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14799 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14800 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14801 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14802 if (shdr_status || shdr_add_status || rc) { 14803 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14804 "2512 MODIFY_EQ_DELAY mailbox failed with " 14805 "status x%x add_status x%x, mbx status x%x\n", 14806 shdr_status, shdr_add_status, rc); 14807 } 14808 mempool_free(mbox, phba->mbox_mem_pool); 14809 return; 14810 } 14811 14812 /** 14813 * lpfc_eq_create - Create an Event Queue on the HBA 14814 * @phba: HBA structure that indicates port to create a queue on. 14815 * @eq: The queue structure to use to create the event queue. 14816 * @imax: The maximum interrupt per second limit. 14817 * 14818 * This function creates an event queue, as detailed in @eq, on a port, 14819 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 14820 * 14821 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14822 * is used to get the entry count and entry size that are necessary to 14823 * determine the number of pages to allocate and use for this queue. This 14824 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14825 * event queue. This function is asynchronous and will wait for the mailbox 14826 * command to finish before continuing. 14827 * 14828 * On success this function will return a zero. If unable to allocate enough 14829 * memory this function will return -ENOMEM. If the queue create mailbox command 14830 * fails this function will return -ENXIO. 
14831 **/ 14832 int 14833 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14834 { 14835 struct lpfc_mbx_eq_create *eq_create; 14836 LPFC_MBOXQ_t *mbox; 14837 int rc, length, status = 0; 14838 struct lpfc_dmabuf *dmabuf; 14839 uint32_t shdr_status, shdr_add_status; 14840 union lpfc_sli4_cfg_shdr *shdr; 14841 uint16_t dmult; 14842 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14843 14844 /* sanity check on queue memory */ 14845 if (!eq) 14846 return -ENODEV; 14847 if (!phba->sli4_hba.pc_sli4_params.supported) 14848 hw_page_size = SLI4_PAGE_SIZE; 14849 14850 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14851 if (!mbox) 14852 return -ENOMEM; 14853 length = (sizeof(struct lpfc_mbx_eq_create) - 14854 sizeof(struct lpfc_sli4_cfg_mhdr)); 14855 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14856 LPFC_MBOX_OPCODE_EQ_CREATE, 14857 length, LPFC_SLI4_MBX_EMBED); 14858 eq_create = &mbox->u.mqe.un.eq_create; 14859 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14860 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14861 eq->page_count); 14862 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14863 LPFC_EQE_SIZE); 14864 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14865 14866 /* Use version 2 of CREATE_EQ if eqav is set */ 14867 if (phba->sli4_hba.pc_sli4_params.eqav) { 14868 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14869 LPFC_Q_CREATE_VERSION_2); 14870 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14871 phba->sli4_hba.pc_sli4_params.eqav); 14872 } 14873 14874 /* don't setup delay multiplier using EQ_CREATE */ 14875 dmult = 0; 14876 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14877 dmult); 14878 switch (eq->entry_count) { 14879 default: 14880 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14881 "0360 Unsupported EQ count. 
(%d)\n", 14882 eq->entry_count); 14883 if (eq->entry_count < 256) { 14884 status = -EINVAL; 14885 goto out; 14886 } 14887 /* fall through - otherwise default to smallest count */ 14888 case 256: 14889 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14890 LPFC_EQ_CNT_256); 14891 break; 14892 case 512: 14893 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14894 LPFC_EQ_CNT_512); 14895 break; 14896 case 1024: 14897 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14898 LPFC_EQ_CNT_1024); 14899 break; 14900 case 2048: 14901 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14902 LPFC_EQ_CNT_2048); 14903 break; 14904 case 4096: 14905 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14906 LPFC_EQ_CNT_4096); 14907 break; 14908 } 14909 list_for_each_entry(dmabuf, &eq->page_list, list) { 14910 memset(dmabuf->virt, 0, hw_page_size); 14911 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14912 putPaddrLow(dmabuf->phys); 14913 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14914 putPaddrHigh(dmabuf->phys); 14915 } 14916 mbox->vport = phba->pport; 14917 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14918 mbox->ctx_buf = NULL; 14919 mbox->ctx_ndlp = NULL; 14920 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14921 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14922 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14923 if (shdr_status || shdr_add_status || rc) { 14924 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14925 "2500 EQ_CREATE mailbox failed with " 14926 "status x%x add_status x%x, mbx status x%x\n", 14927 shdr_status, shdr_add_status, rc); 14928 status = -ENXIO; 14929 } 14930 eq->type = LPFC_EQ; 14931 eq->subtype = LPFC_NONE; 14932 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14933 if (eq->queue_id == 0xFFFF) 14934 status = -ENXIO; 14935 eq->host_index = 0; 14936 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 14937 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 14938 out: 14939 mempool_free(mbox, phba->mbox_mem_pool); 14940 return status; 14941 } 14942 14943 /** 14944 * lpfc_cq_create - Create a Completion Queue on the HBA 14945 * @phba: HBA structure that indicates port to create a queue on. 14946 * @cq: The queue structure to use to create the completion queue. 14947 * @eq: The event queue to bind this completion queue to. 14948 * 14949 * This function creates a completion queue, as detailed in @wq, on a port, 14950 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14951 * 14952 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14953 * is used to get the entry count and entry size that are necessary to 14954 * determine the number of pages to allocate and use for this queue. The @eq 14955 * is used to indicate which event queue to bind this completion queue to. This 14956 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14957 * completion queue. This function is asynchronous and will wait for the mailbox 14958 * command to finish before continuing. 14959 * 14960 * On success this function will return a zero. If unable to allocate enough 14961 * memory this function will return -ENOMEM. If the queue create mailbox command 14962 * fails this function will return -ENXIO. 
14963 **/ 14964 int 14965 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14966 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14967 { 14968 struct lpfc_mbx_cq_create *cq_create; 14969 struct lpfc_dmabuf *dmabuf; 14970 LPFC_MBOXQ_t *mbox; 14971 int rc, length, status = 0; 14972 uint32_t shdr_status, shdr_add_status; 14973 union lpfc_sli4_cfg_shdr *shdr; 14974 14975 /* sanity check on queue memory */ 14976 if (!cq || !eq) 14977 return -ENODEV; 14978 14979 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14980 if (!mbox) 14981 return -ENOMEM; 14982 length = (sizeof(struct lpfc_mbx_cq_create) - 14983 sizeof(struct lpfc_sli4_cfg_mhdr)); 14984 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14985 LPFC_MBOX_OPCODE_CQ_CREATE, 14986 length, LPFC_SLI4_MBX_EMBED); 14987 cq_create = &mbox->u.mqe.un.cq_create; 14988 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14989 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14990 cq->page_count); 14991 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14992 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14993 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14994 phba->sli4_hba.pc_sli4_params.cqv); 14995 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14996 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14997 (cq->page_size / SLI4_PAGE_SIZE)); 14998 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14999 eq->queue_id); 15000 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 15001 phba->sli4_hba.pc_sli4_params.cqav); 15002 } else { 15003 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 15004 eq->queue_id); 15005 } 15006 switch (cq->entry_count) { 15007 case 2048: 15008 case 4096: 15009 if (phba->sli4_hba.pc_sli4_params.cqv == 15010 LPFC_Q_CREATE_VERSION_2) { 15011 cq_create->u.request.context.lpfc_cq_context_count = 15012 cq->entry_count; 15013 bf_set(lpfc_cq_context_count, 15014 &cq_create->u.request.context, 15015 LPFC_CQ_CNT_WORD7); 15016 break; 15017 } 15018 /* fall through */ 15019 default: 15020 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15021 "0361 Unsupported CQ count: " 15022 "entry cnt %d sz %d pg cnt %d\n", 15023 cq->entry_count, cq->entry_size, 15024 cq->page_count); 15025 if (cq->entry_count < 256) { 15026 status = -EINVAL; 15027 goto out; 15028 } 15029 /* fall through - otherwise default to smallest count */ 15030 case 256: 15031 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 15032 LPFC_CQ_CNT_256); 15033 break; 15034 case 512: 15035 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 15036 LPFC_CQ_CNT_512); 15037 break; 15038 case 1024: 15039 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 15040 LPFC_CQ_CNT_1024); 15041 break; 15042 } 15043 list_for_each_entry(dmabuf, &cq->page_list, list) { 15044 memset(dmabuf->virt, 0, cq->page_size); 15045 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15046 putPaddrLow(dmabuf->phys); 15047 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15048 putPaddrHigh(dmabuf->phys); 15049 } 15050 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15051 15052 /* The IOCTL status is embedded in the mailbox subheader. 
 */
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2501 CQ_CREATE mailbox failed with "
                                "status x%x add_status x%x, mbx status x%x\n",
                                shdr_status, shdr_add_status, rc);
                status = -ENXIO;
                goto out;
        }
        cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
        if (cq->queue_id == 0xFFFF) {
                status = -ENXIO;
                goto out;
        }
        /* link the cq onto the parent eq child list */
        list_add_tail(&cq->list, &eq->child_list);
        /* Set up completion queue's type and subtype */
        cq->type = type;
        cq->subtype = subtype;
        cq->assoc_qid = eq->queue_id;
        cq->assoc_qp = eq;
        cq->host_index = 0;
        cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
        cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

        if (cq->queue_id > phba->sli4_hba.cq_max)
                phba->sli4_hba.cq_max = cq->queue_id;
out:
        mempool_free(mbox, phba->mbox_mem_pool);
        return status;
}

/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for these queues. The
 * @hdwq array is used to indicate which event queues to bind the completion
 * queues to. This function will send the CREATE_CQ_SET mailbox command to
 * the HBA to setup the completion queue set. This function is synchronous
 * and will wait for the mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
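 *
 * Illustrative sketch only: the NVMET MRQ call shape used by the driver's
 * queue setup path (array names as declared in this driver):
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hdwq, LPFC_WCQ, LPFC_NVMET);
 *	if (rc)
 *		goto out_destroy;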
15108 **/ 15109 int 15110 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 15111 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 15112 uint32_t subtype) 15113 { 15114 struct lpfc_queue *cq; 15115 struct lpfc_queue *eq; 15116 struct lpfc_mbx_cq_create_set *cq_set; 15117 struct lpfc_dmabuf *dmabuf; 15118 LPFC_MBOXQ_t *mbox; 15119 int rc, length, alloclen, status = 0; 15120 int cnt, idx, numcq, page_idx = 0; 15121 uint32_t shdr_status, shdr_add_status; 15122 union lpfc_sli4_cfg_shdr *shdr; 15123 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15124 15125 /* sanity check on queue memory */ 15126 numcq = phba->cfg_nvmet_mrq; 15127 if (!cqp || !hdwq || !numcq) 15128 return -ENODEV; 15129 15130 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15131 if (!mbox) 15132 return -ENOMEM; 15133 15134 length = sizeof(struct lpfc_mbx_cq_create_set); 15135 length += ((numcq * cqp[0]->page_count) * 15136 sizeof(struct dma_address)); 15137 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15138 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 15139 LPFC_SLI4_MBX_NEMBED); 15140 if (alloclen < length) { 15141 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15142 "3098 Allocated DMA memory size (%d) is " 15143 "less than the requested DMA memory size " 15144 "(%d)\n", alloclen, length); 15145 status = -ENOMEM; 15146 goto out; 15147 } 15148 cq_set = mbox->sge_array->addr[0]; 15149 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 15150 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 15151 15152 for (idx = 0; idx < numcq; idx++) { 15153 cq = cqp[idx]; 15154 eq = hdwq[idx].hba_eq; 15155 if (!cq || !eq) { 15156 status = -ENOMEM; 15157 goto out; 15158 } 15159 if (!phba->sli4_hba.pc_sli4_params.supported) 15160 hw_page_size = cq->page_size; 15161 15162 switch (idx) { 15163 case 0: 15164 bf_set(lpfc_mbx_cq_create_set_page_size, 15165 &cq_set->u.request, 15166 (hw_page_size / SLI4_PAGE_SIZE)); 15167 bf_set(lpfc_mbx_cq_create_set_num_pages, 15168 &cq_set->u.request, cq->page_count); 15169 bf_set(lpfc_mbx_cq_create_set_evt, 15170 &cq_set->u.request, 1); 15171 bf_set(lpfc_mbx_cq_create_set_valid, 15172 &cq_set->u.request, 1); 15173 bf_set(lpfc_mbx_cq_create_set_cqe_size, 15174 &cq_set->u.request, 0); 15175 bf_set(lpfc_mbx_cq_create_set_num_cq, 15176 &cq_set->u.request, numcq); 15177 bf_set(lpfc_mbx_cq_create_set_autovalid, 15178 &cq_set->u.request, 15179 phba->sli4_hba.pc_sli4_params.cqav); 15180 switch (cq->entry_count) { 15181 case 2048: 15182 case 4096: 15183 if (phba->sli4_hba.pc_sli4_params.cqv == 15184 LPFC_Q_CREATE_VERSION_2) { 15185 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15186 &cq_set->u.request, 15187 cq->entry_count); 15188 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15189 &cq_set->u.request, 15190 LPFC_CQ_CNT_WORD7); 15191 break; 15192 } 15193 /* fall through */ 15194 default: 15195 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15196 "3118 Bad CQ count. 
(%d)\n", 15197 cq->entry_count); 15198 if (cq->entry_count < 256) { 15199 status = -EINVAL; 15200 goto out; 15201 } 15202 /* fall through - otherwise default to smallest */ 15203 case 256: 15204 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15205 &cq_set->u.request, LPFC_CQ_CNT_256); 15206 break; 15207 case 512: 15208 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15209 &cq_set->u.request, LPFC_CQ_CNT_512); 15210 break; 15211 case 1024: 15212 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15213 &cq_set->u.request, LPFC_CQ_CNT_1024); 15214 break; 15215 } 15216 bf_set(lpfc_mbx_cq_create_set_eq_id0, 15217 &cq_set->u.request, eq->queue_id); 15218 break; 15219 case 1: 15220 bf_set(lpfc_mbx_cq_create_set_eq_id1, 15221 &cq_set->u.request, eq->queue_id); 15222 break; 15223 case 2: 15224 bf_set(lpfc_mbx_cq_create_set_eq_id2, 15225 &cq_set->u.request, eq->queue_id); 15226 break; 15227 case 3: 15228 bf_set(lpfc_mbx_cq_create_set_eq_id3, 15229 &cq_set->u.request, eq->queue_id); 15230 break; 15231 case 4: 15232 bf_set(lpfc_mbx_cq_create_set_eq_id4, 15233 &cq_set->u.request, eq->queue_id); 15234 break; 15235 case 5: 15236 bf_set(lpfc_mbx_cq_create_set_eq_id5, 15237 &cq_set->u.request, eq->queue_id); 15238 break; 15239 case 6: 15240 bf_set(lpfc_mbx_cq_create_set_eq_id6, 15241 &cq_set->u.request, eq->queue_id); 15242 break; 15243 case 7: 15244 bf_set(lpfc_mbx_cq_create_set_eq_id7, 15245 &cq_set->u.request, eq->queue_id); 15246 break; 15247 case 8: 15248 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15249 &cq_set->u.request, eq->queue_id); 15250 break; 15251 case 9: 15252 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15253 &cq_set->u.request, eq->queue_id); 15254 break; 15255 case 10: 15256 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15257 &cq_set->u.request, eq->queue_id); 15258 break; 15259 case 11: 15260 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15261 &cq_set->u.request, eq->queue_id); 15262 break; 15263 case 12: 15264 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15265 &cq_set->u.request, eq->queue_id); 15266 break; 15267 case 13: 15268 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15269 &cq_set->u.request, eq->queue_id); 15270 break; 15271 case 14: 15272 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15273 &cq_set->u.request, eq->queue_id); 15274 break; 15275 case 15: 15276 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15277 &cq_set->u.request, eq->queue_id); 15278 break; 15279 } 15280 15281 /* link the cq onto the parent eq child list */ 15282 list_add_tail(&cq->list, &eq->child_list); 15283 /* Set up completion queue's type and subtype */ 15284 cq->type = type; 15285 cq->subtype = subtype; 15286 cq->assoc_qid = eq->queue_id; 15287 cq->assoc_qp = eq; 15288 cq->host_index = 0; 15289 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 15290 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 15291 cq->entry_count); 15292 cq->chann = idx; 15293 15294 rc = 0; 15295 list_for_each_entry(dmabuf, &cq->page_list, list) { 15296 memset(dmabuf->virt, 0, hw_page_size); 15297 cnt = page_idx + dmabuf->buffer_tag; 15298 cq_set->u.request.page[cnt].addr_lo = 15299 putPaddrLow(dmabuf->phys); 15300 cq_set->u.request.page[cnt].addr_hi = 15301 putPaddrHigh(dmabuf->phys); 15302 rc++; 15303 } 15304 page_idx += rc; 15305 } 15306 15307 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15308 15309 /* The IOCTL status is embedded in the mailbox subheader. 
 */
        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
        if (shdr_status || shdr_add_status || rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "3119 CQ_CREATE_SET mailbox failed with "
                                "status x%x add_status x%x, mbx status x%x\n",
                                shdr_status, shdr_add_status, rc);
                status = -ENXIO;
                goto out;
        }
        rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
        if (rc == 0xFFFF) {
                status = -ENXIO;
                goto out;
        }

        for (idx = 0; idx < numcq; idx++) {
                cq = cqp[idx];
                cq->queue_id = rc + idx;
                if (cq->queue_id > phba->sli4_hba.cq_max)
                        phba->sli4_hba.cq_max = cq->queue_id;
        }

out:
        lpfc_sli4_mbox_cmd_free(phba, mbox);
        return status;
}

/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mailbox queue.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
                       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
        struct lpfc_mbx_mq_create *mq_create;
        struct lpfc_dmabuf *dmabuf;
        int length;

        length = (sizeof(struct lpfc_mbx_mq_create) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_MQ_CREATE,
                         length, LPFC_SLI4_MBX_EMBED);
        mq_create = &mbox->u.mqe.un.mq_create;
        bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
               mq->page_count);
        bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
               cq->queue_id);
        bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
        switch (mq->entry_count) {
        case 16:
                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
                       LPFC_MQ_RING_SIZE_16);
                break;
        case 32:
                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
                       LPFC_MQ_RING_SIZE_32);
                break;
        case 64:
                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
                       LPFC_MQ_RING_SIZE_64);
                break;
        case 128:
                bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
                       LPFC_MQ_RING_SIZE_128);
                break;
        }
        list_for_each_entry(dmabuf, &mq->page_list, list) {
                mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
                        putPaddrLow(dmabuf->phys);
                mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
                        putPaddrHigh(dmabuf->phys);
        }
}

/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mailbox queue.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
               struct lpfc_queue *cq, uint32_t subtype)
{
        struct lpfc_mbx_mq_create *mq_create;
        struct lpfc_mbx_mq_create_ext *mq_create_ext;
        struct lpfc_dmabuf *dmabuf;
        LPFC_MBOXQ_t *mbox;
        int rc, length, status = 0;
        uint32_t shdr_status, shdr_add_status;
        union lpfc_sli4_cfg_shdr *shdr;
        uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

        /* sanity check on queue memory */
        if (!mq || !cq)
                return -ENODEV;
        if (!phba->sli4_hba.pc_sli4_params.supported)
                hw_page_size = SLI4_PAGE_SIZE;

        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mbox)
                return -ENOMEM;
        length = (sizeof(struct lpfc_mbx_mq_create_ext) -
                  sizeof(struct lpfc_sli4_cfg_mhdr));
        lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                         LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
                         length, LPFC_SLI4_MBX_EMBED);

        mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
        shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
        bf_set(lpfc_mbx_mq_create_ext_num_pages,
               &mq_create_ext->u.request, mq->page_count);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
               &mq_create_ext->u.request, 1);
        bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
        bf_set(lpfc_mbox_hdr_version, &shdr->request,
               phba->sli4_hba.pc_sli4_params.mqv);
        if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
                bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
                       cq->queue_id);
        else
                bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
                       cq->queue_id);
        switch (mq->entry_count) {
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0362 Unsupported MQ count.
(%d)\n", 15473 mq->entry_count); 15474 if (mq->entry_count < 16) { 15475 status = -EINVAL; 15476 goto out; 15477 } 15478 /* fall through - otherwise default to smallest count */ 15479 case 16: 15480 bf_set(lpfc_mq_context_ring_size, 15481 &mq_create_ext->u.request.context, 15482 LPFC_MQ_RING_SIZE_16); 15483 break; 15484 case 32: 15485 bf_set(lpfc_mq_context_ring_size, 15486 &mq_create_ext->u.request.context, 15487 LPFC_MQ_RING_SIZE_32); 15488 break; 15489 case 64: 15490 bf_set(lpfc_mq_context_ring_size, 15491 &mq_create_ext->u.request.context, 15492 LPFC_MQ_RING_SIZE_64); 15493 break; 15494 case 128: 15495 bf_set(lpfc_mq_context_ring_size, 15496 &mq_create_ext->u.request.context, 15497 LPFC_MQ_RING_SIZE_128); 15498 break; 15499 } 15500 list_for_each_entry(dmabuf, &mq->page_list, list) { 15501 memset(dmabuf->virt, 0, hw_page_size); 15502 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15503 putPaddrLow(dmabuf->phys); 15504 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15505 putPaddrHigh(dmabuf->phys); 15506 } 15507 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15508 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15509 &mq_create_ext->u.response); 15510 if (rc != MBX_SUCCESS) { 15511 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15512 "2795 MQ_CREATE_EXT failed with " 15513 "status x%x. Failback to MQ_CREATE.\n", 15514 rc); 15515 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15516 mq_create = &mbox->u.mqe.un.mq_create; 15517 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15518 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15519 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15520 &mq_create->u.response); 15521 } 15522 15523 /* The IOCTL status is embedded in the mailbox subheader. */ 15524 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15525 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15526 if (shdr_status || shdr_add_status || rc) { 15527 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15528 "2502 MQ_CREATE mailbox failed with " 15529 "status x%x add_status x%x, mbx status x%x\n", 15530 shdr_status, shdr_add_status, rc); 15531 status = -ENXIO; 15532 goto out; 15533 } 15534 if (mq->queue_id == 0xFFFF) { 15535 status = -ENXIO; 15536 goto out; 15537 } 15538 mq->type = LPFC_MQ; 15539 mq->assoc_qid = cq->queue_id; 15540 mq->subtype = subtype; 15541 mq->host_index = 0; 15542 mq->hba_index = 0; 15543 15544 /* link the mq onto the parent cq child list */ 15545 list_add_tail(&mq->list, &cq->child_list); 15546 out: 15547 mempool_free(mbox, phba->mbox_mem_pool); 15548 return status; 15549 } 15550 15551 /** 15552 * lpfc_wq_create - Create a Work Queue on the HBA 15553 * @phba: HBA structure that indicates port to create a queue on. 15554 * @wq: The queue structure to use to create the work queue. 15555 * @cq: The completion queue to bind this work queue to. 15556 * @subtype: The subtype of the work queue indicating its functionality. 15557 * 15558 * This function creates a work queue, as detailed in @wq, on a port, described 15559 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15560 * 15561 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15562 * is used to get the entry count and entry size that are necessary to 15563 * determine the number of pages to allocate and use for this queue. The @cq 15564 * is used to indicate which completion queue to bind this work queue to. This 15565 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15566 * work queue. 
This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;
	uint8_t dpp_barset;
	uint32_t dpp_offset;
	unsigned long pg_addr;
	uint8_t wq_create_version;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = wq->page_size;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
	    (wq->page_size > SLI4_PAGE_SIZE))
		wq_create_version = LPFC_Q_CREATE_VERSION_1;
	else
		wq_create_version = LPFC_Q_CREATE_VERSION_0;

	switch (wq_create_version) {
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		/* Request DPP by default */
		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       (wq->page_size / SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		page = wq_create->u.request.page;
		break;
	}

	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys); 15666 } 15667 15668 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15669 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15670 15671 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15672 /* The IOCTL status is embedded in the mailbox subheader. */ 15673 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15674 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15675 if (shdr_status || shdr_add_status || rc) { 15676 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15677 "2503 WQ_CREATE mailbox failed with " 15678 "status x%x add_status x%x, mbx status x%x\n", 15679 shdr_status, shdr_add_status, rc); 15680 status = -ENXIO; 15681 goto out; 15682 } 15683 15684 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15685 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15686 &wq_create->u.response); 15687 else 15688 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15689 &wq_create->u.response_1); 15690 15691 if (wq->queue_id == 0xFFFF) { 15692 status = -ENXIO; 15693 goto out; 15694 } 15695 15696 wq->db_format = LPFC_DB_LIST_FORMAT; 15697 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15698 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15699 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15700 &wq_create->u.response); 15701 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15702 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15704 "3265 WQ[%d] doorbell format " 15705 "not supported: x%x\n", 15706 wq->queue_id, wq->db_format); 15707 status = -EINVAL; 15708 goto out; 15709 } 15710 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15711 &wq_create->u.response); 15712 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15713 pci_barset); 15714 if (!bar_memmap_p) { 15715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15716 "3263 WQ[%d] failed to memmap " 15717 "pci barset:x%x\n", 15718 wq->queue_id, pci_barset); 15719 status = -ENOMEM; 15720 goto out; 15721 } 15722 db_offset = wq_create->u.response.doorbell_offset; 15723 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15724 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15726 "3252 WQ[%d] doorbell offset " 15727 "not supported: x%x\n", 15728 wq->queue_id, db_offset); 15729 status = -EINVAL; 15730 goto out; 15731 } 15732 wq->db_regaddr = bar_memmap_p + db_offset; 15733 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15734 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15735 "format:x%x\n", wq->queue_id, 15736 pci_barset, db_offset, wq->db_format); 15737 } else 15738 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15739 } else { 15740 /* Check if DPP was honored by the firmware */ 15741 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15742 &wq_create->u.response_1); 15743 if (wq->dpp_enable) { 15744 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15745 &wq_create->u.response_1); 15746 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15747 pci_barset); 15748 if (!bar_memmap_p) { 15749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15750 "3267 WQ[%d] failed to memmap " 15751 "pci barset:x%x\n", 15752 wq->queue_id, pci_barset); 15753 status = -ENOMEM; 15754 goto out; 15755 } 15756 db_offset = wq_create->u.response_1.doorbell_offset; 15757 wq->db_regaddr = bar_memmap_p + db_offset; 15758 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15759 &wq_create->u.response_1); 15760 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15761 &wq_create->u.response_1); 15762 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15763 dpp_barset); 15764 if 
(!bar_memmap_p) { 15765 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15766 "3268 WQ[%d] failed to memmap " 15767 "pci barset:x%x\n", 15768 wq->queue_id, dpp_barset); 15769 status = -ENOMEM; 15770 goto out; 15771 } 15772 dpp_offset = wq_create->u.response_1.dpp_offset; 15773 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15774 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15775 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15776 "dpp_id:x%x dpp_barset:x%x " 15777 "dpp_offset:x%x\n", 15778 wq->queue_id, pci_barset, db_offset, 15779 wq->dpp_id, dpp_barset, dpp_offset); 15780 15781 /* Enable combined writes for DPP aperture */ 15782 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15783 #ifdef CONFIG_X86 15784 rc = set_memory_wc(pg_addr, 1); 15785 if (rc) { 15786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15787 "3272 Cannot setup Combined " 15788 "Write on WQ[%d] - disable DPP\n", 15789 wq->queue_id); 15790 phba->cfg_enable_dpp = 0; 15791 } 15792 #else 15793 phba->cfg_enable_dpp = 0; 15794 #endif 15795 } else 15796 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15797 } 15798 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15799 if (wq->pring == NULL) { 15800 status = -ENOMEM; 15801 goto out; 15802 } 15803 wq->type = LPFC_WQ; 15804 wq->assoc_qid = cq->queue_id; 15805 wq->subtype = subtype; 15806 wq->host_index = 0; 15807 wq->hba_index = 0; 15808 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; 15809 15810 /* link the wq onto the parent cq child list */ 15811 list_add_tail(&wq->list, &cq->child_list); 15812 out: 15813 mempool_free(mbox, phba->mbox_mem_pool); 15814 return status; 15815 } 15816 15817 /** 15818 * lpfc_rq_create - Create a Receive Queue on the HBA 15819 * @phba: HBA structure that indicates port to create a queue on. 15820 * @hrq: The queue structure to use to create the header receive queue. 15821 * @drq: The queue structure to use to create the data receive queue. 15822 * @cq: The completion queue to bind this work queue to. 15823 * 15824 * This function creates a receive buffer queue pair , as detailed in @hrq and 15825 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15826 * to the HBA. 15827 * 15828 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15829 * struct is used to get the entry count that is necessary to determine the 15830 * number of pages to use for this queue. The @cq is used to indicate which 15831 * completion queue to bind received buffers that are posted to these queues to. 15832 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15833 * receive queue pair. This function is asynchronous and will wait for the 15834 * mailbox command to finish before continuing. 15835 * 15836 * On success this function will return a zero. If unable to allocate enough 15837 * memory this function will return -ENOMEM. If the queue create mailbox command 15838 * fails this function will return -ENXIO. 
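 *
 * A minimal usage sketch (illustrative only; it assumes both queues were
 * already allocated with matching entry counts, and that LPFC_USOL is the
 * desired subtype, as for the driver's unsolicited ELS pair):
 *
 *	rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *	if (rc)
 *		return rc;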
15839 **/ 15840 int 15841 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15842 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15843 { 15844 struct lpfc_mbx_rq_create *rq_create; 15845 struct lpfc_dmabuf *dmabuf; 15846 LPFC_MBOXQ_t *mbox; 15847 int rc, length, status = 0; 15848 uint32_t shdr_status, shdr_add_status; 15849 union lpfc_sli4_cfg_shdr *shdr; 15850 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15851 void __iomem *bar_memmap_p; 15852 uint32_t db_offset; 15853 uint16_t pci_barset; 15854 15855 /* sanity check on queue memory */ 15856 if (!hrq || !drq || !cq) 15857 return -ENODEV; 15858 if (!phba->sli4_hba.pc_sli4_params.supported) 15859 hw_page_size = SLI4_PAGE_SIZE; 15860 15861 if (hrq->entry_count != drq->entry_count) 15862 return -EINVAL; 15863 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15864 if (!mbox) 15865 return -ENOMEM; 15866 length = (sizeof(struct lpfc_mbx_rq_create) - 15867 sizeof(struct lpfc_sli4_cfg_mhdr)); 15868 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15869 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15870 length, LPFC_SLI4_MBX_EMBED); 15871 rq_create = &mbox->u.mqe.un.rq_create; 15872 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15873 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15874 phba->sli4_hba.pc_sli4_params.rqv); 15875 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15876 bf_set(lpfc_rq_context_rqe_count_1, 15877 &rq_create->u.request.context, 15878 hrq->entry_count); 15879 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15880 bf_set(lpfc_rq_context_rqe_size, 15881 &rq_create->u.request.context, 15882 LPFC_RQE_SIZE_8); 15883 bf_set(lpfc_rq_context_page_size, 15884 &rq_create->u.request.context, 15885 LPFC_RQ_PAGE_SIZE_4096); 15886 } else { 15887 switch (hrq->entry_count) { 15888 default: 15889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15890 "2535 Unsupported RQ count. (%d)\n", 15891 hrq->entry_count); 15892 if (hrq->entry_count < 512) { 15893 status = -EINVAL; 15894 goto out; 15895 } 15896 /* fall through - otherwise default to smallest count */ 15897 case 512: 15898 bf_set(lpfc_rq_context_rqe_count, 15899 &rq_create->u.request.context, 15900 LPFC_RQ_RING_SIZE_512); 15901 break; 15902 case 1024: 15903 bf_set(lpfc_rq_context_rqe_count, 15904 &rq_create->u.request.context, 15905 LPFC_RQ_RING_SIZE_1024); 15906 break; 15907 case 2048: 15908 bf_set(lpfc_rq_context_rqe_count, 15909 &rq_create->u.request.context, 15910 LPFC_RQ_RING_SIZE_2048); 15911 break; 15912 case 4096: 15913 bf_set(lpfc_rq_context_rqe_count, 15914 &rq_create->u.request.context, 15915 LPFC_RQ_RING_SIZE_4096); 15916 break; 15917 } 15918 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15919 LPFC_HDR_BUF_SIZE); 15920 } 15921 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15922 cq->queue_id); 15923 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15924 hrq->page_count); 15925 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15926 memset(dmabuf->virt, 0, hw_page_size); 15927 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15928 putPaddrLow(dmabuf->phys); 15929 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15930 putPaddrHigh(dmabuf->phys); 15931 } 15932 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15933 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15934 15935 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15936 /* The IOCTL status is embedded in the mailbox subheader. 
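 * A nonzero mbx status (rc) means the mailbox transport itself failed;
 * shdr_status and shdr_add_status carry the port's completion status for
 * the embedded IOCTL. All three must be zero for success.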
*/ 15937 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15938 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15939 if (shdr_status || shdr_add_status || rc) { 15940 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15941 "2504 RQ_CREATE mailbox failed with " 15942 "status x%x add_status x%x, mbx status x%x\n", 15943 shdr_status, shdr_add_status, rc); 15944 status = -ENXIO; 15945 goto out; 15946 } 15947 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15948 if (hrq->queue_id == 0xFFFF) { 15949 status = -ENXIO; 15950 goto out; 15951 } 15952 15953 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15954 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15955 &rq_create->u.response); 15956 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15957 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15959 "3262 RQ [%d] doorbell format not " 15960 "supported: x%x\n", hrq->queue_id, 15961 hrq->db_format); 15962 status = -EINVAL; 15963 goto out; 15964 } 15965 15966 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15967 &rq_create->u.response); 15968 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15969 if (!bar_memmap_p) { 15970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15971 "3269 RQ[%d] failed to memmap pci " 15972 "barset:x%x\n", hrq->queue_id, 15973 pci_barset); 15974 status = -ENOMEM; 15975 goto out; 15976 } 15977 15978 db_offset = rq_create->u.response.doorbell_offset; 15979 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15980 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15982 "3270 RQ[%d] doorbell offset not " 15983 "supported: x%x\n", hrq->queue_id, 15984 db_offset); 15985 status = -EINVAL; 15986 goto out; 15987 } 15988 hrq->db_regaddr = bar_memmap_p + db_offset; 15989 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15990 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15991 "format:x%x\n", hrq->queue_id, pci_barset, 15992 db_offset, hrq->db_format); 15993 } else { 15994 hrq->db_format = LPFC_DB_RING_FORMAT; 15995 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15996 } 15997 hrq->type = LPFC_HRQ; 15998 hrq->assoc_qid = cq->queue_id; 15999 hrq->subtype = subtype; 16000 hrq->host_index = 0; 16001 hrq->hba_index = 0; 16002 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16003 16004 /* now create the data queue */ 16005 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16006 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 16007 length, LPFC_SLI4_MBX_EMBED); 16008 bf_set(lpfc_mbox_hdr_version, &shdr->request, 16009 phba->sli4_hba.pc_sli4_params.rqv); 16010 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 16011 bf_set(lpfc_rq_context_rqe_count_1, 16012 &rq_create->u.request.context, hrq->entry_count); 16013 if (subtype == LPFC_NVMET) 16014 rq_create->u.request.context.buffer_size = 16015 LPFC_NVMET_DATA_BUF_SIZE; 16016 else 16017 rq_create->u.request.context.buffer_size = 16018 LPFC_DATA_BUF_SIZE; 16019 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 16020 LPFC_RQE_SIZE_8); 16021 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 16022 (PAGE_SIZE/SLI4_PAGE_SIZE)); 16023 } else { 16024 switch (drq->entry_count) { 16025 default: 16026 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16027 "2536 Unsupported RQ count. 
(%d)\n", 16028 drq->entry_count); 16029 if (drq->entry_count < 512) { 16030 status = -EINVAL; 16031 goto out; 16032 } 16033 /* fall through - otherwise default to smallest count */ 16034 case 512: 16035 bf_set(lpfc_rq_context_rqe_count, 16036 &rq_create->u.request.context, 16037 LPFC_RQ_RING_SIZE_512); 16038 break; 16039 case 1024: 16040 bf_set(lpfc_rq_context_rqe_count, 16041 &rq_create->u.request.context, 16042 LPFC_RQ_RING_SIZE_1024); 16043 break; 16044 case 2048: 16045 bf_set(lpfc_rq_context_rqe_count, 16046 &rq_create->u.request.context, 16047 LPFC_RQ_RING_SIZE_2048); 16048 break; 16049 case 4096: 16050 bf_set(lpfc_rq_context_rqe_count, 16051 &rq_create->u.request.context, 16052 LPFC_RQ_RING_SIZE_4096); 16053 break; 16054 } 16055 if (subtype == LPFC_NVMET) 16056 bf_set(lpfc_rq_context_buf_size, 16057 &rq_create->u.request.context, 16058 LPFC_NVMET_DATA_BUF_SIZE); 16059 else 16060 bf_set(lpfc_rq_context_buf_size, 16061 &rq_create->u.request.context, 16062 LPFC_DATA_BUF_SIZE); 16063 } 16064 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 16065 cq->queue_id); 16066 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 16067 drq->page_count); 16068 list_for_each_entry(dmabuf, &drq->page_list, list) { 16069 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 16070 putPaddrLow(dmabuf->phys); 16071 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 16072 putPaddrHigh(dmabuf->phys); 16073 } 16074 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 16075 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 16076 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16077 /* The IOCTL status is embedded in the mailbox subheader. */ 16078 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 16079 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16080 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16081 if (shdr_status || shdr_add_status || rc) { 16082 status = -ENXIO; 16083 goto out; 16084 } 16085 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16086 if (drq->queue_id == 0xFFFF) { 16087 status = -ENXIO; 16088 goto out; 16089 } 16090 drq->type = LPFC_DRQ; 16091 drq->assoc_qid = cq->queue_id; 16092 drq->subtype = subtype; 16093 drq->host_index = 0; 16094 drq->hba_index = 0; 16095 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16096 16097 /* link the header and data RQs onto the parent cq child list */ 16098 list_add_tail(&hrq->list, &cq->child_list); 16099 list_add_tail(&drq->list, &cq->child_list); 16100 16101 out: 16102 mempool_free(mbox, phba->mbox_mem_pool); 16103 return status; 16104 } 16105 16106 /** 16107 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 16108 * @phba: HBA structure that indicates port to create a queue on. 16109 * @hrqp: The queue structure array to use to create the header receive queues. 16110 * @drqp: The queue structure array to use to create the data receive queues. 16111 * @cqp: The completion queue array to bind these receive queues to. 16112 * 16113 * This function creates a receive buffer queue pair , as detailed in @hrq and 16114 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 16115 * to the HBA. 16116 * 16117 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 16118 * struct is used to get the entry count that is necessary to determine the 16119 * number of pages to use for this queue. The @cq is used to indicate which 16120 * completion queue to bind received buffers that are posted to these queues to. 
16121 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 16122 * receive queue pair. This function is asynchronous and will wait for the 16123 * mailbox command to finish before continuing. 16124 * 16125 * On success this function will return a zero. If unable to allocate enough 16126 * memory this function will return -ENOMEM. If the queue create mailbox command 16127 * fails this function will return -ENXIO. 16128 **/ 16129 int 16130 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 16131 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 16132 uint32_t subtype) 16133 { 16134 struct lpfc_queue *hrq, *drq, *cq; 16135 struct lpfc_mbx_rq_create_v2 *rq_create; 16136 struct lpfc_dmabuf *dmabuf; 16137 LPFC_MBOXQ_t *mbox; 16138 int rc, length, alloclen, status = 0; 16139 int cnt, idx, numrq, page_idx = 0; 16140 uint32_t shdr_status, shdr_add_status; 16141 union lpfc_sli4_cfg_shdr *shdr; 16142 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16143 16144 numrq = phba->cfg_nvmet_mrq; 16145 /* sanity check on array memory */ 16146 if (!hrqp || !drqp || !cqp || !numrq) 16147 return -ENODEV; 16148 if (!phba->sli4_hba.pc_sli4_params.supported) 16149 hw_page_size = SLI4_PAGE_SIZE; 16150 16151 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16152 if (!mbox) 16153 return -ENOMEM; 16154 16155 length = sizeof(struct lpfc_mbx_rq_create_v2); 16156 length += ((2 * numrq * hrqp[0]->page_count) * 16157 sizeof(struct dma_address)); 16158 16159 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16160 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 16161 LPFC_SLI4_MBX_NEMBED); 16162 if (alloclen < length) { 16163 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16164 "3099 Allocated DMA memory size (%d) is " 16165 "less than the requested DMA memory size " 16166 "(%d)\n", alloclen, length); 16167 status = -ENOMEM; 16168 goto out; 16169 } 16170 16171 16172 16173 rq_create = mbox->sge_array->addr[0]; 16174 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 16175 16176 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 16177 cnt = 0; 16178 16179 for (idx = 0; idx < numrq; idx++) { 16180 hrq = hrqp[idx]; 16181 drq = drqp[idx]; 16182 cq = cqp[idx]; 16183 16184 /* sanity check on queue memory */ 16185 if (!hrq || !drq || !cq) { 16186 status = -ENODEV; 16187 goto out; 16188 } 16189 16190 if (hrq->entry_count != drq->entry_count) { 16191 status = -EINVAL; 16192 goto out; 16193 } 16194 16195 if (idx == 0) { 16196 bf_set(lpfc_mbx_rq_create_num_pages, 16197 &rq_create->u.request, 16198 hrq->page_count); 16199 bf_set(lpfc_mbx_rq_create_rq_cnt, 16200 &rq_create->u.request, (numrq * 2)); 16201 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 16202 1); 16203 bf_set(lpfc_rq_context_base_cq, 16204 &rq_create->u.request.context, 16205 cq->queue_id); 16206 bf_set(lpfc_rq_context_data_size, 16207 &rq_create->u.request.context, 16208 LPFC_NVMET_DATA_BUF_SIZE); 16209 bf_set(lpfc_rq_context_hdr_size, 16210 &rq_create->u.request.context, 16211 LPFC_HDR_BUF_SIZE); 16212 bf_set(lpfc_rq_context_rqe_count_1, 16213 &rq_create->u.request.context, 16214 hrq->entry_count); 16215 bf_set(lpfc_rq_context_rqe_size, 16216 &rq_create->u.request.context, 16217 LPFC_RQE_SIZE_8); 16218 bf_set(lpfc_rq_context_page_size, 16219 &rq_create->u.request.context, 16220 (PAGE_SIZE/SLI4_PAGE_SIZE)); 16221 } 16222 rc = 0; 16223 list_for_each_entry(dmabuf, &hrq->page_list, list) { 16224 memset(dmabuf->virt, 0, hw_page_size); 16225 cnt = page_idx + dmabuf->buffer_tag; 
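			/*
			 * Header and data pages share one page array in this
			 * request; page_idx carries the running offset so each
			 * RQ's pages land after those posted for earlier RQs.
			 */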
			rq_create->u.request.page[cnt].addr_lo =
				putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
				putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
				putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
				putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}

/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
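 *
 * Teardown mirrors creation order: destroy the CQs that reference an EQ
 * before destroying the EQ itself. A minimal sketch (names hypothetical,
 * error handling elided):
 *
 *	lpfc_cq_destroy(phba, io_cq);
 *	lpfc_eq_destroy(phba, io_eq);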
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;

	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
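 *
 * Note that this function also frees @wq->pring, which lpfc_wq_create()
 * allocated, and clears the pointer, so the caller may reuse or release
 * the queue structure afterwards.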
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	kfree(wq->pring);
	wq->pring = NULL;
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the header receive queue.
 * @drq: The queue structure associated with the data receive queue.
 *
 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
 * by sending mailbox commands, specific to the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64-byte aligned.
 * If you are going to map 2 SGLs, then the first one must have 256 entries
 * and the second SGL can have between 1 and 256 entries.
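 *
 * A hedged example of posting a two-page SGL for one xritag (names are
 * hypothetical; both pages are assumed to be 64-byte aligned DMA memory):
 *
 *	rc = lpfc_sli4_post_sgl(phba, sgl_pg0_phys, sgl_pg1_phys, xritag);
 *	if (rc)
 *		return rc;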
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
		cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
		cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
		cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
		cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	return 0;
}

/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri from
 * the driver's xri bitmask, consistent with the SLI-4 interface spec.
 *
 * Returns
 *	An available xri in the range 0 <= xri < max_xri if successful
 *	NO_XRI if no xris are available.
 **/
static uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	unsigned long xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available XRIs maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available XRIs maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff); NO_XRI is not a valid xritag.
 * The function returns the allocated xritag if successful.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
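 *
 * The whole request must fit in a single non-embedded mailbox page, so
 * @post_cnt is implicitly bounded by how many sgl_page_pairs entries fit
 * in SLI4_PAGE_SIZE alongside the configuration header.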
16777 **/ 16778 static int 16779 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16780 struct list_head *post_sgl_list, 16781 int post_cnt) 16782 { 16783 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16784 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16785 struct sgl_page_pairs *sgl_pg_pairs; 16786 void *viraddr; 16787 LPFC_MBOXQ_t *mbox; 16788 uint32_t reqlen, alloclen, pg_pairs; 16789 uint32_t mbox_tmo; 16790 uint16_t xritag_start = 0; 16791 int rc = 0; 16792 uint32_t shdr_status, shdr_add_status; 16793 union lpfc_sli4_cfg_shdr *shdr; 16794 16795 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16796 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16797 if (reqlen > SLI4_PAGE_SIZE) { 16798 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16799 "2559 Block sgl registration required DMA " 16800 "size (%d) great than a page\n", reqlen); 16801 return -ENOMEM; 16802 } 16803 16804 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16805 if (!mbox) 16806 return -ENOMEM; 16807 16808 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16809 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16810 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16811 LPFC_SLI4_MBX_NEMBED); 16812 16813 if (alloclen < reqlen) { 16814 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16815 "0285 Allocated DMA memory size (%d) is " 16816 "less than the requested DMA memory " 16817 "size (%d)\n", alloclen, reqlen); 16818 lpfc_sli4_mbox_cmd_free(phba, mbox); 16819 return -ENOMEM; 16820 } 16821 /* Set up the SGL pages in the non-embedded DMA pages */ 16822 viraddr = mbox->sge_array->addr[0]; 16823 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16824 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16825 16826 pg_pairs = 0; 16827 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16828 /* Set up the sge entry */ 16829 sgl_pg_pairs->sgl_pg0_addr_lo = 16830 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16831 sgl_pg_pairs->sgl_pg0_addr_hi = 16832 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16833 sgl_pg_pairs->sgl_pg1_addr_lo = 16834 cpu_to_le32(putPaddrLow(0)); 16835 sgl_pg_pairs->sgl_pg1_addr_hi = 16836 cpu_to_le32(putPaddrHigh(0)); 16837 16838 /* Keep the first xritag on the list */ 16839 if (pg_pairs == 0) 16840 xritag_start = sglq_entry->sli4_xritag; 16841 sgl_pg_pairs++; 16842 pg_pairs++; 16843 } 16844 16845 /* Complete initialization and perform endian conversion. */ 16846 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16847 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16848 sgl->word0 = cpu_to_le32(sgl->word0); 16849 16850 if (!phba->sli4_hba.intr_enable) 16851 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16852 else { 16853 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16854 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16855 } 16856 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16857 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16858 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16859 if (rc != MBX_TIMEOUT) 16860 lpfc_sli4_mbox_cmd_free(phba, mbox); 16861 if (shdr_status || shdr_add_status || rc) { 16862 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16863 "2513 POST_SGL_BLOCK mailbox command failed " 16864 "status x%x add_status x%x mbx status x%x\n", 16865 shdr_status, shdr_add_status, rc); 16866 rc = -ENXIO; 16867 } 16868 return rc; 16869 } 16870 16871 /** 16872 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 16873 * @phba: pointer to lpfc hba data structure. 
16874 * @nblist: pointer to nvme buffer list. 16875 * @count: number of scsi buffers on the list. 16876 * 16877 * This routine is invoked to post a block of @count scsi sgl pages from a 16878 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 16879 * No Lock is held. 16880 * 16881 **/ 16882 static int 16883 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 16884 int count) 16885 { 16886 struct lpfc_io_buf *lpfc_ncmd; 16887 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16888 struct sgl_page_pairs *sgl_pg_pairs; 16889 void *viraddr; 16890 LPFC_MBOXQ_t *mbox; 16891 uint32_t reqlen, alloclen, pg_pairs; 16892 uint32_t mbox_tmo; 16893 uint16_t xritag_start = 0; 16894 int rc = 0; 16895 uint32_t shdr_status, shdr_add_status; 16896 dma_addr_t pdma_phys_bpl1; 16897 union lpfc_sli4_cfg_shdr *shdr; 16898 16899 /* Calculate the requested length of the dma memory */ 16900 reqlen = count * sizeof(struct sgl_page_pairs) + 16901 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16902 if (reqlen > SLI4_PAGE_SIZE) { 16903 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16904 "6118 Block sgl registration required DMA " 16905 "size (%d) great than a page\n", reqlen); 16906 return -ENOMEM; 16907 } 16908 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16909 if (!mbox) { 16910 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16911 "6119 Failed to allocate mbox cmd memory\n"); 16912 return -ENOMEM; 16913 } 16914 16915 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16916 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16917 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16918 reqlen, LPFC_SLI4_MBX_NEMBED); 16919 16920 if (alloclen < reqlen) { 16921 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16922 "6120 Allocated DMA memory size (%d) is " 16923 "less than the requested DMA memory " 16924 "size (%d)\n", alloclen, reqlen); 16925 lpfc_sli4_mbox_cmd_free(phba, mbox); 16926 return -ENOMEM; 16927 } 16928 16929 /* Get the first SGE entry from the non-embedded DMA memory */ 16930 viraddr = mbox->sge_array->addr[0]; 16931 16932 /* Set up the SGL pages in the non-embedded DMA pages */ 16933 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16934 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16935 16936 pg_pairs = 0; 16937 list_for_each_entry(lpfc_ncmd, nblist, list) { 16938 /* Set up the sge entry */ 16939 sgl_pg_pairs->sgl_pg0_addr_lo = 16940 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 16941 sgl_pg_pairs->sgl_pg0_addr_hi = 16942 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 16943 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16944 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 16945 SGL_PAGE_SIZE; 16946 else 16947 pdma_phys_bpl1 = 0; 16948 sgl_pg_pairs->sgl_pg1_addr_lo = 16949 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16950 sgl_pg_pairs->sgl_pg1_addr_hi = 16951 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16952 /* Keep the first xritag on the list */ 16953 if (pg_pairs == 0) 16954 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 16955 sgl_pg_pairs++; 16956 pg_pairs++; 16957 } 16958 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16959 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16960 /* Perform endian conversion if necessary */ 16961 sgl->word0 = cpu_to_le32(sgl->word0); 16962 16963 if (!phba->sli4_hba.intr_enable) { 16964 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16965 } else { 16966 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16967 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16968 } 16969 shdr = (union 
lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_nblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return -EINVAL;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error. Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgls on the NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error. Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success. Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return zero if the frame is a valid frame or a non-zero value when the
 * frame does not pass the check.
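 *
 * Typical caller sketch (mirroring lpfc_sli4_handle_received_buffer()
 * below) -- a frame that fails the check is simply dropped:
 *
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}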
17121 **/ 17122 static int 17123 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 17124 { 17125 /* make rctl_names static to save stack space */ 17126 struct fc_vft_header *fc_vft_hdr; 17127 uint32_t *header = (uint32_t *) fc_hdr; 17128 17129 #define FC_RCTL_MDS_DIAGS 0xF4 17130 17131 switch (fc_hdr->fh_r_ctl) { 17132 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 17133 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 17134 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 17135 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 17136 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 17137 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 17138 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 17139 case FC_RCTL_DD_CMD_STATUS: /* command status */ 17140 case FC_RCTL_ELS_REQ: /* extended link services request */ 17141 case FC_RCTL_ELS_REP: /* extended link services reply */ 17142 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 17143 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 17144 case FC_RCTL_BA_NOP: /* basic link service NOP */ 17145 case FC_RCTL_BA_ABTS: /* basic link service abort */ 17146 case FC_RCTL_BA_RMC: /* remove connection */ 17147 case FC_RCTL_BA_ACC: /* basic accept */ 17148 case FC_RCTL_BA_RJT: /* basic reject */ 17149 case FC_RCTL_BA_PRMT: 17150 case FC_RCTL_ACK_1: /* acknowledge_1 */ 17151 case FC_RCTL_ACK_0: /* acknowledge_0 */ 17152 case FC_RCTL_P_RJT: /* port reject */ 17153 case FC_RCTL_F_RJT: /* fabric reject */ 17154 case FC_RCTL_P_BSY: /* port busy */ 17155 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 17156 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 17157 case FC_RCTL_LCR: /* link credit reset */ 17158 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 17159 case FC_RCTL_END: /* end */ 17160 break; 17161 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 17162 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 17163 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 17164 return lpfc_fc_frame_check(phba, fc_hdr); 17165 default: 17166 goto drop; 17167 } 17168 17169 switch (fc_hdr->fh_type) { 17170 case FC_TYPE_BLS: 17171 case FC_TYPE_ELS: 17172 case FC_TYPE_FCP: 17173 case FC_TYPE_CT: 17174 case FC_TYPE_NVME: 17175 break; 17176 case FC_TYPE_IP: 17177 case FC_TYPE_ILS: 17178 default: 17179 goto drop; 17180 } 17181 17182 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 17183 "2538 Received frame rctl:x%x, type:x%x, " 17184 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 17185 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 17186 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 17187 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 17188 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 17189 be32_to_cpu(header[6])); 17190 return 0; 17191 drop: 17192 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 17193 "2539 Dropped frame rctl:x%x type:x%x\n", 17194 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17195 return 1; 17196 } 17197 17198 /** 17199 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 17200 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 17201 * 17202 * This function processes the FC header to retrieve the VFI from the VF 17203 * header, if one exists. This function will return the VFI if one exists 17204 * or 0 if no VSAN Header exists. 
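 *
 * Usage sketch, mirroring the vport match in lpfc_fc_frame_to_vport()
 * below:
 *
 *	if (vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr))
 *		... the frame was tagged for this vport's fabric ...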
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID of the frame
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
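 *
 * The outstanding sequences form a two-level list (built by
 * lpfc_fc_frame_add() below): each node on vport->rcv_buffer_list is the
 * header buffer of one partial sequence, and that sequence's remaining
 * frames hang off its dbuf list:
 *
 *	vport->rcv_buffer_list --> seq A (hbuf) --> seq B (hbuf) --> ...
 *	                               |
 *	                               +--> frame 2 (dbuf) --> frame 3 ...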
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time stamp
 * indicates that there is at least one timed out sequence, this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
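 *
 * Frames belong to the same sequence when all three of these header
 * fields match (the test used in the search loop below):
 *
 *	temp_hdr->fh_seq_id == new_hdr->fh_seq_id &&
 *	temp_hdr->fh_ox_id  == new_hdr->fh_ox_id  &&
 *	!memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)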
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
	    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
		    be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks to see whether
 * such a partially assembled sequence is held by the driver. If so, it shall
 * free up all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames were freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the assembled sequence at the upper level
 * protocol, described by the information from the basic abort @dmabuf. It
 * checks to see whether such a pending context exists at the upper level
 * protocol. If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if the matching pending context of the sequence was cleaned up
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
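 *
 * This handler is wired up when the BLS response is built in
 * lpfc_sli4_seq_abort_rsp() below:
 *
 *	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;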
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
				rsp_iocbq->iocb.ulpStatus,
				rsp_iocbq->iocb.un.ulpWord[4]);
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the partially assembled sequence was aborted.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to activate ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext =
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17656 ctiocb->context1 = lpfc_nlp_get(ndlp); 17657 17658 ctiocb->vport = phba->pport; 17659 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17660 ctiocb->sli4_lxritag = NO_XRI; 17661 ctiocb->sli4_xritag = NO_XRI; 17662 17663 if (fctl & FC_FC_EX_CTX) 17664 /* Exchange responder sent the abort so we 17665 * own the oxid. 17666 */ 17667 xri = oxid; 17668 else 17669 xri = rxid; 17670 lxri = lpfc_sli4_xri_inrange(phba, xri); 17671 if (lxri != NO_XRI) 17672 lpfc_set_rrq_active(phba, ndlp, lxri, 17673 (xri == oxid) ? rxid : oxid, 0); 17674 /* For BA_ABTS from exchange responder, if the logical xri with 17675 * the oxid maps to the FCP XRI range, the port no longer has 17676 * that exchange context, send a BLS_RJT. Override the IOCB for 17677 * a BA_RJT. 17678 */ 17679 if ((fctl & FC_FC_EX_CTX) && 17680 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17681 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17682 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17683 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17684 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17685 } 17686 17687 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17688 * the driver no longer has that exchange, send a BLS_RJT. Override 17689 * the IOCB for a BA_RJT. 17690 */ 17691 if (aborted == false) { 17692 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17693 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17694 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17695 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17696 } 17697 17698 if (fctl & FC_FC_EX_CTX) { 17699 /* ABTS sent by responder to CT exchange, construction 17700 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17701 * field and RX_ID from ABTS for RX_ID field. 17702 */ 17703 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17704 } else { 17705 /* ABTS sent by initiator to CT exchange, construction 17706 * of BA_ACC will need to allocate a new XRI as for the 17707 * XRI_TAG field. 17708 */ 17709 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17710 } 17711 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17712 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17713 17714 /* Xmit CT abts response on exchange <xid> */ 17715 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17716 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17717 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17718 17719 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17720 if (rc == IOCB_ERROR) { 17721 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17722 "2925 Failed to issue CT ABTS RSP x%x on " 17723 "xri x%x, Data x%x\n", 17724 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17725 phba->link_state); 17726 lpfc_nlp_put(ndlp); 17727 ctiocb->context1 = NULL; 17728 lpfc_sli_release_iocbq(phba, ctiocb); 17729 } 17730 } 17731 17732 /** 17733 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17734 * @vport: Pointer to the vport on which this sequence was received 17735 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17736 * 17737 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17738 * receive sequence is only partially assembed by the driver, it shall abort 17739 * the partially assembled frames for the sequence. 
Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the
 * unsolicited sequence as aborted. After that, it will issue a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption. Physical vpi.
*/ 17876 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17877 vport->phba->vpi_ids[vport->vpi]; 17878 /* put the first buffer into the first IOCBq */ 17879 tot_len = bf_get(lpfc_rcqe_length, 17880 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17881 17882 first_iocbq->context2 = &seq_dmabuf->dbuf; 17883 first_iocbq->context3 = NULL; 17884 first_iocbq->iocb.ulpBdeCount = 1; 17885 if (tot_len > LPFC_DATA_BUF_SIZE) 17886 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17887 LPFC_DATA_BUF_SIZE; 17888 else 17889 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17890 17891 first_iocbq->iocb.un.rcvels.remoteID = sid; 17892 17893 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17894 } 17895 iocbq = first_iocbq; 17896 /* 17897 * Each IOCBq can have two Buffers assigned, so go through the list 17898 * of buffers for this sequence and save two buffers in each IOCBq 17899 */ 17900 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17901 if (!iocbq) { 17902 lpfc_in_buf_free(vport->phba, d_buf); 17903 continue; 17904 } 17905 if (!iocbq->context3) { 17906 iocbq->context3 = d_buf; 17907 iocbq->iocb.ulpBdeCount++; 17908 /* We need to get the size out of the right CQE */ 17909 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17910 len = bf_get(lpfc_rcqe_length, 17911 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17912 pbde = (struct ulp_bde64 *) 17913 &iocbq->iocb.unsli3.sli3Words[4]; 17914 if (len > LPFC_DATA_BUF_SIZE) 17915 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17916 else 17917 pbde->tus.f.bdeSize = len; 17918 17919 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17920 tot_len += len; 17921 } else { 17922 iocbq = lpfc_sli_get_iocbq(vport->phba); 17923 if (!iocbq) { 17924 if (first_iocbq) { 17925 first_iocbq->iocb.ulpStatus = 17926 IOSTAT_FCP_RSP_ERROR; 17927 first_iocbq->iocb.un.ulpWord[4] = 17928 IOERR_NO_RESOURCES; 17929 } 17930 lpfc_in_buf_free(vport->phba, d_buf); 17931 continue; 17932 } 17933 /* We need to get the size out of the right CQE */ 17934 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17935 len = bf_get(lpfc_rcqe_length, 17936 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17937 iocbq->context2 = d_buf; 17938 iocbq->context3 = NULL; 17939 iocbq->iocb.ulpBdeCount = 1; 17940 if (len > LPFC_DATA_BUF_SIZE) 17941 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17942 LPFC_DATA_BUF_SIZE; 17943 else 17944 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17945 17946 tot_len += len; 17947 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17948 17949 iocbq->iocb.un.rcvels.remoteID = sid; 17950 list_add_tail(&iocbq->list, &first_iocbq->list); 17951 } 17952 } 17953 return first_iocbq; 17954 } 17955 17956 static void 17957 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17958 struct hbq_dmabuf *seq_dmabuf) 17959 { 17960 struct fc_frame_header *fc_hdr; 17961 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17962 struct lpfc_hba *phba = vport->phba; 17963 17964 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17965 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17966 if (!iocbq) { 17967 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17968 "2707 Ring %d handler: Failed to allocate " 17969 "iocb Rctl x%x Type x%x received\n", 17970 LPFC_ELS_RING, 17971 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17972 return; 17973 } 17974 if (!lpfc_complete_unsol_iocb(phba, 17975 phba->sli4_hba.els_wq->pring, 17976 iocbq, fc_hdr->fh_r_ctl, 17977 fc_hdr->fh_type)) 17978 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17979 "2540 Ring %d handler: unexpected Rctl " 17980 "x%x Type x%x received\n", 17981 LPFC_ELS_RING, 17982 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17983 17984 /* Free iocb created in lpfc_prep_seq */ 17985 list_for_each_entry_safe(curr_iocb, next_iocb, 17986 &iocbq->list, list) { 17987 list_del_init(&curr_iocb->list); 17988 lpfc_sli_release_iocbq(phba, curr_iocb); 17989 } 17990 lpfc_sli_release_iocbq(phba, iocbq); 17991 } 17992 17993 static void 17994 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17995 struct lpfc_iocbq *rspiocb) 17996 { 17997 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17998 17999 if (pcmd && pcmd->virt) 18000 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 18001 kfree(pcmd); 18002 lpfc_sli_release_iocbq(phba, cmdiocb); 18003 lpfc_drain_txq(phba); 18004 } 18005 18006 static void 18007 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 18008 struct hbq_dmabuf *dmabuf) 18009 { 18010 struct fc_frame_header *fc_hdr; 18011 struct lpfc_hba *phba = vport->phba; 18012 struct lpfc_iocbq *iocbq = NULL; 18013 union lpfc_wqe *wqe; 18014 struct lpfc_dmabuf *pcmd = NULL; 18015 uint32_t frame_len; 18016 int rc; 18017 unsigned long iflags; 18018 18019 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 18020 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 18021 18022 /* Send the received frame back */ 18023 iocbq = lpfc_sli_get_iocbq(phba); 18024 if (!iocbq) { 18025 /* Queue cq event and wakeup worker thread to process it */ 18026 spin_lock_irqsave(&phba->hbalock, iflags); 18027 list_add_tail(&dmabuf->cq_event.list, 18028 &phba->sli4_hba.sp_queue_event); 18029 phba->hba_flag |= HBA_SP_QUEUE_EVT; 18030 spin_unlock_irqrestore(&phba->hbalock, iflags); 18031 lpfc_worker_wake_up(phba); 18032 return; 18033 } 18034 18035 /* Allocate buffer for command payload */ 18036 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 18037 if (pcmd) 18038 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 18039 &pcmd->phys); 18040 if (!pcmd || !pcmd->virt) 18041 goto exit; 18042 18043 INIT_LIST_HEAD(&pcmd->list); 18044 18045 /* copyin the payload */ 18046 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 18047 18048 /* fill in BDE's for command */ 18049 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 18050 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 18051 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 18052 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 18053 18054 iocbq->context2 = pcmd; 18055 iocbq->vport = vport; 18056 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 18057 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 18058 18059 /* 18060 * Setup rest of the iocb as though it were a WQE 18061 * Build the SEND_FRAME WQE 18062 */ 18063 wqe = (union lpfc_wqe *)&iocbq->iocb; 18064 18065 wqe->send_frame.frame_len = frame_len; 18066 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 18067 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 18068 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 18069 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 18070 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 18071 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 18072 18073 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 18074 iocbq->iocb.ulpLe = 1; 18075 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 18076 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 18077 if (rc == IOCB_ERROR) 18078 goto exit; 18079 18080 lpfc_in_buf_free(phba, &dmabuf->dbuf); 18081 return; 18082 18083 exit: 18084 lpfc_printf_log(phba, 
KERN_WARNING, LOG_SLI, 18085 "2023 Unable to process MDS loopback frame\n"); 18086 if (pcmd && pcmd->virt) 18087 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 18088 kfree(pcmd); 18089 if (iocbq) 18090 lpfc_sli_release_iocbq(phba, iocbq); 18091 lpfc_in_buf_free(phba, &dmabuf->dbuf); 18092 } 18093 18094 /** 18095 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 18096 * @phba: Pointer to HBA context object. 18097 * 18098 * This function is called with no lock held. This function processes all 18099 * the received buffers and gives it to upper layers when a received buffer 18100 * indicates that it is the final frame in the sequence. The interrupt 18101 * service routine processes received buffers at interrupt contexts. 18102 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 18103 * appropriate receive function when the final frame in a sequence is received. 18104 **/ 18105 void 18106 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 18107 struct hbq_dmabuf *dmabuf) 18108 { 18109 struct hbq_dmabuf *seq_dmabuf; 18110 struct fc_frame_header *fc_hdr; 18111 struct lpfc_vport *vport; 18112 uint32_t fcfi; 18113 uint32_t did; 18114 18115 /* Process each received buffer */ 18116 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 18117 18118 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 18119 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 18120 vport = phba->pport; 18121 /* Handle MDS Loopback frames */ 18122 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 18123 return; 18124 } 18125 18126 /* check to see if this a valid type of frame */ 18127 if (lpfc_fc_frame_check(phba, fc_hdr)) { 18128 lpfc_in_buf_free(phba, &dmabuf->dbuf); 18129 return; 18130 } 18131 18132 if ((bf_get(lpfc_cqe_code, 18133 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 18134 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 18135 &dmabuf->cq_event.cqe.rcqe_cmpl); 18136 else 18137 fcfi = bf_get(lpfc_rcqe_fcf_id, 18138 &dmabuf->cq_event.cqe.rcqe_cmpl); 18139 18140 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 18141 vport = phba->pport; 18142 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 18143 "2023 MDS Loopback %d bytes\n", 18144 bf_get(lpfc_rcqe_length, 18145 &dmabuf->cq_event.cqe.rcqe_cmpl)); 18146 /* Handle MDS Loopback frames */ 18147 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 18148 return; 18149 } 18150 18151 /* d_id this frame is directed to */ 18152 did = sli4_did_from_fc_hdr(fc_hdr); 18153 18154 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 18155 if (!vport) { 18156 /* throw out the frame */ 18157 lpfc_in_buf_free(phba, &dmabuf->dbuf); 18158 return; 18159 } 18160 18161 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 18162 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 18163 (did != Fabric_DID)) { 18164 /* 18165 * Throw out the frame if we are not pt2pt. 18166 * The pt2pt protocol allows for discovery frames 18167 * to be received without a registered VPI. 
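		 * Concretely, the frame is kept only while FC_PT2PT is
		 * set and the link has not yet reached LPFC_HBA_READY.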
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE / 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *		When this error occurs, the driver is not guaranteed
 *		to have any rpi regions posted to the device and
 *		must either attempt to repost the regions or take a
 *		fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources. A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
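 *
 * Usage sketch, mirroring the low-water-mark path in
 * lpfc_sli4_alloc_rpi() below:
 *
 *	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
 *	if (rpi_hdr)
 *		lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);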
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value used
		 * to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate an available rpi from the driver's
 * rpi bitmask, posting another rpi header region to the port when the
 * number of remaining rpis runs low.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO,
			LOG_NODE | LOG_DISCOVERY,
			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/*
	 * if the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return;

	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	} else {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NODE | LOG_DISCOVERY,
				"2016 rpi %x not inuse\n",
				rpi);
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi resources via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: completion handler to invoke when the mailbox completes, may be NULL.
 * @arg: context buffer passed back to @cmpl.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command for the
 * rpi of @ndlp so that the port resumes processing on that rpi.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to be issued.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resume via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct the RESUME_RPI mailbox command. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
		mboxq->ctx_ndlp = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the nonembedded mailbox
 * command issued by lpfc_sli4_add_fcf_record(). It checks the mailbox
 * subheader status and releases the mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader.
*/ 18576 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18577 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18578 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18579 18580 if ((shdr_status || shdr_add_status) && 18581 (shdr_status != STATUS_FCF_IN_USE)) 18582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18583 "2558 ADD_FCF_RECORD mailbox failed with " 18584 "status x%x add_status x%x\n", 18585 shdr_status, shdr_add_status); 18586 18587 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18588 } 18589 18590 /** 18591 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18592 * @phba: pointer to lpfc hba data structure. 18593 * @fcf_record: pointer to the initialized fcf record to add. 18594 * 18595 * This routine is invoked to manually add a single FCF record. The caller 18596 * must pass a completely initialized FCF_Record. This routine takes 18597 * care of the nonembedded mailbox operations. 18598 **/ 18599 int 18600 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18601 { 18602 int rc = 0; 18603 LPFC_MBOXQ_t *mboxq; 18604 uint8_t *bytep; 18605 void *virt_addr; 18606 struct lpfc_mbx_sge sge; 18607 uint32_t alloc_len, req_len; 18608 uint32_t fcfindex; 18609 18610 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18611 if (!mboxq) { 18612 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18613 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18614 return -ENOMEM; 18615 } 18616 18617 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18618 sizeof(uint32_t); 18619 18620 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18621 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18622 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18623 req_len, LPFC_SLI4_MBX_NEMBED); 18624 if (alloc_len < req_len) { 18625 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18626 "2523 Allocated DMA memory size (x%x) is " 18627 "less than the requested DMA memory " 18628 "size (x%x)\n", alloc_len, req_len); 18629 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18630 return -ENOMEM; 18631 } 18632 18633 /* 18634 * Get the first SGE entry from the non-embedded DMA memory. This 18635 * routine only uses a single SGE. 18636 */ 18637 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18638 virt_addr = mboxq->sge_array->addr[0]; 18639 /* 18640 * Configure the FCF record for FCFI 0. This is the driver's 18641 * hardcoded default and gets used in nonFIP mode. 18642 */ 18643 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18644 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18645 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18646 18647 /* 18648 * Copy the fcf_index and the FCF Record Data. The data starts after 18649 * the FCoE header plus word10. The data copy needs to be endian 18650 * correct. 18651 */ 18652 bytep += sizeof(uint32_t); 18653 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18654 mboxq->vport = phba->pport; 18655 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18656 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18657 if (rc == MBX_NOT_FINISHED) { 18658 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18659 "2515 ADD_FCF_RECORD mailbox failed with " 18660 "status 0x%x\n", rc); 18661 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18662 rc = -EIO; 18663 } else 18664 rc = 0; 18665 18666 return rc; 18667 } 18668 18669 /** 18670 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18671 * @phba: pointer to lpfc hba data structure. 
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 *
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, non-zero
 * otherwise.
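 *
 * A minimal usage sketch (illustrative only; the call site shown is an
 * assumption, not driver code). An initial scan of the whole table is
 * kicked off from the driver's "get first" index:
 *
 *     rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *     if (rc)
 *             return rc; /* scan not submitted; FCF_TS_INPROG was cleared */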
18720 **/ 18721 int 18722 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18723 { 18724 int rc = 0, error; 18725 LPFC_MBOXQ_t *mboxq; 18726 18727 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18728 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18729 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18730 if (!mboxq) { 18731 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18732 "2000 Failed to allocate mbox for " 18733 "READ_FCF cmd\n"); 18734 error = -ENOMEM; 18735 goto fail_fcf_scan; 18736 } 18737 /* Construct the read FCF record mailbox command */ 18738 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18739 if (rc) { 18740 error = -EINVAL; 18741 goto fail_fcf_scan; 18742 } 18743 /* Issue the mailbox command asynchronously */ 18744 mboxq->vport = phba->pport; 18745 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18746 18747 spin_lock_irq(&phba->hbalock); 18748 phba->hba_flag |= FCF_TS_INPROG; 18749 spin_unlock_irq(&phba->hbalock); 18750 18751 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18752 if (rc == MBX_NOT_FINISHED) 18753 error = -EIO; 18754 else { 18755 /* Reset eligible FCF count for new scan */ 18756 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18757 phba->fcf.eligible_fcf_cnt = 0; 18758 error = 0; 18759 } 18760 fail_fcf_scan: 18761 if (error) { 18762 if (mboxq) 18763 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18764 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18765 spin_lock_irq(&phba->hbalock); 18766 phba->hba_flag &= ~FCF_TS_INPROG; 18767 spin_unlock_irq(&phba->hbalock); 18768 } 18769 return error; 18770 } 18771 18772 /** 18773 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18774 * @phba: pointer to lpfc hba data structure. 18775 * @fcf_index: FCF table entry offset. 18776 * 18777 * This routine is invoked to read an FCF record indicated by @fcf_index 18778 * and to use it for FLOGI roundrobin FCF failover. 18779 * 18780 * Return 0 if the mailbox command is submitted successfully, none 0 18781 * otherwise. 18782 **/ 18783 int 18784 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18785 { 18786 int rc = 0, error; 18787 LPFC_MBOXQ_t *mboxq; 18788 18789 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18790 if (!mboxq) { 18791 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18792 "2763 Failed to allocate mbox for " 18793 "READ_FCF cmd\n"); 18794 error = -ENOMEM; 18795 goto fail_fcf_read; 18796 } 18797 /* Construct the read FCF record mailbox command */ 18798 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18799 if (rc) { 18800 error = -EINVAL; 18801 goto fail_fcf_read; 18802 } 18803 /* Issue the mailbox command asynchronously */ 18804 mboxq->vport = phba->pport; 18805 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18806 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18807 if (rc == MBX_NOT_FINISHED) 18808 error = -EIO; 18809 else 18810 error = 0; 18811 18812 fail_fcf_read: 18813 if (error && mboxq) 18814 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18815 return error; 18816 } 18817 18818 /** 18819 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18820 * @phba: pointer to lpfc hba data structure. 18821 * @fcf_index: FCF table entry offset. 18822 * 18823 * This routine is invoked to read an FCF record indicated by @fcf_index to 18824 * determine whether it's eligible for FLOGI roundrobin failover list. 18825 * 18826 * Return 0 if the mailbox command is submitted successfully, none 0 18827 * otherwise. 
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest
 * priority down to the lowest. The most likely FCF candidate will be in
 * the highest priority group. When this routine is called it searches
 * the fcf_pri list for the next lowest priority group and repopulates
 * the rr_bmask with only those fcf_indexes.
 *
 * Returns:
 *   1 = success, 0 = failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
				    LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * The first priority that has not had a FLOGI failure
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * If next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
18932 */ 18933 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18934 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18935 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18936 /* 18937 * the 1st priority that has not FLOGI failed 18938 * will be the highest. 18939 */ 18940 if (!next_fcf_pri) 18941 next_fcf_pri = fcf_pri->fcf_rec.priority; 18942 spin_unlock_irq(&phba->hbalock); 18943 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18944 rc = lpfc_sli4_fcf_rr_index_set(phba, 18945 fcf_pri->fcf_rec.fcf_index); 18946 if (rc) 18947 return 0; 18948 } 18949 spin_lock_irq(&phba->hbalock); 18950 } 18951 } else 18952 ret = 1; 18953 spin_unlock_irq(&phba->hbalock); 18954 18955 return ret; 18956 } 18957 /** 18958 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18959 * @phba: pointer to lpfc hba data structure. 18960 * 18961 * This routine is to get the next eligible FCF record index in a round 18962 * robin fashion. If the next eligible FCF record index equals to the 18963 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18964 * shall be returned, otherwise, the next eligible FCF record's index 18965 * shall be returned. 18966 **/ 18967 uint16_t 18968 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18969 { 18970 uint16_t next_fcf_index; 18971 18972 initial_priority: 18973 /* Search start from next bit of currently registered FCF index */ 18974 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18975 18976 next_priority: 18977 /* Determine the next fcf index to check */ 18978 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18979 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18980 LPFC_SLI4_FCF_TBL_INDX_MAX, 18981 next_fcf_index); 18982 18983 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18984 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18985 /* 18986 * If we have wrapped then we need to clear the bits that 18987 * have been tested so that we can detect when we should 18988 * change the priority level. 18989 */ 18990 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18991 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18992 } 18993 18994 18995 /* Check roundrobin failover list empty condition */ 18996 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18997 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18998 /* 18999 * If next fcf index is not found check if there are lower 19000 * Priority level fcf's in the fcf_priority list. 19001 * Set up the rr_bmask with all of the avaiable fcf bits 19002 * at that level and continue the selection process. 19003 */ 19004 if (lpfc_check_next_fcf_pri_level(phba)) 19005 goto initial_priority; 19006 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 19007 "2844 No roundrobin failover FCF available\n"); 19008 19009 return LPFC_FCOE_FCF_NEXT_NONE; 19010 } 19011 19012 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 19013 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 19014 LPFC_FCF_FLOGI_FAILED) { 19015 if (list_is_singular(&phba->fcf.fcf_pri_list)) 19016 return LPFC_FCOE_FCF_NEXT_NONE; 19017 19018 goto next_priority; 19019 } 19020 19021 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 19022 "2845 Get next roundrobin failover FCF (x%x)\n", 19023 next_fcf_index); 19024 19025 return next_fcf_index; 19026 } 19027 19028 /** 19029 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 19030 * @phba: pointer to lpfc hba data structure. 
 * @fcf_index: FCF table entry offset.
 *
 * This routine sets the FCF record index into the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to the rediscover FCF table mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
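 *
 * Two failure paths are handled below: a CVL-triggered rediscover falls
 * back to lpfc_retry_pport_discovery(), while a DEAD-FCF-triggered
 * rediscover falls through to lpfc_sli4_fcf_dead_failthrough().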
19104 **/ 19105 static void 19106 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 19107 { 19108 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 19109 uint32_t shdr_status, shdr_add_status; 19110 19111 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 19112 19113 shdr_status = bf_get(lpfc_mbox_hdr_status, 19114 &redisc_fcf->header.cfg_shdr.response); 19115 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 19116 &redisc_fcf->header.cfg_shdr.response); 19117 if (shdr_status || shdr_add_status) { 19118 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 19119 "2746 Requesting for FCF rediscovery failed " 19120 "status x%x add_status x%x\n", 19121 shdr_status, shdr_add_status); 19122 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 19123 spin_lock_irq(&phba->hbalock); 19124 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 19125 spin_unlock_irq(&phba->hbalock); 19126 /* 19127 * CVL event triggered FCF rediscover request failed, 19128 * last resort to re-try current registered FCF entry. 19129 */ 19130 lpfc_retry_pport_discovery(phba); 19131 } else { 19132 spin_lock_irq(&phba->hbalock); 19133 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 19134 spin_unlock_irq(&phba->hbalock); 19135 /* 19136 * DEAD FCF event triggered FCF rediscover request 19137 * failed, last resort to fail over as a link down 19138 * to FCF registration. 19139 */ 19140 lpfc_sli4_fcf_dead_failthrough(phba); 19141 } 19142 } else { 19143 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 19144 "2775 Start FCF rediscover quiescent timer\n"); 19145 /* 19146 * Start FCF rediscovery wait timer for pending FCF 19147 * before rescan FCF record table. 19148 */ 19149 lpfc_fcf_redisc_wait_start_timer(phba); 19150 } 19151 19152 mempool_free(mbox, phba->mbox_mem_pool); 19153 } 19154 19155 /** 19156 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 19157 * @phba: pointer to lpfc hba data structure. 19158 * 19159 * This routine is invoked to request for rediscovery of the entire FCF table 19160 * by the port. 19161 **/ 19162 int 19163 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 19164 { 19165 LPFC_MBOXQ_t *mbox; 19166 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 19167 int rc, length; 19168 19169 /* Cancel retry delay timers to all vports before FCF rediscover */ 19170 lpfc_cancel_all_vport_retry_delay_timer(phba); 19171 19172 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19173 if (!mbox) { 19174 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19175 "2745 Failed to allocate mbox for " 19176 "requesting FCF rediscover.\n"); 19177 return -ENOMEM; 19178 } 19179 19180 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 19181 sizeof(struct lpfc_sli4_cfg_mhdr)); 19182 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 19183 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 19184 length, LPFC_SLI4_MBX_EMBED); 19185 19186 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 19187 /* Set count to 0 for invalidating the entire FCF database */ 19188 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 19189 19190 /* Issue the mailbox command asynchronously */ 19191 mbox->vport = phba->pport; 19192 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 19193 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 19194 19195 if (rc == MBX_NOT_FINISHED) { 19196 mempool_free(mbox, phba->mbox_mem_pool); 19197 return -EIO; 19198 } 19199 return 0; 19200 } 19201 19202 /** 19203 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 19204 * @phba: pointer to lpfc hba data structure. 
19205 * 19206 * This function is the failover routine as a last resort to the FCF DEAD 19207 * event when driver failed to perform fast FCF failover. 19208 **/ 19209 void 19210 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 19211 { 19212 uint32_t link_state; 19213 19214 /* 19215 * Last resort as FCF DEAD event failover will treat this as 19216 * a link down, but save the link state because we don't want 19217 * it to be changed to Link Down unless it is already down. 19218 */ 19219 link_state = phba->link_state; 19220 lpfc_linkdown(phba); 19221 phba->link_state = link_state; 19222 19223 /* Unregister FCF if no devices connected to it */ 19224 lpfc_unregister_unused_fcf(phba); 19225 } 19226 19227 /** 19228 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 19229 * @phba: pointer to lpfc hba data structure. 19230 * @rgn23_data: pointer to configure region 23 data. 19231 * 19232 * This function gets SLI3 port configure region 23 data through memory dump 19233 * mailbox command. When it successfully retrieves data, the size of the data 19234 * will be returned, otherwise, 0 will be returned. 19235 **/ 19236 static uint32_t 19237 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19238 { 19239 LPFC_MBOXQ_t *pmb = NULL; 19240 MAILBOX_t *mb; 19241 uint32_t offset = 0; 19242 int rc; 19243 19244 if (!rgn23_data) 19245 return 0; 19246 19247 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19248 if (!pmb) { 19249 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19250 "2600 failed to allocate mailbox memory\n"); 19251 return 0; 19252 } 19253 mb = &pmb->u.mb; 19254 19255 do { 19256 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 19257 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 19258 19259 if (rc != MBX_SUCCESS) { 19260 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19261 "2601 failed to read config " 19262 "region 23, rc 0x%x Status 0x%x\n", 19263 rc, mb->mbxStatus); 19264 mb->un.varDmp.word_cnt = 0; 19265 } 19266 /* 19267 * dump mem may return a zero when finished or we got a 19268 * mailbox error, either way we are done. 19269 */ 19270 if (mb->un.varDmp.word_cnt == 0) 19271 break; 19272 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19273 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19274 19275 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19276 rgn23_data + offset, 19277 mb->un.varDmp.word_cnt); 19278 offset += mb->un.varDmp.word_cnt; 19279 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19280 19281 mempool_free(pmb, phba->mbox_mem_pool); 19282 return offset; 19283 } 19284 19285 /** 19286 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 19287 * @phba: pointer to lpfc hba data structure. 19288 * @rgn23_data: pointer to configure region 23 data. 19289 * 19290 * This function gets SLI4 port configure region 23 data through memory dump 19291 * mailbox command. When it successfully retrieves data, the size of the data 19292 * will be returned, otherwise, 0 will be returned. 
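 *
 * A minimal usage sketch (illustrative; it mirrors the call made from
 * lpfc_sli_read_link_ste() below):
 *
 *     rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *     if (rgn23_data)
 *             data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);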
19293 **/ 19294 static uint32_t 19295 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19296 { 19297 LPFC_MBOXQ_t *mboxq = NULL; 19298 struct lpfc_dmabuf *mp = NULL; 19299 struct lpfc_mqe *mqe; 19300 uint32_t data_length = 0; 19301 int rc; 19302 19303 if (!rgn23_data) 19304 return 0; 19305 19306 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19307 if (!mboxq) { 19308 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19309 "3105 failed to allocate mailbox memory\n"); 19310 return 0; 19311 } 19312 19313 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19314 goto out; 19315 mqe = &mboxq->u.mqe; 19316 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 19317 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19318 if (rc) 19319 goto out; 19320 data_length = mqe->un.mb_words[5]; 19321 if (data_length == 0) 19322 goto out; 19323 if (data_length > DMP_RGN23_SIZE) { 19324 data_length = 0; 19325 goto out; 19326 } 19327 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 19328 out: 19329 mempool_free(mboxq, phba->mbox_mem_pool); 19330 if (mp) { 19331 lpfc_mbuf_free(phba, mp->virt, mp->phys); 19332 kfree(mp); 19333 } 19334 return data_length; 19335 } 19336 19337 /** 19338 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 19339 * @phba: pointer to lpfc hba data structure. 19340 * 19341 * This function read region 23 and parse TLV for port status to 19342 * decide if the user disaled the port. If the TLV indicates the 19343 * port is disabled, the hba_flag is set accordingly. 19344 **/ 19345 void 19346 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 19347 { 19348 uint8_t *rgn23_data = NULL; 19349 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 19350 uint32_t offset = 0; 19351 19352 /* Get adapter Region 23 data */ 19353 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 19354 if (!rgn23_data) 19355 goto out; 19356 19357 if (phba->sli_rev < LPFC_SLI_REV4) 19358 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 19359 else { 19360 if_type = bf_get(lpfc_sli_intf_if_type, 19361 &phba->sli4_hba.sli_intf); 19362 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 19363 goto out; 19364 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 19365 } 19366 19367 if (!data_size) 19368 goto out; 19369 19370 /* Check the region signature first */ 19371 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19372 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19373 "2619 Config region 23 has bad signature\n"); 19374 goto out; 19375 } 19376 offset += 4; 19377 19378 /* Check the data structure version */ 19379 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19380 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19381 "2620 Config region 23 has bad version\n"); 19382 goto out; 19383 } 19384 offset += 4; 19385 19386 /* Parse TLV entries in the region */ 19387 while (offset < data_size) { 19388 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 19389 break; 19390 /* 19391 * If the TLV is not driver specific TLV or driver id is 19392 * not linux driver id, skip the record. 19393 */ 19394 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 19395 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 19396 (rgn23_data[offset + 3] != 0)) { 19397 offset += rgn23_data[offset + 1] * 4 + 4; 19398 continue; 19399 } 19400 19401 /* Driver found a driver specific TLV in the config region */ 19402 sub_tlv_len = rgn23_data[offset + 1] * 4; 19403 offset += 4; 19404 tlv_offset = 0; 19405 19406 /* 19407 * Search for configured port state sub-TLV. 
 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				/* Advance both offsets by this record's
				 * length before offset moves past it.
				 */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful and offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_WRITE_OBJECT,
			 sizeof(struct lpfc_mbx_wr_object) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
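	/*
	 * At this point i BDEs describe the chunk being transferred:
	 * 'written' bytes in total, with eof/eas set on the request once
	 * the end of the object was reached above.
	 */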
wr_object->u.request.bde_count = i; 19503 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 19504 if (!phba->sli4_hba.intr_enable) 19505 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 19506 else { 19507 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 19508 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 19509 } 19510 /* The IOCTL status is embedded in the mailbox subheader. */ 19511 shdr_status = bf_get(lpfc_mbox_hdr_status, 19512 &wr_object->header.cfg_shdr.response); 19513 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 19514 &wr_object->header.cfg_shdr.response); 19515 if (check_change_status) { 19516 shdr_change_status = bf_get(lpfc_wr_object_change_status, 19517 &wr_object->u.response); 19518 19519 if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || 19520 shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { 19521 shdr_csf = bf_get(lpfc_wr_object_csf, 19522 &wr_object->u.response); 19523 if (shdr_csf) 19524 shdr_change_status = 19525 LPFC_CHANGE_STATUS_PCI_RESET; 19526 } 19527 19528 switch (shdr_change_status) { 19529 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): 19530 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19531 "3198 Firmware write complete: System " 19532 "reboot required to instantiate\n"); 19533 break; 19534 case (LPFC_CHANGE_STATUS_FW_RESET): 19535 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19536 "3199 Firmware write complete: Firmware" 19537 " reset required to instantiate\n"); 19538 break; 19539 case (LPFC_CHANGE_STATUS_PORT_MIGRATION): 19540 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19541 "3200 Firmware write complete: Port " 19542 "Migration or PCI Reset required to " 19543 "instantiate\n"); 19544 break; 19545 case (LPFC_CHANGE_STATUS_PCI_RESET): 19546 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19547 "3201 Firmware write complete: PCI " 19548 "Reset required to instantiate\n"); 19549 break; 19550 default: 19551 break; 19552 } 19553 } 19554 if (rc != MBX_TIMEOUT) 19555 mempool_free(mbox, phba->mbox_mem_pool); 19556 if (shdr_status || shdr_add_status || rc) { 19557 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19558 "3025 Write Object mailbox failed with " 19559 "status x%x add_status x%x, mbx status x%x\n", 19560 shdr_status, shdr_add_status, rc); 19561 rc = -ENXIO; 19562 *offset = shdr_add_status; 19563 } else 19564 *offset += wr_object->u.response.actual_write_length; 19565 return rc; 19566 } 19567 19568 /** 19569 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 19570 * @vport: pointer to vport data structure. 19571 * 19572 * This function iterate through the mailboxq and clean up all REG_LOGIN 19573 * and REG_VPI mailbox commands associated with the vport. This function 19574 * is called when driver want to restart discovery of the vport due to 19575 * a Clear Virtual Link event. 
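 *
 * Illustrative call (the CVL handling site itself lives in the
 * discovery code and is assumed here):
 *
 *     lpfc_cleanup_pending_mbox(vport);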
19576 **/ 19577 void 19578 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19579 { 19580 struct lpfc_hba *phba = vport->phba; 19581 LPFC_MBOXQ_t *mb, *nextmb; 19582 struct lpfc_dmabuf *mp; 19583 struct lpfc_nodelist *ndlp; 19584 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19585 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19586 LIST_HEAD(mbox_cmd_list); 19587 uint8_t restart_loop; 19588 19589 /* Clean up internally queued mailbox commands with the vport */ 19590 spin_lock_irq(&phba->hbalock); 19591 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19592 if (mb->vport != vport) 19593 continue; 19594 19595 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19596 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19597 continue; 19598 19599 list_del(&mb->list); 19600 list_add_tail(&mb->list, &mbox_cmd_list); 19601 } 19602 /* Clean up active mailbox command with the vport */ 19603 mb = phba->sli.mbox_active; 19604 if (mb && (mb->vport == vport)) { 19605 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19606 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19607 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19608 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19609 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19610 /* Put reference count for delayed processing */ 19611 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19612 /* Unregister the RPI when mailbox complete */ 19613 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19614 } 19615 } 19616 /* Cleanup any mailbox completions which are not yet processed */ 19617 do { 19618 restart_loop = 0; 19619 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19620 /* 19621 * If this mailox is already processed or it is 19622 * for another vport ignore it. 19623 */ 19624 if ((mb->vport != vport) || 19625 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19626 continue; 19627 19628 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19629 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19630 continue; 19631 19632 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19633 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19634 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19635 /* Unregister the RPI when mailbox complete */ 19636 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19637 restart_loop = 1; 19638 spin_unlock_irq(&phba->hbalock); 19639 spin_lock(shost->host_lock); 19640 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19641 spin_unlock(shost->host_lock); 19642 spin_lock_irq(&phba->hbalock); 19643 break; 19644 } 19645 } 19646 } while (restart_loop); 19647 19648 spin_unlock_irq(&phba->hbalock); 19649 19650 /* Release the cleaned-up mailbox commands */ 19651 while (!list_empty(&mbox_cmd_list)) { 19652 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19653 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19654 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 19655 if (mp) { 19656 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19657 kfree(mp); 19658 } 19659 mb->ctx_buf = NULL; 19660 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19661 mb->ctx_ndlp = NULL; 19662 if (ndlp) { 19663 spin_lock(shost->host_lock); 19664 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19665 spin_unlock(shost->host_lock); 19666 lpfc_nlp_put(ndlp); 19667 } 19668 } 19669 mempool_free(mb, phba->mbox_mem_pool); 19670 } 19671 19672 /* Release the ndlp with the cleaned-up active mailbox command */ 19673 if (act_mbx_ndlp) { 19674 spin_lock(shost->host_lock); 19675 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19676 spin_unlock(shost->host_lock); 19677 lpfc_nlp_put(act_mbx_ndlp); 19678 } 19679 } 19680 19681 /** 19682 * lpfc_drain_txq - Drain the txq 19683 * @phba: Pointer to 
HBA context object. 19684 * 19685 * This function attempt to submit IOCBs on the txq 19686 * to the adapter. For SLI4 adapters, the txq contains 19687 * ELS IOCBs that have been deferred because the there 19688 * are no SGLs. This congestion can occur with large 19689 * vport counts during node discovery. 19690 **/ 19691 19692 uint32_t 19693 lpfc_drain_txq(struct lpfc_hba *phba) 19694 { 19695 LIST_HEAD(completions); 19696 struct lpfc_sli_ring *pring; 19697 struct lpfc_iocbq *piocbq = NULL; 19698 unsigned long iflags = 0; 19699 char *fail_msg = NULL; 19700 struct lpfc_sglq *sglq; 19701 union lpfc_wqe128 wqe; 19702 uint32_t txq_cnt = 0; 19703 struct lpfc_queue *wq; 19704 19705 if (phba->link_flag & LS_MDS_LOOPBACK) { 19706 /* MDS WQE are posted only to first WQ*/ 19707 wq = phba->sli4_hba.hdwq[0].io_wq; 19708 if (unlikely(!wq)) 19709 return 0; 19710 pring = wq->pring; 19711 } else { 19712 wq = phba->sli4_hba.els_wq; 19713 if (unlikely(!wq)) 19714 return 0; 19715 pring = lpfc_phba_elsring(phba); 19716 } 19717 19718 if (unlikely(!pring) || list_empty(&pring->txq)) 19719 return 0; 19720 19721 spin_lock_irqsave(&pring->ring_lock, iflags); 19722 list_for_each_entry(piocbq, &pring->txq, list) { 19723 txq_cnt++; 19724 } 19725 19726 if (txq_cnt > pring->txq_max) 19727 pring->txq_max = txq_cnt; 19728 19729 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19730 19731 while (!list_empty(&pring->txq)) { 19732 spin_lock_irqsave(&pring->ring_lock, iflags); 19733 19734 piocbq = lpfc_sli_ringtx_get(phba, pring); 19735 if (!piocbq) { 19736 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19737 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19738 "2823 txq empty and txq_cnt is %d\n ", 19739 txq_cnt); 19740 break; 19741 } 19742 sglq = __lpfc_sli_get_els_sglq(phba, piocbq); 19743 if (!sglq) { 19744 __lpfc_sli_ringtx_put(phba, pring, piocbq); 19745 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19746 break; 19747 } 19748 txq_cnt--; 19749 19750 /* The xri and iocb resources secured, 19751 * attempt to issue request 19752 */ 19753 piocbq->sli4_lxritag = sglq->sli4_lxritag; 19754 piocbq->sli4_xritag = sglq->sli4_xritag; 19755 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 19756 fail_msg = "to convert bpl to sgl"; 19757 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19758 fail_msg = "to convert iocb to wqe"; 19759 else if (lpfc_sli4_wq_put(wq, &wqe)) 19760 fail_msg = " - Wq is full"; 19761 else 19762 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19763 19764 if (fail_msg) { 19765 /* Failed means we can't issue and need to cancel */ 19766 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19767 "2822 IOCB failed %s iotag 0x%x " 19768 "xri 0x%x\n", 19769 fail_msg, 19770 piocbq->iotag, piocbq->sli4_xritag); 19771 list_add_tail(&piocbq->list, &completions); 19772 } 19773 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19774 } 19775 19776 /* Cancel all the IOCBs that cannot be issued */ 19777 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 19778 IOERR_SLI_ABORTED); 19779 19780 return txq_cnt; 19781 } 19782 19783 /** 19784 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 19785 * @phba: Pointer to HBA context object. 19786 * @pwqe: Pointer to command WQE. 19787 * @sglq: Pointer to the scatter gather queue object. 19788 * 19789 * This routine converts the bpl or bde that is in the WQE 19790 * to a sgl list for the sli4 hardware. The physical address 19791 * of the bpl/bde is converted back to a virtual address. 19792 * If the WQE contains a BPL then the list of BDE's is 19793 * converted to sli4_sge's. 
If the WQE contains a single 19794 * BDE then it is converted to a single sli_sge. 19795 * The WQE is still in cpu endianness so the contents of 19796 * the bpl can be used without byte swapping. 19797 * 19798 * Returns valid XRI = Success, NO_XRI = Failure. 19799 */ 19800 static uint16_t 19801 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19802 struct lpfc_sglq *sglq) 19803 { 19804 uint16_t xritag = NO_XRI; 19805 struct ulp_bde64 *bpl = NULL; 19806 struct ulp_bde64 bde; 19807 struct sli4_sge *sgl = NULL; 19808 struct lpfc_dmabuf *dmabuf; 19809 union lpfc_wqe128 *wqe; 19810 int numBdes = 0; 19811 int i = 0; 19812 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19813 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19814 uint32_t cmd; 19815 19816 if (!pwqeq || !sglq) 19817 return xritag; 19818 19819 sgl = (struct sli4_sge *)sglq->sgl; 19820 wqe = &pwqeq->wqe; 19821 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19822 19823 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19824 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19825 return sglq->sli4_xritag; 19826 numBdes = pwqeq->rsvd2; 19827 if (numBdes) { 19828 /* The addrHigh and addrLow fields within the WQE 19829 * have not been byteswapped yet so there is no 19830 * need to swap them back. 19831 */ 19832 if (pwqeq->context3) 19833 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19834 else 19835 return xritag; 19836 19837 bpl = (struct ulp_bde64 *)dmabuf->virt; 19838 if (!bpl) 19839 return xritag; 19840 19841 for (i = 0; i < numBdes; i++) { 19842 /* Should already be byte swapped. */ 19843 sgl->addr_hi = bpl->addrHigh; 19844 sgl->addr_lo = bpl->addrLow; 19845 19846 sgl->word2 = le32_to_cpu(sgl->word2); 19847 if ((i+1) == numBdes) 19848 bf_set(lpfc_sli4_sge_last, sgl, 1); 19849 else 19850 bf_set(lpfc_sli4_sge_last, sgl, 0); 19851 /* swap the size field back to the cpu so we 19852 * can assign it to the sgl. 19853 */ 19854 bde.tus.w = le32_to_cpu(bpl->tus.w); 19855 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19856 /* The offsets in the sgl need to be accumulated 19857 * separately for the request and reply lists. 19858 * The request is always first, the reply follows. 19859 */ 19860 switch (cmd) { 19861 case CMD_GEN_REQUEST64_WQE: 19862 /* add up the reply sg entries */ 19863 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19864 inbound++; 19865 /* first inbound? reset the offset */ 19866 if (inbound == 1) 19867 offset = 0; 19868 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19869 bf_set(lpfc_sli4_sge_type, sgl, 19870 LPFC_SGE_TYPE_DATA); 19871 offset += bde.tus.f.bdeSize; 19872 break; 19873 case CMD_FCP_TRSP64_WQE: 19874 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19875 bf_set(lpfc_sli4_sge_type, sgl, 19876 LPFC_SGE_TYPE_DATA); 19877 break; 19878 case CMD_FCP_TSEND64_WQE: 19879 case CMD_FCP_TRECEIVE64_WQE: 19880 bf_set(lpfc_sli4_sge_type, sgl, 19881 bpl->tus.f.bdeFlags); 19882 if (i < 3) 19883 offset = 0; 19884 else 19885 offset += bde.tus.f.bdeSize; 19886 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19887 break; 19888 } 19889 sgl->word2 = cpu_to_le32(sgl->word2); 19890 bpl++; 19891 sgl++; 19892 } 19893 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19894 /* The addrHigh and addrLow fields of the BDE have not 19895 * been byteswapped yet so they need to be swapped 19896 * before putting them in the sgl. 
 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}

/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to the hardware queue on which to issue the WQE.
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment.
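		 * For NVMET the hardware queue was selected when the IO
		 * was received, so the WQE is posted to that queue's
		 * io_wq and the CQ id is set from qp->io_cq_map below.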
*/ 19982 wq = qp->io_wq; 19983 pring = wq->pring; 19984 19985 ctxp = pwqe->context2; 19986 sglq = ctxp->ctxbuf->sglq; 19987 if (pwqe->sli4_xritag == NO_XRI) { 19988 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19989 pwqe->sli4_xritag = sglq->sli4_xritag; 19990 } 19991 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19992 pwqe->sli4_xritag); 19993 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 19994 19995 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19996 qp, wq_access); 19997 ret = lpfc_sli4_wq_put(wq, wqe); 19998 if (ret) { 19999 spin_unlock_irqrestore(&pring->ring_lock, iflags); 20000 return ret; 20001 } 20002 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 20003 spin_unlock_irqrestore(&pring->ring_lock, iflags); 20004 20005 lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); 20006 return 0; 20007 } 20008 return WQE_ERROR; 20009 } 20010 20011 #ifdef LPFC_MXP_STAT 20012 /** 20013 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 20014 * @phba: pointer to lpfc hba data structure. 20015 * @hwqid: belong to which HWQ. 20016 * 20017 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 20018 * 15 seconds after a test case is running. 20019 * 20020 * The user should call lpfc_debugfs_multixripools_write before running a test 20021 * case to clear stat_snapshot_taken. Then the user starts a test case. During 20022 * test case is running, stat_snapshot_taken is incremented by 1 every time when 20023 * this routine is called from heartbeat timer. When stat_snapshot_taken is 20024 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 20025 **/ 20026 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 20027 { 20028 struct lpfc_sli4_hdw_queue *qp; 20029 struct lpfc_multixri_pool *multixri_pool; 20030 struct lpfc_pvt_pool *pvt_pool; 20031 struct lpfc_pbl_pool *pbl_pool; 20032 u32 txcmplq_cnt; 20033 20034 qp = &phba->sli4_hba.hdwq[hwqid]; 20035 multixri_pool = qp->p_multixri_pool; 20036 if (!multixri_pool) 20037 return; 20038 20039 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 20040 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20041 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20042 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 20043 20044 multixri_pool->stat_pbl_count = pbl_pool->count; 20045 multixri_pool->stat_pvt_count = pvt_pool->count; 20046 multixri_pool->stat_busy_count = txcmplq_cnt; 20047 } 20048 20049 multixri_pool->stat_snapshot_taken++; 20050 } 20051 #endif 20052 20053 /** 20054 * lpfc_adjust_pvt_pool_count - Adjust private pool count 20055 * @phba: pointer to lpfc hba data structure. 20056 * @hwqid: belong to which HWQ. 20057 * 20058 * This routine moves some XRIs from private to public pool when private pool 20059 * is not busy. 20060 **/ 20061 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 20062 { 20063 struct lpfc_multixri_pool *multixri_pool; 20064 u32 io_req_count; 20065 u32 prev_io_req_count; 20066 20067 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 20068 if (!multixri_pool) 20069 return; 20070 io_req_count = multixri_pool->io_req_count; 20071 prev_io_req_count = multixri_pool->prev_io_req_count; 20072 20073 if (prev_io_req_count != io_req_count) { 20074 /* Private pool is busy */ 20075 multixri_pool->prev_io_req_count = io_req_count; 20076 } else { 20077 /* Private pool is not busy. 20078 * Move XRIs from private to public pool. 
 */
		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
	}
}

/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose watermark is being adjusted.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * but makes sure the new value is between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
	u32 new_watermark;
	u32 watermark_max;
	u32 watermark_min;
	u32 xri_limit;
	u32 txcmplq_cnt;
	u32 abts_io_bufs;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_sli4_hdw_queue *qp;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
	xri_limit = multixri_pool->xri_limit;

	watermark_max = xri_limit;
	watermark_min = xri_limit / 2;

	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
	abts_io_bufs = qp->abts_scsi_io_bufs;
	abts_io_bufs += qp->abts_nvme_io_bufs;

	new_watermark = txcmplq_cnt + abts_io_bufs;
	new_watermark = min(watermark_max, new_watermark);
	new_watermark = max(watermark_min, new_watermark);
	multixri_pool->pvt_pool.high_watermark = new_watermark;

#ifdef LPFC_MXP_STAT
	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
					  new_watermark);
#endif
}

/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose pools are being rebalanced.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of XRIs.
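 *
 * Worked illustration (numbers assumed): with pvt_pool->count = 100 and
 * low_watermark = 16, the first pass keeps 16 bufs private and moves 84
 * to pbl_pool; if the pool is still idle on the next pass, the else
 * branch moves the remaining 16 as well.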
20136 **/ 20137 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 20138 { 20139 struct lpfc_pbl_pool *pbl_pool; 20140 struct lpfc_pvt_pool *pvt_pool; 20141 struct lpfc_sli4_hdw_queue *qp; 20142 struct lpfc_io_buf *lpfc_ncmd; 20143 struct lpfc_io_buf *lpfc_ncmd_next; 20144 unsigned long iflag; 20145 struct list_head tmp_list; 20146 u32 tmp_count; 20147 20148 qp = &phba->sli4_hba.hdwq[hwqid]; 20149 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20150 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20151 tmp_count = 0; 20152 20153 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 20154 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 20155 20156 if (pvt_pool->count > pvt_pool->low_watermark) { 20157 /* Step 1: move (all - low_watermark) from pvt_pool 20158 * to pbl_pool 20159 */ 20160 20161 /* Move low watermark of bufs from pvt_pool to tmp_list */ 20162 INIT_LIST_HEAD(&tmp_list); 20163 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 20164 &pvt_pool->list, list) { 20165 list_move_tail(&lpfc_ncmd->list, &tmp_list); 20166 tmp_count++; 20167 if (tmp_count >= pvt_pool->low_watermark) 20168 break; 20169 } 20170 20171 /* Move all bufs from pvt_pool to pbl_pool */ 20172 list_splice_init(&pvt_pool->list, &pbl_pool->list); 20173 20174 /* Move all bufs from tmp_list to pvt_pool */ 20175 list_splice(&tmp_list, &pvt_pool->list); 20176 20177 pbl_pool->count += (pvt_pool->count - tmp_count); 20178 pvt_pool->count = tmp_count; 20179 } else { 20180 /* Step 2: move the rest from pvt_pool to pbl_pool */ 20181 list_splice_init(&pvt_pool->list, &pbl_pool->list); 20182 pbl_pool->count += pvt_pool->count; 20183 pvt_pool->count = 0; 20184 } 20185 20186 spin_unlock(&pvt_pool->lock); 20187 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20188 } 20189 20190 /** 20191 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 20192 * @phba: pointer to lpfc hba data structure 20193 * @pbl_pool: specified public free XRI pool 20194 * @pvt_pool: specified private free XRI pool 20195 * @count: number of XRIs to move 20196 * 20197 * This routine tries to move some free common bufs from the specified pbl_pool 20198 * to the specified pvt_pool. It might move less than count XRIs if there's not 20199 * enough in public pool. 
20200 * 20201 * Return: 20202 * true - if XRIs are successfully moved from the specified pbl_pool to the 20203 * specified pvt_pool 20204 * false - if the specified pbl_pool is empty or locked by someone else 20205 **/ 20206 static bool 20207 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 20208 struct lpfc_pbl_pool *pbl_pool, 20209 struct lpfc_pvt_pool *pvt_pool, u32 count) 20210 { 20211 struct lpfc_io_buf *lpfc_ncmd; 20212 struct lpfc_io_buf *lpfc_ncmd_next; 20213 unsigned long iflag; 20214 int ret; 20215 20216 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 20217 if (ret) { 20218 if (pbl_pool->count) { 20219 /* Move a batch of XRIs from public to private pool */ 20220 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 20221 list_for_each_entry_safe(lpfc_ncmd, 20222 lpfc_ncmd_next, 20223 &pbl_pool->list, 20224 list) { 20225 list_move_tail(&lpfc_ncmd->list, 20226 &pvt_pool->list); 20227 pvt_pool->count++; 20228 pbl_pool->count--; 20229 count--; 20230 if (count == 0) 20231 break; 20232 } 20233 20234 spin_unlock(&pvt_pool->lock); 20235 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20236 return true; 20237 } 20238 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20239 } 20240 20241 return false; 20242 } 20243 20244 /** 20245 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 20246 * @phba: pointer to lpfc hba data structure. 20247 * @hwqid: belong to which HWQ. 20248 * @count: number of XRIs to move 20249 * 20250 * This routine tries to find some free common bufs in one of public pools with 20251 * Round Robin method. The search always starts from local hwqid, then the next 20252 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 20253 * a batch of free common bufs are moved to private pool on hwqid. 20254 * It might move less than count XRIs if there's not enough in public pool. 
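 *
 * Search-order illustration (values assumed): with hwq_count = 4, local
 * hwqid = 1 and rrb_next_hwqid = 2, the local pool on HWQ 1 is tried
 * first, then HWQs 3, 0, 1 and 2 in round robin order, stopping as soon
 * as one public pool yields bufs.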
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}

/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: index of the HWQ whose private pool is being refilled.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
 * low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: hardware queue the IO buf belongs to.
 *
 * This routine returns one IO buf back to free pool. If this is an urgent IO,
 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
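 *
 * Routing illustration (values assumed): with low_watermark = 20,
 * high_watermark = 80 and xri_limit = 100, the buf goes back to pvt_pool
 * when pvt_pool->count < 20, or when the owned XRI total (pvt_pool count
 * plus outstanding and aborted IOs) is under 100 and pvt_pool->count
 * < 80; otherwise it goes to pbl_pool.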
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to the HDW queue the private pool belongs to.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
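 * Bufs whose XRI is still active on the node's RRQ (see
 * lpfc_test_rrq_active()) are skipped, so NULL may be returned even when
 * the pool is not strictly empty.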
 *
 * Return:
 * pointer to one free IO buf - if private pool is not empty
 * NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from expedite pool.
 *
 * Return:
 * pointer to one free IO buf - if expedite pool is not empty
 * NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ this request belongs to.
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If the private free xri pool is empty, move some XRIs from the public
 *    to the private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
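 *
 * Example flow (illustrative): when the private pool is empty, up to
 * XRI_BATCH XRIs are first pulled over from a public pool before the
 * private pool is searched; only an expedite request then falls back to
 * the shared epd_pool.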
 *
 * Return:
 * pointer to one free IO buf - if a free IO buf is available
 * NULL - if the private pool (and, for expedite requests, the expedite
 *        pool) is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

/**
 * lpfc_io_buf - Pop one free IO buf off a HWQ's get list
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure (SCSI RRQ testing only).
 * @idx: index of the HWQ whose get list is searched.
 *
 * Helper for lpfc_get_io_buf(): returns the first buf on the get list that
 * is posted to the firmware and is not blocked by an active RRQ, or NULL.
 * The caller must hold the HWQ's io_buf_list_get_lock.
 **/
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: index of the HWQ the IO buffer is drawn from.
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * it removes an IO buffer from the head of the @hwqid io_buf_list and returns
 * it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
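 *
 * In the cfg_xri_rebalancing==0 path, the last LPFC_NVME_EXPEDITE_XRICNT
 * buffers on the get list are held in reserve for expedite requests; when
 * the first attempt finds nothing, the put list is spliced onto the get
 * list under both locks before one more attempt is made.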
 *
 * Return codes:
 * NULL - Error
 * Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}

/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 * NULL - Error
 * Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 * 0 - Success
 * -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of the hdwq's SGL chunk pool.
 *
 * Return codes:
 * None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
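 *
 * Each pool entry is a single DMA allocation holding the FCP command IU
 * immediately followed by the response IU: fcp_rsp is derived as
 * (struct fcp_rsp *)((uint8_t *)fcp_cmnd + sizeof(struct fcp_cmnd)), so
 * the pair always shares one dma_pool allocation and one DMA handle.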
 *
 * Return codes:
 * NULL - Error
 * Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
 *
 * Return codes:
 * 0 - Success
 * -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of the hdwq's CMD/RSP buf pool.
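 * It is intended for teardown paths where the hdwq itself is being
 * released; entries still attached to an in-flight IO buf sit on that
 * buf's dma_cmd_rsp_list, not on buf_list, and are therefore untouched.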
 *
 * Return codes:
 * None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
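
/*
 * Illustrative sketch (compiled out; not part of the driver): how a caller
 * might pair lpfc_get_io_buf() with lpfc_release_io_buf(). The helper name
 * lpfc_example_io_cycle and its bare-bones error handling are hypothetical;
 * the real SCSI and NVMe I/O paths perform considerably more setup between
 * the get and the release.
 */
#if 0
static int lpfc_example_io_cycle(struct lpfc_hba *phba,
				 struct lpfc_nodelist *ndlp, u32 hwqid)
{
	struct lpfc_io_buf *iobuf;

	/* Not urgent, so expedite == 0; a non-NULL ndlp enables the RRQ
	 * checks used on the SCSI side (pass NULL for NVME).
	 */
	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
	if (!iobuf)
		return -ENOMEM;	/* free pools for this HWQ are exhausted */

	/* ... build and submit a WQE using iobuf->cur_iocbq here ... */

	/* On completion (or failure to submit), return the buffer to the
	 * pool it came from; iobuf->hdwq was set by the get path.
	 */
	lpfc_release_io_buf(phba, iobuf, iobuf->hdwq);
	return 0;
}
#endif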