/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or  *
 * modify it under the terms of version 2 of the GNU General      *
 * Public License as published by the Free Software Foundation.   *
 * This program is distributed in the hope that it will be useful.*
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND         *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE     *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD*
 * TO BE LEGALLY INVALID.  See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package.                                    *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY; if @q itself is invalid it returns -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
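
/*
 * Illustrative sketch only (hypothetical helper, not called by the driver):
 * how a submission path might use lpfc_sli4_wq_put(). It assumes the caller
 * already holds the lock protecting @wq and has completely built @wqe.
 */
static int __maybe_unused lpfc_example_post_wqe(struct lpfc_queue *wq,
						union lpfc_wqe128 *wqe)
{
	int rc;

	/* 0 on success, -EBUSY if the WQ is full, -ENOMEM if @wq is bad */
	rc = lpfc_sli4_wq_put(wq, wqe);
	if (rc == -EBUSY)
		pr_debug("example: WQ %u full, caller should defer the WQE\n",
			 wq->queue_id);
	return rc;
}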

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	uint32_t count = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	eq->queue_claimed = 0;

rearm_and_exit:
	/* Always clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);

	return count;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
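
/*
 * Illustrative sketch only (hypothetical helper, not called by the driver):
 * the basic valid-bit polling pattern used for completion queues in this
 * file. It assumes the sli4_write_cq_db doorbell handler has been set up,
 * just as the EQ code above relies on sli4_write_eq_db. A real handler would
 * also dispatch each CQE and ring the doorbell periodically, not only at the
 * end.
 */
static int __maybe_unused lpfc_example_drain_cq(struct lpfc_hba *phba,
						struct lpfc_queue *cq)
{
	struct lpfc_cqe *cqe;
	int consumed = 0;

	while ((cqe = lpfc_sli4_cq_get(cq)) != NULL) {
		/* a real consumer would act on @cqe here */
		__lpfc_sli4_consume_cqe(phba, cq, cqe);
		consumed++;
	}

	/* tell the HBA how many entries were consumed and re-arm the CQ */
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_REARM);
	return consumed;
}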

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @hq then this function will
 * return -EBUSY; for invalid queues it returns -ENOMEM or -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
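
/*
 * Illustrative sketch only (hypothetical helper, not called by the driver):
 * posting one header/data buffer pair to an RQ pair with lpfc_sli4_rq_put().
 * It assumes the caller has already DMA-mapped the two buffers and that the
 * putPaddrLow()/putPaddrHigh() helpers from lpfc.h are available.
 */
static int __maybe_unused lpfc_example_post_rq_buf(struct lpfc_queue *hrq,
						   struct lpfc_queue *drq,
						   dma_addr_t hdr_phys,
						   dma_addr_t dat_phys)
{
	struct lpfc_rqe hrqe, drqe;

	hrqe.address_lo = putPaddrLow(hdr_phys);
	hrqe.address_hi = putPaddrHigh(hdr_phys);
	drqe.address_lo = putPaddrLow(dat_phys);
	drqe.address_hi = putPaddrHigh(dat_phys);

	/* returns the put index on success, or -EBUSY/-EINVAL/-ENOMEM */
	return lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
}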

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns a pointer to the next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if the RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if the stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq  = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did){
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If @ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If @ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
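
/*
 * Illustrative sketch only (hypothetical helper, not called by the driver):
 * before reusing an exchange with a given remote node, a caller can ask
 * whether an RRQ is still outstanding on that XRI via lpfc_test_rrq_active().
 */
static bool __maybe_unused lpfc_example_xri_usable(struct lpfc_hba *phba,
						   struct lpfc_nodelist *ndlp,
						   uint16_t xritag)
{
	/* non-zero means an RRQ is still active for this XRI and node */
	return lpfc_test_rrq_active(phba, ndlp, xritag) == 0;
}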

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns   0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it returns a pointer to the newly allocated sglq object, else it
 * returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		    !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it returns a pointer to the newly allocated sglq object,
 * else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns a pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
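
/*
 * Illustrative sketch only (hypothetical helper, not called by the driver):
 * the typical lifetime of a driver iocb object taken from the pool with
 * lpfc_sli_get_iocbq() and returned with lpfc_sli_release_iocbq().
 */
static int __maybe_unused lpfc_example_iocbq_cycle(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq;

	iocbq = lpfc_sli_get_iocbq(phba);	/* caller holds no locks */
	if (!iocbq)
		return -ENOMEM;

	/* ... a real caller would build and issue the command here ... */

	lpfc_sli_release_iocbq(phba, iocbq);	/* back to the free pool */
	return 0;
}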

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
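
/*
 * Illustrative sketch only (hypothetical helper, not called by the driver):
 * how a ring event handler might branch on the completion type returned by
 * lpfc_sli_iocb_cmd_type().
 */
static void __maybe_unused lpfc_example_classify_iocb(uint8_t ulp_command)
{
	switch (lpfc_sli_iocb_cmd_type(ulp_command)) {
	case LPFC_SOL_IOCB:
		pr_debug("example: solicited completion\n");
		break;
	case LPFC_UNSOL_IOCB:
		pr_debug("example: unsolicited event\n");
		break;
	case LPFC_ABORT_IOCB:
		pr_debug("example: abort completion\n");
		break;
	default:
		pr_debug("example: unknown/unsupported command\n");
		break;
	}
}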

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get the next
 * iocb in the txq of the given ring. If there is any iocb in
 * the txq, the function returns the first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns a pointer to the next available iocb slot if there
 * is an available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
1698 **/ 1699 uint16_t 1700 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1701 { 1702 struct lpfc_iocbq **new_arr; 1703 struct lpfc_iocbq **old_arr; 1704 size_t new_len; 1705 struct lpfc_sli *psli = &phba->sli; 1706 uint16_t iotag; 1707 1708 spin_lock_irq(&phba->hbalock); 1709 iotag = psli->last_iotag; 1710 if(++iotag < psli->iocbq_lookup_len) { 1711 psli->last_iotag = iotag; 1712 psli->iocbq_lookup[iotag] = iocbq; 1713 spin_unlock_irq(&phba->hbalock); 1714 iocbq->iotag = iotag; 1715 return iotag; 1716 } else if (psli->iocbq_lookup_len < (0xffff 1717 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1718 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1719 spin_unlock_irq(&phba->hbalock); 1720 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1721 GFP_KERNEL); 1722 if (new_arr) { 1723 spin_lock_irq(&phba->hbalock); 1724 old_arr = psli->iocbq_lookup; 1725 if (new_len <= psli->iocbq_lookup_len) { 1726 /* highly unprobable case */ 1727 kfree(new_arr); 1728 iotag = psli->last_iotag; 1729 if(++iotag < psli->iocbq_lookup_len) { 1730 psli->last_iotag = iotag; 1731 psli->iocbq_lookup[iotag] = iocbq; 1732 spin_unlock_irq(&phba->hbalock); 1733 iocbq->iotag = iotag; 1734 return iotag; 1735 } 1736 spin_unlock_irq(&phba->hbalock); 1737 return 0; 1738 } 1739 if (psli->iocbq_lookup) 1740 memcpy(new_arr, old_arr, 1741 ((psli->last_iotag + 1) * 1742 sizeof (struct lpfc_iocbq *))); 1743 psli->iocbq_lookup = new_arr; 1744 psli->iocbq_lookup_len = new_len; 1745 psli->last_iotag = iotag; 1746 psli->iocbq_lookup[iotag] = iocbq; 1747 spin_unlock_irq(&phba->hbalock); 1748 iocbq->iotag = iotag; 1749 kfree(old_arr); 1750 return iotag; 1751 } 1752 } else 1753 spin_unlock_irq(&phba->hbalock); 1754 1755 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1756 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1757 psli->last_iotag); 1758 1759 return 0; 1760 } 1761 1762 /** 1763 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1764 * @phba: Pointer to HBA context object. 1765 * @pring: Pointer to driver SLI ring object. 1766 * @iocb: Pointer to iocb slot in the ring. 1767 * @nextiocb: Pointer to driver iocb object which need to be 1768 * posted to firmware. 1769 * 1770 * This function is called with hbalock held to post a new iocb to 1771 * the firmware. This function copies the new iocb to ring iocb slot and 1772 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1773 * a completion call back for this iocb else the function will free the 1774 * iocb object. 1775 **/ 1776 static void 1777 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1778 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1779 { 1780 lockdep_assert_held(&phba->hbalock); 1781 /* 1782 * Set up an iotag 1783 */ 1784 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1785 1786 1787 if (pring->ringno == LPFC_ELS_RING) { 1788 lpfc_debugfs_slow_ring_trc(phba, 1789 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1790 *(((uint32_t *) &nextiocb->iocb) + 4), 1791 *(((uint32_t *) &nextiocb->iocb) + 6), 1792 *(((uint32_t *) &nextiocb->iocb) + 7)); 1793 } 1794 1795 /* 1796 * Issue iocb command to adapter 1797 */ 1798 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1799 wmb(); 1800 pring->stats.iocb_cmd++; 1801 1802 /* 1803 * If there is no completion routine to call, we can release the 1804 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1805 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1806 */ 1807 if (nextiocb->iocb_cmpl) 1808 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1809 else 1810 __lpfc_sli_release_iocbq(phba, nextiocb); 1811 1812 /* 1813 * Let the HBA know what IOCB slot will be the next one the 1814 * driver will put a command into. 1815 */ 1816 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1817 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1818 } 1819 1820 /** 1821 * lpfc_sli_update_full_ring - Update the chip attention register 1822 * @phba: Pointer to HBA context object. 1823 * @pring: Pointer to driver SLI ring object. 1824 * 1825 * The caller is not required to hold any lock for calling this function. 1826 * This function updates the chip attention bits for the ring to inform firmware 1827 * that there are pending work to be done for this ring and requests an 1828 * interrupt when there is space available in the ring. This function is 1829 * called when the driver is unable to post more iocbs to the ring due 1830 * to unavailability of space in the ring. 1831 **/ 1832 static void 1833 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1834 { 1835 int ringno = pring->ringno; 1836 1837 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1838 1839 wmb(); 1840 1841 /* 1842 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1843 * The HBA will tell us when an IOCB entry is available. 1844 */ 1845 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1846 readl(phba->CAregaddr); /* flush */ 1847 1848 pring->stats.iocb_cmd_full++; 1849 } 1850 1851 /** 1852 * lpfc_sli_update_ring - Update chip attention register 1853 * @phba: Pointer to HBA context object. 1854 * @pring: Pointer to driver SLI ring object. 1855 * 1856 * This function updates the chip attention register bit for the 1857 * given ring to inform HBA that there is more work to be done 1858 * in this ring. The caller is not required to hold any lock. 1859 **/ 1860 static void 1861 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1862 { 1863 int ringno = pring->ringno; 1864 1865 /* 1866 * Tell the HBA that there is work to do in this ring. 1867 */ 1868 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1869 wmb(); 1870 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1871 readl(phba->CAregaddr); /* flush */ 1872 } 1873 } 1874 1875 /** 1876 * lpfc_sli_resume_iocb - Process iocbs in the txq 1877 * @phba: Pointer to HBA context object. 1878 * @pring: Pointer to driver SLI ring object. 1879 * 1880 * This function is called with hbalock held to post pending iocbs 1881 * in the txq to the firmware. This function is called when driver 1882 * detects space available in the ring. 1883 **/ 1884 static void 1885 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1886 { 1887 IOCB_t *iocb; 1888 struct lpfc_iocbq *nextiocb; 1889 1890 lockdep_assert_held(&phba->hbalock); 1891 1892 /* 1893 * Check to see if: 1894 * (a) there is anything on the txq to send 1895 * (b) link is up 1896 * (c) link attention events can be processed (fcp ring only) 1897 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1898 */ 1899 1900 if (lpfc_is_link_up(phba) && 1901 (!list_empty(&pring->txq)) && 1902 (pring->ringno != LPFC_FCP_RING || 1903 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1904 1905 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1906 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1907 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1908 1909 if (iocb) 1910 lpfc_sli_update_ring(phba, pring); 1911 else 1912 lpfc_sli_update_full_ring(phba, pring); 1913 } 1914 1915 return; 1916 } 1917 1918 /** 1919 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1920 * @phba: Pointer to HBA context object. 1921 * @hbqno: HBQ number. 1922 * 1923 * This function is called with hbalock held to get the next 1924 * available slot for the given HBQ. If there is free slot 1925 * available for the HBQ it will return pointer to the next available 1926 * HBQ entry else it will return NULL. 1927 **/ 1928 static struct lpfc_hbq_entry * 1929 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1930 { 1931 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1932 1933 lockdep_assert_held(&phba->hbalock); 1934 1935 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1936 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1937 hbqp->next_hbqPutIdx = 0; 1938 1939 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1940 uint32_t raw_index = phba->hbq_get[hbqno]; 1941 uint32_t getidx = le32_to_cpu(raw_index); 1942 1943 hbqp->local_hbqGetIdx = getidx; 1944 1945 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1946 lpfc_printf_log(phba, KERN_ERR, 1947 LOG_SLI | LOG_VPORT, 1948 "1802 HBQ %d: local_hbqGetIdx " 1949 "%u is > than hbqp->entry_count %u\n", 1950 hbqno, hbqp->local_hbqGetIdx, 1951 hbqp->entry_count); 1952 1953 phba->link_state = LPFC_HBA_ERROR; 1954 return NULL; 1955 } 1956 1957 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1958 return NULL; 1959 } 1960 1961 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1962 hbqp->hbqPutIdx; 1963 } 1964 1965 /** 1966 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1967 * @phba: Pointer to HBA context object. 1968 * 1969 * This function is called with no lock held to free all the 1970 * hbq buffers while uninitializing the SLI interface. It also 1971 * frees the HBQ buffers returned by the firmware but not yet 1972 * processed by the upper layers. 1973 **/ 1974 void 1975 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1976 { 1977 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1978 struct hbq_dmabuf *hbq_buf; 1979 unsigned long flags; 1980 int i, hbq_count; 1981 1982 hbq_count = lpfc_sli_hbq_count(); 1983 /* Return all memory used by all HBQs */ 1984 spin_lock_irqsave(&phba->hbalock, flags); 1985 for (i = 0; i < hbq_count; ++i) { 1986 list_for_each_entry_safe(dmabuf, next_dmabuf, 1987 &phba->hbqs[i].hbq_buffer_list, list) { 1988 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1989 list_del(&hbq_buf->dbuf.list); 1990 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1991 } 1992 phba->hbqs[i].buffer_count = 0; 1993 } 1994 1995 /* Mark the HBQs not in use */ 1996 phba->hbq_in_use = 0; 1997 spin_unlock_irqrestore(&phba->hbalock, flags); 1998 } 1999 2000 /** 2001 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 2002 * @phba: Pointer to HBA context object. 2003 * @hbqno: HBQ number. 2004 * @hbq_buf: Pointer to HBQ buffer. 2005 * 2006 * This function is called with the hbalock held to post a 2007 * hbq buffer to the firmware. If the function finds an empty 2008 * slot in the HBQ, it will post the buffer. 
The function returns 2009 * zero if it successfully posts the buffer to the firmware, 2010 * otherwise it returns an error code. 2011 **/ 2012 static int 2013 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 2014 struct hbq_dmabuf *hbq_buf) 2015 { 2016 lockdep_assert_held(&phba->hbalock); 2017 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 2018 } 2019 2020 /** 2021 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 2022 * @phba: Pointer to HBA context object. 2023 * @hbqno: HBQ number. 2024 * @hbq_buf: Pointer to HBQ buffer. 2025 * 2026 * This function is called with the hbalock held to post a hbq buffer to the 2027 * firmware. If the function finds an empty slot in the HBQ, it will post the 2028 * buffer and place it on the hbq_buffer_list. The function will return zero if 2029 * it successfully posts the buffer, else it will return an error. 2030 **/ 2031 static int 2032 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 2033 struct hbq_dmabuf *hbq_buf) 2034 { 2035 struct lpfc_hbq_entry *hbqe; 2036 dma_addr_t physaddr = hbq_buf->dbuf.phys; 2037 2038 lockdep_assert_held(&phba->hbalock); 2039 /* Get next HBQ entry slot to use */ 2040 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 2041 if (hbqe) { 2042 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 2043 2044 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 2045 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 2046 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; 2047 hbqe->bde.tus.f.bdeFlags = 0; 2048 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 2049 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 2050 /* Sync SLIM */ 2051 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 2052 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 2053 /* flush */ 2054 readl(phba->hbq_put + hbqno); 2055 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 2056 return 0; 2057 } else 2058 return -ENOMEM; 2059 } 2060 2061 /** 2062 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 2063 * @phba: Pointer to HBA context object. 2064 * @hbqno: HBQ number. 2065 * @hbq_buf: Pointer to HBQ buffer. 2066 * 2067 * This function is called with the hbalock held to post an RQE to the SLI4 2068 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 2069 * the hbq_buffer_list and return zero, otherwise it will return an error. 2070 **/ 2071 static int 2072 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 2073 struct hbq_dmabuf *hbq_buf) 2074 { 2075 int rc; 2076 struct lpfc_rqe hrqe; 2077 struct lpfc_rqe drqe; 2078 struct lpfc_queue *hrq; 2079 struct lpfc_queue *drq; 2080 2081 if (hbqno != LPFC_ELS_HBQ) 2082 return 1; 2083 hrq = phba->sli4_hba.hdr_rq; 2084 drq = phba->sli4_hba.dat_rq; 2085 2086 lockdep_assert_held(&phba->hbalock); 2087 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 2088 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 2089 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 2090 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 2091 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 2092 if (rc < 0) 2093 return rc; 2094 hbq_buf->tag = (rc | (hbqno << 16)); 2095 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 2096 return 0; 2097 } 2098 2099 /* HBQ for ELS and CT traffic.
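 * The lpfc_els_hbq defaults below size this HBQ at 256 entries, with 40
 * buffers posted at initialization and 40 more added on each refill.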
*/ 2100 static struct lpfc_hbq_init lpfc_els_hbq = { 2101 .rn = 1, 2102 .entry_count = 256, 2103 .mask_count = 0, 2104 .profile = 0, 2105 .ring_mask = (1 << LPFC_ELS_RING), 2106 .buffer_count = 0, 2107 .init_count = 40, 2108 .add_count = 40, 2109 }; 2110 2111 /* Array of HBQs */ 2112 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2113 &lpfc_els_hbq, 2114 }; 2115 2116 /** 2117 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2118 * @phba: Pointer to HBA context object. 2119 * @hbqno: HBQ number. 2120 * @count: Number of HBQ buffers to be posted. 2121 * 2122 * This function is called with no lock held to post more hbq buffers to the 2123 * given HBQ. The function returns the number of HBQ buffers successfully 2124 * posted. 2125 **/ 2126 static int 2127 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2128 { 2129 uint32_t i, posted = 0; 2130 unsigned long flags; 2131 struct hbq_dmabuf *hbq_buffer; 2132 LIST_HEAD(hbq_buf_list); 2133 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2134 return 0; 2135 2136 if ((phba->hbqs[hbqno].buffer_count + count) > 2137 lpfc_hbq_defs[hbqno]->entry_count) 2138 count = lpfc_hbq_defs[hbqno]->entry_count - 2139 phba->hbqs[hbqno].buffer_count; 2140 if (!count) 2141 return 0; 2142 /* Allocate HBQ entries */ 2143 for (i = 0; i < count; i++) { 2144 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2145 if (!hbq_buffer) 2146 break; 2147 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2148 } 2149 /* Check whether HBQ is still in use */ 2150 spin_lock_irqsave(&phba->hbalock, flags); 2151 if (!phba->hbq_in_use) 2152 goto err; 2153 while (!list_empty(&hbq_buf_list)) { 2154 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2155 dbuf.list); 2156 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2157 (hbqno << 16)); 2158 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2159 phba->hbqs[hbqno].buffer_count++; 2160 posted++; 2161 } else 2162 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2163 } 2164 spin_unlock_irqrestore(&phba->hbalock, flags); 2165 return posted; 2166 err: 2167 spin_unlock_irqrestore(&phba->hbalock, flags); 2168 while (!list_empty(&hbq_buf_list)) { 2169 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2170 dbuf.list); 2171 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2172 } 2173 return 0; 2174 } 2175 2176 /** 2177 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2178 * @phba: Pointer to HBA context object. 2179 * @qno: HBQ number. 2180 * 2181 * This function posts more buffers to the HBQ. This function 2182 * is called with no lock held. The function returns the number of HBQ entries 2183 * successfully allocated. 2184 **/ 2185 int 2186 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2187 { 2188 if (phba->sli_rev == LPFC_SLI_REV4) 2189 return 0; 2190 else 2191 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2192 lpfc_hbq_defs[qno]->add_count); 2193 } 2194 2195 /** 2196 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2197 * @phba: Pointer to HBA context object. 2198 * @qno: HBQ queue number. 2199 * 2200 * This function is called from SLI initialization code path with 2201 * no lock held to post initial HBQ buffers to firmware. The 2202 * function returns the number of HBQ entries successfully allocated. 
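 * For SLI4 ports the full entry_count is posted up front, since receive
 * buffers are replenished through the header/data RQ pair rather than the
 * SLI3 add-on path; SLI3 ports post only init_count buffers here.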
2203 **/ 2204 static int 2205 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2206 { 2207 if (phba->sli_rev == LPFC_SLI_REV4) 2208 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2209 lpfc_hbq_defs[qno]->entry_count); 2210 else 2211 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2212 lpfc_hbq_defs[qno]->init_count); 2213 } 2214 2215 /** 2216 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2217 * @phba: Pointer to HBA context object. 2218 * @hbqno: HBQ number. 2219 * 2220 * This function removes the first hbq buffer on an hbq list and returns a 2221 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2222 **/ 2223 static struct hbq_dmabuf * 2224 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2225 { 2226 struct lpfc_dmabuf *d_buf; 2227 2228 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2229 if (!d_buf) 2230 return NULL; 2231 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2232 } 2233 2234 /** 2235 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2236 * @phba: Pointer to HBA context object. 2237 * @hbqno: HBQ number. 2238 * 2239 * This function removes the first RQ buffer on an RQ buffer list and returns a 2240 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2241 **/ 2242 static struct rqb_dmabuf * 2243 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2244 { 2245 struct lpfc_dmabuf *h_buf; 2246 struct lpfc_rqb *rqbp; 2247 2248 rqbp = hrq->rqbp; 2249 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2250 struct lpfc_dmabuf, list); 2251 if (!h_buf) 2252 return NULL; 2253 rqbp->buffer_count--; 2254 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2255 } 2256 2257 /** 2258 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2259 * @phba: Pointer to HBA context object. 2260 * @tag: Tag of the hbq buffer. 2261 * 2262 * This function searches for the hbq buffer associated with the given tag in 2263 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2264 * otherwise it returns NULL. 2265 **/ 2266 static struct hbq_dmabuf * 2267 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2268 { 2269 struct lpfc_dmabuf *d_buf; 2270 struct hbq_dmabuf *hbq_buf; 2271 uint32_t hbqno; 2272 2273 hbqno = tag >> 16; 2274 if (hbqno >= LPFC_MAX_HBQS) 2275 return NULL; 2276 2277 spin_lock_irq(&phba->hbalock); 2278 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2279 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2280 if (hbq_buf->tag == tag) { 2281 spin_unlock_irq(&phba->hbalock); 2282 return hbq_buf; 2283 } 2284 } 2285 spin_unlock_irq(&phba->hbalock); 2286 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2287 "1803 Bad hbq tag. Data: x%x x%x\n", 2288 tag, phba->hbqs[tag >> 16].buffer_count); 2289 return NULL; 2290 } 2291 2292 /** 2293 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2294 * @phba: Pointer to HBA context object. 2295 * @hbq_buffer: Pointer to HBQ buffer. 2296 * 2297 * This function is called with hbalock. This function gives back 2298 * the hbq buffer to firmware. If the HBQ does not have space to 2299 * post the buffer, it will free the buffer. 
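 * The target HBQ is recovered from the upper 16 bits of the buffer tag,
 * mirroring how the tag was composed when the buffer was first posted.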
2300 **/ 2301 void 2302 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2303 { 2304 uint32_t hbqno; 2305 2306 if (hbq_buffer) { 2307 hbqno = hbq_buffer->tag >> 16; 2308 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2309 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2310 } 2311 } 2312 2313 /** 2314 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2315 * @mbxCommand: mailbox command code. 2316 * 2317 * This function is called by the mailbox event handler function to verify 2318 * that the completed mailbox command is a legitimate mailbox command. If the 2319 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2320 * and the mailbox event handler will take the HBA offline. 2321 **/ 2322 static int 2323 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2324 { 2325 uint8_t ret; 2326 2327 switch (mbxCommand) { 2328 case MBX_LOAD_SM: 2329 case MBX_READ_NV: 2330 case MBX_WRITE_NV: 2331 case MBX_WRITE_VPARMS: 2332 case MBX_RUN_BIU_DIAG: 2333 case MBX_INIT_LINK: 2334 case MBX_DOWN_LINK: 2335 case MBX_CONFIG_LINK: 2336 case MBX_CONFIG_RING: 2337 case MBX_RESET_RING: 2338 case MBX_READ_CONFIG: 2339 case MBX_READ_RCONFIG: 2340 case MBX_READ_SPARM: 2341 case MBX_READ_STATUS: 2342 case MBX_READ_RPI: 2343 case MBX_READ_XRI: 2344 case MBX_READ_REV: 2345 case MBX_READ_LNK_STAT: 2346 case MBX_REG_LOGIN: 2347 case MBX_UNREG_LOGIN: 2348 case MBX_CLEAR_LA: 2349 case MBX_DUMP_MEMORY: 2350 case MBX_DUMP_CONTEXT: 2351 case MBX_RUN_DIAGS: 2352 case MBX_RESTART: 2353 case MBX_UPDATE_CFG: 2354 case MBX_DOWN_LOAD: 2355 case MBX_DEL_LD_ENTRY: 2356 case MBX_RUN_PROGRAM: 2357 case MBX_SET_MASK: 2358 case MBX_SET_VARIABLE: 2359 case MBX_UNREG_D_ID: 2360 case MBX_KILL_BOARD: 2361 case MBX_CONFIG_FARP: 2362 case MBX_BEACON: 2363 case MBX_LOAD_AREA: 2364 case MBX_RUN_BIU_DIAG64: 2365 case MBX_CONFIG_PORT: 2366 case MBX_READ_SPARM64: 2367 case MBX_READ_RPI64: 2368 case MBX_REG_LOGIN64: 2369 case MBX_READ_TOPOLOGY: 2370 case MBX_WRITE_WWN: 2371 case MBX_SET_DEBUG: 2372 case MBX_LOAD_EXP_ROM: 2373 case MBX_ASYNCEVT_ENABLE: 2374 case MBX_REG_VPI: 2375 case MBX_UNREG_VPI: 2376 case MBX_HEARTBEAT: 2377 case MBX_PORT_CAPABILITIES: 2378 case MBX_PORT_IOV_CONTROL: 2379 case MBX_SLI4_CONFIG: 2380 case MBX_SLI4_REQ_FTRS: 2381 case MBX_REG_FCFI: 2382 case MBX_UNREG_FCFI: 2383 case MBX_REG_VFI: 2384 case MBX_UNREG_VFI: 2385 case MBX_INIT_VPI: 2386 case MBX_INIT_VFI: 2387 case MBX_RESUME_RPI: 2388 case MBX_READ_EVENT_LOG_STATUS: 2389 case MBX_READ_EVENT_LOG: 2390 case MBX_SECURITY_MGMT: 2391 case MBX_AUTH_PORT: 2392 case MBX_ACCESS_VDATA: 2393 ret = mbxCommand; 2394 break; 2395 default: 2396 ret = MBX_SHUTDOWN; 2397 break; 2398 } 2399 return ret; 2400 } 2401 2402 /** 2403 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2404 * @phba: Pointer to HBA context object. 2405 * @pmboxq: Pointer to mailbox command. 2406 * 2407 * This is completion handler function for mailbox commands issued from 2408 * lpfc_sli_issue_mbox_wait function. This function is called by the 2409 * mailbox event handler function with no lock held. This function 2410 * will wake up thread waiting on the wait queue pointed by context1 2411 * of the mailbox. 2412 **/ 2413 void 2414 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2415 { 2416 unsigned long drvr_flag; 2417 struct completion *pmbox_done; 2418 2419 /* 2420 * If pmbox_done is empty, the driver thread gave up waiting and 2421 * continued running. 
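 * The completion pointer is read and signalled while holding hbalock,
 * which keeps the waiter from tearing it down between the NULL check and
 * the complete() call.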
2422 */ 2423 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2424 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2425 pmbox_done = (struct completion *)pmboxq->context3; 2426 if (pmbox_done) 2427 complete(pmbox_done); 2428 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2429 return; 2430 } 2431 2432 static void 2433 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 2434 { 2435 unsigned long iflags; 2436 2437 if (ndlp->nlp_flag & NLP_RELEASE_RPI) { 2438 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); 2439 spin_lock_irqsave(&vport->phba->ndlp_lock, iflags); 2440 ndlp->nlp_flag &= ~NLP_RELEASE_RPI; 2441 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; 2442 spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags); 2443 } 2444 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2445 } 2446 2447 /** 2448 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2449 * @phba: Pointer to HBA context object. 2450 * @pmb: Pointer to mailbox object. 2451 * 2452 * This function is the default mailbox completion handler. It 2453 * frees the memory resources associated with the completed mailbox 2454 * command. If the completed command is a REG_LOGIN mailbox command, 2455 * this function will issue a UREG_LOGIN to re-claim the RPI. 2456 **/ 2457 void 2458 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2459 { 2460 struct lpfc_vport *vport = pmb->vport; 2461 struct lpfc_dmabuf *mp; 2462 struct lpfc_nodelist *ndlp; 2463 struct Scsi_Host *shost; 2464 uint16_t rpi, vpi; 2465 int rc; 2466 2467 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 2468 2469 if (mp) { 2470 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2471 kfree(mp); 2472 } 2473 2474 /* 2475 * If a REG_LOGIN succeeded after node is destroyed or node 2476 * is in re-discovery driver need to cleanup the RPI. 2477 */ 2478 if (!(phba->pport->load_flag & FC_UNLOADING) && 2479 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2480 !pmb->u.mb.mbxStatus) { 2481 rpi = pmb->u.mb.un.varWords[0]; 2482 vpi = pmb->u.mb.un.varRegLogin.vpi; 2483 lpfc_unreg_login(phba, vpi, rpi, pmb); 2484 pmb->vport = vport; 2485 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2486 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2487 if (rc != MBX_NOT_FINISHED) 2488 return; 2489 } 2490 2491 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2492 !(phba->pport->load_flag & FC_UNLOADING) && 2493 !pmb->u.mb.mbxStatus) { 2494 shost = lpfc_shost_from_vport(vport); 2495 spin_lock_irq(shost->host_lock); 2496 vport->vpi_state |= LPFC_VPI_REGISTERED; 2497 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2498 spin_unlock_irq(shost->host_lock); 2499 } 2500 2501 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2502 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2503 lpfc_nlp_put(ndlp); 2504 pmb->ctx_buf = NULL; 2505 pmb->ctx_ndlp = NULL; 2506 } 2507 2508 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2509 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 2510 2511 /* Check to see if there are any deferred events to process */ 2512 if (ndlp) { 2513 lpfc_printf_vlog( 2514 vport, 2515 KERN_INFO, LOG_MBOX | LOG_DISCOVERY, 2516 "1438 UNREG cmpl deferred mbox x%x " 2517 "on NPort x%x Data: x%x x%x %px\n", 2518 ndlp->nlp_rpi, ndlp->nlp_DID, 2519 ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); 2520 2521 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2522 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { 2523 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2524 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; 2525 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); 2526 } else { 2527 __lpfc_sli_rpi_release(vport, ndlp); 2528 } 2529 pmb->ctx_ndlp = NULL; 2530 } 
2531 } 2532 2533 /* Check security permission status on INIT_LINK mailbox command */ 2534 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2535 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2536 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2537 "2860 SLI authentication is required " 2538 "for INIT_LINK but has not done yet\n"); 2539 2540 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2541 lpfc_sli4_mbox_cmd_free(phba, pmb); 2542 else 2543 mempool_free(pmb, phba->mbox_mem_pool); 2544 } 2545 /** 2546 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2547 * @phba: Pointer to HBA context object. 2548 * @pmb: Pointer to mailbox object. 2549 * 2550 * This function is the unreg rpi mailbox completion handler. It 2551 * frees the memory resources associated with the completed mailbox 2552 * command. An additional reference is put on the ndlp to prevent 2553 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2554 * the unreg mailbox command completes; this routine puts that 2555 * reference back. 2556 * 2557 **/ 2558 void 2559 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2560 { 2561 struct lpfc_vport *vport = pmb->vport; 2562 struct lpfc_nodelist *ndlp; 2563 2564 ndlp = pmb->ctx_ndlp; 2565 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2566 if (phba->sli_rev == LPFC_SLI_REV4 && 2567 (bf_get(lpfc_sli_intf_if_type, 2568 &phba->sli4_hba.sli_intf) >= 2569 LPFC_SLI_INTF_IF_TYPE_2)) { 2570 if (ndlp) { 2571 lpfc_printf_vlog( 2572 vport, KERN_INFO, LOG_MBOX | LOG_SLI, 2573 "0010 UNREG_LOGIN vpi:%x " 2574 "rpi:%x DID:%x defer x%x flg x%x " 2575 "map:%x %px\n", 2576 vport->vpi, ndlp->nlp_rpi, 2577 ndlp->nlp_DID, ndlp->nlp_defer_did, 2578 ndlp->nlp_flag, 2579 ndlp->nlp_usg_map, ndlp); 2580 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2581 lpfc_nlp_put(ndlp); 2582 2583 /* Check to see if there are any deferred 2584 * events to process 2585 */ 2586 if ((ndlp->nlp_flag & NLP_UNREG_INP) && 2587 (ndlp->nlp_defer_did != 2588 NLP_EVT_NOTHING_PENDING)) { 2589 lpfc_printf_vlog( 2590 vport, KERN_INFO, LOG_DISCOVERY, 2591 "4111 UNREG cmpl deferred " 2592 "clr x%x on " 2593 "NPort x%x Data: x%x x%px\n", 2594 ndlp->nlp_rpi, ndlp->nlp_DID, 2595 ndlp->nlp_defer_did, ndlp); 2596 ndlp->nlp_flag &= ~NLP_UNREG_INP; 2597 ndlp->nlp_defer_did = 2598 NLP_EVT_NOTHING_PENDING; 2599 lpfc_issue_els_plogi( 2600 vport, ndlp->nlp_DID, 0); 2601 } else { 2602 __lpfc_sli_rpi_release(vport, ndlp); 2603 } 2604 } 2605 } 2606 } 2607 2608 mempool_free(pmb, phba->mbox_mem_pool); 2609 } 2610 2611 /** 2612 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2613 * @phba: Pointer to HBA context object. 2614 * 2615 * This function is called with no lock held. This function processes all 2616 * the completed mailbox commands and gives them to the upper layers. The interrupt 2617 * service routine processes the mailbox completion interrupt and adds completed 2618 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2619 * The worker thread calls lpfc_sli_handle_mb_event, which will return the 2620 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This 2621 * function returns the mailbox commands to the upper layer by calling the 2622 * completion handler function of each mailbox.
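 * A completion whose command does not validate through
 * lpfc_sli_chk_mbx_command() is treated as fatal: the HBA is marked in
 * error and the error attention handler is invoked.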
2623 **/ 2624 int 2625 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2626 { 2627 MAILBOX_t *pmbox; 2628 LPFC_MBOXQ_t *pmb; 2629 int rc; 2630 LIST_HEAD(cmplq); 2631 2632 phba->sli.slistat.mbox_event++; 2633 2634 /* Get all completed mailboxe buffers into the cmplq */ 2635 spin_lock_irq(&phba->hbalock); 2636 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2637 spin_unlock_irq(&phba->hbalock); 2638 2639 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2640 do { 2641 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2642 if (pmb == NULL) 2643 break; 2644 2645 pmbox = &pmb->u.mb; 2646 2647 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2648 if (pmb->vport) { 2649 lpfc_debugfs_disc_trc(pmb->vport, 2650 LPFC_DISC_TRC_MBOX_VPORT, 2651 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2652 (uint32_t)pmbox->mbxCommand, 2653 pmbox->un.varWords[0], 2654 pmbox->un.varWords[1]); 2655 } 2656 else { 2657 lpfc_debugfs_disc_trc(phba->pport, 2658 LPFC_DISC_TRC_MBOX, 2659 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2660 (uint32_t)pmbox->mbxCommand, 2661 pmbox->un.varWords[0], 2662 pmbox->un.varWords[1]); 2663 } 2664 } 2665 2666 /* 2667 * It is a fatal error if unknown mbox command completion. 2668 */ 2669 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2670 MBX_SHUTDOWN) { 2671 /* Unknown mailbox command compl */ 2672 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2673 "(%d):0323 Unknown Mailbox command " 2674 "x%x (x%x/x%x) Cmpl\n", 2675 pmb->vport ? pmb->vport->vpi : 0, 2676 pmbox->mbxCommand, 2677 lpfc_sli_config_mbox_subsys_get(phba, 2678 pmb), 2679 lpfc_sli_config_mbox_opcode_get(phba, 2680 pmb)); 2681 phba->link_state = LPFC_HBA_ERROR; 2682 phba->work_hs = HS_FFER3; 2683 lpfc_handle_eratt(phba); 2684 continue; 2685 } 2686 2687 if (pmbox->mbxStatus) { 2688 phba->sli.slistat.mbox_stat_err++; 2689 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2690 /* Mbox cmd cmpl error - RETRYing */ 2691 lpfc_printf_log(phba, KERN_INFO, 2692 LOG_MBOX | LOG_SLI, 2693 "(%d):0305 Mbox cmd cmpl " 2694 "error - RETRYing Data: x%x " 2695 "(x%x/x%x) x%x x%x x%x\n", 2696 pmb->vport ? pmb->vport->vpi : 0, 2697 pmbox->mbxCommand, 2698 lpfc_sli_config_mbox_subsys_get(phba, 2699 pmb), 2700 lpfc_sli_config_mbox_opcode_get(phba, 2701 pmb), 2702 pmbox->mbxStatus, 2703 pmbox->un.varWords[0], 2704 pmb->vport->port_state); 2705 pmbox->mbxStatus = 0; 2706 pmbox->mbxOwner = OWN_HOST; 2707 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2708 if (rc != MBX_NOT_FINISHED) 2709 continue; 2710 } 2711 } 2712 2713 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2714 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2715 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps " 2716 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2717 "x%x x%x x%x\n", 2718 pmb->vport ? pmb->vport->vpi : 0, 2719 pmbox->mbxCommand, 2720 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2721 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2722 pmb->mbox_cmpl, 2723 *((uint32_t *) pmbox), 2724 pmbox->un.varWords[0], 2725 pmbox->un.varWords[1], 2726 pmbox->un.varWords[2], 2727 pmbox->un.varWords[3], 2728 pmbox->un.varWords[4], 2729 pmbox->un.varWords[5], 2730 pmbox->un.varWords[6], 2731 pmbox->un.varWords[7], 2732 pmbox->un.varWords[8], 2733 pmbox->un.varWords[9], 2734 pmbox->un.varWords[10]); 2735 2736 if (pmb->mbox_cmpl) 2737 pmb->mbox_cmpl(phba,pmb); 2738 } while (1); 2739 return 0; 2740 } 2741 2742 /** 2743 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2744 * @phba: Pointer to HBA context object. 2745 * @pring: Pointer to driver SLI ring object. 2746 * @tag: buffer tag. 
2747 * 2748 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2749 * is set in the tag the buffer is posted for a particular exchange, 2750 * the function will return the buffer without replacing the buffer. 2751 * If the buffer is for unsolicited ELS or CT traffic, this function 2752 * returns the buffer and also posts another buffer to the firmware. 2753 **/ 2754 static struct lpfc_dmabuf * 2755 lpfc_sli_get_buff(struct lpfc_hba *phba, 2756 struct lpfc_sli_ring *pring, 2757 uint32_t tag) 2758 { 2759 struct hbq_dmabuf *hbq_entry; 2760 2761 if (tag & QUE_BUFTAG_BIT) 2762 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2763 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2764 if (!hbq_entry) 2765 return NULL; 2766 return &hbq_entry->dbuf; 2767 } 2768 2769 /** 2770 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2771 * @phba: Pointer to HBA context object. 2772 * @pring: Pointer to driver SLI ring object. 2773 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2774 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2775 * @fch_type: the type for the first frame of the sequence. 2776 * 2777 * This function is called with no lock held. This function uses the r_ctl and 2778 * type of the received sequence to find the correct callback function to call 2779 * to process the sequence. 2780 **/ 2781 static int 2782 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2783 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2784 uint32_t fch_type) 2785 { 2786 int i; 2787 2788 switch (fch_type) { 2789 case FC_TYPE_NVME: 2790 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2791 return 1; 2792 default: 2793 break; 2794 } 2795 2796 /* unSolicited Responses */ 2797 if (pring->prt[0].profile) { 2798 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2799 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2800 saveq); 2801 return 1; 2802 } 2803 /* We must search, based on rctl / type 2804 for the right routine */ 2805 for (i = 0; i < pring->num_mask; i++) { 2806 if ((pring->prt[i].rctl == fch_r_ctl) && 2807 (pring->prt[i].type == fch_type)) { 2808 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2809 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2810 (phba, pring, saveq); 2811 return 1; 2812 } 2813 } 2814 return 0; 2815 } 2816 2817 /** 2818 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2819 * @phba: Pointer to HBA context object. 2820 * @pring: Pointer to driver SLI ring object. 2821 * @saveq: Pointer to the unsolicited iocb. 2822 * 2823 * This function is called with no lock held by the ring event handler 2824 * when there is an unsolicited iocb posted to the response ring by the 2825 * firmware. This function gets the buffer associated with the iocbs 2826 * and calls the event handler for the ring. This function handles both 2827 * qring buffers and hbq buffers. 2828 * When the function returns 1 the caller can free the iocb object otherwise 2829 * upper layer functions will free the iocb objects. 
2830 **/ 2831 static int 2832 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2833 struct lpfc_iocbq *saveq) 2834 { 2835 IOCB_t * irsp; 2836 WORD5 * w5p; 2837 uint32_t Rctl, Type; 2838 struct lpfc_iocbq *iocbq; 2839 struct lpfc_dmabuf *dmzbuf; 2840 2841 irsp = &(saveq->iocb); 2842 2843 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2844 if (pring->lpfc_sli_rcv_async_status) 2845 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2846 else 2847 lpfc_printf_log(phba, 2848 KERN_WARNING, 2849 LOG_SLI, 2850 "0316 Ring %d handler: unexpected " 2851 "ASYNC_STATUS iocb received evt_code " 2852 "0x%x\n", 2853 pring->ringno, 2854 irsp->un.asyncstat.evt_code); 2855 return 1; 2856 } 2857 2858 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2859 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2860 if (irsp->ulpBdeCount > 0) { 2861 dmzbuf = lpfc_sli_get_buff(phba, pring, 2862 irsp->un.ulpWord[3]); 2863 lpfc_in_buf_free(phba, dmzbuf); 2864 } 2865 2866 if (irsp->ulpBdeCount > 1) { 2867 dmzbuf = lpfc_sli_get_buff(phba, pring, 2868 irsp->unsli3.sli3Words[3]); 2869 lpfc_in_buf_free(phba, dmzbuf); 2870 } 2871 2872 if (irsp->ulpBdeCount > 2) { 2873 dmzbuf = lpfc_sli_get_buff(phba, pring, 2874 irsp->unsli3.sli3Words[7]); 2875 lpfc_in_buf_free(phba, dmzbuf); 2876 } 2877 2878 return 1; 2879 } 2880 2881 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2882 if (irsp->ulpBdeCount != 0) { 2883 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2884 irsp->un.ulpWord[3]); 2885 if (!saveq->context2) 2886 lpfc_printf_log(phba, 2887 KERN_ERR, 2888 LOG_SLI, 2889 "0341 Ring %d Cannot find buffer for " 2890 "an unsolicited iocb. tag 0x%x\n", 2891 pring->ringno, 2892 irsp->un.ulpWord[3]); 2893 } 2894 if (irsp->ulpBdeCount == 2) { 2895 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2896 irsp->unsli3.sli3Words[7]); 2897 if (!saveq->context3) 2898 lpfc_printf_log(phba, 2899 KERN_ERR, 2900 LOG_SLI, 2901 "0342 Ring %d Cannot find buffer for an" 2902 " unsolicited iocb. tag 0x%x\n", 2903 pring->ringno, 2904 irsp->unsli3.sli3Words[7]); 2905 } 2906 list_for_each_entry(iocbq, &saveq->list, list) { 2907 irsp = &(iocbq->iocb); 2908 if (irsp->ulpBdeCount != 0) { 2909 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2910 irsp->un.ulpWord[3]); 2911 if (!iocbq->context2) 2912 lpfc_printf_log(phba, 2913 KERN_ERR, 2914 LOG_SLI, 2915 "0343 Ring %d Cannot find " 2916 "buffer for an unsolicited iocb" 2917 ". tag 0x%x\n", pring->ringno, 2918 irsp->un.ulpWord[3]); 2919 } 2920 if (irsp->ulpBdeCount == 2) { 2921 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2922 irsp->unsli3.sli3Words[7]); 2923 if (!iocbq->context3) 2924 lpfc_printf_log(phba, 2925 KERN_ERR, 2926 LOG_SLI, 2927 "0344 Ring %d Cannot find " 2928 "buffer for an unsolicited " 2929 "iocb. 
tag 0x%x\n", 2930 pring->ringno, 2931 irsp->unsli3.sli3Words[7]); 2932 } 2933 } 2934 } 2935 if (irsp->ulpBdeCount != 0 && 2936 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2937 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2938 int found = 0; 2939 2940 /* search continue save q for same XRI */ 2941 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2942 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2943 saveq->iocb.unsli3.rcvsli3.ox_id) { 2944 list_add_tail(&saveq->list, &iocbq->list); 2945 found = 1; 2946 break; 2947 } 2948 } 2949 if (!found) 2950 list_add_tail(&saveq->clist, 2951 &pring->iocb_continue_saveq); 2952 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2953 list_del_init(&iocbq->clist); 2954 saveq = iocbq; 2955 irsp = &(saveq->iocb); 2956 } else 2957 return 0; 2958 } 2959 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2960 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2961 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2962 Rctl = FC_RCTL_ELS_REQ; 2963 Type = FC_TYPE_ELS; 2964 } else { 2965 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2966 Rctl = w5p->hcsw.Rctl; 2967 Type = w5p->hcsw.Type; 2968 2969 /* Firmware Workaround */ 2970 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2971 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2972 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2973 Rctl = FC_RCTL_ELS_REQ; 2974 Type = FC_TYPE_ELS; 2975 w5p->hcsw.Rctl = Rctl; 2976 w5p->hcsw.Type = Type; 2977 } 2978 } 2979 2980 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2981 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2982 "0313 Ring %d handler: unexpected Rctl x%x " 2983 "Type x%x received\n", 2984 pring->ringno, Rctl, Type); 2985 2986 return 1; 2987 } 2988 2989 /** 2990 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2991 * @phba: Pointer to HBA context object. 2992 * @pring: Pointer to driver SLI ring object. 2993 * @prspiocb: Pointer to response iocb object. 2994 * 2995 * This function looks up the iocb_lookup table to get the command iocb 2996 * corresponding to the given response iocb using the iotag of the 2997 * response iocb. The driver calls this function with the hbalock held 2998 * for SLI3 ports or the ring lock held for SLI4 ports. 2999 * This function returns the command iocb object if it finds the command 3000 * iocb else returns NULL. 
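 * As a side effect, a command iocb found on the txcmplq is removed from
 * that list and its LPFC_IO_ON_TXCMPLQ flag is cleared before it is
 * returned.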
3001 **/ 3002 static struct lpfc_iocbq * 3003 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 3004 struct lpfc_sli_ring *pring, 3005 struct lpfc_iocbq *prspiocb) 3006 { 3007 struct lpfc_iocbq *cmd_iocb = NULL; 3008 uint16_t iotag; 3009 spinlock_t *temp_lock = NULL; 3010 unsigned long iflag = 0; 3011 3012 if (phba->sli_rev == LPFC_SLI_REV4) 3013 temp_lock = &pring->ring_lock; 3014 else 3015 temp_lock = &phba->hbalock; 3016 3017 spin_lock_irqsave(temp_lock, iflag); 3018 iotag = prspiocb->iocb.ulpIoTag; 3019 3020 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3021 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3022 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3023 /* remove from txcmpl queue list */ 3024 list_del_init(&cmd_iocb->list); 3025 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3026 pring->txcmplq_cnt--; 3027 spin_unlock_irqrestore(temp_lock, iflag); 3028 return cmd_iocb; 3029 } 3030 } 3031 3032 spin_unlock_irqrestore(temp_lock, iflag); 3033 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3034 "0317 iotag x%x is out of " 3035 "range: max iotag x%x wd0 x%x\n", 3036 iotag, phba->sli.last_iotag, 3037 *(((uint32_t *) &prspiocb->iocb) + 7)); 3038 return NULL; 3039 } 3040 3041 /** 3042 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 3043 * @phba: Pointer to HBA context object. 3044 * @pring: Pointer to driver SLI ring object. 3045 * @iotag: IOCB tag. 3046 * 3047 * This function looks up the iocb_lookup table to get the command iocb 3048 * corresponding to the given iotag. The driver calls this function with 3049 * the ring lock held because this function is an SLI4 port only helper. 3050 * This function returns the command iocb object if it finds the command 3051 * iocb else returns NULL. 3052 **/ 3053 static struct lpfc_iocbq * 3054 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 3055 struct lpfc_sli_ring *pring, uint16_t iotag) 3056 { 3057 struct lpfc_iocbq *cmd_iocb = NULL; 3058 spinlock_t *temp_lock = NULL; 3059 unsigned long iflag = 0; 3060 3061 if (phba->sli_rev == LPFC_SLI_REV4) 3062 temp_lock = &pring->ring_lock; 3063 else 3064 temp_lock = &phba->hbalock; 3065 3066 spin_lock_irqsave(temp_lock, iflag); 3067 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 3068 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 3069 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 3070 /* remove from txcmpl queue list */ 3071 list_del_init(&cmd_iocb->list); 3072 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 3073 pring->txcmplq_cnt--; 3074 spin_unlock_irqrestore(temp_lock, iflag); 3075 return cmd_iocb; 3076 } 3077 } 3078 3079 spin_unlock_irqrestore(temp_lock, iflag); 3080 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3081 "0372 iotag x%x lookup error: max iotag (x%x) " 3082 "iocb_flag x%x\n", 3083 iotag, phba->sli.last_iotag, 3084 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3085 return NULL; 3086 } 3087 3088 /** 3089 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3090 * @phba: Pointer to HBA context object. 3091 * @pring: Pointer to driver SLI ring object. 3092 * @saveq: Pointer to the response iocb to be processed. 3093 * 3094 * This function is called by the ring event handler for non-fcp 3095 * rings when there is a new response iocb in the response ring. 3096 * The caller is not required to hold any locks. This function 3097 * gets the command iocb associated with the response iocb and 3098 * calls the completion handler for the command iocb. If there 3099 * is no completion handler, the function will free the resources 3100 * associated with command iocb. 
If the response iocb is for 3101 * an already aborted command iocb, the status of the completion 3102 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3103 * This function always returns 1. 3104 **/ 3105 static int 3106 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3107 struct lpfc_iocbq *saveq) 3108 { 3109 struct lpfc_iocbq *cmdiocbp; 3110 int rc = 1; 3111 unsigned long iflag; 3112 3113 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3114 if (cmdiocbp) { 3115 if (cmdiocbp->iocb_cmpl) { 3116 /* 3117 * If an ELS command failed send an event to mgmt 3118 * application. 3119 */ 3120 if (saveq->iocb.ulpStatus && 3121 (pring->ringno == LPFC_ELS_RING) && 3122 (cmdiocbp->iocb.ulpCommand == 3123 CMD_ELS_REQUEST64_CR)) 3124 lpfc_send_els_failure_event(phba, 3125 cmdiocbp, saveq); 3126 3127 /* 3128 * Post all ELS completions to the worker thread. 3129 * All other are passed to the completion callback. 3130 */ 3131 if (pring->ringno == LPFC_ELS_RING) { 3132 if ((phba->sli_rev < LPFC_SLI_REV4) && 3133 (cmdiocbp->iocb_flag & 3134 LPFC_DRIVER_ABORTED)) { 3135 spin_lock_irqsave(&phba->hbalock, 3136 iflag); 3137 cmdiocbp->iocb_flag &= 3138 ~LPFC_DRIVER_ABORTED; 3139 spin_unlock_irqrestore(&phba->hbalock, 3140 iflag); 3141 saveq->iocb.ulpStatus = 3142 IOSTAT_LOCAL_REJECT; 3143 saveq->iocb.un.ulpWord[4] = 3144 IOERR_SLI_ABORTED; 3145 3146 /* Firmware could still be in progress 3147 * of DMAing payload, so don't free data 3148 * buffer till after a hbeat. 3149 */ 3150 spin_lock_irqsave(&phba->hbalock, 3151 iflag); 3152 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3153 spin_unlock_irqrestore(&phba->hbalock, 3154 iflag); 3155 } 3156 if (phba->sli_rev == LPFC_SLI_REV4) { 3157 if (saveq->iocb_flag & 3158 LPFC_EXCHANGE_BUSY) { 3159 /* Set cmdiocb flag for the 3160 * exchange busy so sgl (xri) 3161 * will not be released until 3162 * the abort xri is received 3163 * from hba. 3164 */ 3165 spin_lock_irqsave( 3166 &phba->hbalock, iflag); 3167 cmdiocbp->iocb_flag |= 3168 LPFC_EXCHANGE_BUSY; 3169 spin_unlock_irqrestore( 3170 &phba->hbalock, iflag); 3171 } 3172 if (cmdiocbp->iocb_flag & 3173 LPFC_DRIVER_ABORTED) { 3174 /* 3175 * Clear LPFC_DRIVER_ABORTED 3176 * bit in case it was driver 3177 * initiated abort. 3178 */ 3179 spin_lock_irqsave( 3180 &phba->hbalock, iflag); 3181 cmdiocbp->iocb_flag &= 3182 ~LPFC_DRIVER_ABORTED; 3183 spin_unlock_irqrestore( 3184 &phba->hbalock, iflag); 3185 cmdiocbp->iocb.ulpStatus = 3186 IOSTAT_LOCAL_REJECT; 3187 cmdiocbp->iocb.un.ulpWord[4] = 3188 IOERR_ABORT_REQUESTED; 3189 /* 3190 * For SLI4, irsiocb contains 3191 * NO_XRI in sli_xritag, it 3192 * shall not affect releasing 3193 * sgl (xri) process. 3194 */ 3195 saveq->iocb.ulpStatus = 3196 IOSTAT_LOCAL_REJECT; 3197 saveq->iocb.un.ulpWord[4] = 3198 IOERR_SLI_ABORTED; 3199 spin_lock_irqsave( 3200 &phba->hbalock, iflag); 3201 saveq->iocb_flag |= 3202 LPFC_DELAY_MEM_FREE; 3203 spin_unlock_irqrestore( 3204 &phba->hbalock, iflag); 3205 } 3206 } 3207 } 3208 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3209 } else 3210 lpfc_sli_release_iocbq(phba, cmdiocbp); 3211 } else { 3212 /* 3213 * Unknown initiating command based on the response iotag. 3214 * This could be the case on the ELS ring because of 3215 * lpfc_els_abort(). 
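 * lpfc_els_abort() may already have completed and released the command
 * iocb, so a lookup miss is expected on the ELS ring and only the other
 * rings log the unexpected-completion warning below.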
3216 */ 3217 if (pring->ringno != LPFC_ELS_RING) { 3218 /* 3219 * Ring <ringno> handler: unexpected completion IoTag 3220 * <IoTag> 3221 */ 3222 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3223 "0322 Ring %d handler: " 3224 "unexpected completion IoTag x%x " 3225 "Data: x%x x%x x%x x%x\n", 3226 pring->ringno, 3227 saveq->iocb.ulpIoTag, 3228 saveq->iocb.ulpStatus, 3229 saveq->iocb.un.ulpWord[4], 3230 saveq->iocb.ulpCommand, 3231 saveq->iocb.ulpContext); 3232 } 3233 } 3234 3235 return rc; 3236 } 3237 3238 /** 3239 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3240 * @phba: Pointer to HBA context object. 3241 * @pring: Pointer to driver SLI ring object. 3242 * 3243 * This function is called from the iocb ring event handlers when 3244 * put pointer is ahead of the get pointer for a ring. This function signal 3245 * an error attention condition to the worker thread and the worker 3246 * thread will transition the HBA to offline state. 3247 **/ 3248 static void 3249 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3250 { 3251 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3252 /* 3253 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3254 * rsp ring <portRspMax> 3255 */ 3256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3257 "0312 Ring %d handler: portRspPut %d " 3258 "is bigger than rsp ring %d\n", 3259 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3260 pring->sli.sli3.numRiocb); 3261 3262 phba->link_state = LPFC_HBA_ERROR; 3263 3264 /* 3265 * All error attention handlers are posted to 3266 * worker thread 3267 */ 3268 phba->work_ha |= HA_ERATT; 3269 phba->work_hs = HS_FFER3; 3270 3271 lpfc_worker_wake_up(phba); 3272 3273 return; 3274 } 3275 3276 /** 3277 * lpfc_poll_eratt - Error attention polling timer timeout handler 3278 * @ptr: Pointer to address of HBA context object. 3279 * 3280 * This function is invoked by the Error Attention polling timer when the 3281 * timer times out. It will check the SLI Error Attention register for 3282 * possible attention events. If so, it will post an Error Attention event 3283 * and wake up worker thread to process it. Otherwise, it will set up the 3284 * Error Attention polling timer for the next poll. 3285 **/ 3286 void lpfc_poll_eratt(struct timer_list *t) 3287 { 3288 struct lpfc_hba *phba; 3289 uint32_t eratt = 0; 3290 uint64_t sli_intr, cnt; 3291 3292 phba = from_timer(phba, t, eratt_poll); 3293 3294 /* Here we will also keep track of interrupts per sec of the hba */ 3295 sli_intr = phba->sli.slistat.sli_intr; 3296 3297 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3298 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3299 sli_intr); 3300 else 3301 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3302 3303 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3304 do_div(cnt, phba->eratt_poll_interval); 3305 phba->sli.slistat.sli_ips = cnt; 3306 3307 phba->sli.slistat.sli_prev_intr = sli_intr; 3308 3309 /* Check chip HA register for error event */ 3310 eratt = lpfc_sli_check_eratt(phba); 3311 3312 if (eratt) 3313 /* Tell the worker thread there is work to do */ 3314 lpfc_worker_wake_up(phba); 3315 else 3316 /* Restart the timer for next eratt poll */ 3317 mod_timer(&phba->eratt_poll, 3318 jiffies + 3319 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3320 return; 3321 } 3322 3323 3324 /** 3325 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3326 * @phba: Pointer to HBA context object. 
3327 * @pring: Pointer to driver SLI ring object. 3328 * @mask: Host attention register mask for this ring. 3329 * 3330 * This function is called from the interrupt context when there is a ring 3331 * event for the fcp ring. The caller does not hold any lock. 3332 * The function processes each response iocb in the response ring until it 3333 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3334 * LE bit set. The function will call the completion handler of the command iocb 3335 * if the response iocb indicates a completion for a command iocb or it is 3336 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3337 * function if this is an unsolicited iocb. 3338 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3339 * to check it explicitly. 3340 */ 3341 int 3342 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3343 struct lpfc_sli_ring *pring, uint32_t mask) 3344 { 3345 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3346 IOCB_t *irsp = NULL; 3347 IOCB_t *entry = NULL; 3348 struct lpfc_iocbq *cmdiocbq = NULL; 3349 struct lpfc_iocbq rspiocbq; 3350 uint32_t status; 3351 uint32_t portRspPut, portRspMax; 3352 int rc = 1; 3353 lpfc_iocb_type type; 3354 unsigned long iflag; 3355 uint32_t rsp_cmpl = 0; 3356 3357 spin_lock_irqsave(&phba->hbalock, iflag); 3358 pring->stats.iocb_event++; 3359 3360 /* 3361 * The next available response entry should never exceed the maximum 3362 * entries. If it does, treat it as an adapter hardware error. 3363 */ 3364 portRspMax = pring->sli.sli3.numRiocb; 3365 portRspPut = le32_to_cpu(pgp->rspPutInx); 3366 if (unlikely(portRspPut >= portRspMax)) { 3367 lpfc_sli_rsp_pointers_error(phba, pring); 3368 spin_unlock_irqrestore(&phba->hbalock, iflag); 3369 return 1; 3370 } 3371 if (phba->fcp_ring_in_use) { 3372 spin_unlock_irqrestore(&phba->hbalock, iflag); 3373 return 1; 3374 } else 3375 phba->fcp_ring_in_use = 1; 3376 3377 rmb(); 3378 while (pring->sli.sli3.rspidx != portRspPut) { 3379 /* 3380 * Fetch an entry off the ring and copy it into a local data 3381 * structure. The copy involves a byte-swap since the 3382 * network byte order and pci byte orders are different. 3383 */ 3384 entry = lpfc_resp_iocb(phba, pring); 3385 phba->last_completion_time = jiffies; 3386 3387 if (++pring->sli.sli3.rspidx >= portRspMax) 3388 pring->sli.sli3.rspidx = 0; 3389 3390 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3391 (uint32_t *) &rspiocbq.iocb, 3392 phba->iocb_rsp_size); 3393 INIT_LIST_HEAD(&(rspiocbq.list)); 3394 irsp = &rspiocbq.iocb; 3395 3396 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3397 pring->stats.iocb_rsp++; 3398 rsp_cmpl++; 3399 3400 if (unlikely(irsp->ulpStatus)) { 3401 /* 3402 * If resource errors reported from HBA, reduce 3403 * queuedepths of the SCSI device. 
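 * hbalock is dropped across the ramp-down callback and retaken before
 * the ring indexes are used again, presumably because the callback takes
 * hbalock itself.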
3404 */ 3405 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3406 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3407 IOERR_NO_RESOURCES)) { 3408 spin_unlock_irqrestore(&phba->hbalock, iflag); 3409 phba->lpfc_rampdown_queue_depth(phba); 3410 spin_lock_irqsave(&phba->hbalock, iflag); 3411 } 3412 3413 /* Rsp ring <ringno> error: IOCB */ 3414 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3415 "0336 Rsp Ring %d error: IOCB Data: " 3416 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3417 pring->ringno, 3418 irsp->un.ulpWord[0], 3419 irsp->un.ulpWord[1], 3420 irsp->un.ulpWord[2], 3421 irsp->un.ulpWord[3], 3422 irsp->un.ulpWord[4], 3423 irsp->un.ulpWord[5], 3424 *(uint32_t *)&irsp->un1, 3425 *((uint32_t *)&irsp->un1 + 1)); 3426 } 3427 3428 switch (type) { 3429 case LPFC_ABORT_IOCB: 3430 case LPFC_SOL_IOCB: 3431 /* 3432 * Idle exchange closed via ABTS from port. No iocb 3433 * resources need to be recovered. 3434 */ 3435 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3436 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3437 "0333 IOCB cmd 0x%x" 3438 " processed. Skipping" 3439 " completion\n", 3440 irsp->ulpCommand); 3441 break; 3442 } 3443 3444 spin_unlock_irqrestore(&phba->hbalock, iflag); 3445 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3446 &rspiocbq); 3447 spin_lock_irqsave(&phba->hbalock, iflag); 3448 if (unlikely(!cmdiocbq)) 3449 break; 3450 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3451 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3452 if (cmdiocbq->iocb_cmpl) { 3453 spin_unlock_irqrestore(&phba->hbalock, iflag); 3454 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3455 &rspiocbq); 3456 spin_lock_irqsave(&phba->hbalock, iflag); 3457 } 3458 break; 3459 case LPFC_UNSOL_IOCB: 3460 spin_unlock_irqrestore(&phba->hbalock, iflag); 3461 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3462 spin_lock_irqsave(&phba->hbalock, iflag); 3463 break; 3464 default: 3465 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3466 char adaptermsg[LPFC_MAX_ADPTMSG]; 3467 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3468 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3469 MAX_MSG_DATA); 3470 dev_warn(&((phba->pcidev)->dev), 3471 "lpfc%d: %s\n", 3472 phba->brd_no, adaptermsg); 3473 } else { 3474 /* Unknown IOCB command */ 3475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3476 "0334 Unknown IOCB command " 3477 "Data: x%x, x%x x%x x%x x%x\n", 3478 type, irsp->ulpCommand, 3479 irsp->ulpStatus, 3480 irsp->ulpIoTag, 3481 irsp->ulpContext); 3482 } 3483 break; 3484 } 3485 3486 /* 3487 * The response IOCB has been processed. Update the ring 3488 * pointer in SLIM. If the port response put pointer has not 3489 * been updated, sync the pgp->rspPutInx and fetch the new port 3490 * response put pointer. 
3491 */ 3492 writel(pring->sli.sli3.rspidx, 3493 &phba->host_gp[pring->ringno].rspGetInx); 3494 3495 if (pring->sli.sli3.rspidx == portRspPut) 3496 portRspPut = le32_to_cpu(pgp->rspPutInx); 3497 } 3498 3499 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3500 pring->stats.iocb_rsp_full++; 3501 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3502 writel(status, phba->CAregaddr); 3503 readl(phba->CAregaddr); 3504 } 3505 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3506 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3507 pring->stats.iocb_cmd_empty++; 3508 3509 /* Force update of the local copy of cmdGetInx */ 3510 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3511 lpfc_sli_resume_iocb(phba, pring); 3512 3513 if ((pring->lpfc_sli_cmd_available)) 3514 (pring->lpfc_sli_cmd_available) (phba, pring); 3515 3516 } 3517 3518 phba->fcp_ring_in_use = 0; 3519 spin_unlock_irqrestore(&phba->hbalock, iflag); 3520 return rc; 3521 } 3522 3523 /** 3524 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3525 * @phba: Pointer to HBA context object. 3526 * @pring: Pointer to driver SLI ring object. 3527 * @rspiocbp: Pointer to driver response IOCB object. 3528 * 3529 * This function is called from the worker thread when there is a slow-path 3530 * response IOCB to process. This function chains all the response iocbs until 3531 * seeing the iocb with the LE bit set. The function will call 3532 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3533 * completion of a command iocb. The function will call the 3534 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3535 * The function frees the resources or calls the completion handler if this 3536 * iocb is an abort completion. The function returns NULL when the response 3537 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3538 * this function shall chain the iocb on to the iocb_continueq and return the 3539 * response iocb passed in. 3540 **/ 3541 static struct lpfc_iocbq * 3542 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3543 struct lpfc_iocbq *rspiocbp) 3544 { 3545 struct lpfc_iocbq *saveq; 3546 struct lpfc_iocbq *cmdiocbp; 3547 struct lpfc_iocbq *next_iocb; 3548 IOCB_t *irsp = NULL; 3549 uint32_t free_saveq; 3550 uint8_t iocb_cmd_type; 3551 lpfc_iocb_type type; 3552 unsigned long iflag; 3553 int rc; 3554 3555 spin_lock_irqsave(&phba->hbalock, iflag); 3556 /* First add the response iocb to the countinueq list */ 3557 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3558 pring->iocb_continueq_cnt++; 3559 3560 /* Now, determine whether the list is completed for processing */ 3561 irsp = &rspiocbp->iocb; 3562 if (irsp->ulpLe) { 3563 /* 3564 * By default, the driver expects to free all resources 3565 * associated with this iocb completion. 3566 */ 3567 free_saveq = 1; 3568 saveq = list_get_first(&pring->iocb_continueq, 3569 struct lpfc_iocbq, list); 3570 irsp = &(saveq->iocb); 3571 list_del_init(&pring->iocb_continueq); 3572 pring->iocb_continueq_cnt = 0; 3573 3574 pring->stats.iocb_rsp++; 3575 3576 /* 3577 * If resource errors reported from HBA, reduce 3578 * queuedepths of the SCSI device. 
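 * This mirrors the fast-path handling above: the hbalock is released
 * across the rampdown call and re-acquired before continuing.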
3579 */ 3580 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3581 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3582 IOERR_NO_RESOURCES)) { 3583 spin_unlock_irqrestore(&phba->hbalock, iflag); 3584 phba->lpfc_rampdown_queue_depth(phba); 3585 spin_lock_irqsave(&phba->hbalock, iflag); 3586 } 3587 3588 if (irsp->ulpStatus) { 3589 /* Rsp ring <ringno> error: IOCB */ 3590 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3591 "0328 Rsp Ring %d error: " 3592 "IOCB Data: " 3593 "x%x x%x x%x x%x " 3594 "x%x x%x x%x x%x " 3595 "x%x x%x x%x x%x " 3596 "x%x x%x x%x x%x\n", 3597 pring->ringno, 3598 irsp->un.ulpWord[0], 3599 irsp->un.ulpWord[1], 3600 irsp->un.ulpWord[2], 3601 irsp->un.ulpWord[3], 3602 irsp->un.ulpWord[4], 3603 irsp->un.ulpWord[5], 3604 *(((uint32_t *) irsp) + 6), 3605 *(((uint32_t *) irsp) + 7), 3606 *(((uint32_t *) irsp) + 8), 3607 *(((uint32_t *) irsp) + 9), 3608 *(((uint32_t *) irsp) + 10), 3609 *(((uint32_t *) irsp) + 11), 3610 *(((uint32_t *) irsp) + 12), 3611 *(((uint32_t *) irsp) + 13), 3612 *(((uint32_t *) irsp) + 14), 3613 *(((uint32_t *) irsp) + 15)); 3614 } 3615 3616 /* 3617 * Fetch the IOCB command type and call the correct completion 3618 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3619 * get freed back to the lpfc_iocb_list by the discovery 3620 * kernel thread. 3621 */ 3622 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3623 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3624 switch (type) { 3625 case LPFC_SOL_IOCB: 3626 spin_unlock_irqrestore(&phba->hbalock, iflag); 3627 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3628 spin_lock_irqsave(&phba->hbalock, iflag); 3629 break; 3630 3631 case LPFC_UNSOL_IOCB: 3632 spin_unlock_irqrestore(&phba->hbalock, iflag); 3633 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3634 spin_lock_irqsave(&phba->hbalock, iflag); 3635 if (!rc) 3636 free_saveq = 0; 3637 break; 3638 3639 case LPFC_ABORT_IOCB: 3640 cmdiocbp = NULL; 3641 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) { 3642 spin_unlock_irqrestore(&phba->hbalock, iflag); 3643 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3644 saveq); 3645 spin_lock_irqsave(&phba->hbalock, iflag); 3646 } 3647 if (cmdiocbp) { 3648 /* Call the specified completion routine */ 3649 if (cmdiocbp->iocb_cmpl) { 3650 spin_unlock_irqrestore(&phba->hbalock, 3651 iflag); 3652 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3653 saveq); 3654 spin_lock_irqsave(&phba->hbalock, 3655 iflag); 3656 } else 3657 __lpfc_sli_release_iocbq(phba, 3658 cmdiocbp); 3659 } 3660 break; 3661 3662 case LPFC_UNKNOWN_IOCB: 3663 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3664 char adaptermsg[LPFC_MAX_ADPTMSG]; 3665 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3666 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3667 MAX_MSG_DATA); 3668 dev_warn(&((phba->pcidev)->dev), 3669 "lpfc%d: %s\n", 3670 phba->brd_no, adaptermsg); 3671 } else { 3672 /* Unknown IOCB command */ 3673 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3674 "0335 Unknown IOCB " 3675 "command Data: x%x " 3676 "x%x x%x x%x\n", 3677 irsp->ulpCommand, 3678 irsp->ulpStatus, 3679 irsp->ulpIoTag, 3680 irsp->ulpContext); 3681 } 3682 break; 3683 } 3684 3685 if (free_saveq) { 3686 list_for_each_entry_safe(rspiocbp, next_iocb, 3687 &saveq->list, list) { 3688 list_del_init(&rspiocbp->list); 3689 __lpfc_sli_release_iocbq(phba, rspiocbp); 3690 } 3691 __lpfc_sli_release_iocbq(phba, saveq); 3692 } 3693 rspiocbp = NULL; 3694 } 3695 spin_unlock_irqrestore(&phba->hbalock, iflag); 3696 return rspiocbp; 3697 } 3698 3699 /** 3700 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path 
iocbs 3701 * @phba: Pointer to HBA context object. 3702 * @pring: Pointer to driver SLI ring object. 3703 * @mask: Host attention register mask for this ring. 3704 * 3705 * This routine wraps the actual slow_ring event process routine from the 3706 * API jump table function pointer from the lpfc_hba struct. 3707 **/ 3708 void 3709 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3710 struct lpfc_sli_ring *pring, uint32_t mask) 3711 { 3712 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3713 } 3714 3715 /** 3716 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3717 * @phba: Pointer to HBA context object. 3718 * @pring: Pointer to driver SLI ring object. 3719 * @mask: Host attention register mask for this ring. 3720 * 3721 * This function is called from the worker thread when there is a ring event 3722 * for non-fcp rings. The caller does not hold any lock. The function will 3723 * remove each response iocb in the response ring and calls the handle 3724 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3725 **/ 3726 static void 3727 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3728 struct lpfc_sli_ring *pring, uint32_t mask) 3729 { 3730 struct lpfc_pgp *pgp; 3731 IOCB_t *entry; 3732 IOCB_t *irsp = NULL; 3733 struct lpfc_iocbq *rspiocbp = NULL; 3734 uint32_t portRspPut, portRspMax; 3735 unsigned long iflag; 3736 uint32_t status; 3737 3738 pgp = &phba->port_gp[pring->ringno]; 3739 spin_lock_irqsave(&phba->hbalock, iflag); 3740 pring->stats.iocb_event++; 3741 3742 /* 3743 * The next available response entry should never exceed the maximum 3744 * entries. If it does, treat it as an adapter hardware error. 3745 */ 3746 portRspMax = pring->sli.sli3.numRiocb; 3747 portRspPut = le32_to_cpu(pgp->rspPutInx); 3748 if (portRspPut >= portRspMax) { 3749 /* 3750 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3751 * rsp ring <portRspMax> 3752 */ 3753 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3754 "0303 Ring %d handler: portRspPut %d " 3755 "is bigger than rsp ring %d\n", 3756 pring->ringno, portRspPut, portRspMax); 3757 3758 phba->link_state = LPFC_HBA_ERROR; 3759 spin_unlock_irqrestore(&phba->hbalock, iflag); 3760 3761 phba->work_hs = HS_FFER3; 3762 lpfc_handle_eratt(phba); 3763 3764 return; 3765 } 3766 3767 rmb(); 3768 while (pring->sli.sli3.rspidx != portRspPut) { 3769 /* 3770 * Build a completion list and call the appropriate handler. 3771 * The process is to get the next available response iocb, get 3772 * a free iocb from the list, copy the response data into the 3773 * free iocb, insert to the continuation list, and update the 3774 * next response index to slim. This process makes response 3775 * iocb's in the ring available to DMA as fast as possible but 3776 * pays a penalty for a copy operation. Since the iocb is 3777 * only 32 bytes, this penalty is considered small relative to 3778 * the PCI reads for register values and a slim write. When 3779 * the ulpLe field is set, the entire Command has been 3780 * received. 3781 */ 3782 entry = lpfc_resp_iocb(phba, pring); 3783 3784 phba->last_completion_time = jiffies; 3785 rspiocbp = __lpfc_sli_get_iocbq(phba); 3786 if (rspiocbp == NULL) { 3787 printk(KERN_ERR "%s: out of buffers! 
Failing " 3788 "completion.\n", __func__); 3789 break; 3790 } 3791 3792 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3793 phba->iocb_rsp_size); 3794 irsp = &rspiocbp->iocb; 3795 3796 if (++pring->sli.sli3.rspidx >= portRspMax) 3797 pring->sli.sli3.rspidx = 0; 3798 3799 if (pring->ringno == LPFC_ELS_RING) { 3800 lpfc_debugfs_slow_ring_trc(phba, 3801 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3802 *(((uint32_t *) irsp) + 4), 3803 *(((uint32_t *) irsp) + 6), 3804 *(((uint32_t *) irsp) + 7)); 3805 } 3806 3807 writel(pring->sli.sli3.rspidx, 3808 &phba->host_gp[pring->ringno].rspGetInx); 3809 3810 spin_unlock_irqrestore(&phba->hbalock, iflag); 3811 /* Handle the response IOCB */ 3812 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3813 spin_lock_irqsave(&phba->hbalock, iflag); 3814 3815 /* 3816 * If the port response put pointer has not been updated, sync 3817 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3818 * response put pointer. 3819 */ 3820 if (pring->sli.sli3.rspidx == portRspPut) { 3821 portRspPut = le32_to_cpu(pgp->rspPutInx); 3822 } 3823 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3824 3825 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3826 /* At least one response entry has been freed */ 3827 pring->stats.iocb_rsp_full++; 3828 /* SET RxRE_RSP in Chip Att register */ 3829 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3830 writel(status, phba->CAregaddr); 3831 readl(phba->CAregaddr); /* flush */ 3832 } 3833 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3834 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3835 pring->stats.iocb_cmd_empty++; 3836 3837 /* Force update of the local copy of cmdGetInx */ 3838 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3839 lpfc_sli_resume_iocb(phba, pring); 3840 3841 if ((pring->lpfc_sli_cmd_available)) 3842 (pring->lpfc_sli_cmd_available) (phba, pring); 3843 3844 } 3845 3846 spin_unlock_irqrestore(&phba->hbalock, iflag); 3847 return; 3848 } 3849 3850 /** 3851 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3852 * @phba: Pointer to HBA context object. 3853 * @pring: Pointer to driver SLI ring object. 3854 * @mask: Host attention register mask for this ring. 3855 * 3856 * This function is called from the worker thread when there is a pending 3857 * ELS response iocb on the driver internal slow-path response iocb worker 3858 * queue. The caller does not hold any lock. The function will remove each 3859 * response iocb from the response worker queue and calls the handle 3860 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
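 * Each event removed from the queue is either a WQE completion, which is
 * translated to a response IOCBQ and passed to lpfc_sli_sp_handle_rspiocb,
 * or a received frame, which is handed to lpfc_sli4_handle_received_buffer.
 * At most 64 events are processed per invocation to avoid soft lockups.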
3861 **/ 3862 static void 3863 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3864 struct lpfc_sli_ring *pring, uint32_t mask) 3865 { 3866 struct lpfc_iocbq *irspiocbq; 3867 struct hbq_dmabuf *dmabuf; 3868 struct lpfc_cq_event *cq_event; 3869 unsigned long iflag; 3870 int count = 0; 3871 3872 spin_lock_irqsave(&phba->hbalock, iflag); 3873 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3874 spin_unlock_irqrestore(&phba->hbalock, iflag); 3875 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3876 /* Get the response iocb from the head of work queue */ 3877 spin_lock_irqsave(&phba->hbalock, iflag); 3878 list_remove_head(&phba->sli4_hba.sp_queue_event, 3879 cq_event, struct lpfc_cq_event, list); 3880 spin_unlock_irqrestore(&phba->hbalock, iflag); 3881 3882 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3883 case CQE_CODE_COMPL_WQE: 3884 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3885 cq_event); 3886 /* Translate ELS WCQE to response IOCBQ */ 3887 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3888 irspiocbq); 3889 if (irspiocbq) 3890 lpfc_sli_sp_handle_rspiocb(phba, pring, 3891 irspiocbq); 3892 count++; 3893 break; 3894 case CQE_CODE_RECEIVE: 3895 case CQE_CODE_RECEIVE_V1: 3896 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3897 cq_event); 3898 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3899 count++; 3900 break; 3901 default: 3902 break; 3903 } 3904 3905 /* Limit the number of events to 64 to avoid soft lockups */ 3906 if (count == 64) 3907 break; 3908 } 3909 } 3910 3911 /** 3912 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3913 * @phba: Pointer to HBA context object. 3914 * @pring: Pointer to driver SLI ring object. 3915 * 3916 * This function aborts all iocbs in the given ring and frees all the iocb 3917 * objects in txq. This function issues an abort iocb for all the iocb commands 3918 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3919 * the return of this function. The caller is not required to hold any locks. 3920 **/ 3921 void 3922 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3923 { 3924 LIST_HEAD(completions); 3925 struct lpfc_iocbq *iocb, *next_iocb; 3926 3927 if (pring->ringno == LPFC_ELS_RING) { 3928 lpfc_fabric_abort_hba(phba); 3929 } 3930 3931 /* Error everything on txq and txcmplq 3932 * First do the txq. 3933 */ 3934 if (phba->sli_rev >= LPFC_SLI_REV4) { 3935 spin_lock_irq(&pring->ring_lock); 3936 list_splice_init(&pring->txq, &completions); 3937 pring->txq_cnt = 0; 3938 spin_unlock_irq(&pring->ring_lock); 3939 3940 spin_lock_irq(&phba->hbalock); 3941 /* Next issue ABTS for everything on the txcmplq */ 3942 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3943 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3944 spin_unlock_irq(&phba->hbalock); 3945 } else { 3946 spin_lock_irq(&phba->hbalock); 3947 list_splice_init(&pring->txq, &completions); 3948 pring->txq_cnt = 0; 3949 3950 /* Next issue ABTS for everything on the txcmplq */ 3951 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3952 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3953 spin_unlock_irq(&phba->hbalock); 3954 } 3955 3956 /* Cancel all the IOCBs from the completions list */ 3957 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3958 IOERR_SLI_ABORTED); 3959 } 3960 3961 /** 3962 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3963 * @phba: Pointer to HBA context object. 3964 * @pring: Pointer to driver SLI ring object. 
3965 * 3966 * This function aborts all iocbs in FCP rings and frees all the iocb 3967 * objects in txq. This function issues an abort iocb for all the iocb commands 3968 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3969 * the return of this function. The caller is not required to hold any locks. 3970 **/ 3971 void 3972 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3973 { 3974 struct lpfc_sli *psli = &phba->sli; 3975 struct lpfc_sli_ring *pring; 3976 uint32_t i; 3977 3978 /* Look on all the FCP Rings for the iotag */ 3979 if (phba->sli_rev >= LPFC_SLI_REV4) { 3980 for (i = 0; i < phba->cfg_hdw_queue; i++) { 3981 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 3982 lpfc_sli_abort_iocb_ring(phba, pring); 3983 } 3984 } else { 3985 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3986 lpfc_sli_abort_iocb_ring(phba, pring); 3987 } 3988 } 3989 3990 /** 3991 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring 3992 * @phba: Pointer to HBA context object. 3993 * 3994 * This function flushes all iocbs in the IO ring and frees all the iocb 3995 * objects in txq and txcmplq. This function will not issue abort iocbs 3996 * for all the iocb commands in txcmplq, they will just be returned with 3997 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3998 * slot has been permanently disabled. 3999 **/ 4000 void 4001 lpfc_sli_flush_io_rings(struct lpfc_hba *phba) 4002 { 4003 LIST_HEAD(txq); 4004 LIST_HEAD(txcmplq); 4005 struct lpfc_sli *psli = &phba->sli; 4006 struct lpfc_sli_ring *pring; 4007 uint32_t i; 4008 struct lpfc_iocbq *piocb, *next_iocb; 4009 4010 spin_lock_irq(&phba->hbalock); 4011 /* Indicate the I/O queues are flushed */ 4012 phba->hba_flag |= HBA_IOQ_FLUSH; 4013 spin_unlock_irq(&phba->hbalock); 4014 4015 /* Look on all the FCP Rings for the iotag */ 4016 if (phba->sli_rev >= LPFC_SLI_REV4) { 4017 for (i = 0; i < phba->cfg_hdw_queue; i++) { 4018 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 4019 4020 spin_lock_irq(&pring->ring_lock); 4021 /* Retrieve everything on txq */ 4022 list_splice_init(&pring->txq, &txq); 4023 list_for_each_entry_safe(piocb, next_iocb, 4024 &pring->txcmplq, list) 4025 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4026 /* Retrieve everything on the txcmplq */ 4027 list_splice_init(&pring->txcmplq, &txcmplq); 4028 pring->txq_cnt = 0; 4029 pring->txcmplq_cnt = 0; 4030 spin_unlock_irq(&pring->ring_lock); 4031 4032 /* Flush the txq */ 4033 lpfc_sli_cancel_iocbs(phba, &txq, 4034 IOSTAT_LOCAL_REJECT, 4035 IOERR_SLI_DOWN); 4036 /* Flush the txcmpq */ 4037 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4038 IOSTAT_LOCAL_REJECT, 4039 IOERR_SLI_DOWN); 4040 } 4041 } else { 4042 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4043 4044 spin_lock_irq(&phba->hbalock); 4045 /* Retrieve everything on txq */ 4046 list_splice_init(&pring->txq, &txq); 4047 list_for_each_entry_safe(piocb, next_iocb, 4048 &pring->txcmplq, list) 4049 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4050 /* Retrieve everything on the txcmplq */ 4051 list_splice_init(&pring->txcmplq, &txcmplq); 4052 pring->txq_cnt = 0; 4053 pring->txcmplq_cnt = 0; 4054 spin_unlock_irq(&phba->hbalock); 4055 4056 /* Flush the txq */ 4057 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4058 IOERR_SLI_DOWN); 4059 /* Flush the txcmpq */ 4060 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4061 IOERR_SLI_DOWN); 4062 } 4063 } 4064 4065 /** 4066 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 4067 * @phba: Pointer to HBA context object. 4068 * @mask: Bit mask to be checked. 
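 *        All bits set in @mask must also be set in the host status
 *        register before the adapter is considered ready.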
4069 * 4070 * This function reads the host status register and compares 4071 * with the provided bit mask to check if HBA completed 4072 * the restart. This function will wait in a loop for the 4073 * HBA to complete restart. If the HBA does not restart within 4074 * 15 iterations, the function will reset the HBA again. The 4075 * function returns 1 when HBA fail to restart otherwise returns 4076 * zero. 4077 **/ 4078 static int 4079 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 4080 { 4081 uint32_t status; 4082 int i = 0; 4083 int retval = 0; 4084 4085 /* Read the HBA Host Status Register */ 4086 if (lpfc_readl(phba->HSregaddr, &status)) 4087 return 1; 4088 4089 /* 4090 * Check status register every 100ms for 5 retries, then every 4091 * 500ms for 5, then every 2.5 sec for 5, then reset board and 4092 * every 2.5 sec for 4. 4093 * Break our of the loop if errors occurred during init. 4094 */ 4095 while (((status & mask) != mask) && 4096 !(status & HS_FFERM) && 4097 i++ < 20) { 4098 4099 if (i <= 5) 4100 msleep(10); 4101 else if (i <= 10) 4102 msleep(500); 4103 else 4104 msleep(2500); 4105 4106 if (i == 15) { 4107 /* Do post */ 4108 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4109 lpfc_sli_brdrestart(phba); 4110 } 4111 /* Read the HBA Host Status Register */ 4112 if (lpfc_readl(phba->HSregaddr, &status)) { 4113 retval = 1; 4114 break; 4115 } 4116 } 4117 4118 /* Check to see if any errors occurred during init */ 4119 if ((status & HS_FFERM) || (i >= 20)) { 4120 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4121 "2751 Adapter failed to restart, " 4122 "status reg x%x, FW Data: A8 x%x AC x%x\n", 4123 status, 4124 readl(phba->MBslimaddr + 0xa8), 4125 readl(phba->MBslimaddr + 0xac)); 4126 phba->link_state = LPFC_HBA_ERROR; 4127 retval = 1; 4128 } 4129 4130 return retval; 4131 } 4132 4133 /** 4134 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 4135 * @phba: Pointer to HBA context object. 4136 * @mask: Bit mask to be checked. 4137 * 4138 * This function checks the host status register to check if HBA is 4139 * ready. This function will wait in a loop for the HBA to be ready 4140 * If the HBA is not ready , the function will will reset the HBA PCI 4141 * function again. The function returns 1 when HBA fail to be ready 4142 * otherwise returns zero. 4143 **/ 4144 static int 4145 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 4146 { 4147 uint32_t status; 4148 int retval = 0; 4149 4150 /* Read the HBA Host Status Register */ 4151 status = lpfc_sli4_post_status_check(phba); 4152 4153 if (status) { 4154 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4155 lpfc_sli_brdrestart(phba); 4156 status = lpfc_sli4_post_status_check(phba); 4157 } 4158 4159 /* Check to see if any errors occurred during init */ 4160 if (status) { 4161 phba->link_state = LPFC_HBA_ERROR; 4162 retval = 1; 4163 } else 4164 phba->sli4_hba.intr_enable = 0; 4165 4166 return retval; 4167 } 4168 4169 /** 4170 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 4171 * @phba: Pointer to HBA context object. 4172 * @mask: Bit mask to be checked. 4173 * 4174 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 4175 * from the API jump table function pointer from the lpfc_hba struct. 
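 *
 * A minimal caller-side sketch; the mask and the error handling shown are
 * only examples, not the required usage:
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;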
4176 **/ 4177 int 4178 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 4179 { 4180 return phba->lpfc_sli_brdready(phba, mask); 4181 } 4182 4183 #define BARRIER_TEST_PATTERN (0xdeadbeef) 4184 4185 /** 4186 * lpfc_reset_barrier - Make HBA ready for HBA reset 4187 * @phba: Pointer to HBA context object. 4188 * 4189 * This function is called before resetting an HBA. This function is called 4190 * with hbalock held and requests HBA to quiesce DMAs before a reset. 4191 **/ 4192 void lpfc_reset_barrier(struct lpfc_hba *phba) 4193 { 4194 uint32_t __iomem *resp_buf; 4195 uint32_t __iomem *mbox_buf; 4196 volatile uint32_t mbox; 4197 uint32_t hc_copy, ha_copy, resp_data; 4198 int i; 4199 uint8_t hdrtype; 4200 4201 lockdep_assert_held(&phba->hbalock); 4202 4203 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4204 if (hdrtype != 0x80 || 4205 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4206 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4207 return; 4208 4209 /* 4210 * Tell the other part of the chip to suspend temporarily all 4211 * its DMA activity. 4212 */ 4213 resp_buf = phba->MBslimaddr; 4214 4215 /* Disable the error attention */ 4216 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4217 return; 4218 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4219 readl(phba->HCregaddr); /* flush */ 4220 phba->link_flag |= LS_IGNORE_ERATT; 4221 4222 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4223 return; 4224 if (ha_copy & HA_ERATT) { 4225 /* Clear Chip error bit */ 4226 writel(HA_ERATT, phba->HAregaddr); 4227 phba->pport->stopped = 1; 4228 } 4229 4230 mbox = 0; 4231 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4232 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4233 4234 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4235 mbox_buf = phba->MBslimaddr; 4236 writel(mbox, mbox_buf); 4237 4238 for (i = 0; i < 50; i++) { 4239 if (lpfc_readl((resp_buf + 1), &resp_data)) 4240 return; 4241 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4242 mdelay(1); 4243 else 4244 break; 4245 } 4246 resp_data = 0; 4247 if (lpfc_readl((resp_buf + 1), &resp_data)) 4248 return; 4249 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4250 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4251 phba->pport->stopped) 4252 goto restore_hc; 4253 else 4254 goto clear_errat; 4255 } 4256 4257 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4258 resp_data = 0; 4259 for (i = 0; i < 500; i++) { 4260 if (lpfc_readl(resp_buf, &resp_data)) 4261 return; 4262 if (resp_data != mbox) 4263 mdelay(1); 4264 else 4265 break; 4266 } 4267 4268 clear_errat: 4269 4270 while (++i < 500) { 4271 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4272 return; 4273 if (!(ha_copy & HA_ERATT)) 4274 mdelay(1); 4275 else 4276 break; 4277 } 4278 4279 if (readl(phba->HAregaddr) & HA_ERATT) { 4280 writel(HA_ERATT, phba->HAregaddr); 4281 phba->pport->stopped = 1; 4282 } 4283 4284 restore_hc: 4285 phba->link_flag &= ~LS_IGNORE_ERATT; 4286 writel(hc_copy, phba->HCregaddr); 4287 readl(phba->HCregaddr); /* flush */ 4288 } 4289 4290 /** 4291 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4292 * @phba: Pointer to HBA context object. 4293 * 4294 * This function issues a kill_board mailbox command and waits for 4295 * the error attention interrupt. This function is called for stopping 4296 * the firmware processing. The caller is not required to hold any 4297 * locks. This function calls lpfc_hba_down_post function to free 4298 * any pending commands after the kill. The function will return 1 when it 4299 * fails to kill the board else will return 0. 
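 * A successful kill is indicated by the port raising an error attention
 * (HA_ERATT) after the KILL_BOARD mailbox command has been posted; the
 * routine polls the host attention register for that bit.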
4300 **/ 4301 int 4302 lpfc_sli_brdkill(struct lpfc_hba *phba) 4303 { 4304 struct lpfc_sli *psli; 4305 LPFC_MBOXQ_t *pmb; 4306 uint32_t status; 4307 uint32_t ha_copy; 4308 int retval; 4309 int i = 0; 4310 4311 psli = &phba->sli; 4312 4313 /* Kill HBA */ 4314 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4315 "0329 Kill HBA Data: x%x x%x\n", 4316 phba->pport->port_state, psli->sli_flag); 4317 4318 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4319 if (!pmb) 4320 return 1; 4321 4322 /* Disable the error attention */ 4323 spin_lock_irq(&phba->hbalock); 4324 if (lpfc_readl(phba->HCregaddr, &status)) { 4325 spin_unlock_irq(&phba->hbalock); 4326 mempool_free(pmb, phba->mbox_mem_pool); 4327 return 1; 4328 } 4329 status &= ~HC_ERINT_ENA; 4330 writel(status, phba->HCregaddr); 4331 readl(phba->HCregaddr); /* flush */ 4332 phba->link_flag |= LS_IGNORE_ERATT; 4333 spin_unlock_irq(&phba->hbalock); 4334 4335 lpfc_kill_board(phba, pmb); 4336 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4337 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4338 4339 if (retval != MBX_SUCCESS) { 4340 if (retval != MBX_BUSY) 4341 mempool_free(pmb, phba->mbox_mem_pool); 4342 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4343 "2752 KILL_BOARD command failed retval %d\n", 4344 retval); 4345 spin_lock_irq(&phba->hbalock); 4346 phba->link_flag &= ~LS_IGNORE_ERATT; 4347 spin_unlock_irq(&phba->hbalock); 4348 return 1; 4349 } 4350 4351 spin_lock_irq(&phba->hbalock); 4352 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4353 spin_unlock_irq(&phba->hbalock); 4354 4355 mempool_free(pmb, phba->mbox_mem_pool); 4356 4357 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4358 * attention every 100ms for 3 seconds. If we don't get ERATT after 4359 * 3 seconds we still set HBA_ERROR state because the status of the 4360 * board is now undefined. 4361 */ 4362 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4363 return 1; 4364 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4365 mdelay(100); 4366 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4367 return 1; 4368 } 4369 4370 del_timer_sync(&psli->mbox_tmo); 4371 if (ha_copy & HA_ERATT) { 4372 writel(HA_ERATT, phba->HAregaddr); 4373 phba->pport->stopped = 1; 4374 } 4375 spin_lock_irq(&phba->hbalock); 4376 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4377 psli->mbox_active = NULL; 4378 phba->link_flag &= ~LS_IGNORE_ERATT; 4379 spin_unlock_irq(&phba->hbalock); 4380 4381 lpfc_hba_down_post(phba); 4382 phba->link_state = LPFC_HBA_ERROR; 4383 4384 return ha_copy & HA_ERATT ? 0 : 1; 4385 } 4386 4387 /** 4388 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4389 * @phba: Pointer to HBA context object. 4390 * 4391 * This function resets the HBA by writing HC_INITFF to the control 4392 * register. After the HBA resets, this function resets all the iocb ring 4393 * indices. This function disables PCI layer parity checking during 4394 * the reset. 4395 * This function returns 0 always. 4396 * The caller is not required to hold any locks. 4397 **/ 4398 int 4399 lpfc_sli_brdreset(struct lpfc_hba *phba) 4400 { 4401 struct lpfc_sli *psli; 4402 struct lpfc_sli_ring *pring; 4403 uint16_t cfg_value; 4404 int i; 4405 4406 psli = &phba->sli; 4407 4408 /* Reset HBA */ 4409 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4410 "0325 Reset HBA Data: x%x x%x\n", 4411 (phba->pport) ? 
phba->pport->port_state : 0, 4412 psli->sli_flag); 4413 4414 /* perform board reset */ 4415 phba->fc_eventTag = 0; 4416 phba->link_events = 0; 4417 if (phba->pport) { 4418 phba->pport->fc_myDID = 0; 4419 phba->pport->fc_prevDID = 0; 4420 } 4421 4422 /* Turn off parity checking and serr during the physical reset */ 4423 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) 4424 return -EIO; 4425 4426 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4427 (cfg_value & 4428 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4429 4430 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4431 4432 /* Now toggle INITFF bit in the Host Control Register */ 4433 writel(HC_INITFF, phba->HCregaddr); 4434 mdelay(1); 4435 readl(phba->HCregaddr); /* flush */ 4436 writel(0, phba->HCregaddr); 4437 readl(phba->HCregaddr); /* flush */ 4438 4439 /* Restore PCI cmd register */ 4440 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4441 4442 /* Initialize relevant SLI info */ 4443 for (i = 0; i < psli->num_rings; i++) { 4444 pring = &psli->sli3_ring[i]; 4445 pring->flag = 0; 4446 pring->sli.sli3.rspidx = 0; 4447 pring->sli.sli3.next_cmdidx = 0; 4448 pring->sli.sli3.local_getidx = 0; 4449 pring->sli.sli3.cmdidx = 0; 4450 pring->missbufcnt = 0; 4451 } 4452 4453 phba->link_state = LPFC_WARM_START; 4454 return 0; 4455 } 4456 4457 /** 4458 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4459 * @phba: Pointer to HBA context object. 4460 * 4461 * This function resets a SLI4 HBA. This function disables PCI layer parity 4462 * checking during resets the device. The caller is not required to hold 4463 * any locks. 4464 * 4465 * This function returns 0 on success else returns negative error code. 4466 **/ 4467 int 4468 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4469 { 4470 struct lpfc_sli *psli = &phba->sli; 4471 uint16_t cfg_value; 4472 int rc = 0; 4473 4474 /* Reset HBA */ 4475 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4476 "0295 Reset HBA Data: x%x x%x x%x\n", 4477 phba->pport->port_state, psli->sli_flag, 4478 phba->hba_flag); 4479 4480 /* perform board reset */ 4481 phba->fc_eventTag = 0; 4482 phba->link_events = 0; 4483 phba->pport->fc_myDID = 0; 4484 phba->pport->fc_prevDID = 0; 4485 4486 spin_lock_irq(&phba->hbalock); 4487 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4488 phba->fcf.fcf_flag = 0; 4489 spin_unlock_irq(&phba->hbalock); 4490 4491 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4492 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4493 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4494 return rc; 4495 } 4496 4497 /* Now physically reset the device */ 4498 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4499 "0389 Performing PCI function reset!\n"); 4500 4501 /* Turn off parity checking and serr during the physical reset */ 4502 if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) { 4503 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4504 "3205 PCI read Config failed\n"); 4505 return -EIO; 4506 } 4507 4508 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4509 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4510 4511 /* Perform FCoE PCI function reset before freeing queue memory */ 4512 rc = lpfc_pci_function_reset(phba); 4513 4514 /* Restore PCI cmd register */ 4515 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4516 4517 return rc; 4518 } 4519 4520 /** 4521 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4522 * @phba: Pointer to HBA context object. 4523 * 4524 * This function is called in the SLI initialization code path to 4525 * restart the HBA. 
The caller is not required to hold any lock. 4526 * This function writes MBX_RESTART mailbox command to the SLIM and 4527 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4528 * function to free any pending commands. The function enables 4529 * POST only during the first initialization. The function returns zero. 4530 * The function does not guarantee completion of MBX_RESTART mailbox 4531 * command before the return of this function. 4532 **/ 4533 static int 4534 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4535 { 4536 MAILBOX_t *mb; 4537 struct lpfc_sli *psli; 4538 volatile uint32_t word0; 4539 void __iomem *to_slim; 4540 uint32_t hba_aer_enabled; 4541 4542 spin_lock_irq(&phba->hbalock); 4543 4544 /* Take PCIe device Advanced Error Reporting (AER) state */ 4545 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4546 4547 psli = &phba->sli; 4548 4549 /* Restart HBA */ 4550 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4551 "0337 Restart HBA Data: x%x x%x\n", 4552 (phba->pport) ? phba->pport->port_state : 0, 4553 psli->sli_flag); 4554 4555 word0 = 0; 4556 mb = (MAILBOX_t *) &word0; 4557 mb->mbxCommand = MBX_RESTART; 4558 mb->mbxHc = 1; 4559 4560 lpfc_reset_barrier(phba); 4561 4562 to_slim = phba->MBslimaddr; 4563 writel(*(uint32_t *) mb, to_slim); 4564 readl(to_slim); /* flush */ 4565 4566 /* Only skip post after fc_ffinit is completed */ 4567 if (phba->pport && phba->pport->port_state) 4568 word0 = 1; /* This is really setting up word1 */ 4569 else 4570 word0 = 0; /* This is really setting up word1 */ 4571 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4572 writel(*(uint32_t *) mb, to_slim); 4573 readl(to_slim); /* flush */ 4574 4575 lpfc_sli_brdreset(phba); 4576 if (phba->pport) 4577 phba->pport->stopped = 0; 4578 phba->link_state = LPFC_INIT_START; 4579 phba->hba_flag = 0; 4580 spin_unlock_irq(&phba->hbalock); 4581 4582 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4583 psli->stats_start = ktime_get_seconds(); 4584 4585 /* Give the INITFF and Post time to settle. */ 4586 mdelay(100); 4587 4588 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4589 if (hba_aer_enabled) 4590 pci_disable_pcie_error_reporting(phba->pcidev); 4591 4592 lpfc_hba_down_post(phba); 4593 4594 return 0; 4595 } 4596 4597 /** 4598 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4599 * @phba: Pointer to HBA context object. 4600 * 4601 * This function is called in the SLI initialization code path to restart 4602 * a SLI4 HBA. The caller is not required to hold any lock. 4603 * At the end of the function, it calls lpfc_hba_down_post function to 4604 * free any pending commands. 
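 * The function returns zero when the reset and restart succeed, otherwise
 * it returns the negative error code reported by lpfc_sli4_brdreset().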
4605 **/ 4606 static int 4607 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4608 { 4609 struct lpfc_sli *psli = &phba->sli; 4610 uint32_t hba_aer_enabled; 4611 int rc; 4612 4613 /* Restart HBA */ 4614 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4615 "0296 Restart HBA Data: x%x x%x\n", 4616 phba->pport->port_state, psli->sli_flag); 4617 4618 /* Take PCIe device Advanced Error Reporting (AER) state */ 4619 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4620 4621 rc = lpfc_sli4_brdreset(phba); 4622 if (rc) { 4623 phba->link_state = LPFC_HBA_ERROR; 4624 goto hba_down_queue; 4625 } 4626 4627 spin_lock_irq(&phba->hbalock); 4628 phba->pport->stopped = 0; 4629 phba->link_state = LPFC_INIT_START; 4630 phba->hba_flag = 0; 4631 spin_unlock_irq(&phba->hbalock); 4632 4633 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4634 psli->stats_start = ktime_get_seconds(); 4635 4636 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4637 if (hba_aer_enabled) 4638 pci_disable_pcie_error_reporting(phba->pcidev); 4639 4640 hba_down_queue: 4641 lpfc_hba_down_post(phba); 4642 lpfc_sli4_queue_destroy(phba); 4643 4644 return rc; 4645 } 4646 4647 /** 4648 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4649 * @phba: Pointer to HBA context object. 4650 * 4651 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4652 * API jump table function pointer from the lpfc_hba struct. 4653 **/ 4654 int 4655 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4656 { 4657 return phba->lpfc_sli_brdrestart(phba); 4658 } 4659 4660 /** 4661 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4662 * @phba: Pointer to HBA context object. 4663 * 4664 * This function is called after a HBA restart to wait for successful 4665 * restart of the HBA. Successful restart of the HBA is indicated by 4666 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4667 * iteration, the function will restart the HBA again. The function returns 4668 * zero if HBA successfully restarted else returns negative error code. 4669 **/ 4670 int 4671 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4672 { 4673 uint32_t status, i = 0; 4674 4675 /* Read the HBA Host Status Register */ 4676 if (lpfc_readl(phba->HSregaddr, &status)) 4677 return -EIO; 4678 4679 /* Check status register to see what current state is */ 4680 i = 0; 4681 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4682 4683 /* Check every 10ms for 10 retries, then every 100ms for 90 4684 * retries, then every 1 sec for 50 retires for a total of 4685 * ~60 seconds before reset the board again and check every 4686 * 1 sec for 50 retries. The up to 60 seconds before the 4687 * board ready is required by the Falcon FIPS zeroization 4688 * complete, and any reset the board in between shall cause 4689 * restart of zeroization, further delay the board ready. 
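 * The loop below allows up to 200 polls in total and performs the extra
 * board restart only once, when the poll count reaches 150.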
4690 */ 4691 if (i++ >= 200) { 4692 /* Adapter failed to init, timeout, status reg 4693 <status> */ 4694 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4695 "0436 Adapter failed to init, " 4696 "timeout, status reg x%x, " 4697 "FW Data: A8 x%x AC x%x\n", status, 4698 readl(phba->MBslimaddr + 0xa8), 4699 readl(phba->MBslimaddr + 0xac)); 4700 phba->link_state = LPFC_HBA_ERROR; 4701 return -ETIMEDOUT; 4702 } 4703 4704 /* Check to see if any errors occurred during init */ 4705 if (status & HS_FFERM) { 4706 /* ERROR: During chipset initialization */ 4707 /* Adapter failed to init, chipset, status reg 4708 <status> */ 4709 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4710 "0437 Adapter failed to init, " 4711 "chipset, status reg x%x, " 4712 "FW Data: A8 x%x AC x%x\n", status, 4713 readl(phba->MBslimaddr + 0xa8), 4714 readl(phba->MBslimaddr + 0xac)); 4715 phba->link_state = LPFC_HBA_ERROR; 4716 return -EIO; 4717 } 4718 4719 if (i <= 10) 4720 msleep(10); 4721 else if (i <= 100) 4722 msleep(100); 4723 else 4724 msleep(1000); 4725 4726 if (i == 150) { 4727 /* Do post */ 4728 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4729 lpfc_sli_brdrestart(phba); 4730 } 4731 /* Read the HBA Host Status Register */ 4732 if (lpfc_readl(phba->HSregaddr, &status)) 4733 return -EIO; 4734 } 4735 4736 /* Check to see if any errors occurred during init */ 4737 if (status & HS_FFERM) { 4738 /* ERROR: During chipset initialization */ 4739 /* Adapter failed to init, chipset, status reg <status> */ 4740 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4741 "0438 Adapter failed to init, chipset, " 4742 "status reg x%x, " 4743 "FW Data: A8 x%x AC x%x\n", status, 4744 readl(phba->MBslimaddr + 0xa8), 4745 readl(phba->MBslimaddr + 0xac)); 4746 phba->link_state = LPFC_HBA_ERROR; 4747 return -EIO; 4748 } 4749 4750 /* Clear all interrupt enable conditions */ 4751 writel(0, phba->HCregaddr); 4752 readl(phba->HCregaddr); /* flush */ 4753 4754 /* setup host attn register */ 4755 writel(0xffffffff, phba->HAregaddr); 4756 readl(phba->HAregaddr); /* flush */ 4757 return 0; 4758 } 4759 4760 /** 4761 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4762 * 4763 * This function calculates and returns the number of HBQs required to be 4764 * configured. 4765 **/ 4766 int 4767 lpfc_sli_hbq_count(void) 4768 { 4769 return ARRAY_SIZE(lpfc_hbq_defs); 4770 } 4771 4772 /** 4773 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4774 * 4775 * This function adds the number of hbq entries in every HBQ to get 4776 * the total number of hbq entries required for the HBA and returns 4777 * the total count. 4778 **/ 4779 static int 4780 lpfc_sli_hbq_entry_count(void) 4781 { 4782 int hbq_count = lpfc_sli_hbq_count(); 4783 int count = 0; 4784 int i; 4785 4786 for (i = 0; i < hbq_count; ++i) 4787 count += lpfc_hbq_defs[i]->entry_count; 4788 return count; 4789 } 4790 4791 /** 4792 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4793 * 4794 * This function calculates amount of memory required for all hbq entries 4795 * to be configured and returns the total memory required. 4796 **/ 4797 int 4798 lpfc_sli_hbq_size(void) 4799 { 4800 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4801 } 4802 4803 /** 4804 * lpfc_sli_hbq_setup - configure and initialize HBQs 4805 * @phba: Pointer to HBA context object. 4806 * 4807 * This function is called during the SLI initialization to configure 4808 * all the HBQs and post buffers to the HBQ. The caller is not 4809 * required to hold any locks. 
This function will return zero if successful 4810 * else it will return negative error code. 4811 **/ 4812 static int 4813 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4814 { 4815 int hbq_count = lpfc_sli_hbq_count(); 4816 LPFC_MBOXQ_t *pmb; 4817 MAILBOX_t *pmbox; 4818 uint32_t hbqno; 4819 uint32_t hbq_entry_index; 4820 4821 /* Get a Mailbox buffer to setup mailbox 4822 * commands for HBA initialization 4823 */ 4824 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4825 4826 if (!pmb) 4827 return -ENOMEM; 4828 4829 pmbox = &pmb->u.mb; 4830 4831 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4832 phba->link_state = LPFC_INIT_MBX_CMDS; 4833 phba->hbq_in_use = 1; 4834 4835 hbq_entry_index = 0; 4836 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4837 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4838 phba->hbqs[hbqno].hbqPutIdx = 0; 4839 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4840 phba->hbqs[hbqno].entry_count = 4841 lpfc_hbq_defs[hbqno]->entry_count; 4842 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4843 hbq_entry_index, pmb); 4844 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4845 4846 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4847 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4848 mbxStatus <status>, ring <num> */ 4849 4850 lpfc_printf_log(phba, KERN_ERR, 4851 LOG_SLI | LOG_VPORT, 4852 "1805 Adapter failed to init. " 4853 "Data: x%x x%x x%x\n", 4854 pmbox->mbxCommand, 4855 pmbox->mbxStatus, hbqno); 4856 4857 phba->link_state = LPFC_HBA_ERROR; 4858 mempool_free(pmb, phba->mbox_mem_pool); 4859 return -ENXIO; 4860 } 4861 } 4862 phba->hbq_count = hbq_count; 4863 4864 mempool_free(pmb, phba->mbox_mem_pool); 4865 4866 /* Initially populate or replenish the HBQs */ 4867 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4868 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4869 return 0; 4870 } 4871 4872 /** 4873 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4874 * @phba: Pointer to HBA context object. 4875 * 4876 * This function is called during the SLI initialization to configure 4877 * all the HBQs and post buffers to the HBQ. The caller is not 4878 * required to hold any locks. This function will return zero if successful 4879 * else it will return negative error code. 4880 **/ 4881 static int 4882 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4883 { 4884 phba->hbq_in_use = 1; 4885 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4886 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4887 phba->hbq_count = 1; 4888 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4889 /* Initially populate or replenish the HBQs */ 4890 return 0; 4891 } 4892 4893 /** 4894 * lpfc_sli_config_port - Issue config port mailbox command 4895 * @phba: Pointer to HBA context object. 4896 * @sli_mode: sli mode - 2/3 4897 * 4898 * This function is called by the sli initialization code path 4899 * to issue config_port mailbox command. This function restarts the 4900 * HBA firmware and issues a config_port mailbox command to configure 4901 * the SLI interface in the sli mode specified by sli_mode 4902 * variable. The caller is not required to hold any locks. 4903 * The function returns 0 if successful, else returns negative error 4904 * code. 
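 *
 * On success the routine also records which optional features the port
 * granted (for example NPIV, HBQ, CRP and DSS support) in
 * phba->sli3_options.
 *
 * A minimal caller-side sketch; sli_mode 3 is chosen purely as an example:
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		return rc;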
4905 **/ 4906 int 4907 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4908 { 4909 LPFC_MBOXQ_t *pmb; 4910 uint32_t resetcount = 0, rc = 0, done = 0; 4911 4912 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4913 if (!pmb) { 4914 phba->link_state = LPFC_HBA_ERROR; 4915 return -ENOMEM; 4916 } 4917 4918 phba->sli_rev = sli_mode; 4919 while (resetcount < 2 && !done) { 4920 spin_lock_irq(&phba->hbalock); 4921 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4922 spin_unlock_irq(&phba->hbalock); 4923 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4924 lpfc_sli_brdrestart(phba); 4925 rc = lpfc_sli_chipset_init(phba); 4926 if (rc) 4927 break; 4928 4929 spin_lock_irq(&phba->hbalock); 4930 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4931 spin_unlock_irq(&phba->hbalock); 4932 resetcount++; 4933 4934 /* Call pre CONFIG_PORT mailbox command initialization. A 4935 * value of 0 means the call was successful. Any other 4936 * nonzero value is a failure, but if ERESTART is returned, 4937 * the driver may reset the HBA and try again. 4938 */ 4939 rc = lpfc_config_port_prep(phba); 4940 if (rc == -ERESTART) { 4941 phba->link_state = LPFC_LINK_UNKNOWN; 4942 continue; 4943 } else if (rc) 4944 break; 4945 4946 phba->link_state = LPFC_INIT_MBX_CMDS; 4947 lpfc_config_port(phba, pmb); 4948 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4949 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4950 LPFC_SLI3_HBQ_ENABLED | 4951 LPFC_SLI3_CRP_ENABLED | 4952 LPFC_SLI3_DSS_ENABLED); 4953 if (rc != MBX_SUCCESS) { 4954 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4955 "0442 Adapter failed to init, mbxCmd x%x " 4956 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4957 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4958 spin_lock_irq(&phba->hbalock); 4959 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4960 spin_unlock_irq(&phba->hbalock); 4961 rc = -ENXIO; 4962 } else { 4963 /* Allow asynchronous mailbox command to go through */ 4964 spin_lock_irq(&phba->hbalock); 4965 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4966 spin_unlock_irq(&phba->hbalock); 4967 done = 1; 4968 4969 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4970 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4971 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4972 "3110 Port did not grant ASABT\n"); 4973 } 4974 } 4975 if (!done) { 4976 rc = -EINVAL; 4977 goto do_prep_failed; 4978 } 4979 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4980 if (!pmb->u.mb.un.varCfgPort.cMA) { 4981 rc = -ENXIO; 4982 goto do_prep_failed; 4983 } 4984 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4985 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4986 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4987 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4988 phba->max_vpi : phba->max_vports; 4989 4990 } else 4991 phba->max_vpi = 0; 4992 phba->fips_level = 0; 4993 phba->fips_spec_rev = 0; 4994 if (pmb->u.mb.un.varCfgPort.gdss) { 4995 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4996 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4997 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4998 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4999 "2850 Security Crypto Active. 
FIPS x%d " 5000 "(Spec Rev: x%d)", 5001 phba->fips_level, phba->fips_spec_rev); 5002 } 5003 if (pmb->u.mb.un.varCfgPort.sec_err) { 5004 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5005 "2856 Config Port Security Crypto " 5006 "Error: x%x ", 5007 pmb->u.mb.un.varCfgPort.sec_err); 5008 } 5009 if (pmb->u.mb.un.varCfgPort.gerbm) 5010 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5011 if (pmb->u.mb.un.varCfgPort.gcrp) 5012 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5013 5014 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5015 phba->port_gp = phba->mbox->us.s3_pgp.port; 5016 5017 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5018 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5019 phba->cfg_enable_bg = 0; 5020 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5021 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5022 "0443 Adapter did not grant " 5023 "BlockGuard\n"); 5024 } 5025 } 5026 } else { 5027 phba->hbq_get = NULL; 5028 phba->port_gp = phba->mbox->us.s2.port; 5029 phba->max_vpi = 0; 5030 } 5031 do_prep_failed: 5032 mempool_free(pmb, phba->mbox_mem_pool); 5033 return rc; 5034 } 5035 5036 5037 /** 5038 * lpfc_sli_hba_setup - SLI initialization function 5039 * @phba: Pointer to HBA context object. 5040 * 5041 * This function is the main SLI initialization function. This function 5042 * is called by the HBA initialization code, HBA reset code and HBA 5043 * error attention handler code. Caller is not required to hold any 5044 * locks. This function issues config_port mailbox command to configure 5045 * the SLI, setup iocb rings and HBQ rings. In the end the function 5046 * calls the config_port_post function to issue init_link mailbox 5047 * command and to start the discovery. The function will return zero 5048 * if successful, else it will return negative error code. 5049 **/ 5050 int 5051 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5052 { 5053 uint32_t rc; 5054 int mode = 3, i; 5055 int longs; 5056 5057 switch (phba->cfg_sli_mode) { 5058 case 2: 5059 if (phba->cfg_enable_npiv) { 5060 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5061 "1824 NPIV enabled: Override sli_mode " 5062 "parameter (%d) to auto (0).\n", 5063 phba->cfg_sli_mode); 5064 break; 5065 } 5066 mode = 2; 5067 break; 5068 case 0: 5069 case 3: 5070 break; 5071 default: 5072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5073 "1819 Unrecognized sli_mode parameter: %d.\n", 5074 phba->cfg_sli_mode); 5075 5076 break; 5077 } 5078 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5079 5080 rc = lpfc_sli_config_port(phba, mode); 5081 5082 if (rc && phba->cfg_sli_mode == 3) 5083 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5084 "1820 Unable to select SLI-3. 
" 5085 "Not supported by adapter.\n"); 5086 if (rc && mode != 2) 5087 rc = lpfc_sli_config_port(phba, 2); 5088 else if (rc && mode == 2) 5089 rc = lpfc_sli_config_port(phba, 3); 5090 if (rc) 5091 goto lpfc_sli_hba_setup_error; 5092 5093 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5094 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5095 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5096 if (!rc) { 5097 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5098 "2709 This device supports " 5099 "Advanced Error Reporting (AER)\n"); 5100 spin_lock_irq(&phba->hbalock); 5101 phba->hba_flag |= HBA_AER_ENABLED; 5102 spin_unlock_irq(&phba->hbalock); 5103 } else { 5104 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5105 "2708 This device does not support " 5106 "Advanced Error Reporting (AER): %d\n", 5107 rc); 5108 phba->cfg_aer_support = 0; 5109 } 5110 } 5111 5112 if (phba->sli_rev == 3) { 5113 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5114 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5115 } else { 5116 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5117 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5118 phba->sli3_options = 0; 5119 } 5120 5121 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5122 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5123 phba->sli_rev, phba->max_vpi); 5124 rc = lpfc_sli_ring_map(phba); 5125 5126 if (rc) 5127 goto lpfc_sli_hba_setup_error; 5128 5129 /* Initialize VPIs. */ 5130 if (phba->sli_rev == LPFC_SLI_REV3) { 5131 /* 5132 * The VPI bitmask and physical ID array are allocated 5133 * and initialized once only - at driver load. A port 5134 * reset doesn't need to reinitialize this memory. 5135 */ 5136 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5137 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5138 phba->vpi_bmask = kcalloc(longs, 5139 sizeof(unsigned long), 5140 GFP_KERNEL); 5141 if (!phba->vpi_bmask) { 5142 rc = -ENOMEM; 5143 goto lpfc_sli_hba_setup_error; 5144 } 5145 5146 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5147 sizeof(uint16_t), 5148 GFP_KERNEL); 5149 if (!phba->vpi_ids) { 5150 kfree(phba->vpi_bmask); 5151 rc = -ENOMEM; 5152 goto lpfc_sli_hba_setup_error; 5153 } 5154 for (i = 0; i < phba->max_vpi; i++) 5155 phba->vpi_ids[i] = i; 5156 } 5157 } 5158 5159 /* Init HBQs */ 5160 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5161 rc = lpfc_sli_hbq_setup(phba); 5162 if (rc) 5163 goto lpfc_sli_hba_setup_error; 5164 } 5165 spin_lock_irq(&phba->hbalock); 5166 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5167 spin_unlock_irq(&phba->hbalock); 5168 5169 rc = lpfc_config_port_post(phba); 5170 if (rc) 5171 goto lpfc_sli_hba_setup_error; 5172 5173 return rc; 5174 5175 lpfc_sli_hba_setup_error: 5176 phba->link_state = LPFC_HBA_ERROR; 5177 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5178 "0445 Firmware initialization failed\n"); 5179 return rc; 5180 } 5181 5182 /** 5183 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5184 * @phba: Pointer to HBA context object. 5185 * @mboxq: mailbox pointer. 5186 * This function issue a dump mailbox command to read config region 5187 * 23 and parse the records in the region and populate driver 5188 * data structure. 
5189 **/ 5190 static int 5191 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5192 { 5193 LPFC_MBOXQ_t *mboxq; 5194 struct lpfc_dmabuf *mp; 5195 struct lpfc_mqe *mqe; 5196 uint32_t data_length; 5197 int rc; 5198 5199 /* Program the default value of vlan_id and fc_map */ 5200 phba->valid_vlan = 0; 5201 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5202 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5203 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5204 5205 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5206 if (!mboxq) 5207 return -ENOMEM; 5208 5209 mqe = &mboxq->u.mqe; 5210 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5211 rc = -ENOMEM; 5212 goto out_free_mboxq; 5213 } 5214 5215 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 5216 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5217 5218 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5219 "(%d):2571 Mailbox cmd x%x Status x%x " 5220 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5221 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5222 "CQ: x%x x%x x%x x%x\n", 5223 mboxq->vport ? mboxq->vport->vpi : 0, 5224 bf_get(lpfc_mqe_command, mqe), 5225 bf_get(lpfc_mqe_status, mqe), 5226 mqe->un.mb_words[0], mqe->un.mb_words[1], 5227 mqe->un.mb_words[2], mqe->un.mb_words[3], 5228 mqe->un.mb_words[4], mqe->un.mb_words[5], 5229 mqe->un.mb_words[6], mqe->un.mb_words[7], 5230 mqe->un.mb_words[8], mqe->un.mb_words[9], 5231 mqe->un.mb_words[10], mqe->un.mb_words[11], 5232 mqe->un.mb_words[12], mqe->un.mb_words[13], 5233 mqe->un.mb_words[14], mqe->un.mb_words[15], 5234 mqe->un.mb_words[16], mqe->un.mb_words[50], 5235 mboxq->mcqe.word0, 5236 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5237 mboxq->mcqe.trailer); 5238 5239 if (rc) { 5240 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5241 kfree(mp); 5242 rc = -EIO; 5243 goto out_free_mboxq; 5244 } 5245 data_length = mqe->un.mb_words[5]; 5246 if (data_length > DMP_RGN23_SIZE) { 5247 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5248 kfree(mp); 5249 rc = -EIO; 5250 goto out_free_mboxq; 5251 } 5252 5253 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5254 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5255 kfree(mp); 5256 rc = 0; 5257 5258 out_free_mboxq: 5259 mempool_free(mboxq, phba->mbox_mem_pool); 5260 return rc; 5261 } 5262 5263 /** 5264 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5265 * @phba: pointer to lpfc hba data structure. 5266 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5267 * @vpd: pointer to the memory to hold resulting port vpd data. 5268 * @vpd_size: On input, the number of bytes allocated to @vpd. 5269 * On output, the number of data bytes in @vpd. 5270 * 5271 * This routine executes a READ_REV SLI4 mailbox command. In 5272 * addition, this routine gets the port vpd data. 5273 * 5274 * Return codes 5275 * 0 - successful 5276 * -ENOMEM - could not allocated memory. 5277 **/ 5278 static int 5279 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5280 uint8_t *vpd, uint32_t *vpd_size) 5281 { 5282 int rc = 0; 5283 uint32_t dma_size; 5284 struct lpfc_dmabuf *dmabuf; 5285 struct lpfc_mqe *mqe; 5286 5287 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5288 if (!dmabuf) 5289 return -ENOMEM; 5290 5291 /* 5292 * Get a DMA buffer for the vpd data resulting from the READ_REV 5293 * mailbox command. 
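 * The buffer is sized to the caller-supplied *vpd_size and obtained with
 * dma_alloc_coherent() so the port can DMA the VPD contents into it
 * directly.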
5294 */ 5295 dma_size = *vpd_size; 5296 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size, 5297 &dmabuf->phys, GFP_KERNEL); 5298 if (!dmabuf->virt) { 5299 kfree(dmabuf); 5300 return -ENOMEM; 5301 } 5302 5303 /* 5304 * The SLI4 implementation of READ_REV conflicts at word1, 5305 * bits 31:16 and SLI4 adds vpd functionality not present 5306 * in SLI3. This code corrects the conflicts. 5307 */ 5308 lpfc_read_rev(phba, mboxq); 5309 mqe = &mboxq->u.mqe; 5310 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5311 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5312 mqe->un.read_rev.word1 &= 0x0000FFFF; 5313 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5314 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5315 5316 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5317 if (rc) { 5318 dma_free_coherent(&phba->pcidev->dev, dma_size, 5319 dmabuf->virt, dmabuf->phys); 5320 kfree(dmabuf); 5321 return -EIO; 5322 } 5323 5324 /* 5325 * The available vpd length cannot be bigger than the 5326 * DMA buffer passed to the port. Catch the less than 5327 * case and update the caller's size. 5328 */ 5329 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5330 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5331 5332 memcpy(vpd, dmabuf->virt, *vpd_size); 5333 5334 dma_free_coherent(&phba->pcidev->dev, dma_size, 5335 dmabuf->virt, dmabuf->phys); 5336 kfree(dmabuf); 5337 return 0; 5338 } 5339 5340 /** 5341 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes 5342 * @phba: pointer to lpfc hba data structure. 5343 * 5344 * This routine retrieves SLI4 device physical port name this PCI function 5345 * is attached to. 5346 * 5347 * Return codes 5348 * 0 - successful 5349 * otherwise - failed to retrieve controller attributes 5350 **/ 5351 static int 5352 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba) 5353 { 5354 LPFC_MBOXQ_t *mboxq; 5355 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5356 struct lpfc_controller_attribute *cntl_attr; 5357 void *virtaddr = NULL; 5358 uint32_t alloclen, reqlen; 5359 uint32_t shdr_status, shdr_add_status; 5360 union lpfc_sli4_cfg_shdr *shdr; 5361 int rc; 5362 5363 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5364 if (!mboxq) 5365 return -ENOMEM; 5366 5367 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */ 5368 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5369 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5370 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5371 LPFC_SLI4_MBX_NEMBED); 5372 5373 if (alloclen < reqlen) { 5374 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5375 "3084 Allocated DMA memory size (%d) is " 5376 "less than the requested DMA memory size " 5377 "(%d)\n", alloclen, reqlen); 5378 rc = -ENOMEM; 5379 goto out_free_mboxq; 5380 } 5381 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5382 virtaddr = mboxq->sge_array->addr[0]; 5383 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5384 shdr = &mbx_cntl_attr->cfg_shdr; 5385 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5386 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5387 if (shdr_status || shdr_add_status || rc) { 5388 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5389 "3085 Mailbox x%x (x%x/x%x) failed, " 5390 "rc:x%x, status:x%x, add_status:x%x\n", 5391 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5392 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5393 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5394 rc, shdr_status, shdr_add_status); 5395 rc = -ENXIO; 
5396 goto out_free_mboxq; 5397 } 5398 5399 cntl_attr = &mbx_cntl_attr->cntl_attr; 5400 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5401 phba->sli4_hba.lnk_info.lnk_tp = 5402 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5403 phba->sli4_hba.lnk_info.lnk_no = 5404 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5405 5406 memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion)); 5407 strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str, 5408 sizeof(phba->BIOSVersion)); 5409 5410 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5411 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n", 5412 phba->sli4_hba.lnk_info.lnk_tp, 5413 phba->sli4_hba.lnk_info.lnk_no, 5414 phba->BIOSVersion); 5415 out_free_mboxq: 5416 if (rc != MBX_TIMEOUT) { 5417 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5418 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5419 else 5420 mempool_free(mboxq, phba->mbox_mem_pool); 5421 } 5422 return rc; 5423 } 5424 5425 /** 5426 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5427 * @phba: pointer to lpfc hba data structure. 5428 * 5429 * This routine retrieves SLI4 device physical port name this PCI function 5430 * is attached to. 5431 * 5432 * Return codes 5433 * 0 - successful 5434 * otherwise - failed to retrieve physical port name 5435 **/ 5436 static int 5437 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5438 { 5439 LPFC_MBOXQ_t *mboxq; 5440 struct lpfc_mbx_get_port_name *get_port_name; 5441 uint32_t shdr_status, shdr_add_status; 5442 union lpfc_sli4_cfg_shdr *shdr; 5443 char cport_name = 0; 5444 int rc; 5445 5446 /* We assume nothing at this point */ 5447 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5448 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5449 5450 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5451 if (!mboxq) 5452 return -ENOMEM; 5453 /* obtain link type and link number via READ_CONFIG */ 5454 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5455 lpfc_sli4_read_config(phba); 5456 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5457 goto retrieve_ppname; 5458 5459 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5460 rc = lpfc_sli4_get_ctl_attr(phba); 5461 if (rc) 5462 goto out_free_mboxq; 5463 5464 retrieve_ppname: 5465 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5466 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5467 sizeof(struct lpfc_mbx_get_port_name) - 5468 sizeof(struct lpfc_sli4_cfg_mhdr), 5469 LPFC_SLI4_MBX_EMBED); 5470 get_port_name = &mboxq->u.mqe.un.get_port_name; 5471 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5472 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5473 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5474 phba->sli4_hba.lnk_info.lnk_tp); 5475 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5476 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5477 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5478 if (shdr_status || shdr_add_status || rc) { 5479 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5480 "3087 Mailbox x%x (x%x/x%x) failed: " 5481 "rc:x%x, status:x%x, add_status:x%x\n", 5482 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5483 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5484 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5485 rc, shdr_status, shdr_add_status); 5486 rc = -ENXIO; 5487 goto out_free_mboxq; 5488 } 5489 switch (phba->sli4_hba.lnk_info.lnk_no) { 5490 case LPFC_LINK_NUMBER_0: 5491 cport_name = 
bf_get(lpfc_mbx_get_port_name_name0, 5492 &get_port_name->u.response); 5493 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5494 break; 5495 case LPFC_LINK_NUMBER_1: 5496 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5497 &get_port_name->u.response); 5498 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5499 break; 5500 case LPFC_LINK_NUMBER_2: 5501 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5502 &get_port_name->u.response); 5503 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5504 break; 5505 case LPFC_LINK_NUMBER_3: 5506 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5507 &get_port_name->u.response); 5508 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5509 break; 5510 default: 5511 break; 5512 } 5513 5514 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5515 phba->Port[0] = cport_name; 5516 phba->Port[1] = '\0'; 5517 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5518 "3091 SLI get port name: %s\n", phba->Port); 5519 } 5520 5521 out_free_mboxq: 5522 if (rc != MBX_TIMEOUT) { 5523 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5524 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5525 else 5526 mempool_free(mboxq, phba->mbox_mem_pool); 5527 } 5528 return rc; 5529 } 5530 5531 /** 5532 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 5533 * @phba: pointer to lpfc hba data structure. 5534 * 5535 * This routine is called to explicitly arm the SLI4 device's completion and 5536 * event queues 5537 **/ 5538 static void 5539 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5540 { 5541 int qidx; 5542 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5543 struct lpfc_sli4_hdw_queue *qp; 5544 struct lpfc_queue *eq; 5545 5546 sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM); 5547 sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM); 5548 if (sli4_hba->nvmels_cq) 5549 sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0, 5550 LPFC_QUEUE_REARM); 5551 5552 if (sli4_hba->hdwq) { 5553 /* Loop thru all Hardware Queues */ 5554 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { 5555 qp = &sli4_hba->hdwq[qidx]; 5556 /* ARM the corresponding CQ */ 5557 sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0, 5558 LPFC_QUEUE_REARM); 5559 } 5560 5561 /* Loop thru all IRQ vectors */ 5562 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { 5563 eq = sli4_hba->hba_eq_hdl[qidx].eq; 5564 /* ARM the corresponding EQ */ 5565 sli4_hba->sli4_write_eq_db(phba, eq, 5566 0, LPFC_QUEUE_REARM); 5567 } 5568 } 5569 5570 if (phba->nvmet_support) { 5571 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5572 sli4_hba->sli4_write_cq_db(phba, 5573 sli4_hba->nvmet_cqset[qidx], 0, 5574 LPFC_QUEUE_REARM); 5575 } 5576 } 5577 } 5578 5579 /** 5580 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5581 * @phba: Pointer to HBA context object. 5582 * @type: The resource extent type. 5583 * @extnt_count: buffer to hold port available extent count. 5584 * @extnt_size: buffer to hold element count per extent. 5585 * 5586 * This function calls the port and retrievs the number of available 5587 * extents and their size for a particular extent type. 5588 * 5589 * Returns: 0 if successful. Nonzero otherwise. 
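 *
 * A minimal caller sketch (illustrative only; cnt, size and total are
 * placeholder locals, not driver fields):
 *
 *	uint16_t cnt, size;
 *
 *	if (lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					   &cnt, &size))
 *		return -EIO;
 *	total = cnt * size;	(total XRIs the port could provide)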
5590 **/ 5591 int 5592 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5593 uint16_t *extnt_count, uint16_t *extnt_size) 5594 { 5595 int rc = 0; 5596 uint32_t length; 5597 uint32_t mbox_tmo; 5598 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5599 LPFC_MBOXQ_t *mbox; 5600 5601 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5602 if (!mbox) 5603 return -ENOMEM; 5604 5605 /* Find out how many extents are available for this resource type */ 5606 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5607 sizeof(struct lpfc_sli4_cfg_mhdr)); 5608 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5609 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5610 length, LPFC_SLI4_MBX_EMBED); 5611 5612 /* Send an extents count of 0 - the GET doesn't use it. */ 5613 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5614 LPFC_SLI4_MBX_EMBED); 5615 if (unlikely(rc)) { 5616 rc = -EIO; 5617 goto err_exit; 5618 } 5619 5620 if (!phba->sli4_hba.intr_enable) 5621 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5622 else { 5623 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5624 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5625 } 5626 if (unlikely(rc)) { 5627 rc = -EIO; 5628 goto err_exit; 5629 } 5630 5631 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5632 if (bf_get(lpfc_mbox_hdr_status, 5633 &rsrc_info->header.cfg_shdr.response)) { 5634 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5635 "2930 Failed to get resource extents " 5636 "Status 0x%x Add'l Status 0x%x\n", 5637 bf_get(lpfc_mbox_hdr_status, 5638 &rsrc_info->header.cfg_shdr.response), 5639 bf_get(lpfc_mbox_hdr_add_status, 5640 &rsrc_info->header.cfg_shdr.response)); 5641 rc = -EIO; 5642 goto err_exit; 5643 } 5644 5645 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5646 &rsrc_info->u.rsp); 5647 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5648 &rsrc_info->u.rsp); 5649 5650 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5651 "3162 Retrieved extents type-%d from port: count:%d, " 5652 "size:%d\n", type, *extnt_count, *extnt_size); 5653 5654 err_exit: 5655 mempool_free(mbox, phba->mbox_mem_pool); 5656 return rc; 5657 } 5658 5659 /** 5660 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5661 * @phba: Pointer to HBA context object. 5662 * @type: The extent type to check. 5663 * 5664 * This function reads the current available extents from the port and checks 5665 * if the extent count or extent size has changed since the last access. 5666 * Callers use this routine post port reset to understand if there is a 5667 * extent reprovisioning requirement. 5668 * 5669 * Returns: 5670 * -Error: error indicates problem. 5671 * 1: Extent count or size has changed. 5672 * 0: No changes. 
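 *
 * Worked example (numbers purely illustrative): if the driver had
 * provisioned 8 XRI extents of 64 elements each and, after a port
 * reset, the port reports 16 extents of 32 elements, the block count
 * and size no longer match what is on lpfc_xri_blk_list, so this
 * routine returns 1 and the caller deallocates and re-provisions that
 * resource type.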
5673 **/ 5674 static int 5675 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5676 { 5677 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5678 uint16_t size_diff, rsrc_ext_size; 5679 int rc = 0; 5680 struct lpfc_rsrc_blks *rsrc_entry; 5681 struct list_head *rsrc_blk_list = NULL; 5682 5683 size_diff = 0; 5684 curr_ext_cnt = 0; 5685 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5686 &rsrc_ext_cnt, 5687 &rsrc_ext_size); 5688 if (unlikely(rc)) 5689 return -EIO; 5690 5691 switch (type) { 5692 case LPFC_RSC_TYPE_FCOE_RPI: 5693 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5694 break; 5695 case LPFC_RSC_TYPE_FCOE_VPI: 5696 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5697 break; 5698 case LPFC_RSC_TYPE_FCOE_XRI: 5699 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5700 break; 5701 case LPFC_RSC_TYPE_FCOE_VFI: 5702 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5703 break; 5704 default: 5705 break; 5706 } 5707 5708 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5709 curr_ext_cnt++; 5710 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5711 size_diff++; 5712 } 5713 5714 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5715 rc = 1; 5716 5717 return rc; 5718 } 5719 5720 /** 5721 * lpfc_sli4_cfg_post_extnts - 5722 * @phba: Pointer to HBA context object. 5723 * @extnt_cnt - number of available extents. 5724 * @type - the extent type (rpi, xri, vfi, vpi). 5725 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5726 * @mbox - pointer to the caller's allocated mailbox structure. 5727 * 5728 * This function executes the extents allocation request. It also 5729 * takes care of the amount of memory needed to allocate or get the 5730 * allocated extents. It is the caller's responsibility to evaluate 5731 * the response. 5732 * 5733 * Returns: 5734 * -Error: Error value describes the condition found. 5735 * 0: if successful 5736 **/ 5737 static int 5738 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5739 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5740 { 5741 int rc = 0; 5742 uint32_t req_len; 5743 uint32_t emb_len; 5744 uint32_t alloc_len, mbox_tmo; 5745 5746 /* Calculate the total requested length of the dma memory */ 5747 req_len = extnt_cnt * sizeof(uint16_t); 5748 5749 /* 5750 * Calculate the size of an embedded mailbox. The uint32_t 5751 * accounts for extents-specific word. 5752 */ 5753 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5754 sizeof(uint32_t); 5755 5756 /* 5757 * Presume the allocation and response will fit into an embedded 5758 * mailbox. If not true, reconfigure to a non-embedded mailbox. 
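 *
 * In other words: an embedded command can carry at most emb_len bytes
 * of extent payload, computed above as sizeof(MAILBOX_t) minus the
 * header and the extents-specific word.  Each requested extent id
 * needs sizeof(uint16_t) bytes in the response, so once
 * extnt_cnt * sizeof(uint16_t) exceeds emb_len the request is rebuilt
 * as a non-embedded (external SGE) mailbox sized for the ids plus the
 * cfg_shdr and that extra word, which is exactly what the code below
 * does.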
5759 */ 5760 *emb = LPFC_SLI4_MBX_EMBED; 5761 if (req_len > emb_len) { 5762 req_len = extnt_cnt * sizeof(uint16_t) + 5763 sizeof(union lpfc_sli4_cfg_shdr) + 5764 sizeof(uint32_t); 5765 *emb = LPFC_SLI4_MBX_NEMBED; 5766 } 5767 5768 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5769 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5770 req_len, *emb); 5771 if (alloc_len < req_len) { 5772 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5773 "2982 Allocated DMA memory size (x%x) is " 5774 "less than the requested DMA memory " 5775 "size (x%x)\n", alloc_len, req_len); 5776 return -ENOMEM; 5777 } 5778 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5779 if (unlikely(rc)) 5780 return -EIO; 5781 5782 if (!phba->sli4_hba.intr_enable) 5783 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5784 else { 5785 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5786 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5787 } 5788 5789 if (unlikely(rc)) 5790 rc = -EIO; 5791 return rc; 5792 } 5793 5794 /** 5795 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5796 * @phba: Pointer to HBA context object. 5797 * @type: The resource extent type to allocate. 5798 * 5799 * This function allocates the number of elements for the specified 5800 * resource type. 5801 **/ 5802 static int 5803 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5804 { 5805 bool emb = false; 5806 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5807 uint16_t rsrc_id, rsrc_start, j, k; 5808 uint16_t *ids; 5809 int i, rc; 5810 unsigned long longs; 5811 unsigned long *bmask; 5812 struct lpfc_rsrc_blks *rsrc_blks; 5813 LPFC_MBOXQ_t *mbox; 5814 uint32_t length; 5815 struct lpfc_id_range *id_array = NULL; 5816 void *virtaddr = NULL; 5817 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5818 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5819 struct list_head *ext_blk_list; 5820 5821 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5822 &rsrc_cnt, 5823 &rsrc_size); 5824 if (unlikely(rc)) 5825 return -EIO; 5826 5827 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5828 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5829 "3009 No available Resource Extents " 5830 "for resource type 0x%x: Count: 0x%x, " 5831 "Size 0x%x\n", type, rsrc_cnt, 5832 rsrc_size); 5833 return -ENOMEM; 5834 } 5835 5836 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5837 "2903 Post resource extents type-0x%x: " 5838 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5839 5840 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5841 if (!mbox) 5842 return -ENOMEM; 5843 5844 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5845 if (unlikely(rc)) { 5846 rc = -EIO; 5847 goto err_exit; 5848 } 5849 5850 /* 5851 * Figure out where the response is located. Then get local pointers 5852 * to the response data. The port does not guarantee to respond to 5853 * all extents counts request so update the local variable with the 5854 * allocated count from the port. 
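 *
 * Worked example (counts purely illustrative): asking for 16 extents
 * may come back with rsrc_cnt = 12 of rsrc_size = 64 ids each, so the
 * code below sizes everything from the responded values:
 *
 *	rsrc_id_cnt = rsrc_cnt * rsrc_size;		(768 ids)
 *	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) /
 *		BITS_PER_LONG;				(12 longs on 64-bit)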
5855 */ 5856 if (emb == LPFC_SLI4_MBX_EMBED) { 5857 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5858 id_array = &rsrc_ext->u.rsp.id[0]; 5859 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5860 } else { 5861 virtaddr = mbox->sge_array->addr[0]; 5862 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5863 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5864 id_array = &n_rsrc->id; 5865 } 5866 5867 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5868 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5869 5870 /* 5871 * Based on the resource size and count, correct the base and max 5872 * resource values. 5873 */ 5874 length = sizeof(struct lpfc_rsrc_blks); 5875 switch (type) { 5876 case LPFC_RSC_TYPE_FCOE_RPI: 5877 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5878 sizeof(unsigned long), 5879 GFP_KERNEL); 5880 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5881 rc = -ENOMEM; 5882 goto err_exit; 5883 } 5884 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5885 sizeof(uint16_t), 5886 GFP_KERNEL); 5887 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5888 kfree(phba->sli4_hba.rpi_bmask); 5889 rc = -ENOMEM; 5890 goto err_exit; 5891 } 5892 5893 /* 5894 * The next_rpi was initialized with the maximum available 5895 * count but the port may allocate a smaller number. Catch 5896 * that case and update the next_rpi. 5897 */ 5898 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5899 5900 /* Initialize local ptrs for common extent processing later. */ 5901 bmask = phba->sli4_hba.rpi_bmask; 5902 ids = phba->sli4_hba.rpi_ids; 5903 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5904 break; 5905 case LPFC_RSC_TYPE_FCOE_VPI: 5906 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5907 GFP_KERNEL); 5908 if (unlikely(!phba->vpi_bmask)) { 5909 rc = -ENOMEM; 5910 goto err_exit; 5911 } 5912 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5913 GFP_KERNEL); 5914 if (unlikely(!phba->vpi_ids)) { 5915 kfree(phba->vpi_bmask); 5916 rc = -ENOMEM; 5917 goto err_exit; 5918 } 5919 5920 /* Initialize local ptrs for common extent processing later. */ 5921 bmask = phba->vpi_bmask; 5922 ids = phba->vpi_ids; 5923 ext_blk_list = &phba->lpfc_vpi_blk_list; 5924 break; 5925 case LPFC_RSC_TYPE_FCOE_XRI: 5926 phba->sli4_hba.xri_bmask = kcalloc(longs, 5927 sizeof(unsigned long), 5928 GFP_KERNEL); 5929 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5930 rc = -ENOMEM; 5931 goto err_exit; 5932 } 5933 phba->sli4_hba.max_cfg_param.xri_used = 0; 5934 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5935 sizeof(uint16_t), 5936 GFP_KERNEL); 5937 if (unlikely(!phba->sli4_hba.xri_ids)) { 5938 kfree(phba->sli4_hba.xri_bmask); 5939 rc = -ENOMEM; 5940 goto err_exit; 5941 } 5942 5943 /* Initialize local ptrs for common extent processing later. */ 5944 bmask = phba->sli4_hba.xri_bmask; 5945 ids = phba->sli4_hba.xri_ids; 5946 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5947 break; 5948 case LPFC_RSC_TYPE_FCOE_VFI: 5949 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5950 sizeof(unsigned long), 5951 GFP_KERNEL); 5952 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5953 rc = -ENOMEM; 5954 goto err_exit; 5955 } 5956 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5957 sizeof(uint16_t), 5958 GFP_KERNEL); 5959 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5960 kfree(phba->sli4_hba.vfi_bmask); 5961 rc = -ENOMEM; 5962 goto err_exit; 5963 } 5964 5965 /* Initialize local ptrs for common extent processing later. 
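 *
 * Note for the loop further below: each response word packs two 16-bit
 * extent base ids, which is why the word index k only advances on odd
 * i.  Decoding sketch (lo and hi are illustrative names; the bf_get
 * fields are the ones the loop actually uses):
 *
 *	lo = bf_get(lpfc_mbx_rsrc_id_word4_0, &id_array[k]);	(even i)
 *	hi = bf_get(lpfc_mbx_rsrc_id_word4_1, &id_array[k]);	(odd i, then k++)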
*/ 5966 bmask = phba->sli4_hba.vfi_bmask; 5967 ids = phba->sli4_hba.vfi_ids; 5968 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5969 break; 5970 default: 5971 /* Unsupported Opcode. Fail call. */ 5972 id_array = NULL; 5973 bmask = NULL; 5974 ids = NULL; 5975 ext_blk_list = NULL; 5976 goto err_exit; 5977 } 5978 5979 /* 5980 * Complete initializing the extent configuration with the 5981 * allocated ids assigned to this function. The bitmask serves 5982 * as an index into the array and manages the available ids. The 5983 * array just stores the ids communicated to the port via the wqes. 5984 */ 5985 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5986 if ((i % 2) == 0) 5987 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5988 &id_array[k]); 5989 else 5990 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5991 &id_array[k]); 5992 5993 rsrc_blks = kzalloc(length, GFP_KERNEL); 5994 if (unlikely(!rsrc_blks)) { 5995 rc = -ENOMEM; 5996 kfree(bmask); 5997 kfree(ids); 5998 goto err_exit; 5999 } 6000 rsrc_blks->rsrc_start = rsrc_id; 6001 rsrc_blks->rsrc_size = rsrc_size; 6002 list_add_tail(&rsrc_blks->list, ext_blk_list); 6003 rsrc_start = rsrc_id; 6004 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 6005 phba->sli4_hba.io_xri_start = rsrc_start + 6006 lpfc_sli4_get_iocb_cnt(phba); 6007 } 6008 6009 while (rsrc_id < (rsrc_start + rsrc_size)) { 6010 ids[j] = rsrc_id; 6011 rsrc_id++; 6012 j++; 6013 } 6014 /* Entire word processed. Get next word.*/ 6015 if ((i % 2) == 1) 6016 k++; 6017 } 6018 err_exit: 6019 lpfc_sli4_mbox_cmd_free(phba, mbox); 6020 return rc; 6021 } 6022 6023 6024 6025 /** 6026 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6027 * @phba: Pointer to HBA context object. 6028 * @type: the extent's type. 6029 * 6030 * This function deallocates all extents of a particular resource type. 6031 * SLI4 does not allow for deallocating a particular extent range. It 6032 * is the caller's responsibility to release all kernel memory resources. 6033 **/ 6034 static int 6035 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6036 { 6037 int rc; 6038 uint32_t length, mbox_tmo = 0; 6039 LPFC_MBOXQ_t *mbox; 6040 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6041 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6042 6043 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6044 if (!mbox) 6045 return -ENOMEM; 6046 6047 /* 6048 * This function sends an embedded mailbox because it only sends the 6049 * the resource type. All extents of this type are released by the 6050 * port. 6051 */ 6052 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6053 sizeof(struct lpfc_sli4_cfg_mhdr)); 6054 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6055 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6056 length, LPFC_SLI4_MBX_EMBED); 6057 6058 /* Send an extents count of 0 - the dealloc doesn't use it. 
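 *
 * As elsewhere in this file, the command is then issued by polling when
 * interrupts are not yet enabled, and with a wait plus command-specific
 * timeout otherwise.  Sketch of that idiom (mirrors the code below, no
 * new names introduced):
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	else
 *		rc = lpfc_sli_issue_mbox_wait(phba, mbox,
 *					      lpfc_mbox_tmo_val(phba, mbox));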
*/ 6059 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6060 LPFC_SLI4_MBX_EMBED); 6061 if (unlikely(rc)) { 6062 rc = -EIO; 6063 goto out_free_mbox; 6064 } 6065 if (!phba->sli4_hba.intr_enable) 6066 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6067 else { 6068 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6069 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6070 } 6071 if (unlikely(rc)) { 6072 rc = -EIO; 6073 goto out_free_mbox; 6074 } 6075 6076 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6077 if (bf_get(lpfc_mbox_hdr_status, 6078 &dealloc_rsrc->header.cfg_shdr.response)) { 6079 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6080 "2919 Failed to release resource extents " 6081 "for type %d - Status 0x%x Add'l Status 0x%x. " 6082 "Resource memory not released.\n", 6083 type, 6084 bf_get(lpfc_mbox_hdr_status, 6085 &dealloc_rsrc->header.cfg_shdr.response), 6086 bf_get(lpfc_mbox_hdr_add_status, 6087 &dealloc_rsrc->header.cfg_shdr.response)); 6088 rc = -EIO; 6089 goto out_free_mbox; 6090 } 6091 6092 /* Release kernel memory resources for the specific type. */ 6093 switch (type) { 6094 case LPFC_RSC_TYPE_FCOE_VPI: 6095 kfree(phba->vpi_bmask); 6096 kfree(phba->vpi_ids); 6097 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6098 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6099 &phba->lpfc_vpi_blk_list, list) { 6100 list_del_init(&rsrc_blk->list); 6101 kfree(rsrc_blk); 6102 } 6103 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6104 break; 6105 case LPFC_RSC_TYPE_FCOE_XRI: 6106 kfree(phba->sli4_hba.xri_bmask); 6107 kfree(phba->sli4_hba.xri_ids); 6108 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6109 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6110 list_del_init(&rsrc_blk->list); 6111 kfree(rsrc_blk); 6112 } 6113 break; 6114 case LPFC_RSC_TYPE_FCOE_VFI: 6115 kfree(phba->sli4_hba.vfi_bmask); 6116 kfree(phba->sli4_hba.vfi_ids); 6117 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6118 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6119 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6120 list_del_init(&rsrc_blk->list); 6121 kfree(rsrc_blk); 6122 } 6123 break; 6124 case LPFC_RSC_TYPE_FCOE_RPI: 6125 /* RPI bitmask and physical id array are cleaned up earlier. 
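 *
 * The per-type cleanup in the cases around this one uses the safe-walk
 * idiom, since entries are freed while the list is being traversed
 * (sketch; blk_list stands for the per-type block list head):
 *
 *	list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, blk_list, list) {
 *		list_del_init(&rsrc_blk->list);
 *		kfree(rsrc_blk);
 *	}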
*/ 6126 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6127 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6128 list_del_init(&rsrc_blk->list); 6129 kfree(rsrc_blk); 6130 } 6131 break; 6132 default: 6133 break; 6134 } 6135 6136 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6137 6138 out_free_mbox: 6139 mempool_free(mbox, phba->mbox_mem_pool); 6140 return rc; 6141 } 6142 6143 static void 6144 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6145 uint32_t feature) 6146 { 6147 uint32_t len; 6148 6149 len = sizeof(struct lpfc_mbx_set_feature) - 6150 sizeof(struct lpfc_sli4_cfg_mhdr); 6151 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6152 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6153 LPFC_SLI4_MBX_EMBED); 6154 6155 switch (feature) { 6156 case LPFC_SET_UE_RECOVERY: 6157 bf_set(lpfc_mbx_set_feature_UER, 6158 &mbox->u.mqe.un.set_feature, 1); 6159 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6160 mbox->u.mqe.un.set_feature.param_len = 8; 6161 break; 6162 case LPFC_SET_MDS_DIAGS: 6163 bf_set(lpfc_mbx_set_feature_mds, 6164 &mbox->u.mqe.un.set_feature, 1); 6165 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6166 &mbox->u.mqe.un.set_feature, 1); 6167 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6168 mbox->u.mqe.un.set_feature.param_len = 8; 6169 break; 6170 } 6171 6172 return; 6173 } 6174 6175 /** 6176 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter 6177 * @phba: Pointer to HBA context object. 6178 * 6179 * Disable FW logging into host memory on the adapter. To 6180 * be done before reading logs from the host memory. 6181 **/ 6182 void 6183 lpfc_ras_stop_fwlog(struct lpfc_hba *phba) 6184 { 6185 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6186 6187 ras_fwlog->ras_active = false; 6188 6189 /* Disable FW logging to host memory */ 6190 writel(LPFC_CTL_PDEV_CTL_DDL_RAS, 6191 phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); 6192 } 6193 6194 /** 6195 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6196 * @phba: Pointer to HBA context object. 6197 * 6198 * This function is called to free memory allocated for RAS FW logging 6199 * support in the driver. 6200 **/ 6201 void 6202 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) 6203 { 6204 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6205 struct lpfc_dmabuf *dmabuf, *next; 6206 6207 if (!list_empty(&ras_fwlog->fwlog_buff_list)) { 6208 list_for_each_entry_safe(dmabuf, next, 6209 &ras_fwlog->fwlog_buff_list, 6210 list) { 6211 list_del(&dmabuf->list); 6212 dma_free_coherent(&phba->pcidev->dev, 6213 LPFC_RAS_MAX_ENTRY_SIZE, 6214 dmabuf->virt, dmabuf->phys); 6215 kfree(dmabuf); 6216 } 6217 } 6218 6219 if (ras_fwlog->lwpd.virt) { 6220 dma_free_coherent(&phba->pcidev->dev, 6221 sizeof(uint32_t) * 2, 6222 ras_fwlog->lwpd.virt, 6223 ras_fwlog->lwpd.phys); 6224 ras_fwlog->lwpd.virt = NULL; 6225 } 6226 6227 ras_fwlog->ras_active = false; 6228 } 6229 6230 /** 6231 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support 6232 * @phba: Pointer to HBA context object. 6233 * @fwlog_buff_count: Count of buffers to be created. 6234 * 6235 * This routine DMA memory for Log Write Position Data[LPWD] and buffer 6236 * to update FW log is posted to the adapter. 6237 * Buffer count is calculated based on module param ras_fwlog_buffsize 6238 * Size of each buffer posted to FW is 64K. 
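 *
 * Worked sizing note (symbolic, no new values assumed): the caller
 * derives the count passed in here as
 *
 *	fwlog_buff_count = (phba->cfg_ras_fwlog_buffsize *
 *			    LPFC_RAS_MIN_BUFF_POST_SIZE) /
 *			   LPFC_RAS_MAX_ENTRY_SIZE;
 *
 * and this routine then posts that many LPFC_RAS_MAX_ENTRY_SIZE (64K)
 * coherent buffers plus one small LWPD buffer of two 32-bit words.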
6239 **/ 6240 6241 static int 6242 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba, 6243 uint32_t fwlog_buff_count) 6244 { 6245 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6246 struct lpfc_dmabuf *dmabuf; 6247 int rc = 0, i = 0; 6248 6249 /* Initialize List */ 6250 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list); 6251 6252 /* Allocate memory for the LWPD */ 6253 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev, 6254 sizeof(uint32_t) * 2, 6255 &ras_fwlog->lwpd.phys, 6256 GFP_KERNEL); 6257 if (!ras_fwlog->lwpd.virt) { 6258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6259 "6185 LWPD Memory Alloc Failed\n"); 6260 6261 return -ENOMEM; 6262 } 6263 6264 ras_fwlog->fw_buffcount = fwlog_buff_count; 6265 for (i = 0; i < ras_fwlog->fw_buffcount; i++) { 6266 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), 6267 GFP_KERNEL); 6268 if (!dmabuf) { 6269 rc = -ENOMEM; 6270 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6271 "6186 Memory Alloc failed FW logging"); 6272 goto free_mem; 6273 } 6274 6275 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6276 LPFC_RAS_MAX_ENTRY_SIZE, 6277 &dmabuf->phys, GFP_KERNEL); 6278 if (!dmabuf->virt) { 6279 kfree(dmabuf); 6280 rc = -ENOMEM; 6281 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6282 "6187 DMA Alloc Failed FW logging"); 6283 goto free_mem; 6284 } 6285 dmabuf->buffer_tag = i; 6286 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list); 6287 } 6288 6289 free_mem: 6290 if (rc) 6291 lpfc_sli4_ras_dma_free(phba); 6292 6293 return rc; 6294 } 6295 6296 /** 6297 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command 6298 * @phba: pointer to lpfc hba data structure. 6299 * @pmboxq: pointer to the driver internal queue element for mailbox command. 6300 * 6301 * Completion handler for driver's RAS MBX command to the device. 6302 **/ 6303 static void 6304 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6305 { 6306 MAILBOX_t *mb; 6307 union lpfc_sli4_cfg_shdr *shdr; 6308 uint32_t shdr_status, shdr_add_status; 6309 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6310 6311 mb = &pmb->u.mb; 6312 6313 shdr = (union lpfc_sli4_cfg_shdr *) 6314 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6315 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6316 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6317 6318 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6319 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 6320 "6188 FW LOG mailbox " 6321 "completed with status x%x add_status x%x," 6322 " mbx status x%x\n", 6323 shdr_status, shdr_add_status, mb->mbxStatus); 6324 6325 ras_fwlog->ras_hwsupport = false; 6326 goto disable_ras; 6327 } 6328 6329 ras_fwlog->ras_active = true; 6330 mempool_free(pmb, phba->mbox_mem_pool); 6331 6332 return; 6333 6334 disable_ras: 6335 /* Free RAS DMA memory */ 6336 lpfc_sli4_ras_dma_free(phba); 6337 mempool_free(pmb, phba->mbox_mem_pool); 6338 } 6339 6340 /** 6341 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6342 * @phba: pointer to lpfc hba data structure. 6343 * @fwlog_level: Logging verbosity level. 6344 * @fwlog_enable: Enable/Disable logging. 6345 * 6346 * Initialize memory and post mailbox command to enable FW logging in host 6347 * memory. 
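 *
 * Typical call shape, a sketch only (these are the level and enable
 * values lpfc_sli4_ras_setup() passes; other values are possible):
 *
 *	if (!lpfc_check_fwlog_support(phba))
 *		lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
 *					 LPFC_RAS_ENABLE_LOGGING);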
6348 **/ 6349 int 6350 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6351 uint32_t fwlog_level, 6352 uint32_t fwlog_enable) 6353 { 6354 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6355 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6356 struct lpfc_dmabuf *dmabuf; 6357 LPFC_MBOXQ_t *mbox; 6358 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6359 int rc = 0; 6360 6361 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6362 phba->cfg_ras_fwlog_buffsize); 6363 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6364 6365 /* 6366 * If re-enabling FW logging support use earlier allocated 6367 * DMA buffers while posting MBX command. 6368 **/ 6369 if (!ras_fwlog->lwpd.virt) { 6370 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6371 if (rc) { 6372 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6373 "6189 FW Log Memory Allocation Failed"); 6374 return rc; 6375 } 6376 } 6377 6378 /* Setup Mailbox command */ 6379 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6380 if (!mbox) { 6381 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6382 "6190 RAS MBX Alloc Failed"); 6383 rc = -ENOMEM; 6384 goto mem_free; 6385 } 6386 6387 ras_fwlog->fw_loglevel = fwlog_level; 6388 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6389 sizeof(struct lpfc_sli4_cfg_mhdr)); 6390 6391 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6392 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6393 len, LPFC_SLI4_MBX_EMBED); 6394 6395 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6396 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6397 fwlog_enable); 6398 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6399 ras_fwlog->fw_loglevel); 6400 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6401 ras_fwlog->fw_buffcount); 6402 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6403 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6404 6405 /* Update DMA buffer address */ 6406 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6407 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6408 6409 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6410 putPaddrLow(dmabuf->phys); 6411 6412 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6413 putPaddrHigh(dmabuf->phys); 6414 } 6415 6416 /* Update LPWD address */ 6417 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6418 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6419 6420 mbox->vport = phba->pport; 6421 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6422 6423 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6424 6425 if (rc == MBX_NOT_FINISHED) { 6426 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6427 "6191 FW-Log Mailbox failed. " 6428 "status %d mbxStatus : x%x", rc, 6429 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6430 mempool_free(mbox, phba->mbox_mem_pool); 6431 rc = -EIO; 6432 goto mem_free; 6433 } else 6434 rc = 0; 6435 mem_free: 6436 if (rc) 6437 lpfc_sli4_ras_dma_free(phba); 6438 6439 return rc; 6440 } 6441 6442 /** 6443 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6444 * @phba: Pointer to HBA context object. 6445 * 6446 * Check if RAS is supported on the adapter and initialize it. 
6447 **/ 6448 void 6449 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6450 { 6451 /* Check RAS FW Log needs to be enabled or not */ 6452 if (lpfc_check_fwlog_support(phba)) 6453 return; 6454 6455 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6456 LPFC_RAS_ENABLE_LOGGING); 6457 } 6458 6459 /** 6460 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6461 * @phba: Pointer to HBA context object. 6462 * 6463 * This function allocates all SLI4 resource identifiers. 6464 **/ 6465 int 6466 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6467 { 6468 int i, rc, error = 0; 6469 uint16_t count, base; 6470 unsigned long longs; 6471 6472 if (!phba->sli4_hba.rpi_hdrs_in_use) 6473 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6474 if (phba->sli4_hba.extents_in_use) { 6475 /* 6476 * The port supports resource extents. The XRI, VPI, VFI, RPI 6477 * resource extent count must be read and allocated before 6478 * provisioning the resource id arrays. 6479 */ 6480 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6481 LPFC_IDX_RSRC_RDY) { 6482 /* 6483 * Extent-based resources are set - the driver could 6484 * be in a port reset. Figure out if any corrective 6485 * actions need to be taken. 6486 */ 6487 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6488 LPFC_RSC_TYPE_FCOE_VFI); 6489 if (rc != 0) 6490 error++; 6491 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6492 LPFC_RSC_TYPE_FCOE_VPI); 6493 if (rc != 0) 6494 error++; 6495 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6496 LPFC_RSC_TYPE_FCOE_XRI); 6497 if (rc != 0) 6498 error++; 6499 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6500 LPFC_RSC_TYPE_FCOE_RPI); 6501 if (rc != 0) 6502 error++; 6503 6504 /* 6505 * It's possible that the number of resources 6506 * provided to this port instance changed between 6507 * resets. Detect this condition and reallocate 6508 * resources. Otherwise, there is no action. 6509 */ 6510 if (error) { 6511 lpfc_printf_log(phba, KERN_INFO, 6512 LOG_MBOX | LOG_INIT, 6513 "2931 Detected extent resource " 6514 "change. Reallocating all " 6515 "extents.\n"); 6516 rc = lpfc_sli4_dealloc_extent(phba, 6517 LPFC_RSC_TYPE_FCOE_VFI); 6518 rc = lpfc_sli4_dealloc_extent(phba, 6519 LPFC_RSC_TYPE_FCOE_VPI); 6520 rc = lpfc_sli4_dealloc_extent(phba, 6521 LPFC_RSC_TYPE_FCOE_XRI); 6522 rc = lpfc_sli4_dealloc_extent(phba, 6523 LPFC_RSC_TYPE_FCOE_RPI); 6524 } else 6525 return 0; 6526 } 6527 6528 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6529 if (unlikely(rc)) 6530 goto err_exit; 6531 6532 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6533 if (unlikely(rc)) 6534 goto err_exit; 6535 6536 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6537 if (unlikely(rc)) 6538 goto err_exit; 6539 6540 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6541 if (unlikely(rc)) 6542 goto err_exit; 6543 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6544 LPFC_IDX_RSRC_RDY); 6545 return rc; 6546 } else { 6547 /* 6548 * The port does not support resource extents. The XRI, VPI, 6549 * VFI, RPI resource ids were determined from READ_CONFIG. 6550 * Just allocate the bitmasks and provision the resource id 6551 * arrays. If a port reset is active, the resources don't 6552 * need any action - just exit. 6553 */ 6554 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6555 LPFC_IDX_RSRC_RDY) { 6556 lpfc_sli4_dealloc_resource_identifiers(phba); 6557 lpfc_sli4_remove_rpis(phba); 6558 } 6559 /* RPIs. 
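 *
 * Each pool below pairs a bitmask (allocation state, indexed
 * 0..count-1) with an id array (index -> physical id = base + index).
 * A later allocation then follows the usual bitmap idiom, roughly
 * (idx, bmask, count, ids and phys_id are illustrative names; the real
 * allocators live elsewhere in this file):
 *
 *	idx = find_first_zero_bit(bmask, count);
 *	if (idx < count) {
 *		set_bit(idx, bmask);
 *		phys_id = ids[idx];
 *	}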
*/ 6560 count = phba->sli4_hba.max_cfg_param.max_rpi; 6561 if (count <= 0) { 6562 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6563 "3279 Invalid provisioning of " 6564 "rpi:%d\n", count); 6565 rc = -EINVAL; 6566 goto err_exit; 6567 } 6568 base = phba->sli4_hba.max_cfg_param.rpi_base; 6569 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6570 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6571 sizeof(unsigned long), 6572 GFP_KERNEL); 6573 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6574 rc = -ENOMEM; 6575 goto err_exit; 6576 } 6577 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6578 GFP_KERNEL); 6579 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6580 rc = -ENOMEM; 6581 goto free_rpi_bmask; 6582 } 6583 6584 for (i = 0; i < count; i++) 6585 phba->sli4_hba.rpi_ids[i] = base + i; 6586 6587 /* VPIs. */ 6588 count = phba->sli4_hba.max_cfg_param.max_vpi; 6589 if (count <= 0) { 6590 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6591 "3280 Invalid provisioning of " 6592 "vpi:%d\n", count); 6593 rc = -EINVAL; 6594 goto free_rpi_ids; 6595 } 6596 base = phba->sli4_hba.max_cfg_param.vpi_base; 6597 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6598 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6599 GFP_KERNEL); 6600 if (unlikely(!phba->vpi_bmask)) { 6601 rc = -ENOMEM; 6602 goto free_rpi_ids; 6603 } 6604 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6605 GFP_KERNEL); 6606 if (unlikely(!phba->vpi_ids)) { 6607 rc = -ENOMEM; 6608 goto free_vpi_bmask; 6609 } 6610 6611 for (i = 0; i < count; i++) 6612 phba->vpi_ids[i] = base + i; 6613 6614 /* XRIs. */ 6615 count = phba->sli4_hba.max_cfg_param.max_xri; 6616 if (count <= 0) { 6617 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6618 "3281 Invalid provisioning of " 6619 "xri:%d\n", count); 6620 rc = -EINVAL; 6621 goto free_vpi_ids; 6622 } 6623 base = phba->sli4_hba.max_cfg_param.xri_base; 6624 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6625 phba->sli4_hba.xri_bmask = kcalloc(longs, 6626 sizeof(unsigned long), 6627 GFP_KERNEL); 6628 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6629 rc = -ENOMEM; 6630 goto free_vpi_ids; 6631 } 6632 phba->sli4_hba.max_cfg_param.xri_used = 0; 6633 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6634 GFP_KERNEL); 6635 if (unlikely(!phba->sli4_hba.xri_ids)) { 6636 rc = -ENOMEM; 6637 goto free_xri_bmask; 6638 } 6639 6640 for (i = 0; i < count; i++) 6641 phba->sli4_hba.xri_ids[i] = base + i; 6642 6643 /* VFIs. */ 6644 count = phba->sli4_hba.max_cfg_param.max_vfi; 6645 if (count <= 0) { 6646 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6647 "3282 Invalid provisioning of " 6648 "vfi:%d\n", count); 6649 rc = -EINVAL; 6650 goto free_xri_ids; 6651 } 6652 base = phba->sli4_hba.max_cfg_param.vfi_base; 6653 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6654 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6655 sizeof(unsigned long), 6656 GFP_KERNEL); 6657 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6658 rc = -ENOMEM; 6659 goto free_xri_ids; 6660 } 6661 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6662 GFP_KERNEL); 6663 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6664 rc = -ENOMEM; 6665 goto free_vfi_bmask; 6666 } 6667 6668 for (i = 0; i < count; i++) 6669 phba->sli4_hba.vfi_ids[i] = base + i; 6670 6671 /* 6672 * Mark all resources ready. An HBA reset doesn't need 6673 * to reset the initialization. 
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
err_exit:
	return rc;
}

/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function deallocates the resource identifiers (extents or
 * bitmask and id arrays) previously provisioned for every resource type.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response.
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type.
*/ 6781 list_for_each_entry(rsrc_blk, blk_list_head, list) { 6782 if (curr_blks == 0) { 6783 /* 6784 * The GET_ALLOCATED mailbox does not return the size, 6785 * just the count. The size should be just the size 6786 * stored in the current allocated block and all sizes 6787 * for an extent type are the same so set the return 6788 * value now. 6789 */ 6790 *extnt_size = rsrc_blk->rsrc_size; 6791 } 6792 curr_blks++; 6793 } 6794 6795 /* 6796 * Calculate the size of an embedded mailbox. The uint32_t 6797 * accounts for extents-specific word. 6798 */ 6799 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6800 sizeof(uint32_t); 6801 6802 /* 6803 * Presume the allocation and response will fit into an embedded 6804 * mailbox. If not true, reconfigure to a non-embedded mailbox. 6805 */ 6806 emb = LPFC_SLI4_MBX_EMBED; 6807 req_len = emb_len; 6808 if (req_len > emb_len) { 6809 req_len = curr_blks * sizeof(uint16_t) + 6810 sizeof(union lpfc_sli4_cfg_shdr) + 6811 sizeof(uint32_t); 6812 emb = LPFC_SLI4_MBX_NEMBED; 6813 } 6814 6815 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6816 if (!mbox) 6817 return -ENOMEM; 6818 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6819 6820 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6821 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6822 req_len, emb); 6823 if (alloc_len < req_len) { 6824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6825 "2983 Allocated DMA memory size (x%x) is " 6826 "less than the requested DMA memory " 6827 "size (x%x)\n", alloc_len, req_len); 6828 rc = -ENOMEM; 6829 goto err_exit; 6830 } 6831 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6832 if (unlikely(rc)) { 6833 rc = -EIO; 6834 goto err_exit; 6835 } 6836 6837 if (!phba->sli4_hba.intr_enable) 6838 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6839 else { 6840 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6841 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6842 } 6843 6844 if (unlikely(rc)) { 6845 rc = -EIO; 6846 goto err_exit; 6847 } 6848 6849 /* 6850 * Figure out where the response is located. Then get local pointers 6851 * to the response data. The port does not guarantee to respond to 6852 * all extents counts request so update the local variable with the 6853 * allocated count from the port. 6854 */ 6855 if (emb == LPFC_SLI4_MBX_EMBED) { 6856 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6857 shdr = &rsrc_ext->header.cfg_shdr; 6858 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6859 } else { 6860 virtaddr = mbox->sge_array->addr[0]; 6861 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6862 shdr = &n_rsrc->cfg_shdr; 6863 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6864 } 6865 6866 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6867 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6868 "2984 Failed to read allocated resources " 6869 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6870 type, 6871 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6872 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6873 rc = -EIO; 6874 goto err_exit; 6875 } 6876 err_exit: 6877 lpfc_sli4_mbox_cmd_free(phba, mbox); 6878 return rc; 6879 } 6880 6881 /** 6882 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block 6883 * @phba: pointer to lpfc hba data structure. 6884 * @pring: Pointer to driver SLI ring object. 
6885 * @sgl_list: linked link of sgl buffers to post 6886 * @cnt: number of linked list buffers 6887 * 6888 * This routine walks the list of buffers that have been allocated and 6889 * repost them to the port by using SGL block post. This is needed after a 6890 * pci_function_reset/warm_start or start. It attempts to construct blocks 6891 * of buffer sgls which contains contiguous xris and uses the non-embedded 6892 * SGL block post mailbox commands to post them to the port. For single 6893 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6894 * mailbox command for posting. 6895 * 6896 * Returns: 0 = success, non-zero failure. 6897 **/ 6898 static int 6899 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6900 struct list_head *sgl_list, int cnt) 6901 { 6902 struct lpfc_sglq *sglq_entry = NULL; 6903 struct lpfc_sglq *sglq_entry_next = NULL; 6904 struct lpfc_sglq *sglq_entry_first = NULL; 6905 int status, total_cnt; 6906 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6907 int last_xritag = NO_XRI; 6908 LIST_HEAD(prep_sgl_list); 6909 LIST_HEAD(blck_sgl_list); 6910 LIST_HEAD(allc_sgl_list); 6911 LIST_HEAD(post_sgl_list); 6912 LIST_HEAD(free_sgl_list); 6913 6914 spin_lock_irq(&phba->hbalock); 6915 spin_lock(&phba->sli4_hba.sgl_list_lock); 6916 list_splice_init(sgl_list, &allc_sgl_list); 6917 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6918 spin_unlock_irq(&phba->hbalock); 6919 6920 total_cnt = cnt; 6921 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6922 &allc_sgl_list, list) { 6923 list_del_init(&sglq_entry->list); 6924 block_cnt++; 6925 if ((last_xritag != NO_XRI) && 6926 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6927 /* a hole in xri block, form a sgl posting block */ 6928 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6929 post_cnt = block_cnt - 1; 6930 /* prepare list for next posting block */ 6931 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6932 block_cnt = 1; 6933 } else { 6934 /* prepare list for next posting block */ 6935 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6936 /* enough sgls for non-embed sgl mbox command */ 6937 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6938 list_splice_init(&prep_sgl_list, 6939 &blck_sgl_list); 6940 post_cnt = block_cnt; 6941 block_cnt = 0; 6942 } 6943 } 6944 num_posted++; 6945 6946 /* keep track of last sgl's xritag */ 6947 last_xritag = sglq_entry->sli4_xritag; 6948 6949 /* end of repost sgl list condition for buffers */ 6950 if (num_posted == total_cnt) { 6951 if (post_cnt == 0) { 6952 list_splice_init(&prep_sgl_list, 6953 &blck_sgl_list); 6954 post_cnt = block_cnt; 6955 } else if (block_cnt == 1) { 6956 status = lpfc_sli4_post_sgl(phba, 6957 sglq_entry->phys, 0, 6958 sglq_entry->sli4_xritag); 6959 if (!status) { 6960 /* successful, put sgl to posted list */ 6961 list_add_tail(&sglq_entry->list, 6962 &post_sgl_list); 6963 } else { 6964 /* Failure, put sgl to free list */ 6965 lpfc_printf_log(phba, KERN_WARNING, 6966 LOG_SLI, 6967 "3159 Failed to post " 6968 "sgl, xritag:x%x\n", 6969 sglq_entry->sli4_xritag); 6970 list_add_tail(&sglq_entry->list, 6971 &free_sgl_list); 6972 total_cnt--; 6973 } 6974 } 6975 } 6976 6977 /* continue until a nembed page worth of sgls */ 6978 if (post_cnt == 0) 6979 continue; 6980 6981 /* post the buffer list sgls as a block */ 6982 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6983 post_cnt); 6984 6985 if (!status) { 6986 /* success, put sgl list to posted sgl list */ 6987 list_splice_init(&blck_sgl_list, &post_sgl_list); 6988 } else { 6989 /* Failure, put 
sgl list to free sgl list */ 6990 sglq_entry_first = list_first_entry(&blck_sgl_list, 6991 struct lpfc_sglq, 6992 list); 6993 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6994 "3160 Failed to post sgl-list, " 6995 "xritag:x%x-x%x\n", 6996 sglq_entry_first->sli4_xritag, 6997 (sglq_entry_first->sli4_xritag + 6998 post_cnt - 1)); 6999 list_splice_init(&blck_sgl_list, &free_sgl_list); 7000 total_cnt -= post_cnt; 7001 } 7002 7003 /* don't reset xirtag due to hole in xri block */ 7004 if (block_cnt == 0) 7005 last_xritag = NO_XRI; 7006 7007 /* reset sgl post count for next round of posting */ 7008 post_cnt = 0; 7009 } 7010 7011 /* free the sgls failed to post */ 7012 lpfc_free_sgl_list(phba, &free_sgl_list); 7013 7014 /* push sgls posted to the available list */ 7015 if (!list_empty(&post_sgl_list)) { 7016 spin_lock_irq(&phba->hbalock); 7017 spin_lock(&phba->sli4_hba.sgl_list_lock); 7018 list_splice_init(&post_sgl_list, sgl_list); 7019 spin_unlock(&phba->sli4_hba.sgl_list_lock); 7020 spin_unlock_irq(&phba->hbalock); 7021 } else { 7022 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 7023 "3161 Failure to post sgl to port.\n"); 7024 return -EIO; 7025 } 7026 7027 /* return the number of XRIs actually posted */ 7028 return total_cnt; 7029 } 7030 7031 /** 7032 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls 7033 * @phba: pointer to lpfc hba data structure. 7034 * 7035 * This routine walks the list of nvme buffers that have been allocated and 7036 * repost them to the port by using SGL block post. This is needed after a 7037 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine 7038 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list 7039 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers. 7040 * 7041 * Returns: 0 = success, non-zero failure. 7042 **/ 7043 static int 7044 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba) 7045 { 7046 LIST_HEAD(post_nblist); 7047 int num_posted, rc = 0; 7048 7049 /* get all NVME buffers need to repost to a local list */ 7050 lpfc_io_buf_flush(phba, &post_nblist); 7051 7052 /* post the list of nvme buffer sgls to port if available */ 7053 if (!list_empty(&post_nblist)) { 7054 num_posted = lpfc_sli4_post_io_sgl_list( 7055 phba, &post_nblist, phba->sli4_hba.io_xri_cnt); 7056 /* failed to post any nvme buffer, return error */ 7057 if (num_posted == 0) 7058 rc = -EIO; 7059 } 7060 return rc; 7061 } 7062 7063 static void 7064 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 7065 { 7066 uint32_t len; 7067 7068 len = sizeof(struct lpfc_mbx_set_host_data) - 7069 sizeof(struct lpfc_sli4_cfg_mhdr); 7070 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 7071 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 7072 LPFC_SLI4_MBX_EMBED); 7073 7074 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 7075 mbox->u.mqe.un.set_host_data.param_len = 7076 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7077 snprintf(mbox->u.mqe.un.set_host_data.data, 7078 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7079 "Linux %s v"LPFC_DRIVER_VERSION, 7080 (phba->hba_flag & HBA_FCOE_MODE) ? 
"FCoE" : "FC"); 7081 } 7082 7083 int 7084 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7085 struct lpfc_queue *drq, int count, int idx) 7086 { 7087 int rc, i; 7088 struct lpfc_rqe hrqe; 7089 struct lpfc_rqe drqe; 7090 struct lpfc_rqb *rqbp; 7091 unsigned long flags; 7092 struct rqb_dmabuf *rqb_buffer; 7093 LIST_HEAD(rqb_buf_list); 7094 7095 spin_lock_irqsave(&phba->hbalock, flags); 7096 rqbp = hrq->rqbp; 7097 for (i = 0; i < count; i++) { 7098 /* IF RQ is already full, don't bother */ 7099 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7100 break; 7101 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7102 if (!rqb_buffer) 7103 break; 7104 rqb_buffer->hrq = hrq; 7105 rqb_buffer->drq = drq; 7106 rqb_buffer->idx = idx; 7107 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7108 } 7109 while (!list_empty(&rqb_buf_list)) { 7110 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7111 hbuf.list); 7112 7113 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7114 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7115 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7116 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7117 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7118 if (rc < 0) { 7119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7120 "6421 Cannot post to HRQ %d: %x %x %x " 7121 "DRQ %x %x\n", 7122 hrq->queue_id, 7123 hrq->host_index, 7124 hrq->hba_index, 7125 hrq->entry_count, 7126 drq->host_index, 7127 drq->hba_index); 7128 rqbp->rqb_free_buffer(phba, rqb_buffer); 7129 } else { 7130 list_add_tail(&rqb_buffer->hbuf.list, 7131 &rqbp->rqb_buffer_list); 7132 rqbp->buffer_count++; 7133 } 7134 } 7135 spin_unlock_irqrestore(&phba->hbalock, flags); 7136 return 1; 7137 } 7138 7139 /** 7140 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7141 * @phba: Pointer to HBA context object. 7142 * 7143 * This function is the main SLI4 device initialization PCI function. This 7144 * function is called by the HBA initialization code, HBA reset code and 7145 * HBA error attention handler code. Caller is not required to hold any 7146 * locks. 7147 **/ 7148 int 7149 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7150 { 7151 int rc, i, cnt, len; 7152 LPFC_MBOXQ_t *mboxq; 7153 struct lpfc_mqe *mqe; 7154 uint8_t *vpd; 7155 uint32_t vpd_size; 7156 uint32_t ftr_rsp = 0; 7157 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7158 struct lpfc_vport *vport = phba->pport; 7159 struct lpfc_dmabuf *mp; 7160 struct lpfc_rqb *rqbp; 7161 7162 /* Perform a PCI function reset to start from clean */ 7163 rc = lpfc_pci_function_reset(phba); 7164 if (unlikely(rc)) 7165 return -ENODEV; 7166 7167 /* Check the HBA Host Status Register for readyness */ 7168 rc = lpfc_sli4_post_status_check(phba); 7169 if (unlikely(rc)) 7170 return -ENODEV; 7171 else { 7172 spin_lock_irq(&phba->hbalock); 7173 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7174 spin_unlock_irq(&phba->hbalock); 7175 } 7176 7177 /* 7178 * Allocate a single mailbox container for initializing the 7179 * port. 7180 */ 7181 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7182 if (!mboxq) 7183 return -ENOMEM; 7184 7185 /* Issue READ_REV to collect vpd and FW information. 
*/ 7186 vpd_size = SLI4_PAGE_SIZE; 7187 vpd = kzalloc(vpd_size, GFP_KERNEL); 7188 if (!vpd) { 7189 rc = -ENOMEM; 7190 goto out_free_mbox; 7191 } 7192 7193 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7194 if (unlikely(rc)) { 7195 kfree(vpd); 7196 goto out_free_mbox; 7197 } 7198 7199 mqe = &mboxq->u.mqe; 7200 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7201 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7202 phba->hba_flag |= HBA_FCOE_MODE; 7203 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7204 } else { 7205 phba->hba_flag &= ~HBA_FCOE_MODE; 7206 } 7207 7208 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7209 LPFC_DCBX_CEE_MODE) 7210 phba->hba_flag |= HBA_FIP_SUPPORT; 7211 else 7212 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7213 7214 phba->hba_flag &= ~HBA_IOQ_FLUSH; 7215 7216 if (phba->sli_rev != LPFC_SLI_REV4) { 7217 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7218 "0376 READ_REV Error. SLI Level %d " 7219 "FCoE enabled %d\n", 7220 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7221 rc = -EIO; 7222 kfree(vpd); 7223 goto out_free_mbox; 7224 } 7225 7226 /* 7227 * Continue initialization with default values even if driver failed 7228 * to read FCoE param config regions, only read parameters if the 7229 * board is FCoE 7230 */ 7231 if (phba->hba_flag & HBA_FCOE_MODE && 7232 lpfc_sli4_read_fcoe_params(phba)) 7233 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7234 "2570 Failed to read FCoE parameters\n"); 7235 7236 /* 7237 * Retrieve sli4 device physical port name, failure of doing it 7238 * is considered as non-fatal. 7239 */ 7240 rc = lpfc_sli4_retrieve_pport_name(phba); 7241 if (!rc) 7242 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7243 "3080 Successful retrieving SLI4 device " 7244 "physical port name: %s.\n", phba->Port); 7245 7246 rc = lpfc_sli4_get_ctl_attr(phba); 7247 if (!rc) 7248 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7249 "8351 Successful retrieving SLI4 device " 7250 "CTL ATTR\n"); 7251 7252 /* 7253 * Evaluate the read rev and vpd data. Populate the driver 7254 * state with the results. If this routine fails, the failure 7255 * is not fatal as the driver will use generic values. 7256 */ 7257 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7258 if (unlikely(!rc)) { 7259 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7260 "0377 Error %d parsing vpd. 
" 7261 "Using defaults.\n", rc); 7262 rc = 0; 7263 } 7264 kfree(vpd); 7265 7266 /* Save information as VPD data */ 7267 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7268 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7269 7270 /* 7271 * This is because first G7 ASIC doesn't support the standard 7272 * 0x5a NVME cmd descriptor type/subtype 7273 */ 7274 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7275 LPFC_SLI_INTF_IF_TYPE_6) && 7276 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7277 (phba->vpd.rev.smRev == 0) && 7278 (phba->cfg_nvme_embed_cmd == 1)) 7279 phba->cfg_nvme_embed_cmd = 0; 7280 7281 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7282 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7283 &mqe->un.read_rev); 7284 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7285 &mqe->un.read_rev); 7286 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7287 &mqe->un.read_rev); 7288 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7289 &mqe->un.read_rev); 7290 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7291 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7292 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7293 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7294 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7295 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7296 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7297 "(%d):0380 READ_REV Status x%x " 7298 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7299 mboxq->vport ? mboxq->vport->vpi : 0, 7300 bf_get(lpfc_mqe_status, mqe), 7301 phba->vpd.rev.opFwName, 7302 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7303 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7304 7305 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7306 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7307 if (phba->pport->cfg_lun_queue_depth > rc) { 7308 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7309 "3362 LUN queue depth changed from %d to %d\n", 7310 phba->pport->cfg_lun_queue_depth, rc); 7311 phba->pport->cfg_lun_queue_depth = rc; 7312 } 7313 7314 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7315 LPFC_SLI_INTF_IF_TYPE_0) { 7316 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7317 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7318 if (rc == MBX_SUCCESS) { 7319 phba->hba_flag |= HBA_RECOVERABLE_UE; 7320 /* Set 1Sec interval to detect UE */ 7321 phba->eratt_poll_interval = 1; 7322 phba->sli4_hba.ue_to_sr = bf_get( 7323 lpfc_mbx_set_feature_UESR, 7324 &mboxq->u.mqe.un.set_feature); 7325 phba->sli4_hba.ue_to_rp = bf_get( 7326 lpfc_mbx_set_feature_UERP, 7327 &mboxq->u.mqe.un.set_feature); 7328 } 7329 } 7330 7331 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7332 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7333 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7334 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7335 if (rc != MBX_SUCCESS) 7336 phba->mds_diags_support = 0; 7337 } 7338 7339 /* 7340 * Discover the port's supported feature set and match it against the 7341 * hosts requests. 7342 */ 7343 lpfc_request_features(phba, mboxq); 7344 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7345 if (unlikely(rc)) { 7346 rc = -EIO; 7347 goto out_free_mbox; 7348 } 7349 7350 /* 7351 * The port must support FCP initiator mode as this is the 7352 * only mode running in the host. 
7353 */ 7354 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7355 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7356 "0378 No support for fcpi mode.\n"); 7357 ftr_rsp++; 7358 } 7359 7360 /* Performance Hints are ONLY for FCoE */ 7361 if (phba->hba_flag & HBA_FCOE_MODE) { 7362 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7363 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7364 else 7365 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7366 } 7367 7368 /* 7369 * If the port cannot support the host's requested features 7370 * then turn off the global config parameters to disable the 7371 * feature in the driver. This is not a fatal error. 7372 */ 7373 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7374 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7375 phba->cfg_enable_bg = 0; 7376 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7377 ftr_rsp++; 7378 } 7379 } 7380 7381 if (phba->max_vpi && phba->cfg_enable_npiv && 7382 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7383 ftr_rsp++; 7384 7385 if (ftr_rsp) { 7386 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7387 "0379 Feature Mismatch Data: x%08x %08x " 7388 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7389 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7390 phba->cfg_enable_npiv, phba->max_vpi); 7391 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7392 phba->cfg_enable_bg = 0; 7393 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7394 phba->cfg_enable_npiv = 0; 7395 } 7396 7397 /* These SLI3 features are assumed in SLI4 */ 7398 spin_lock_irq(&phba->hbalock); 7399 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7400 spin_unlock_irq(&phba->hbalock); 7401 7402 /* 7403 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7404 * calls depends on these resources to complete port setup. 7405 */ 7406 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7407 if (rc) { 7408 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7409 "2920 Failed to alloc Resource IDs " 7410 "rc = x%x\n", rc); 7411 goto out_free_mbox; 7412 } 7413 7414 lpfc_set_host_data(phba, mboxq); 7415 7416 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7417 if (rc) { 7418 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7419 "2134 Failed to set host os driver version %x", 7420 rc); 7421 } 7422 7423 /* Read the port's service parameters. */ 7424 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7425 if (rc) { 7426 phba->link_state = LPFC_HBA_ERROR; 7427 rc = -ENOMEM; 7428 goto out_free_mbox; 7429 } 7430 7431 mboxq->vport = vport; 7432 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7433 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 7434 if (rc == MBX_SUCCESS) { 7435 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7436 rc = 0; 7437 } 7438 7439 /* 7440 * This memory was allocated by the lpfc_read_sparam routine. Release 7441 * it to the mbuf pool. 7442 */ 7443 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7444 kfree(mp); 7445 mboxq->ctx_buf = NULL; 7446 if (unlikely(rc)) { 7447 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7448 "0382 READ_SPARAM command failed " 7449 "status %d, mbxStatus x%x\n", 7450 rc, bf_get(lpfc_mqe_status, mqe)); 7451 phba->link_state = LPFC_HBA_ERROR; 7452 rc = -EIO; 7453 goto out_free_mbox; 7454 } 7455 7456 lpfc_update_vport_wwn(vport); 7457 7458 /* Update the fc_host data structures with new wwn. 
*/ 7459 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7460 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7461 7462 /* Create all the SLI4 queues */ 7463 rc = lpfc_sli4_queue_create(phba); 7464 if (rc) { 7465 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7466 "3089 Failed to allocate queues\n"); 7467 rc = -ENODEV; 7468 goto out_free_mbox; 7469 } 7470 /* Set up all the queues to the device */ 7471 rc = lpfc_sli4_queue_setup(phba); 7472 if (unlikely(rc)) { 7473 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7474 "0381 Error %d during queue setup.\n ", rc); 7475 goto out_stop_timers; 7476 } 7477 /* Initialize the driver internal SLI layer lists. */ 7478 lpfc_sli4_setup(phba); 7479 lpfc_sli4_queue_init(phba); 7480 7481 /* update host els xri-sgl sizes and mappings */ 7482 rc = lpfc_sli4_els_sgl_update(phba); 7483 if (unlikely(rc)) { 7484 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7485 "1400 Failed to update xri-sgl size and " 7486 "mapping: %d\n", rc); 7487 goto out_destroy_queue; 7488 } 7489 7490 /* register the els sgl pool to the port */ 7491 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7492 phba->sli4_hba.els_xri_cnt); 7493 if (unlikely(rc < 0)) { 7494 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7495 "0582 Error %d during els sgl post " 7496 "operation\n", rc); 7497 rc = -ENODEV; 7498 goto out_destroy_queue; 7499 } 7500 phba->sli4_hba.els_xri_cnt = rc; 7501 7502 if (phba->nvmet_support) { 7503 /* update host nvmet xri-sgl sizes and mappings */ 7504 rc = lpfc_sli4_nvmet_sgl_update(phba); 7505 if (unlikely(rc)) { 7506 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7507 "6308 Failed to update nvmet-sgl size " 7508 "and mapping: %d\n", rc); 7509 goto out_destroy_queue; 7510 } 7511 7512 /* register the nvmet sgl pool to the port */ 7513 rc = lpfc_sli4_repost_sgl_list( 7514 phba, 7515 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7516 phba->sli4_hba.nvmet_xri_cnt); 7517 if (unlikely(rc < 0)) { 7518 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7519 "3117 Error %d during nvmet " 7520 "sgl post\n", rc); 7521 rc = -ENODEV; 7522 goto out_destroy_queue; 7523 } 7524 phba->sli4_hba.nvmet_xri_cnt = rc; 7525 7526 cnt = phba->cfg_iocb_cnt * 1024; 7527 /* We need 1 iocbq for every SGL, for IO processing */ 7528 cnt += phba->sli4_hba.nvmet_xri_cnt; 7529 } else { 7530 /* update host common xri-sgl sizes and mappings */ 7531 rc = lpfc_sli4_io_sgl_update(phba); 7532 if (unlikely(rc)) { 7533 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7534 "6082 Failed to update nvme-sgl size " 7535 "and mapping: %d\n", rc); 7536 goto out_destroy_queue; 7537 } 7538 7539 /* register the allocated common sgl pool to the port */ 7540 rc = lpfc_sli4_repost_io_sgl_list(phba); 7541 if (unlikely(rc)) { 7542 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7543 "6116 Error %d during nvme sgl post " 7544 "operation\n", rc); 7545 /* Some NVME buffers were moved to abort nvme list */ 7546 /* A pci function reset will repost them */ 7547 rc = -ENODEV; 7548 goto out_destroy_queue; 7549 } 7550 cnt = phba->cfg_iocb_cnt * 1024; 7551 } 7552 7553 if (!phba->sli.iocbq_lookup) { 7554 /* Initialize and populate the iocb list per host */ 7555 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7556 "2821 initialize iocb list %d total %d\n", 7557 phba->cfg_iocb_cnt, cnt); 7558 rc = lpfc_init_iocb_list(phba, cnt); 7559 if (rc) { 7560 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7561 "1413 Failed to init iocb list.\n"); 7562 goto out_destroy_queue; 7563 } 7564 } 7565 
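	/*
	 * At this point the iocbq lookup array (when it did not already
	 * exist) has been sized to cfg_iocb_cnt * 1024 entries, plus one
	 * entry per NVMET XRI in target mode, so that every posted SGL has
	 * an iocbq available for IO processing.
	 */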
7566 if (phba->nvmet_support) 7567 lpfc_nvmet_create_targetport(phba); 7568 7569 if (phba->nvmet_support && phba->cfg_nvmet_mrq) { 7570 /* Post initial buffers to all RQs created */ 7571 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7572 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7573 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7574 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7575 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7576 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7577 rqbp->buffer_count = 0; 7578 7579 lpfc_post_rq_buffer( 7580 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7581 phba->sli4_hba.nvmet_mrq_data[i], 7582 phba->cfg_nvmet_mrq_post, i); 7583 } 7584 } 7585 7586 /* Post the rpi header region to the device. */ 7587 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7588 if (unlikely(rc)) { 7589 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7590 "0393 Error %d during rpi post operation\n", 7591 rc); 7592 rc = -ENODEV; 7593 goto out_destroy_queue; 7594 } 7595 lpfc_sli4_node_prep(phba); 7596 7597 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7598 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7599 /* 7600 * The FC Port needs to register FCFI (index 0) 7601 */ 7602 lpfc_reg_fcfi(phba, mboxq); 7603 mboxq->vport = phba->pport; 7604 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7605 if (rc != MBX_SUCCESS) 7606 goto out_unset_queue; 7607 rc = 0; 7608 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7609 &mboxq->u.mqe.un.reg_fcfi); 7610 } else { 7611 /* We are a NVME Target mode with MRQ > 1 */ 7612 7613 /* First register the FCFI */ 7614 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7615 mboxq->vport = phba->pport; 7616 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7617 if (rc != MBX_SUCCESS) 7618 goto out_unset_queue; 7619 rc = 0; 7620 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7621 &mboxq->u.mqe.un.reg_fcfi_mrq); 7622 7623 /* Next register the MRQs */ 7624 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7625 mboxq->vport = phba->pport; 7626 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7627 if (rc != MBX_SUCCESS) 7628 goto out_unset_queue; 7629 rc = 0; 7630 } 7631 /* Check if the port is configured to be disabled */ 7632 lpfc_sli_read_link_ste(phba); 7633 } 7634 7635 /* Don't post more new bufs if repost already recovered 7636 * the nvme sgls. 
7637 */ 7638 if (phba->nvmet_support == 0) { 7639 if (phba->sli4_hba.io_xri_cnt == 0) { 7640 len = lpfc_new_io_buf( 7641 phba, phba->sli4_hba.io_xri_max); 7642 if (len == 0) { 7643 rc = -ENOMEM; 7644 goto out_unset_queue; 7645 } 7646 7647 if (phba->cfg_xri_rebalancing) 7648 lpfc_create_multixri_pools(phba); 7649 } 7650 } else { 7651 phba->cfg_xri_rebalancing = 0; 7652 } 7653 7654 /* Allow asynchronous mailbox command to go through */ 7655 spin_lock_irq(&phba->hbalock); 7656 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7657 spin_unlock_irq(&phba->hbalock); 7658 7659 /* Post receive buffers to the device */ 7660 lpfc_sli4_rb_setup(phba); 7661 7662 /* Reset HBA FCF states after HBA reset */ 7663 phba->fcf.fcf_flag = 0; 7664 phba->fcf.current_rec.flag = 0; 7665 7666 /* Start the ELS watchdog timer */ 7667 mod_timer(&vport->els_tmofunc, 7668 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7669 7670 /* Start heart beat timer */ 7671 mod_timer(&phba->hb_tmofunc, 7672 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7673 phba->hb_outstanding = 0; 7674 phba->last_completion_time = jiffies; 7675 7676 /* start eq_delay heartbeat */ 7677 if (phba->cfg_auto_imax) 7678 queue_delayed_work(phba->wq, &phba->eq_delay_work, 7679 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS)); 7680 7681 /* Start error attention (ERATT) polling timer */ 7682 mod_timer(&phba->eratt_poll, 7683 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7684 7685 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7686 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7687 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7688 if (!rc) { 7689 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7690 "2829 This device supports " 7691 "Advanced Error Reporting (AER)\n"); 7692 spin_lock_irq(&phba->hbalock); 7693 phba->hba_flag |= HBA_AER_ENABLED; 7694 spin_unlock_irq(&phba->hbalock); 7695 } else { 7696 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7697 "2830 This device does not support " 7698 "Advanced Error Reporting (AER)\n"); 7699 phba->cfg_aer_support = 0; 7700 } 7701 rc = 0; 7702 } 7703 7704 /* 7705 * The port is ready, set the host's link state to LINK_DOWN 7706 * in preparation for link interrupts. 
7707 */ 7708 spin_lock_irq(&phba->hbalock); 7709 phba->link_state = LPFC_LINK_DOWN; 7710 7711 /* Check if physical ports are trunked */ 7712 if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba)) 7713 phba->trunk_link.link0.state = LPFC_LINK_DOWN; 7714 if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba)) 7715 phba->trunk_link.link1.state = LPFC_LINK_DOWN; 7716 if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba)) 7717 phba->trunk_link.link2.state = LPFC_LINK_DOWN; 7718 if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba)) 7719 phba->trunk_link.link3.state = LPFC_LINK_DOWN; 7720 spin_unlock_irq(&phba->hbalock); 7721 7722 /* Arm the CQs and then EQs on device */ 7723 lpfc_sli4_arm_cqeq_intr(phba); 7724 7725 /* Indicate device interrupt mode */ 7726 phba->sli4_hba.intr_enable = 1; 7727 7728 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7729 (phba->hba_flag & LINK_DISABLED)) { 7730 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7731 "3103 Adapter Link is disabled.\n"); 7732 lpfc_down_link(phba, mboxq); 7733 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7734 if (rc != MBX_SUCCESS) { 7735 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7736 "3104 Adapter failed to issue " 7737 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7738 goto out_io_buff_free; 7739 } 7740 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7741 /* don't perform init_link on SLI4 FC port loopback test */ 7742 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7743 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7744 if (rc) 7745 goto out_io_buff_free; 7746 } 7747 } 7748 mempool_free(mboxq, phba->mbox_mem_pool); 7749 return rc; 7750 out_io_buff_free: 7751 /* Free allocated IO Buffers */ 7752 lpfc_io_free(phba); 7753 out_unset_queue: 7754 /* Unset all the queues set up in this routine when error out */ 7755 lpfc_sli4_queue_unset(phba); 7756 out_destroy_queue: 7757 lpfc_free_iocb_list(phba); 7758 lpfc_sli4_queue_destroy(phba); 7759 out_stop_timers: 7760 lpfc_stop_hba_timers(phba); 7761 out_free_mbox: 7762 mempool_free(mboxq, phba->mbox_mem_pool); 7763 return rc; 7764 } 7765 7766 /** 7767 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7768 * @ptr: context object - pointer to hba structure. 7769 * 7770 * This is the callback function for mailbox timer. The mailbox 7771 * timer is armed when a new mailbox command is issued and the timer 7772 * is deleted when the mailbox complete. The function is called by 7773 * the kernel timer code when a mailbox does not complete within 7774 * expected time. This function wakes up the worker thread to 7775 * process the mailbox timeout and returns. All the processing is 7776 * done by the worker thread function lpfc_mbox_timeout_handler. 7777 **/ 7778 void 7779 lpfc_mbox_timeout(struct timer_list *t) 7780 { 7781 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7782 unsigned long iflag; 7783 uint32_t tmo_posted; 7784 7785 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7786 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7787 if (!tmo_posted) 7788 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7789 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7790 7791 if (!tmo_posted) 7792 lpfc_worker_wake_up(phba); 7793 return; 7794 } 7795 7796 /** 7797 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7798 * are pending 7799 * @phba: Pointer to HBA context object. 7800 * 7801 * This function checks if any mailbox completions are present on the mailbox 7802 * completion queue. 
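 *
 * Return: true if a completed, non-asynchronous mailbox CQE is found on
 * the mailbox completion queue, false otherwise.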
7803 **/ 7804 static bool 7805 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7806 { 7807 7808 uint32_t idx; 7809 struct lpfc_queue *mcq; 7810 struct lpfc_mcqe *mcqe; 7811 bool pending_completions = false; 7812 uint8_t qe_valid; 7813 7814 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7815 return false; 7816 7817 /* Check for completions on mailbox completion queue */ 7818 7819 mcq = phba->sli4_hba.mbx_cq; 7820 idx = mcq->hba_index; 7821 qe_valid = mcq->qe_valid; 7822 while (bf_get_le32(lpfc_cqe_valid, 7823 (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) { 7824 mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx)); 7825 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7826 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7827 pending_completions = true; 7828 break; 7829 } 7830 idx = (idx + 1) % mcq->entry_count; 7831 if (mcq->hba_index == idx) 7832 break; 7833 7834 /* if the index wrapped around, toggle the valid bit */ 7835 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7836 qe_valid = (qe_valid) ? 0 : 1; 7837 } 7838 return pending_completions; 7839 7840 } 7841 7842 /** 7843 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7844 * that were missed. 7845 * @phba: Pointer to HBA context object. 7846 * 7847 * For sli4, it is possible to miss an interrupt. As such mbox completions 7848 * maybe missed causing erroneous mailbox timeouts to occur. This function 7849 * checks to see if mbox completions are on the mailbox completion queue 7850 * and will process all the completions associated with the eq for the 7851 * mailbox completion queue. 7852 **/ 7853 static bool 7854 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7855 { 7856 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7857 uint32_t eqidx; 7858 struct lpfc_queue *fpeq = NULL; 7859 struct lpfc_queue *eq; 7860 bool mbox_pending; 7861 7862 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7863 return false; 7864 7865 /* Find the EQ associated with the mbox CQ */ 7866 if (sli4_hba->hdwq) { 7867 for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { 7868 eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; 7869 if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { 7870 fpeq = eq; 7871 break; 7872 } 7873 } 7874 } 7875 if (!fpeq) 7876 return false; 7877 7878 /* Turn off interrupts from this EQ */ 7879 7880 sli4_hba->sli4_eq_clr_intr(fpeq); 7881 7882 /* Check to see if a mbox completion is pending */ 7883 7884 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7885 7886 /* 7887 * If a mbox completion is pending, process all the events on EQ 7888 * associated with the mbox completion queue (this could include 7889 * mailbox commands, async events, els commands, receive queue data 7890 * and fcp commands) 7891 */ 7892 7893 if (mbox_pending) 7894 /* process and rearm the EQ */ 7895 lpfc_sli4_process_eq(phba, fpeq); 7896 else 7897 /* Always clear and re-arm the EQ */ 7898 sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); 7899 7900 return mbox_pending; 7901 7902 } 7903 7904 /** 7905 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7906 * @phba: Pointer to HBA context object. 7907 * 7908 * This function is called from worker thread when a mailbox command times out. 7909 * The caller is not required to hold any locks. This function will reset the 7910 * HBA and recover all the pending commands. 
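 *
 * Return: true if a mailbox completion was pending and the EQ associated
 * with the mailbox completion queue was processed, false otherwise.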
7911 **/ 7912 void 7913 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7914 { 7915 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7916 MAILBOX_t *mb = NULL; 7917 7918 struct lpfc_sli *psli = &phba->sli; 7919 7920 /* If the mailbox completed, process the completion and return */ 7921 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7922 return; 7923 7924 if (pmbox != NULL) 7925 mb = &pmbox->u.mb; 7926 /* Check the pmbox pointer first. There is a race condition 7927 * between the mbox timeout handler getting executed in the 7928 * worklist and the mailbox actually completing. When this 7929 * race condition occurs, the mbox_active will be NULL. 7930 */ 7931 spin_lock_irq(&phba->hbalock); 7932 if (pmbox == NULL) { 7933 lpfc_printf_log(phba, KERN_WARNING, 7934 LOG_MBOX | LOG_SLI, 7935 "0353 Active Mailbox cleared - mailbox timeout " 7936 "exiting\n"); 7937 spin_unlock_irq(&phba->hbalock); 7938 return; 7939 } 7940 7941 /* Mbox cmd <mbxCommand> timeout */ 7942 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7943 "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", 7944 mb->mbxCommand, 7945 phba->pport->port_state, 7946 phba->sli.sli_flag, 7947 phba->sli.mbox_active); 7948 spin_unlock_irq(&phba->hbalock); 7949 7950 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7951 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7952 * it to fail all outstanding SCSI IO. 7953 */ 7954 spin_lock_irq(&phba->pport->work_port_lock); 7955 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7956 spin_unlock_irq(&phba->pport->work_port_lock); 7957 spin_lock_irq(&phba->hbalock); 7958 phba->link_state = LPFC_LINK_UNKNOWN; 7959 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7960 spin_unlock_irq(&phba->hbalock); 7961 7962 lpfc_sli_abort_fcp_rings(phba); 7963 7964 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7965 "0345 Resetting board due to mailbox timeout\n"); 7966 7967 /* Reset the HBA device */ 7968 lpfc_reset_hba(phba); 7969 } 7970 7971 /** 7972 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7973 * @phba: Pointer to HBA context object. 7974 * @pmbox: Pointer to mailbox object. 7975 * @flag: Flag indicating how the mailbox need to be processed. 7976 * 7977 * This function is called by discovery code and HBA management code 7978 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7979 * function gets the hbalock to protect the data structures. 7980 * The mailbox command can be submitted in polling mode, in which case 7981 * this function will wait in a polling loop for the completion of the 7982 * mailbox. 7983 * If the mailbox is submitted in no_wait mode (not polling) the 7984 * function will submit the command and returns immediately without waiting 7985 * for the mailbox completion. The no_wait is supported only when HBA 7986 * is in SLI2/SLI3 mode - interrupts are enabled. 7987 * The SLI interface allows only one mailbox pending at a time. If the 7988 * mailbox is issued in polling mode and there is already a mailbox 7989 * pending, then the function will return an error. If the mailbox is issued 7990 * in NO_WAIT mode and there is a mailbox pending already, the function 7991 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7992 * The sli layer owns the mailbox object until the completion of mailbox 7993 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7994 * return codes the caller owns the mailbox command after the return of 7995 * the function. 
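 *
 * A minimal polling-mode sketch (illustrative only, mirroring how
 * lpfc_sli4_hba_setup in this file drives the interface through the
 * lpfc_sli_issue_mbox wrapper; error handling and the read-sparam DMA
 * buffer cleanup are omitted for brevity):
 *
 *   mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *   if (!mboxq)
 *           return -ENOMEM;
 *   lpfc_read_sparam(phba, mboxq, vport->vpi);
 *   rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *   if (rc == MBX_SUCCESS)
 *           ... consume the completed mailbox data ...
 *   mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * In MBX_NOWAIT mode the mailbox must not be freed on MBX_BUSY or
 * MBX_SUCCESS, since the SLI layer still owns it until completion.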
7996 **/ 7997 static int 7998 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7999 uint32_t flag) 8000 { 8001 MAILBOX_t *mbx; 8002 struct lpfc_sli *psli = &phba->sli; 8003 uint32_t status, evtctr; 8004 uint32_t ha_copy, hc_copy; 8005 int i; 8006 unsigned long timeout; 8007 unsigned long drvr_flag = 0; 8008 uint32_t word0, ldata; 8009 void __iomem *to_slim; 8010 int processing_queue = 0; 8011 8012 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8013 if (!pmbox) { 8014 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8015 /* processing mbox queue from intr_handler */ 8016 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8017 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8018 return MBX_SUCCESS; 8019 } 8020 processing_queue = 1; 8021 pmbox = lpfc_mbox_get(phba); 8022 if (!pmbox) { 8023 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8024 return MBX_SUCCESS; 8025 } 8026 } 8027 8028 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 8029 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 8030 if(!pmbox->vport) { 8031 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8032 lpfc_printf_log(phba, KERN_ERR, 8033 LOG_MBOX | LOG_VPORT, 8034 "1806 Mbox x%x failed. No vport\n", 8035 pmbox->u.mb.mbxCommand); 8036 dump_stack(); 8037 goto out_not_finished; 8038 } 8039 } 8040 8041 /* If the PCI channel is in offline state, do not post mbox. */ 8042 if (unlikely(pci_channel_offline(phba->pcidev))) { 8043 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8044 goto out_not_finished; 8045 } 8046 8047 /* If HBA has a deferred error attention, fail the iocb. */ 8048 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 8049 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8050 goto out_not_finished; 8051 } 8052 8053 psli = &phba->sli; 8054 8055 mbx = &pmbox->u.mb; 8056 status = MBX_SUCCESS; 8057 8058 if (phba->link_state == LPFC_HBA_ERROR) { 8059 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8060 8061 /* Mbox command <mbxCommand> cannot issue */ 8062 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8063 "(%d):0311 Mailbox command x%x cannot " 8064 "issue Data: x%x x%x\n", 8065 pmbox->vport ? pmbox->vport->vpi : 0, 8066 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8067 goto out_not_finished; 8068 } 8069 8070 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 8071 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 8072 !(hc_copy & HC_MBINT_ENA)) { 8073 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8074 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8075 "(%d):2528 Mailbox command x%x cannot " 8076 "issue Data: x%x x%x\n", 8077 pmbox->vport ? pmbox->vport->vpi : 0, 8078 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 8079 goto out_not_finished; 8080 } 8081 } 8082 8083 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8084 /* Polling for a mbox command when another one is already active 8085 * is not allowed in SLI. Also, the driver must have established 8086 * SLI2 mode to queue and process multiple mbox commands. 8087 */ 8088 8089 if (flag & MBX_POLL) { 8090 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8091 8092 /* Mbox command <mbxCommand> cannot issue */ 8093 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8094 "(%d):2529 Mailbox command x%x " 8095 "cannot issue Data: x%x x%x\n", 8096 pmbox->vport ? 
pmbox->vport->vpi : 0, 8097 pmbox->u.mb.mbxCommand, 8098 psli->sli_flag, flag); 8099 goto out_not_finished; 8100 } 8101 8102 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8103 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8104 /* Mbox command <mbxCommand> cannot issue */ 8105 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8106 "(%d):2530 Mailbox command x%x " 8107 "cannot issue Data: x%x x%x\n", 8108 pmbox->vport ? pmbox->vport->vpi : 0, 8109 pmbox->u.mb.mbxCommand, 8110 psli->sli_flag, flag); 8111 goto out_not_finished; 8112 } 8113 8114 /* Another mailbox command is still being processed, queue this 8115 * command to be processed later. 8116 */ 8117 lpfc_mbox_put(phba, pmbox); 8118 8119 /* Mbox cmd issue - BUSY */ 8120 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8121 "(%d):0308 Mbox cmd issue - BUSY Data: " 8122 "x%x x%x x%x x%x\n", 8123 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8124 mbx->mbxCommand, 8125 phba->pport ? phba->pport->port_state : 0xff, 8126 psli->sli_flag, flag); 8127 8128 psli->slistat.mbox_busy++; 8129 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8130 8131 if (pmbox->vport) { 8132 lpfc_debugfs_disc_trc(pmbox->vport, 8133 LPFC_DISC_TRC_MBOX_VPORT, 8134 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8135 (uint32_t)mbx->mbxCommand, 8136 mbx->un.varWords[0], mbx->un.varWords[1]); 8137 } 8138 else { 8139 lpfc_debugfs_disc_trc(phba->pport, 8140 LPFC_DISC_TRC_MBOX, 8141 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8142 (uint32_t)mbx->mbxCommand, 8143 mbx->un.varWords[0], mbx->un.varWords[1]); 8144 } 8145 8146 return MBX_BUSY; 8147 } 8148 8149 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8150 8151 /* If we are not polling, we MUST be in SLI2 mode */ 8152 if (flag != MBX_POLL) { 8153 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8154 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8155 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8156 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8157 /* Mbox command <mbxCommand> cannot issue */ 8158 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8159 "(%d):2531 Mailbox command x%x " 8160 "cannot issue Data: x%x x%x\n", 8161 pmbox->vport ? pmbox->vport->vpi : 0, 8162 pmbox->u.mb.mbxCommand, 8163 psli->sli_flag, flag); 8164 goto out_not_finished; 8165 } 8166 /* timeout active mbox command */ 8167 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8168 1000); 8169 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8170 } 8171 8172 /* Mailbox cmd <cmd> issue */ 8173 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8174 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8175 "x%x\n", 8176 pmbox->vport ? pmbox->vport->vpi : 0, 8177 mbx->mbxCommand, 8178 phba->pport ? phba->pport->port_state : 0xff, 8179 psli->sli_flag, flag); 8180 8181 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8182 if (pmbox->vport) { 8183 lpfc_debugfs_disc_trc(pmbox->vport, 8184 LPFC_DISC_TRC_MBOX_VPORT, 8185 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8186 (uint32_t)mbx->mbxCommand, 8187 mbx->un.varWords[0], mbx->un.varWords[1]); 8188 } 8189 else { 8190 lpfc_debugfs_disc_trc(phba->pport, 8191 LPFC_DISC_TRC_MBOX, 8192 "MBOX Send: cmd:x%x mb:x%x x%x", 8193 (uint32_t)mbx->mbxCommand, 8194 mbx->un.varWords[0], mbx->un.varWords[1]); 8195 } 8196 } 8197 8198 psli->slistat.mbox_cmd++; 8199 evtctr = psli->slistat.mbox_event; 8200 8201 /* next set own bit for the adapter and copy over command word */ 8202 mbx->mbxOwner = OWN_CHIP; 8203 8204 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8205 /* Populate mbox extension offset word. 
*/ 8206 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8207 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8208 = (uint8_t *)phba->mbox_ext 8209 - (uint8_t *)phba->mbox; 8210 } 8211 8212 /* Copy the mailbox extension data */ 8213 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) { 8214 lpfc_sli_pcimem_bcopy(pmbox->ctx_buf, 8215 (uint8_t *)phba->mbox_ext, 8216 pmbox->in_ext_byte_len); 8217 } 8218 /* Copy command data to host SLIM area */ 8219 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8220 } else { 8221 /* Populate mbox extension offset word. */ 8222 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8223 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8224 = MAILBOX_HBA_EXT_OFFSET; 8225 8226 /* Copy the mailbox extension data */ 8227 if (pmbox->in_ext_byte_len && pmbox->ctx_buf) 8228 lpfc_memcpy_to_slim(phba->MBslimaddr + 8229 MAILBOX_HBA_EXT_OFFSET, 8230 pmbox->ctx_buf, pmbox->in_ext_byte_len); 8231 8232 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8233 /* copy command data into host mbox for cmpl */ 8234 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8235 MAILBOX_CMD_SIZE); 8236 8237 /* First copy mbox command data to HBA SLIM, skip past first 8238 word */ 8239 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8240 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8241 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8242 8243 /* Next copy over first word, with mbxOwner set */ 8244 ldata = *((uint32_t *)mbx); 8245 to_slim = phba->MBslimaddr; 8246 writel(ldata, to_slim); 8247 readl(to_slim); /* flush */ 8248 8249 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8250 /* switch over to host mailbox */ 8251 psli->sli_flag |= LPFC_SLI_ACTIVE; 8252 } 8253 8254 wmb(); 8255 8256 switch (flag) { 8257 case MBX_NOWAIT: 8258 /* Set up reference to mailbox command */ 8259 psli->mbox_active = pmbox; 8260 /* Interrupt board to do it */ 8261 writel(CA_MBATT, phba->CAregaddr); 8262 readl(phba->CAregaddr); /* flush */ 8263 /* Don't wait for it to finish, just return */ 8264 break; 8265 8266 case MBX_POLL: 8267 /* Set up null reference to mailbox command */ 8268 psli->mbox_active = NULL; 8269 /* Interrupt board to do it */ 8270 writel(CA_MBATT, phba->CAregaddr); 8271 readl(phba->CAregaddr); /* flush */ 8272 8273 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8274 /* First read mbox status word */ 8275 word0 = *((uint32_t *)phba->mbox); 8276 word0 = le32_to_cpu(word0); 8277 } else { 8278 /* First read mbox status word */ 8279 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8280 spin_unlock_irqrestore(&phba->hbalock, 8281 drvr_flag); 8282 goto out_not_finished; 8283 } 8284 } 8285 8286 /* Read the HBA Host Attention Register */ 8287 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8288 spin_unlock_irqrestore(&phba->hbalock, 8289 drvr_flag); 8290 goto out_not_finished; 8291 } 8292 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8293 1000) + jiffies; 8294 i = 0; 8295 /* Wait for command to complete */ 8296 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8297 (!(ha_copy & HA_MBATT) && 8298 (phba->link_state > LPFC_WARM_START))) { 8299 if (time_after(jiffies, timeout)) { 8300 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8301 spin_unlock_irqrestore(&phba->hbalock, 8302 drvr_flag); 8303 goto out_not_finished; 8304 } 8305 8306 /* Check if we took a mbox interrupt while we were 8307 polling */ 8308 if (((word0 & OWN_CHIP) != OWN_CHIP) 8309 && (evtctr != psli->slistat.mbox_event)) 8310 break; 8311 8312 if (i++ > 10) { 8313 spin_unlock_irqrestore(&phba->hbalock, 8314 drvr_flag); 8315 msleep(1); 8316 spin_lock_irqsave(&phba->hbalock, 
drvr_flag); 8317 } 8318 8319 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8320 /* First copy command data */ 8321 word0 = *((uint32_t *)phba->mbox); 8322 word0 = le32_to_cpu(word0); 8323 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8324 MAILBOX_t *slimmb; 8325 uint32_t slimword0; 8326 /* Check real SLIM for any errors */ 8327 slimword0 = readl(phba->MBslimaddr); 8328 slimmb = (MAILBOX_t *) & slimword0; 8329 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8330 && slimmb->mbxStatus) { 8331 psli->sli_flag &= 8332 ~LPFC_SLI_ACTIVE; 8333 word0 = slimword0; 8334 } 8335 } 8336 } else { 8337 /* First copy command data */ 8338 word0 = readl(phba->MBslimaddr); 8339 } 8340 /* Read the HBA Host Attention Register */ 8341 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8342 spin_unlock_irqrestore(&phba->hbalock, 8343 drvr_flag); 8344 goto out_not_finished; 8345 } 8346 } 8347 8348 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8349 /* copy results back to user */ 8350 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8351 MAILBOX_CMD_SIZE); 8352 /* Copy the mailbox extension data */ 8353 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8354 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8355 pmbox->ctx_buf, 8356 pmbox->out_ext_byte_len); 8357 } 8358 } else { 8359 /* First copy command data */ 8360 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8361 MAILBOX_CMD_SIZE); 8362 /* Copy the mailbox extension data */ 8363 if (pmbox->out_ext_byte_len && pmbox->ctx_buf) { 8364 lpfc_memcpy_from_slim( 8365 pmbox->ctx_buf, 8366 phba->MBslimaddr + 8367 MAILBOX_HBA_EXT_OFFSET, 8368 pmbox->out_ext_byte_len); 8369 } 8370 } 8371 8372 writel(HA_MBATT, phba->HAregaddr); 8373 readl(phba->HAregaddr); /* flush */ 8374 8375 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8376 status = mbx->mbxStatus; 8377 } 8378 8379 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8380 return status; 8381 8382 out_not_finished: 8383 if (processing_queue) { 8384 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8385 lpfc_mbox_cmpl_put(phba, pmbox); 8386 } 8387 return MBX_NOT_FINISHED; 8388 } 8389 8390 /** 8391 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8392 * @phba: Pointer to HBA context object. 8393 * 8394 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8395 * the driver internal pending mailbox queue. It will then try to wait out the 8396 * possible outstanding mailbox command before return. 8397 * 8398 * Returns: 8399 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8400 * the outstanding mailbox command timed out. 8401 **/ 8402 static int 8403 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8404 { 8405 struct lpfc_sli *psli = &phba->sli; 8406 int rc = 0; 8407 unsigned long timeout = 0; 8408 8409 /* Mark the asynchronous mailbox command posting as blocked */ 8410 spin_lock_irq(&phba->hbalock); 8411 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8412 /* Determine how long we might wait for the active mailbox 8413 * command to be gracefully completed by firmware. 
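 * The wait is bounded by lpfc_mbox_tmo_val() for the active command,
 * converted from seconds to jiffies below.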
8414 */
8415 if (phba->sli.mbox_active)
8416 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
8417 phba->sli.mbox_active) *
8418 1000) + jiffies;
8419 spin_unlock_irq(&phba->hbalock);
8420
8421 /* Make sure the mailbox is really active */
8422 if (timeout)
8423 lpfc_sli4_process_missed_mbox_completions(phba);
8424
8425 /* Wait for the outstanding mailbox command to complete */
8426 while (phba->sli.mbox_active) {
8427 /* Check active mailbox complete status every 2ms */
8428 msleep(2);
8429 if (time_after(jiffies, timeout)) {
8430 /* Timeout, mark the outstanding cmd as not complete */
8431 rc = 1;
8432 break;
8433 }
8434 }
8435
8436 /* Cannot cleanly block async mailbox commands, fail the request */
8437 if (rc) {
8438 spin_lock_irq(&phba->hbalock);
8439 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8440 spin_unlock_irq(&phba->hbalock);
8441 }
8442 return rc;
8443 }
8444
8445 /**
8446 * lpfc_sli4_async_mbox_unblock - Unblock posting of SLI4 async mailbox commands
8447 * @phba: Pointer to HBA context object.
8448 *
8449 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
8450 * commands from the driver internal pending mailbox queue. It makes sure
8451 * that there is no outstanding mailbox command before resuming posting
8452 * asynchronous mailbox commands. If, for any reason, there is an outstanding
8453 * mailbox command, it will try to wait it out before resuming asynchronous
8454 * mailbox command posting.
8455 **/
8456 static void
8457 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
8458 {
8459 struct lpfc_sli *psli = &phba->sli;
8460
8461 spin_lock_irq(&phba->hbalock);
8462 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8463 /* Asynchronous mailbox posting is not blocked, do nothing */
8464 spin_unlock_irq(&phba->hbalock);
8465 return;
8466 }
8467
8468 /* The outstanding synchronous mailbox command is guaranteed to be done,
8469 * either successfully or by timeout; after a timeout the outstanding
8470 * mailbox command is always removed, so just unblock posting of async
8471 * mailbox commands and resume.
8472 */
8473 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8474 spin_unlock_irq(&phba->hbalock);
8475
8476 /* wake up worker thread to post asynchronous mailbox commands */
8477 lpfc_worker_wake_up(phba);
8478 }
8479
8480 /**
8481 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
8482 * @phba: Pointer to HBA context object.
8483 * @mboxq: Pointer to mailbox object.
8484 *
8485 * The function waits for the bootstrap mailbox register ready bit from
8486 * the port for twice the regular mailbox command timeout value.
8487 * Returns:
8488 * 0 - no timeout on waiting for bootstrap mailbox register ready.
8489 * MBXERR_ERROR - wait for bootstrap mailbox register timed out.
8490 **/
8491 static int
8492 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8493 {
8494 uint32_t db_ready;
8495 unsigned long timeout;
8496 struct lpfc_register bmbx_reg;
8497
8498 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
8499 * 1000) + jiffies;
8500
8501 do {
8502 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
8503 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
8504 if (!db_ready)
8505 mdelay(2);
8506
8507 if (time_after(jiffies, timeout))
8508 return MBXERR_ERROR;
8509 } while (!db_ready);
8510
8511 return 0;
8512 }
8513
8514 /**
8515 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
8516 * @phba: Pointer to HBA context object.
8517 * @mboxq: Pointer to mailbox object.
8518 *
8519 * The function posts a mailbox to the port.
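 * Only one command can use the bootstrap (BMBX) region at a time and no
 * queueing is provided.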
8520 * The mailbox is expected to be completely filled in and ready for the port to operate on it.
8521 * This routine executes a synchronous completion operation on the
8522 * mailbox by polling for its completion.
8523 *
8524 * The caller must not be holding any locks when calling this routine.
8525 *
8526 * Returns:
8527 * MBX_SUCCESS - mailbox posted successfully
8528 * Any of the MBX error values.
8529 **/
8530 static int
8531 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8532 {
8533 int rc = MBX_SUCCESS;
8534 unsigned long iflag;
8535 uint32_t mcqe_status;
8536 uint32_t mbx_cmnd;
8537 struct lpfc_sli *psli = &phba->sli;
8538 struct lpfc_mqe *mb = &mboxq->u.mqe;
8539 struct lpfc_bmbx_create *mbox_rgn;
8540 struct dma_address *dma_address;
8541
8542 /*
8543 * Only one mailbox can be active to the bootstrap mailbox region
8544 * at a time and there is no queueing provided.
8545 */
8546 spin_lock_irqsave(&phba->hbalock, iflag);
8547 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8548 spin_unlock_irqrestore(&phba->hbalock, iflag);
8549 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8550 "(%d):2532 Mailbox command x%x (x%x/x%x) "
8551 "cannot issue Data: x%x x%x\n",
8552 mboxq->vport ? mboxq->vport->vpi : 0,
8553 mboxq->u.mb.mbxCommand,
8554 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8555 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8556 psli->sli_flag, MBX_POLL);
8557 return MBXERR_ERROR;
8558 }
8559 /* The server grabs the token and owns it until release */
8560 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8561 phba->sli.mbox_active = mboxq;
8562 spin_unlock_irqrestore(&phba->hbalock, iflag);
8563
8564 /* wait for bootstrap mbox register readiness */
8565 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8566 if (rc)
8567 goto exit;
8568 /*
8569 * Initialize the bootstrap memory region to avoid stale data areas
8570 * in the mailbox post. Then copy the caller's mailbox contents to
8571 * the bmbx mailbox region.
8572 */
8573 mbx_cmnd = bf_get(lpfc_mqe_command, mb);
8574 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
8575 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
8576 sizeof(struct lpfc_mqe));
8577
8578 /* Post the high mailbox dma address to the port and wait for ready. */
8579 dma_address = &phba->sli4_hba.bmbx.dma_address;
8580 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
8581
8582 /* wait for bootstrap mbox register for hi-address write done */
8583 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8584 if (rc)
8585 goto exit;
8586
8587 /* Post the low mailbox dma address to the port. */
8588 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
8589
8590 /* wait for bootstrap mbox register for low address write done */
8591 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
8592 if (rc)
8593 goto exit;
8594
8595 /*
8596 * Read the CQ to ensure the mailbox has completed.
8597 * If so, update the mailbox status so that the upper layers
8598 * can complete the request normally.
8599 */
8600 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
8601 sizeof(struct lpfc_mqe));
8602 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
8603 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
8604 sizeof(struct lpfc_mcqe));
8605 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
8606 /*
8607 * When the CQE status indicates a failure and the mailbox status
8608 * indicates success then copy the CQE status into the mailbox status
8609 * (and prefix it with x4000).
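 * The prefix is applied by OR'ing LPFC_MBX_ERROR_RANGE into the status,
 * as done below.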
8610 */ 8611 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8612 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8613 bf_set(lpfc_mqe_status, mb, 8614 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8615 rc = MBXERR_ERROR; 8616 } else 8617 lpfc_sli4_swap_str(phba, mboxq); 8618 8619 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8620 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8621 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8622 " x%x x%x CQ: x%x x%x x%x x%x\n", 8623 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8624 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8625 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8626 bf_get(lpfc_mqe_status, mb), 8627 mb->un.mb_words[0], mb->un.mb_words[1], 8628 mb->un.mb_words[2], mb->un.mb_words[3], 8629 mb->un.mb_words[4], mb->un.mb_words[5], 8630 mb->un.mb_words[6], mb->un.mb_words[7], 8631 mb->un.mb_words[8], mb->un.mb_words[9], 8632 mb->un.mb_words[10], mb->un.mb_words[11], 8633 mb->un.mb_words[12], mboxq->mcqe.word0, 8634 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8635 mboxq->mcqe.trailer); 8636 exit: 8637 /* We are holding the token, no needed for lock when release */ 8638 spin_lock_irqsave(&phba->hbalock, iflag); 8639 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8640 phba->sli.mbox_active = NULL; 8641 spin_unlock_irqrestore(&phba->hbalock, iflag); 8642 return rc; 8643 } 8644 8645 /** 8646 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8647 * @phba: Pointer to HBA context object. 8648 * @pmbox: Pointer to mailbox object. 8649 * @flag: Flag indicating how the mailbox need to be processed. 8650 * 8651 * This function is called by discovery code and HBA management code to submit 8652 * a mailbox command to firmware with SLI-4 interface spec. 8653 * 8654 * Return codes the caller owns the mailbox command after the return of the 8655 * function. 8656 **/ 8657 static int 8658 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8659 uint32_t flag) 8660 { 8661 struct lpfc_sli *psli = &phba->sli; 8662 unsigned long iflags; 8663 int rc; 8664 8665 /* dump from issue mailbox command if setup */ 8666 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8667 8668 rc = lpfc_mbox_dev_check(phba); 8669 if (unlikely(rc)) { 8670 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8671 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8672 "cannot issue Data: x%x x%x\n", 8673 mboxq->vport ? mboxq->vport->vpi : 0, 8674 mboxq->u.mb.mbxCommand, 8675 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8676 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8677 psli->sli_flag, flag); 8678 goto out_not_finished; 8679 } 8680 8681 /* Detect polling mode and jump to a handler */ 8682 if (!phba->sli4_hba.intr_enable) { 8683 if (flag == MBX_POLL) 8684 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8685 else 8686 rc = -EIO; 8687 if (rc != MBX_SUCCESS) 8688 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8689 "(%d):2541 Mailbox command x%x " 8690 "(x%x/x%x) failure: " 8691 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8692 "Data: x%x x%x\n,", 8693 mboxq->vport ? 
mboxq->vport->vpi : 0, 8694 mboxq->u.mb.mbxCommand, 8695 lpfc_sli_config_mbox_subsys_get(phba, 8696 mboxq), 8697 lpfc_sli_config_mbox_opcode_get(phba, 8698 mboxq), 8699 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8700 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8701 bf_get(lpfc_mcqe_ext_status, 8702 &mboxq->mcqe), 8703 psli->sli_flag, flag); 8704 return rc; 8705 } else if (flag == MBX_POLL) { 8706 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8707 "(%d):2542 Try to issue mailbox command " 8708 "x%x (x%x/x%x) synchronously ahead of async " 8709 "mailbox command queue: x%x x%x\n", 8710 mboxq->vport ? mboxq->vport->vpi : 0, 8711 mboxq->u.mb.mbxCommand, 8712 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8713 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8714 psli->sli_flag, flag); 8715 /* Try to block the asynchronous mailbox posting */ 8716 rc = lpfc_sli4_async_mbox_block(phba); 8717 if (!rc) { 8718 /* Successfully blocked, now issue sync mbox cmd */ 8719 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8720 if (rc != MBX_SUCCESS) 8721 lpfc_printf_log(phba, KERN_WARNING, 8722 LOG_MBOX | LOG_SLI, 8723 "(%d):2597 Sync Mailbox command " 8724 "x%x (x%x/x%x) failure: " 8725 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8726 "Data: x%x x%x\n,", 8727 mboxq->vport ? mboxq->vport->vpi : 0, 8728 mboxq->u.mb.mbxCommand, 8729 lpfc_sli_config_mbox_subsys_get(phba, 8730 mboxq), 8731 lpfc_sli_config_mbox_opcode_get(phba, 8732 mboxq), 8733 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8734 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8735 bf_get(lpfc_mcqe_ext_status, 8736 &mboxq->mcqe), 8737 psli->sli_flag, flag); 8738 /* Unblock the async mailbox posting afterward */ 8739 lpfc_sli4_async_mbox_unblock(phba); 8740 } 8741 return rc; 8742 } 8743 8744 /* Now, interrupt mode asynchrous mailbox command */ 8745 rc = lpfc_mbox_cmd_check(phba, mboxq); 8746 if (rc) { 8747 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8748 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8749 "cannot issue Data: x%x x%x\n", 8750 mboxq->vport ? mboxq->vport->vpi : 0, 8751 mboxq->u.mb.mbxCommand, 8752 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8753 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8754 psli->sli_flag, flag); 8755 goto out_not_finished; 8756 } 8757 8758 /* Put the mailbox command to the driver internal FIFO */ 8759 psli->slistat.mbox_busy++; 8760 spin_lock_irqsave(&phba->hbalock, iflags); 8761 lpfc_mbox_put(phba, mboxq); 8762 spin_unlock_irqrestore(&phba->hbalock, iflags); 8763 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8764 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8765 "x%x (x%x/x%x) x%x x%x x%x\n", 8766 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8767 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8768 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8769 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8770 phba->pport->port_state, 8771 psli->sli_flag, MBX_NOWAIT); 8772 /* Wake up worker thread to transport mailbox command from head */ 8773 lpfc_worker_wake_up(phba); 8774 8775 return MBX_BUSY; 8776 8777 out_not_finished: 8778 return MBX_NOT_FINISHED; 8779 } 8780 8781 /** 8782 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8783 * @phba: Pointer to HBA context object. 8784 * 8785 * This function is called by worker thread to send a mailbox command to 8786 * SLI4 HBA firmware. 
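 * It dequeues one command from the driver internal mailbox queue, takes
 * the mailbox service token (LPFC_SLI_MBOX_ACTIVE) and posts the command
 * to the SLI4 mailbox queue (mbx_wq).
 *
 * Return: MBX_SUCCESS if a command was posted (or none was pending),
 * MBX_NOT_FINISHED otherwise.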
8787 * 8788 **/ 8789 int 8790 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8791 { 8792 struct lpfc_sli *psli = &phba->sli; 8793 LPFC_MBOXQ_t *mboxq; 8794 int rc = MBX_SUCCESS; 8795 unsigned long iflags; 8796 struct lpfc_mqe *mqe; 8797 uint32_t mbx_cmnd; 8798 8799 /* Check interrupt mode before post async mailbox command */ 8800 if (unlikely(!phba->sli4_hba.intr_enable)) 8801 return MBX_NOT_FINISHED; 8802 8803 /* Check for mailbox command service token */ 8804 spin_lock_irqsave(&phba->hbalock, iflags); 8805 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8806 spin_unlock_irqrestore(&phba->hbalock, iflags); 8807 return MBX_NOT_FINISHED; 8808 } 8809 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8810 spin_unlock_irqrestore(&phba->hbalock, iflags); 8811 return MBX_NOT_FINISHED; 8812 } 8813 if (unlikely(phba->sli.mbox_active)) { 8814 spin_unlock_irqrestore(&phba->hbalock, iflags); 8815 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8816 "0384 There is pending active mailbox cmd\n"); 8817 return MBX_NOT_FINISHED; 8818 } 8819 /* Take the mailbox command service token */ 8820 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8821 8822 /* Get the next mailbox command from head of queue */ 8823 mboxq = lpfc_mbox_get(phba); 8824 8825 /* If no more mailbox command waiting for post, we're done */ 8826 if (!mboxq) { 8827 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8828 spin_unlock_irqrestore(&phba->hbalock, iflags); 8829 return MBX_SUCCESS; 8830 } 8831 phba->sli.mbox_active = mboxq; 8832 spin_unlock_irqrestore(&phba->hbalock, iflags); 8833 8834 /* Check device readiness for posting mailbox command */ 8835 rc = lpfc_mbox_dev_check(phba); 8836 if (unlikely(rc)) 8837 /* Driver clean routine will clean up pending mailbox */ 8838 goto out_not_finished; 8839 8840 /* Prepare the mbox command to be posted */ 8841 mqe = &mboxq->u.mqe; 8842 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8843 8844 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8845 mod_timer(&psli->mbox_tmo, (jiffies + 8846 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8847 8848 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8849 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8850 "x%x x%x\n", 8851 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8852 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8853 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8854 phba->pport->port_state, psli->sli_flag); 8855 8856 if (mbx_cmnd != MBX_HEARTBEAT) { 8857 if (mboxq->vport) { 8858 lpfc_debugfs_disc_trc(mboxq->vport, 8859 LPFC_DISC_TRC_MBOX_VPORT, 8860 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8861 mbx_cmnd, mqe->un.mb_words[0], 8862 mqe->un.mb_words[1]); 8863 } else { 8864 lpfc_debugfs_disc_trc(phba->pport, 8865 LPFC_DISC_TRC_MBOX, 8866 "MBOX Send: cmd:x%x mb:x%x x%x", 8867 mbx_cmnd, mqe->un.mb_words[0], 8868 mqe->un.mb_words[1]); 8869 } 8870 } 8871 psli->slistat.mbox_cmd++; 8872 8873 /* Post the mailbox command to the port */ 8874 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8875 if (rc != MBX_SUCCESS) { 8876 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8877 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8878 "cannot issue Data: x%x x%x\n", 8879 mboxq->vport ? 
mboxq->vport->vpi : 0, 8880 mboxq->u.mb.mbxCommand, 8881 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8882 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8883 psli->sli_flag, MBX_NOWAIT); 8884 goto out_not_finished; 8885 } 8886 8887 return rc; 8888 8889 out_not_finished: 8890 spin_lock_irqsave(&phba->hbalock, iflags); 8891 if (phba->sli.mbox_active) { 8892 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8893 __lpfc_mbox_cmpl_put(phba, mboxq); 8894 /* Release the token */ 8895 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8896 phba->sli.mbox_active = NULL; 8897 } 8898 spin_unlock_irqrestore(&phba->hbalock, iflags); 8899 8900 return MBX_NOT_FINISHED; 8901 } 8902 8903 /** 8904 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8905 * @phba: Pointer to HBA context object. 8906 * @pmbox: Pointer to mailbox object. 8907 * @flag: Flag indicating how the mailbox need to be processed. 8908 * 8909 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8910 * the API jump table function pointer from the lpfc_hba struct. 8911 * 8912 * Return codes the caller owns the mailbox command after the return of the 8913 * function. 8914 **/ 8915 int 8916 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8917 { 8918 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8919 } 8920 8921 /** 8922 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8923 * @phba: The hba struct for which this call is being executed. 8924 * @dev_grp: The HBA PCI-Device group number. 8925 * 8926 * This routine sets up the mbox interface API function jump table in @phba 8927 * struct. 8928 * Returns: 0 - success, -ENODEV - failure. 8929 **/ 8930 int 8931 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8932 { 8933 8934 switch (dev_grp) { 8935 case LPFC_PCI_DEV_LP: 8936 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8937 phba->lpfc_sli_handle_slow_ring_event = 8938 lpfc_sli_handle_slow_ring_event_s3; 8939 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8940 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8941 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8942 break; 8943 case LPFC_PCI_DEV_OC: 8944 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8945 phba->lpfc_sli_handle_slow_ring_event = 8946 lpfc_sli_handle_slow_ring_event_s4; 8947 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8948 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8949 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8950 break; 8951 default: 8952 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8953 "1420 Invalid HBA PCI-device group: 0x%x\n", 8954 dev_grp); 8955 return -ENODEV; 8956 break; 8957 } 8958 return 0; 8959 } 8960 8961 /** 8962 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8963 * @phba: Pointer to HBA context object. 8964 * @pring: Pointer to driver SLI ring object. 8965 * @piocb: Pointer to address of newly added command iocb. 8966 * 8967 * This function is called with hbalock held to add a command 8968 * iocb to the txq when SLI layer cannot submit the command iocb 8969 * to the ring. 8970 **/ 8971 void 8972 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8973 struct lpfc_iocbq *piocb) 8974 { 8975 lockdep_assert_held(&phba->hbalock); 8976 /* Insert the caller's iocb in the txq tail for later processing. */ 8977 list_add_tail(&piocb->list, &pring->txq); 8978 } 8979 8980 /** 8981 * lpfc_sli_next_iocb - Get the next iocb in the txq 8982 * @phba: Pointer to HBA context object. 
8983 * @pring: Pointer to driver SLI ring object. 8984 * @piocb: Pointer to address of newly added command iocb. 8985 * 8986 * This function is called with hbalock held before a new 8987 * iocb is submitted to the firmware. This function checks 8988 * txq to flush the iocbs in txq to Firmware before 8989 * submitting new iocbs to the Firmware. 8990 * If there are iocbs in the txq which need to be submitted 8991 * to firmware, lpfc_sli_next_iocb returns the first element 8992 * of the txq after dequeuing it from txq. 8993 * If there is no iocb in the txq then the function will return 8994 * *piocb and *piocb is set to NULL. Caller needs to check 8995 * *piocb to find if there are more commands in the txq. 8996 **/ 8997 static struct lpfc_iocbq * 8998 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8999 struct lpfc_iocbq **piocb) 9000 { 9001 struct lpfc_iocbq * nextiocb; 9002 9003 lockdep_assert_held(&phba->hbalock); 9004 9005 nextiocb = lpfc_sli_ringtx_get(phba, pring); 9006 if (!nextiocb) { 9007 nextiocb = *piocb; 9008 *piocb = NULL; 9009 } 9010 9011 return nextiocb; 9012 } 9013 9014 /** 9015 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 9016 * @phba: Pointer to HBA context object. 9017 * @ring_number: SLI ring number to issue iocb on. 9018 * @piocb: Pointer to command iocb. 9019 * @flag: Flag indicating if this command can be put into txq. 9020 * 9021 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 9022 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 9023 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 9024 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 9025 * this function allows only iocbs for posting buffers. This function finds 9026 * next available slot in the command ring and posts the command to the 9027 * available slot and writes the port attention register to request HBA start 9028 * processing new iocb. If there is no slot available in the ring and 9029 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 9030 * the function returns IOCB_BUSY. 9031 * 9032 * This function is called with hbalock held. The function will return success 9033 * after it successfully submit the iocb to firmware or after adding to the 9034 * txq. 9035 **/ 9036 static int 9037 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 9038 struct lpfc_iocbq *piocb, uint32_t flag) 9039 { 9040 struct lpfc_iocbq *nextiocb; 9041 IOCB_t *iocb; 9042 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 9043 9044 lockdep_assert_held(&phba->hbalock); 9045 9046 if (piocb->iocb_cmpl && (!piocb->vport) && 9047 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 9048 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 9049 lpfc_printf_log(phba, KERN_ERR, 9050 LOG_SLI | LOG_VPORT, 9051 "1807 IOCB x%x failed. No vport\n", 9052 piocb->iocb.ulpCommand); 9053 dump_stack(); 9054 return IOCB_ERROR; 9055 } 9056 9057 9058 /* If the PCI channel is in offline state, do not post iocbs. */ 9059 if (unlikely(pci_channel_offline(phba->pcidev))) 9060 return IOCB_ERROR; 9061 9062 /* If HBA has a deferred error attention, fail the iocb. 
*/ 9063 if (unlikely(phba->hba_flag & DEFER_ERATT)) 9064 return IOCB_ERROR; 9065 9066 /* 9067 * We should never get an IOCB if we are in a < LINK_DOWN state 9068 */ 9069 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 9070 return IOCB_ERROR; 9071 9072 /* 9073 * Check to see if we are blocking IOCB processing because of a 9074 * outstanding event. 9075 */ 9076 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 9077 goto iocb_busy; 9078 9079 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 9080 /* 9081 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 9082 * can be issued if the link is not up. 9083 */ 9084 switch (piocb->iocb.ulpCommand) { 9085 case CMD_GEN_REQUEST64_CR: 9086 case CMD_GEN_REQUEST64_CX: 9087 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 9088 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 9089 FC_RCTL_DD_UNSOL_CMD) || 9090 (piocb->iocb.un.genreq64.w5.hcsw.Type != 9091 MENLO_TRANSPORT_TYPE)) 9092 9093 goto iocb_busy; 9094 break; 9095 case CMD_QUE_RING_BUF_CN: 9096 case CMD_QUE_RING_BUF64_CN: 9097 /* 9098 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 9099 * completion, iocb_cmpl MUST be 0. 9100 */ 9101 if (piocb->iocb_cmpl) 9102 piocb->iocb_cmpl = NULL; 9103 /*FALLTHROUGH*/ 9104 case CMD_CREATE_XRI_CR: 9105 case CMD_CLOSE_XRI_CN: 9106 case CMD_CLOSE_XRI_CX: 9107 break; 9108 default: 9109 goto iocb_busy; 9110 } 9111 9112 /* 9113 * For FCP commands, we must be in a state where we can process link 9114 * attention events. 9115 */ 9116 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 9117 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 9118 goto iocb_busy; 9119 } 9120 9121 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 9122 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 9123 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 9124 9125 if (iocb) 9126 lpfc_sli_update_ring(phba, pring); 9127 else 9128 lpfc_sli_update_full_ring(phba, pring); 9129 9130 if (!piocb) 9131 return IOCB_SUCCESS; 9132 9133 goto out_busy; 9134 9135 iocb_busy: 9136 pring->stats.iocb_cmd_delay++; 9137 9138 out_busy: 9139 9140 if (!(flag & SLI_IOCB_RET_IOCB)) { 9141 __lpfc_sli_ringtx_put(phba, pring, piocb); 9142 return IOCB_SUCCESS; 9143 } 9144 9145 return IOCB_BUSY; 9146 } 9147 9148 /** 9149 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 9150 * @phba: Pointer to HBA context object. 9151 * @piocb: Pointer to command iocb. 9152 * @sglq: Pointer to the scatter gather queue object. 9153 * 9154 * This routine converts the bpl or bde that is in the IOCB 9155 * to a sgl list for the sli4 hardware. The physical address 9156 * of the bpl/bde is converted back to a virtual address. 9157 * If the IOCB contains a BPL then the list of BDE's is 9158 * converted to sli4_sge's. If the IOCB contains a single 9159 * BDE then it is converted to a single sli_sge. 9160 * The IOCB is still in cpu endianess so the contents of 9161 * the bpl can be used without byte swapping. 9162 * 9163 * Returns valid XRI = Success, NO_XRI = Failure. 
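*
* For illustration, a minimal caller sketch (it mirrors the way
* __lpfc_sli_issue_iocb_s4() later in this file invokes the routine once
* an ELS sglq has been bound to the iocb; not an additional code path):
*
*   piocb->sli4_lxritag = sglq->sli4_lxritag;
*   piocb->sli4_xritag = sglq->sli4_xritag;
*   if (lpfc_sli4_bpl2sgl(phba, piocb, sglq) == NO_XRI)
*           return IOCB_ERROR;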
9164 **/ 9165 static uint16_t 9166 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 9167 struct lpfc_sglq *sglq) 9168 { 9169 uint16_t xritag = NO_XRI; 9170 struct ulp_bde64 *bpl = NULL; 9171 struct ulp_bde64 bde; 9172 struct sli4_sge *sgl = NULL; 9173 struct lpfc_dmabuf *dmabuf; 9174 IOCB_t *icmd; 9175 int numBdes = 0; 9176 int i = 0; 9177 uint32_t offset = 0; /* accumulated offset in the sg request list */ 9178 int inbound = 0; /* number of sg reply entries inbound from firmware */ 9179 9180 if (!piocbq || !sglq) 9181 return xritag; 9182 9183 sgl = (struct sli4_sge *)sglq->sgl; 9184 icmd = &piocbq->iocb; 9185 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 9186 return sglq->sli4_xritag; 9187 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9188 numBdes = icmd->un.genreq64.bdl.bdeSize / 9189 sizeof(struct ulp_bde64); 9190 /* The addrHigh and addrLow fields within the IOCB 9191 * have not been byteswapped yet so there is no 9192 * need to swap them back. 9193 */ 9194 if (piocbq->context3) 9195 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 9196 else 9197 return xritag; 9198 9199 bpl = (struct ulp_bde64 *)dmabuf->virt; 9200 if (!bpl) 9201 return xritag; 9202 9203 for (i = 0; i < numBdes; i++) { 9204 /* Should already be byte swapped. */ 9205 sgl->addr_hi = bpl->addrHigh; 9206 sgl->addr_lo = bpl->addrLow; 9207 9208 sgl->word2 = le32_to_cpu(sgl->word2); 9209 if ((i+1) == numBdes) 9210 bf_set(lpfc_sli4_sge_last, sgl, 1); 9211 else 9212 bf_set(lpfc_sli4_sge_last, sgl, 0); 9213 /* swap the size field back to the cpu so we 9214 * can assign it to the sgl. 9215 */ 9216 bde.tus.w = le32_to_cpu(bpl->tus.w); 9217 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 9218 /* The offsets in the sgl need to be accumulated 9219 * separately for the request and reply lists. 9220 * The request is always first, the reply follows. 9221 */ 9222 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 9223 /* add up the reply sg entries */ 9224 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 9225 inbound++; 9226 /* first inbound? reset the offset */ 9227 if (inbound == 1) 9228 offset = 0; 9229 bf_set(lpfc_sli4_sge_offset, sgl, offset); 9230 bf_set(lpfc_sli4_sge_type, sgl, 9231 LPFC_SGE_TYPE_DATA); 9232 offset += bde.tus.f.bdeSize; 9233 } 9234 sgl->word2 = cpu_to_le32(sgl->word2); 9235 bpl++; 9236 sgl++; 9237 } 9238 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 9239 /* The addrHigh and addrLow fields of the BDE have not 9240 * been byteswapped yet so they need to be swapped 9241 * before putting them in the sgl. 9242 */ 9243 sgl->addr_hi = 9244 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 9245 sgl->addr_lo = 9246 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 9247 sgl->word2 = le32_to_cpu(sgl->word2); 9248 bf_set(lpfc_sli4_sge_last, sgl, 1); 9249 sgl->word2 = cpu_to_le32(sgl->word2); 9250 sgl->sge_len = 9251 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 9252 } 9253 return sglq->sli4_xritag; 9254 } 9255 9256 /** 9257 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 9258 * @phba: Pointer to HBA context object. 9259 * @piocb: Pointer to command iocb. 9260 * @wqe: Pointer to the work queue entry. 9261 * 9262 * This routine converts the iocb command to its Work Queue Entry 9263 * equivalent. The wqe pointer should not have any fields set when 9264 * this routine is called because it will memcpy over them. 9265 * This routine does not set the CQ_ID or the WQEC bits in the 9266 * wqe. 9267 * 9268 * Returns: 0 = Success, IOCB_ERROR = Failure. 
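*
* A minimal usage sketch, mirroring __lpfc_sli_issue_iocb_s4() later in
* this file (the on-stack wqe buffer name is illustrative):
*
*   union lpfc_wqe128 wqe;
*
*   if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
*           return IOCB_ERROR;
*   if (lpfc_sli4_wq_put(wq, &wqe))
*           return IOCB_ERROR;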
9269 **/ 9270 static int 9271 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9272 union lpfc_wqe128 *wqe) 9273 { 9274 uint32_t xmit_len = 0, total_len = 0; 9275 uint8_t ct = 0; 9276 uint32_t fip; 9277 uint32_t abort_tag; 9278 uint8_t command_type = ELS_COMMAND_NON_FIP; 9279 uint8_t cmnd; 9280 uint16_t xritag; 9281 uint16_t abrt_iotag; 9282 struct lpfc_iocbq *abrtiocbq; 9283 struct ulp_bde64 *bpl = NULL; 9284 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9285 int numBdes, i; 9286 struct ulp_bde64 bde; 9287 struct lpfc_nodelist *ndlp; 9288 uint32_t *pcmd; 9289 uint32_t if_type; 9290 9291 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9292 /* The fcp commands will set command type */ 9293 if (iocbq->iocb_flag & LPFC_IO_FCP) 9294 command_type = FCP_COMMAND; 9295 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9296 command_type = ELS_COMMAND_FIP; 9297 else 9298 command_type = ELS_COMMAND_NON_FIP; 9299 9300 if (phba->fcp_embed_io) 9301 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9302 /* Some of the fields are in the right position already */ 9303 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9304 /* The ct field has moved so reset */ 9305 wqe->generic.wqe_com.word7 = 0; 9306 wqe->generic.wqe_com.word10 = 0; 9307 9308 abort_tag = (uint32_t) iocbq->iotag; 9309 xritag = iocbq->sli4_xritag; 9310 /* words0-2 bpl convert bde */ 9311 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9312 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9313 sizeof(struct ulp_bde64); 9314 bpl = (struct ulp_bde64 *) 9315 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9316 if (!bpl) 9317 return IOCB_ERROR; 9318 9319 /* Should already be byte swapped. */ 9320 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9321 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9322 /* swap the size field back to the cpu so we 9323 * can assign it to the sgl. 
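* (tus is a union of the raw little-endian word w and its bdeSize/bdeFlags
* bit-fields, so the le32_to_cpu() below lets bdeSize be read in CPU byte
* order before the per-BDE lengths are totalled.)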
9324 */ 9325 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9326 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9327 total_len = 0; 9328 for (i = 0; i < numBdes; i++) { 9329 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9330 total_len += bde.tus.f.bdeSize; 9331 } 9332 } else 9333 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9334 9335 iocbq->iocb.ulpIoTag = iocbq->iotag; 9336 cmnd = iocbq->iocb.ulpCommand; 9337 9338 switch (iocbq->iocb.ulpCommand) { 9339 case CMD_ELS_REQUEST64_CR: 9340 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9341 ndlp = iocbq->context_un.ndlp; 9342 else 9343 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9344 if (!iocbq->iocb.ulpLe) { 9345 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9346 "2007 Only Limited Edition cmd Format" 9347 " supported 0x%x\n", 9348 iocbq->iocb.ulpCommand); 9349 return IOCB_ERROR; 9350 } 9351 9352 wqe->els_req.payload_len = xmit_len; 9353 /* Els_reguest64 has a TMO */ 9354 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9355 iocbq->iocb.ulpTimeout); 9356 /* Need a VF for word 4 set the vf bit*/ 9357 bf_set(els_req64_vf, &wqe->els_req, 0); 9358 /* And a VFID for word 12 */ 9359 bf_set(els_req64_vfid, &wqe->els_req, 0); 9360 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9361 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9362 iocbq->iocb.ulpContext); 9363 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9364 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9365 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9366 if (command_type == ELS_COMMAND_FIP) 9367 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9368 >> LPFC_FIP_ELS_ID_SHIFT); 9369 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9370 iocbq->context2)->virt); 9371 if_type = bf_get(lpfc_sli_intf_if_type, 9372 &phba->sli4_hba.sli_intf); 9373 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9374 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9375 *pcmd == ELS_CMD_SCR || 9376 *pcmd == ELS_CMD_RSCN_XMT || 9377 *pcmd == ELS_CMD_FDISC || 9378 *pcmd == ELS_CMD_LOGO || 9379 *pcmd == ELS_CMD_PLOGI)) { 9380 bf_set(els_req64_sp, &wqe->els_req, 1); 9381 bf_set(els_req64_sid, &wqe->els_req, 9382 iocbq->vport->fc_myDID); 9383 if ((*pcmd == ELS_CMD_FLOGI) && 9384 !(phba->fc_topology == 9385 LPFC_TOPOLOGY_LOOP)) 9386 bf_set(els_req64_sid, &wqe->els_req, 0); 9387 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9388 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9389 phba->vpi_ids[iocbq->vport->vpi]); 9390 } else if (pcmd && iocbq->context1) { 9391 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9392 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9393 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9394 } 9395 } 9396 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9397 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9398 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9399 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9400 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9401 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9402 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9403 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9404 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9405 break; 9406 case CMD_XMIT_SEQUENCE64_CX: 9407 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9408 iocbq->iocb.un.ulpWord[3]); 9409 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9410 iocbq->iocb.unsli3.rcvsli3.ox_id); 9411 /* The entire sequence is transmitted for this IOCB */ 9412 xmit_len = total_len; 9413 cmnd = CMD_XMIT_SEQUENCE64_CR; 9414 if (phba->link_flag & LS_LOOPBACK_MODE) 9415 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9416 /* fall through */ 
9417 case CMD_XMIT_SEQUENCE64_CR: 9418 /* word3 iocb=io_tag32 wqe=reserved */ 9419 wqe->xmit_sequence.rsvd3 = 0; 9420 /* word4 relative_offset memcpy */ 9421 /* word5 r_ctl/df_ctl memcpy */ 9422 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9423 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9424 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9425 LPFC_WQE_IOD_WRITE); 9426 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9427 LPFC_WQE_LENLOC_WORD12); 9428 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9429 wqe->xmit_sequence.xmit_len = xmit_len; 9430 command_type = OTHER_COMMAND; 9431 break; 9432 case CMD_XMIT_BCAST64_CN: 9433 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9434 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9435 /* word4 iocb=rsvd wqe=rsvd */ 9436 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9437 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9438 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9439 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9440 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9441 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9442 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9443 LPFC_WQE_LENLOC_WORD3); 9444 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9445 break; 9446 case CMD_FCP_IWRITE64_CR: 9447 command_type = FCP_COMMAND_DATA_OUT; 9448 /* word3 iocb=iotag wqe=payload_offset_len */ 9449 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9450 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9451 xmit_len + sizeof(struct fcp_rsp)); 9452 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9453 0); 9454 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9455 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9456 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9457 iocbq->iocb.ulpFCP2Rcvy); 9458 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9459 /* Always open the exchange */ 9460 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9461 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9462 LPFC_WQE_LENLOC_WORD4); 9463 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9464 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9465 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9466 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9467 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9468 if (iocbq->priority) { 9469 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9470 (iocbq->priority << 1)); 9471 } else { 9472 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9473 (phba->cfg_XLanePriority << 1)); 9474 } 9475 } 9476 /* Note, word 10 is already initialized to 0 */ 9477 9478 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9479 if (phba->cfg_enable_pbde) 9480 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9481 else 9482 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9483 9484 if (phba->fcp_embed_io) { 9485 struct lpfc_io_buf *lpfc_cmd; 9486 struct sli4_sge *sgl; 9487 struct fcp_cmnd *fcp_cmnd; 9488 uint32_t *ptr; 9489 9490 /* 128 byte wqe support here */ 9491 9492 lpfc_cmd = iocbq->context1; 9493 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9494 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9495 9496 /* Word 0-2 - FCP_CMND */ 9497 wqe->generic.bde.tus.f.bdeFlags = 9498 BUFF_TYPE_BDE_IMMED; 9499 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9500 wqe->generic.bde.addrHigh = 0; 9501 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9502 9503 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9504 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9505 9506 /* Word 22-29 FCP CMND Payload */ 9507 ptr = &wqe->words[22]; 
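/* The FCP_CMND payload is copied into WQE words 22-29; the
 * BUFF_TYPE_BDE_IMMED BDE built above, with addrLow = 88 (22 words * 4
 * bytes), points the port at this embedded copy (fcp_embed_io) rather
 * than at an external buffer.
 */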
9508 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9509 } 9510 break; 9511 case CMD_FCP_IREAD64_CR: 9512 /* word3 iocb=iotag wqe=payload_offset_len */ 9513 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9514 bf_set(payload_offset_len, &wqe->fcp_iread, 9515 xmit_len + sizeof(struct fcp_rsp)); 9516 bf_set(cmd_buff_len, &wqe->fcp_iread, 9517 0); 9518 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9519 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9520 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9521 iocbq->iocb.ulpFCP2Rcvy); 9522 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9523 /* Always open the exchange */ 9524 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9525 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9526 LPFC_WQE_LENLOC_WORD4); 9527 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9528 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9529 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9530 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9531 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9532 if (iocbq->priority) { 9533 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9534 (iocbq->priority << 1)); 9535 } else { 9536 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9537 (phba->cfg_XLanePriority << 1)); 9538 } 9539 } 9540 /* Note, word 10 is already initialized to 0 */ 9541 9542 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9543 if (phba->cfg_enable_pbde) 9544 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9545 else 9546 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9547 9548 if (phba->fcp_embed_io) { 9549 struct lpfc_io_buf *lpfc_cmd; 9550 struct sli4_sge *sgl; 9551 struct fcp_cmnd *fcp_cmnd; 9552 uint32_t *ptr; 9553 9554 /* 128 byte wqe support here */ 9555 9556 lpfc_cmd = iocbq->context1; 9557 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9558 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9559 9560 /* Word 0-2 - FCP_CMND */ 9561 wqe->generic.bde.tus.f.bdeFlags = 9562 BUFF_TYPE_BDE_IMMED; 9563 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9564 wqe->generic.bde.addrHigh = 0; 9565 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9566 9567 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9568 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9569 9570 /* Word 22-29 FCP CMND Payload */ 9571 ptr = &wqe->words[22]; 9572 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9573 } 9574 break; 9575 case CMD_FCP_ICMND64_CR: 9576 /* word3 iocb=iotag wqe=payload_offset_len */ 9577 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9578 bf_set(payload_offset_len, &wqe->fcp_icmd, 9579 xmit_len + sizeof(struct fcp_rsp)); 9580 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9581 0); 9582 /* word3 iocb=IO_TAG wqe=reserved */ 9583 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9584 /* Always open the exchange */ 9585 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9586 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9587 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9588 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9589 LPFC_WQE_LENLOC_NONE); 9590 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9591 iocbq->iocb.ulpFCP2Rcvy); 9592 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9593 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9594 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9595 if (iocbq->priority) { 9596 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9597 (iocbq->priority << 1)); 9598 } else { 9599 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9600 (phba->cfg_XLanePriority << 1)); 9601 } 9602 } 9603 /* Note, word 10 is already initialized to 0 */ 9604 9605 if (phba->fcp_embed_io) { 9606 struct lpfc_io_buf 
*lpfc_cmd; 9607 struct sli4_sge *sgl; 9608 struct fcp_cmnd *fcp_cmnd; 9609 uint32_t *ptr; 9610 9611 /* 128 byte wqe support here */ 9612 9613 lpfc_cmd = iocbq->context1; 9614 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; 9615 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9616 9617 /* Word 0-2 - FCP_CMND */ 9618 wqe->generic.bde.tus.f.bdeFlags = 9619 BUFF_TYPE_BDE_IMMED; 9620 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9621 wqe->generic.bde.addrHigh = 0; 9622 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9623 9624 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9625 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9626 9627 /* Word 22-29 FCP CMND Payload */ 9628 ptr = &wqe->words[22]; 9629 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9630 } 9631 break; 9632 case CMD_GEN_REQUEST64_CR: 9633 /* For this command calculate the xmit length of the 9634 * request bde. 9635 */ 9636 xmit_len = 0; 9637 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9638 sizeof(struct ulp_bde64); 9639 for (i = 0; i < numBdes; i++) { 9640 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9641 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9642 break; 9643 xmit_len += bde.tus.f.bdeSize; 9644 } 9645 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9646 wqe->gen_req.request_payload_len = xmit_len; 9647 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9648 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9649 /* word6 context tag copied in memcpy */ 9650 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9651 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9652 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9653 "2015 Invalid CT %x command 0x%x\n", 9654 ct, iocbq->iocb.ulpCommand); 9655 return IOCB_ERROR; 9656 } 9657 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9658 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9659 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9660 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9661 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9662 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9663 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9664 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9665 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9666 command_type = OTHER_COMMAND; 9667 break; 9668 case CMD_XMIT_ELS_RSP64_CX: 9669 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9670 /* words0-2 BDE memcpy */ 9671 /* word3 iocb=iotag32 wqe=response_payload_len */ 9672 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9673 /* word4 */ 9674 wqe->xmit_els_rsp.word4 = 0; 9675 /* word5 iocb=rsvd wge=did */ 9676 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9677 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9678 9679 if_type = bf_get(lpfc_sli_intf_if_type, 9680 &phba->sli4_hba.sli_intf); 9681 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9682 if (iocbq->vport->fc_flag & FC_PT2PT) { 9683 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9684 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9685 iocbq->vport->fc_myDID); 9686 if (iocbq->vport->fc_myDID == Fabric_DID) { 9687 bf_set(wqe_els_did, 9688 &wqe->xmit_els_rsp.wqe_dest, 0); 9689 } 9690 } 9691 } 9692 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9693 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9694 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9695 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9696 iocbq->iocb.unsli3.rcvsli3.ox_id); 9697 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9698 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9699 phba->vpi_ids[iocbq->vport->vpi]); 9700 bf_set(wqe_dbde, 
&wqe->xmit_els_rsp.wqe_com, 1); 9701 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9702 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9703 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9704 LPFC_WQE_LENLOC_WORD3); 9705 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9706 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9707 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9708 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9709 iocbq->context2)->virt); 9710 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9711 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9712 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9713 iocbq->vport->fc_myDID); 9714 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9715 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9716 phba->vpi_ids[phba->pport->vpi]); 9717 } 9718 command_type = OTHER_COMMAND; 9719 break; 9720 case CMD_CLOSE_XRI_CN: 9721 case CMD_ABORT_XRI_CN: 9722 case CMD_ABORT_XRI_CX: 9723 /* words 0-2 memcpy should be 0 rserved */ 9724 /* port will send abts */ 9725 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9726 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9727 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9728 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9729 } else 9730 fip = 0; 9731 9732 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9733 /* 9734 * The link is down, or the command was ELS_FIP 9735 * so the fw does not need to send abts 9736 * on the wire. 9737 */ 9738 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9739 else 9740 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9741 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9742 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9743 wqe->abort_cmd.rsrvd5 = 0; 9744 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9745 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9746 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9747 /* 9748 * The abort handler will send us CMD_ABORT_XRI_CN or 9749 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9750 */ 9751 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9752 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9753 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9754 LPFC_WQE_LENLOC_NONE); 9755 cmnd = CMD_ABORT_XRI_CX; 9756 command_type = OTHER_COMMAND; 9757 xritag = 0; 9758 break; 9759 case CMD_XMIT_BLS_RSP64_CX: 9760 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9761 /* As BLS ABTS RSP WQE is very different from other WQEs, 9762 * we re-construct this WQE here based on information in 9763 * iocbq from scratch. 9764 */ 9765 memset(wqe, 0, sizeof(*wqe)); 9766 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9767 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9768 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9769 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9770 LPFC_ABTS_UNSOL_INT) { 9771 /* ABTS sent by initiator to CT exchange, the 9772 * RX_ID field will be filled with the newly 9773 * allocated responder XRI. 9774 */ 9775 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9776 iocbq->sli4_xritag); 9777 } else { 9778 /* ABTS sent by responder to CT exchange, the 9779 * RX_ID field will be filled with the responder 9780 * RX_ID from ABTS. 
9781 */ 9782 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9783 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9784 } 9785 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9786 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9787 9788 /* Use CT=VPI */ 9789 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9790 ndlp->nlp_DID); 9791 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9792 iocbq->iocb.ulpContext); 9793 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9794 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9795 phba->vpi_ids[phba->pport->vpi]); 9796 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9797 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9798 LPFC_WQE_LENLOC_NONE); 9799 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9800 command_type = OTHER_COMMAND; 9801 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9802 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9803 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9804 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9805 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9806 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9807 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9808 } 9809 9810 break; 9811 case CMD_SEND_FRAME: 9812 bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME); 9813 bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */ 9814 bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */ 9815 bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1); 9816 bf_set(wqe_xbl, &wqe->generic.wqe_com, 1); 9817 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); 9818 bf_set(wqe_xc, &wqe->generic.wqe_com, 1); 9819 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA); 9820 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9821 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9822 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9823 return 0; 9824 case CMD_XRI_ABORTED_CX: 9825 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9826 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9827 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9828 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9829 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9830 default: 9831 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9832 "2014 Invalid command 0x%x\n", 9833 iocbq->iocb.ulpCommand); 9834 return IOCB_ERROR; 9835 break; 9836 } 9837 9838 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9839 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9840 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9841 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9842 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9843 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9844 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9845 LPFC_IO_DIF_INSERT); 9846 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9847 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9848 wqe->generic.wqe_com.abort_tag = abort_tag; 9849 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9850 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9851 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9852 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9853 return 0; 9854 } 9855 9856 /** 9857 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9858 * @phba: Pointer to HBA context object. 9859 * @ring_number: SLI ring number to issue iocb on. 9860 * @piocb: Pointer to command iocb. 
9861 * @flag: Flag indicating if this command can be put into txq. 9862 * 9863 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9864 * an iocb command to an HBA with SLI-4 interface spec. 9865 * 9866 * This function is called with hbalock held. The function will return success 9867 * after it successfully submit the iocb to firmware or after adding to the 9868 * txq. 9869 **/ 9870 static int 9871 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 9872 struct lpfc_iocbq *piocb, uint32_t flag) 9873 { 9874 struct lpfc_sglq *sglq; 9875 union lpfc_wqe128 wqe; 9876 struct lpfc_queue *wq; 9877 struct lpfc_sli_ring *pring; 9878 9879 /* Get the WQ */ 9880 if ((piocb->iocb_flag & LPFC_IO_FCP) || 9881 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9882 wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; 9883 } else { 9884 wq = phba->sli4_hba.els_wq; 9885 } 9886 9887 /* Get corresponding ring */ 9888 pring = wq->pring; 9889 9890 /* 9891 * The WQE can be either 64 or 128 bytes, 9892 */ 9893 9894 lockdep_assert_held(&pring->ring_lock); 9895 9896 if (piocb->sli4_xritag == NO_XRI) { 9897 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 9898 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 9899 sglq = NULL; 9900 else { 9901 if (!list_empty(&pring->txq)) { 9902 if (!(flag & SLI_IOCB_RET_IOCB)) { 9903 __lpfc_sli_ringtx_put(phba, 9904 pring, piocb); 9905 return IOCB_SUCCESS; 9906 } else { 9907 return IOCB_BUSY; 9908 } 9909 } else { 9910 sglq = __lpfc_sli_get_els_sglq(phba, piocb); 9911 if (!sglq) { 9912 if (!(flag & SLI_IOCB_RET_IOCB)) { 9913 __lpfc_sli_ringtx_put(phba, 9914 pring, 9915 piocb); 9916 return IOCB_SUCCESS; 9917 } else 9918 return IOCB_BUSY; 9919 } 9920 } 9921 } 9922 } else if (piocb->iocb_flag & LPFC_IO_FCP) 9923 /* These IO's already have an XRI and a mapped sgl. */ 9924 sglq = NULL; 9925 else { 9926 /* 9927 * This is a continuation of a commandi,(CX) so this 9928 * sglq is on the active list 9929 */ 9930 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 9931 if (!sglq) 9932 return IOCB_ERROR; 9933 } 9934 9935 if (sglq) { 9936 piocb->sli4_lxritag = sglq->sli4_lxritag; 9937 piocb->sli4_xritag = sglq->sli4_xritag; 9938 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 9939 return IOCB_ERROR; 9940 } 9941 9942 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 9943 return IOCB_ERROR; 9944 9945 if (lpfc_sli4_wq_put(wq, &wqe)) 9946 return IOCB_ERROR; 9947 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 9948 9949 return 0; 9950 } 9951 9952 /** 9953 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 9954 * 9955 * This routine wraps the actual lockless version for issusing IOCB function 9956 * pointer from the lpfc_hba struct. 9957 * 9958 * Return codes: 9959 * IOCB_ERROR - Error 9960 * IOCB_SUCCESS - Success 9961 * IOCB_BUSY - Busy 9962 **/ 9963 int 9964 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9965 struct lpfc_iocbq *piocb, uint32_t flag) 9966 { 9967 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9968 } 9969 9970 /** 9971 * lpfc_sli_api_table_setup - Set up sli api function jump table 9972 * @phba: The hba struct for which this call is being executed. 9973 * @dev_grp: The HBA PCI-Device group number. 9974 * 9975 * This routine sets up the SLI interface API function jump table in @phba 9976 * struct. 9977 * Returns: 0 - success, -ENODEV - failure. 
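*
* Illustrative sketch of the resulting indirection (the call site below is
* hypothetical; the wrappers are the ones defined earlier in this file):
*
*   lpfc_sli_api_table_setup(phba, LPFC_PCI_DEV_OC);
*   ...
*   rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
*   (now resolves to __lpfc_sli_issue_iocb_s4() through the jump table)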
9978 **/ 9979 int 9980 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 9981 { 9982 9983 switch (dev_grp) { 9984 case LPFC_PCI_DEV_LP: 9985 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 9986 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 9987 break; 9988 case LPFC_PCI_DEV_OC: 9989 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 9990 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 9991 break; 9992 default: 9993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9994 "1419 Invalid HBA PCI-device group: 0x%x\n", 9995 dev_grp); 9996 return -ENODEV; 9997 break; 9998 } 9999 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 10000 return 0; 10001 } 10002 10003 /** 10004 * lpfc_sli4_calc_ring - Calculates which ring to use 10005 * @phba: Pointer to HBA context object. 10006 * @piocb: Pointer to command iocb. 10007 * 10008 * For SLI4 only, FCP IO can deferred to one fo many WQs, based on 10009 * hba_wqidx, thus we need to calculate the corresponding ring. 10010 * Since ABORTS must go on the same WQ of the command they are 10011 * aborting, we use command's hba_wqidx. 10012 */ 10013 struct lpfc_sli_ring * 10014 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 10015 { 10016 struct lpfc_io_buf *lpfc_cmd; 10017 10018 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 10019 if (unlikely(!phba->sli4_hba.hdwq)) 10020 return NULL; 10021 /* 10022 * for abort iocb hba_wqidx should already 10023 * be setup based on what work queue we used. 10024 */ 10025 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 10026 lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; 10027 piocb->hba_wqidx = lpfc_cmd->hdwq_no; 10028 } 10029 return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring; 10030 } else { 10031 if (unlikely(!phba->sli4_hba.els_wq)) 10032 return NULL; 10033 piocb->hba_wqidx = 0; 10034 return phba->sli4_hba.els_wq->pring; 10035 } 10036 } 10037 10038 /** 10039 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 10040 * @phba: Pointer to HBA context object. 10041 * @pring: Pointer to driver SLI ring object. 10042 * @piocb: Pointer to command iocb. 10043 * @flag: Flag indicating if this command can be put into txq. 10044 * 10045 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 10046 * function. This function gets the hbalock and calls 10047 * __lpfc_sli_issue_iocb function and will return the error returned 10048 * by __lpfc_sli_issue_iocb function. This wrapper is used by 10049 * functions which do not hold hbalock. 10050 **/ 10051 int 10052 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 10053 struct lpfc_iocbq *piocb, uint32_t flag) 10054 { 10055 struct lpfc_sli_ring *pring; 10056 unsigned long iflags; 10057 int rc; 10058 10059 if (phba->sli_rev == LPFC_SLI_REV4) { 10060 pring = lpfc_sli4_calc_ring(phba, piocb); 10061 if (unlikely(pring == NULL)) 10062 return IOCB_ERROR; 10063 10064 spin_lock_irqsave(&pring->ring_lock, iflags); 10065 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10066 spin_unlock_irqrestore(&pring->ring_lock, iflags); 10067 } else { 10068 /* For now, SLI2/3 will still use hbalock */ 10069 spin_lock_irqsave(&phba->hbalock, iflags); 10070 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10071 spin_unlock_irqrestore(&phba->hbalock, iflags); 10072 } 10073 return rc; 10074 } 10075 10076 /** 10077 * lpfc_extra_ring_setup - Extra ring setup function 10078 * @phba: Pointer to HBA context object. 
10079 * 10080 * This function is called while driver attaches with the 10081 * HBA to setup the extra ring. The extra ring is used 10082 * only when driver needs to support target mode functionality 10083 * or IP over FC functionalities. 10084 * 10085 * This function is called with no lock held. SLI3 only. 10086 **/ 10087 static int 10088 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10089 { 10090 struct lpfc_sli *psli; 10091 struct lpfc_sli_ring *pring; 10092 10093 psli = &phba->sli; 10094 10095 /* Adjust cmd/rsp ring iocb entries more evenly */ 10096 10097 /* Take some away from the FCP ring */ 10098 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10099 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10100 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10101 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10102 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10103 10104 /* and give them to the extra ring */ 10105 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10106 10107 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10108 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10109 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10110 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10111 10112 /* Setup default profile for this ring */ 10113 pring->iotag_max = 4096; 10114 pring->num_mask = 1; 10115 pring->prt[0].profile = 0; /* Mask 0 */ 10116 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10117 pring->prt[0].type = phba->cfg_multi_ring_type; 10118 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10119 return 0; 10120 } 10121 10122 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 10123 * @phba: Pointer to HBA context object. 10124 * @iocbq: Pointer to iocb object. 10125 * 10126 * The async_event handler calls this routine when it receives 10127 * an ASYNC_STATUS_CN event from the port. The port generates 10128 * this event when an Abort Sequence request to an rport fails 10129 * twice in succession. The abort could be originated by the 10130 * driver or by the port. The ABTS could have been for an ELS 10131 * or FCP IO. The port only generates this event when an ABTS 10132 * fails to complete after one retry. 10133 */ 10134 static void 10135 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 10136 struct lpfc_iocbq *iocbq) 10137 { 10138 struct lpfc_nodelist *ndlp = NULL; 10139 uint16_t rpi = 0, vpi = 0; 10140 struct lpfc_vport *vport = NULL; 10141 10142 /* The rpi in the ulpContext is vport-sensitive. */ 10143 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 10144 rpi = iocbq->iocb.ulpContext; 10145 10146 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10147 "3092 Port generated ABTS async event " 10148 "on vpi %d rpi %d status 0x%x\n", 10149 vpi, rpi, iocbq->iocb.ulpStatus); 10150 10151 vport = lpfc_find_vport_by_vpid(phba, vpi); 10152 if (!vport) 10153 goto err_exit; 10154 ndlp = lpfc_findnode_rpi(vport, rpi); 10155 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 10156 goto err_exit; 10157 10158 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 10159 lpfc_sli_abts_recover_port(vport, ndlp); 10160 return; 10161 10162 err_exit: 10163 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10164 "3095 Event Context not found, no " 10165 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 10166 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 10167 vpi, rpi); 10168 } 10169 10170 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 10171 * @phba: pointer to HBA context object. 
10172 * @ndlp: nodelist pointer for the impacted rport. 10173 * @axri: pointer to the wcqe containing the failed exchange. 10174 * 10175 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 10176 * port. The port generates this event when an abort exchange request to an 10177 * rport fails twice in succession with no reply. The abort could be originated 10178 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 10179 */ 10180 void 10181 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 10182 struct lpfc_nodelist *ndlp, 10183 struct sli4_wcqe_xri_aborted *axri) 10184 { 10185 struct lpfc_vport *vport; 10186 uint32_t ext_status = 0; 10187 10188 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 10189 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10190 "3115 Node Context not found, driver " 10191 "ignoring abts err event\n"); 10192 return; 10193 } 10194 10195 vport = ndlp->vport; 10196 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10197 "3116 Port generated FCP XRI ABORT event on " 10198 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 10199 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 10200 bf_get(lpfc_wcqe_xa_xri, axri), 10201 bf_get(lpfc_wcqe_xa_status, axri), 10202 axri->parameter); 10203 10204 /* 10205 * Catch the ABTS protocol failure case. Older OCe FW releases returned 10206 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 10207 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 10208 */ 10209 ext_status = axri->parameter & IOERR_PARAM_MASK; 10210 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 10211 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 10212 lpfc_sli_abts_recover_port(vport, ndlp); 10213 } 10214 10215 /** 10216 * lpfc_sli_async_event_handler - ASYNC iocb handler function 10217 * @phba: Pointer to HBA context object. 10218 * @pring: Pointer to driver SLI ring object. 10219 * @iocbq: Pointer to iocb object. 10220 * 10221 * This function is called by the slow ring event handler 10222 * function when there is an ASYNC event iocb in the ring. 10223 * This function is called with no lock held. 10224 * Currently this function handles only temperature related 10225 * ASYNC events. The function decodes the temperature sensor 10226 * event message and posts events for the management applications. 10227 **/ 10228 static void 10229 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 10230 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 10231 { 10232 IOCB_t *icmd; 10233 uint16_t evt_code; 10234 struct temp_event temp_event_data; 10235 struct Scsi_Host *shost; 10236 uint32_t *iocb_w; 10237 10238 icmd = &iocbq->iocb; 10239 evt_code = icmd->un.asyncstat.evt_code; 10240 10241 switch (evt_code) { 10242 case ASYNC_TEMP_WARN: 10243 case ASYNC_TEMP_SAFE: 10244 temp_event_data.data = (uint32_t) icmd->ulpContext; 10245 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10246 if (evt_code == ASYNC_TEMP_WARN) { 10247 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10248 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10249 "0347 Adapter is very hot, please take " 10250 "corrective action. temperature : %d Celsius\n", 10251 (uint32_t) icmd->ulpContext); 10252 } else { 10253 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10254 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10255 "0340 Adapter temperature is OK now. 
" 10256 "temperature : %d Celsius\n", 10257 (uint32_t) icmd->ulpContext); 10258 } 10259 10260 /* Send temperature change event to applications */ 10261 shost = lpfc_shost_from_vport(phba->pport); 10262 fc_host_post_vendor_event(shost, fc_get_event_number(), 10263 sizeof(temp_event_data), (char *) &temp_event_data, 10264 LPFC_NL_VENDOR_ID); 10265 break; 10266 case ASYNC_STATUS_CN: 10267 lpfc_sli_abts_err_handler(phba, iocbq); 10268 break; 10269 default: 10270 iocb_w = (uint32_t *) icmd; 10271 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10272 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10273 " evt_code 0x%x\n" 10274 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 10275 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 10276 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 10277 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 10278 pring->ringno, icmd->un.asyncstat.evt_code, 10279 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 10280 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 10281 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 10282 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 10283 10284 break; 10285 } 10286 } 10287 10288 10289 /** 10290 * lpfc_sli4_setup - SLI ring setup function 10291 * @phba: Pointer to HBA context object. 10292 * 10293 * lpfc_sli_setup sets up rings of the SLI interface with 10294 * number of iocbs per ring and iotags. This function is 10295 * called while driver attach to the HBA and before the 10296 * interrupts are enabled. So there is no need for locking. 10297 * 10298 * This function always returns 0. 10299 **/ 10300 int 10301 lpfc_sli4_setup(struct lpfc_hba *phba) 10302 { 10303 struct lpfc_sli_ring *pring; 10304 10305 pring = phba->sli4_hba.els_wq->pring; 10306 pring->num_mask = LPFC_MAX_RING_MASK; 10307 pring->prt[0].profile = 0; /* Mask 0 */ 10308 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10309 pring->prt[0].type = FC_TYPE_ELS; 10310 pring->prt[0].lpfc_sli_rcv_unsol_event = 10311 lpfc_els_unsol_event; 10312 pring->prt[1].profile = 0; /* Mask 1 */ 10313 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10314 pring->prt[1].type = FC_TYPE_ELS; 10315 pring->prt[1].lpfc_sli_rcv_unsol_event = 10316 lpfc_els_unsol_event; 10317 pring->prt[2].profile = 0; /* Mask 2 */ 10318 /* NameServer Inquiry */ 10319 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10320 /* NameServer */ 10321 pring->prt[2].type = FC_TYPE_CT; 10322 pring->prt[2].lpfc_sli_rcv_unsol_event = 10323 lpfc_ct_unsol_event; 10324 pring->prt[3].profile = 0; /* Mask 3 */ 10325 /* NameServer response */ 10326 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10327 /* NameServer */ 10328 pring->prt[3].type = FC_TYPE_CT; 10329 pring->prt[3].lpfc_sli_rcv_unsol_event = 10330 lpfc_ct_unsol_event; 10331 return 0; 10332 } 10333 10334 /** 10335 * lpfc_sli_setup - SLI ring setup function 10336 * @phba: Pointer to HBA context object. 10337 * 10338 * lpfc_sli_setup sets up rings of the SLI interface with 10339 * number of iocbs per ring and iotags. This function is 10340 * called while driver attach to the HBA and before the 10341 * interrupts are enabled. So there is no need for locking. 10342 * 10343 * This function always returns 0. SLI3 only. 
10344 **/ 10345 int 10346 lpfc_sli_setup(struct lpfc_hba *phba) 10347 { 10348 int i, totiocbsize = 0; 10349 struct lpfc_sli *psli = &phba->sli; 10350 struct lpfc_sli_ring *pring; 10351 10352 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10353 psli->sli_flag = 0; 10354 10355 psli->iocbq_lookup = NULL; 10356 psli->iocbq_lookup_len = 0; 10357 psli->last_iotag = 0; 10358 10359 for (i = 0; i < psli->num_rings; i++) { 10360 pring = &psli->sli3_ring[i]; 10361 switch (i) { 10362 case LPFC_FCP_RING: /* ring 0 - FCP */ 10363 /* numCiocb and numRiocb are used in config_port */ 10364 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10365 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10366 pring->sli.sli3.numCiocb += 10367 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10368 pring->sli.sli3.numRiocb += 10369 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10370 pring->sli.sli3.numCiocb += 10371 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10372 pring->sli.sli3.numRiocb += 10373 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10374 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10375 SLI3_IOCB_CMD_SIZE : 10376 SLI2_IOCB_CMD_SIZE; 10377 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10378 SLI3_IOCB_RSP_SIZE : 10379 SLI2_IOCB_RSP_SIZE; 10380 pring->iotag_ctr = 0; 10381 pring->iotag_max = 10382 (phba->cfg_hba_queue_depth * 2); 10383 pring->fast_iotag = pring->iotag_max; 10384 pring->num_mask = 0; 10385 break; 10386 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10387 /* numCiocb and numRiocb are used in config_port */ 10388 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10389 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10390 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10391 SLI3_IOCB_CMD_SIZE : 10392 SLI2_IOCB_CMD_SIZE; 10393 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10394 SLI3_IOCB_RSP_SIZE : 10395 SLI2_IOCB_RSP_SIZE; 10396 pring->iotag_max = phba->cfg_hba_queue_depth; 10397 pring->num_mask = 0; 10398 break; 10399 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 10400 /* numCiocb and numRiocb are used in config_port */ 10401 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 10402 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 10403 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10404 SLI3_IOCB_CMD_SIZE : 10405 SLI2_IOCB_CMD_SIZE; 10406 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
						SLI3_IOCB_RSP_SIZE :
						SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
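 *
 * The rings initialized here are the ones the hot paths later look up
 * through the hardware queue array or the ELS work queue, e.g. (the
 * index variable is illustrative):
 *
 *	pring = phba->sli4_hba.hdwq[idx].io_wq->pring;	 (fast-path I/O)
 *	pring = phba->sli4_hba.els_wq->pring;		 (slow-path ELS)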
10468 **/ 10469 void 10470 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10471 { 10472 struct lpfc_sli *psli; 10473 struct lpfc_sli_ring *pring; 10474 int i; 10475 10476 psli = &phba->sli; 10477 spin_lock_irq(&phba->hbalock); 10478 INIT_LIST_HEAD(&psli->mboxq); 10479 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10480 /* Initialize list headers for txq and txcmplq as double linked lists */ 10481 for (i = 0; i < phba->cfg_hdw_queue; i++) { 10482 pring = phba->sli4_hba.hdwq[i].io_wq->pring; 10483 pring->flag = 0; 10484 pring->ringno = LPFC_FCP_RING; 10485 pring->txcmplq_cnt = 0; 10486 INIT_LIST_HEAD(&pring->txq); 10487 INIT_LIST_HEAD(&pring->txcmplq); 10488 INIT_LIST_HEAD(&pring->iocb_continueq); 10489 spin_lock_init(&pring->ring_lock); 10490 } 10491 pring = phba->sli4_hba.els_wq->pring; 10492 pring->flag = 0; 10493 pring->ringno = LPFC_ELS_RING; 10494 pring->txcmplq_cnt = 0; 10495 INIT_LIST_HEAD(&pring->txq); 10496 INIT_LIST_HEAD(&pring->txcmplq); 10497 INIT_LIST_HEAD(&pring->iocb_continueq); 10498 spin_lock_init(&pring->ring_lock); 10499 10500 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 10501 pring = phba->sli4_hba.nvmels_wq->pring; 10502 pring->flag = 0; 10503 pring->ringno = LPFC_ELS_RING; 10504 pring->txcmplq_cnt = 0; 10505 INIT_LIST_HEAD(&pring->txq); 10506 INIT_LIST_HEAD(&pring->txcmplq); 10507 INIT_LIST_HEAD(&pring->iocb_continueq); 10508 spin_lock_init(&pring->ring_lock); 10509 } 10510 10511 spin_unlock_irq(&phba->hbalock); 10512 } 10513 10514 /** 10515 * lpfc_sli_queue_init - Queue initialization function 10516 * @phba: Pointer to HBA context object. 10517 * 10518 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10519 * ring. This function also initializes ring indices of each ring. 10520 * This function is called during the initialization of the SLI 10521 * interface of an HBA. 10522 * This function is called with no lock held and always returns 10523 * 1. 10524 **/ 10525 void 10526 lpfc_sli_queue_init(struct lpfc_hba *phba) 10527 { 10528 struct lpfc_sli *psli; 10529 struct lpfc_sli_ring *pring; 10530 int i; 10531 10532 psli = &phba->sli; 10533 spin_lock_irq(&phba->hbalock); 10534 INIT_LIST_HEAD(&psli->mboxq); 10535 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10536 /* Initialize list headers for txq and txcmplq as double linked lists */ 10537 for (i = 0; i < psli->num_rings; i++) { 10538 pring = &psli->sli3_ring[i]; 10539 pring->ringno = i; 10540 pring->sli.sli3.next_cmdidx = 0; 10541 pring->sli.sli3.local_getidx = 0; 10542 pring->sli.sli3.cmdidx = 0; 10543 INIT_LIST_HEAD(&pring->iocb_continueq); 10544 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10545 INIT_LIST_HEAD(&pring->postbufq); 10546 pring->flag = 0; 10547 INIT_LIST_HEAD(&pring->txq); 10548 INIT_LIST_HEAD(&pring->txcmplq); 10549 spin_lock_init(&pring->ring_lock); 10550 } 10551 spin_unlock_irq(&phba->hbalock); 10552 } 10553 10554 /** 10555 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10556 * @phba: Pointer to HBA context object. 10557 * 10558 * This routine flushes the mailbox command subsystem. It will unconditionally 10559 * flush all the mailbox commands in the three possible stages in the mailbox 10560 * command sub-system: pending mailbox command queue; the outstanding mailbox 10561 * command; and completed mailbox command queue. It is caller's responsibility 10562 * to make sure that the driver is in the proper state to flush the mailbox 10563 * command sub-system. 
Namely, the posting of mailbox commands into the 10564 * pending mailbox command queue from the various clients must be stopped; 10565 * either the HBA is in a state that it will never works on the outstanding 10566 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10567 * mailbox command has been completed. 10568 **/ 10569 static void 10570 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10571 { 10572 LIST_HEAD(completions); 10573 struct lpfc_sli *psli = &phba->sli; 10574 LPFC_MBOXQ_t *pmb; 10575 unsigned long iflag; 10576 10577 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10578 local_bh_disable(); 10579 10580 /* Flush all the mailbox commands in the mbox system */ 10581 spin_lock_irqsave(&phba->hbalock, iflag); 10582 10583 /* The pending mailbox command queue */ 10584 list_splice_init(&phba->sli.mboxq, &completions); 10585 /* The outstanding active mailbox command */ 10586 if (psli->mbox_active) { 10587 list_add_tail(&psli->mbox_active->list, &completions); 10588 psli->mbox_active = NULL; 10589 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10590 } 10591 /* The completed mailbox command queue */ 10592 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10593 spin_unlock_irqrestore(&phba->hbalock, iflag); 10594 10595 /* Enable softirqs again, done with phba->hbalock */ 10596 local_bh_enable(); 10597 10598 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10599 while (!list_empty(&completions)) { 10600 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10601 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10602 if (pmb->mbox_cmpl) 10603 pmb->mbox_cmpl(phba, pmb); 10604 } 10605 } 10606 10607 /** 10608 * lpfc_sli_host_down - Vport cleanup function 10609 * @vport: Pointer to virtual port object. 10610 * 10611 * lpfc_sli_host_down is called to clean up the resources 10612 * associated with a vport before destroying virtual 10613 * port data structures. 10614 * This function does following operations: 10615 * - Free discovery resources associated with this virtual 10616 * port. 10617 * - Free iocbs associated with this virtual port in 10618 * the txq. 10619 * - Send abort for all iocb commands associated with this 10620 * vport in txcmplq. 10621 * 10622 * This function is called with no lock held and always returns 1. 10623 **/ 10624 int 10625 lpfc_sli_host_down(struct lpfc_vport *vport) 10626 { 10627 LIST_HEAD(completions); 10628 struct lpfc_hba *phba = vport->phba; 10629 struct lpfc_sli *psli = &phba->sli; 10630 struct lpfc_queue *qp = NULL; 10631 struct lpfc_sli_ring *pring; 10632 struct lpfc_iocbq *iocb, *next_iocb; 10633 int i; 10634 unsigned long flags = 0; 10635 uint16_t prev_pring_flag; 10636 10637 lpfc_cleanup_discovery_resources(vport); 10638 10639 spin_lock_irqsave(&phba->hbalock, flags); 10640 10641 /* 10642 * Error everything on the txq since these iocbs 10643 * have not been given to the FW yet. 
10644 * Also issue ABTS for everything on the txcmplq 10645 */ 10646 if (phba->sli_rev != LPFC_SLI_REV4) { 10647 for (i = 0; i < psli->num_rings; i++) { 10648 pring = &psli->sli3_ring[i]; 10649 prev_pring_flag = pring->flag; 10650 /* Only slow rings */ 10651 if (pring->ringno == LPFC_ELS_RING) { 10652 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10653 /* Set the lpfc data pending flag */ 10654 set_bit(LPFC_DATA_READY, &phba->data_flags); 10655 } 10656 list_for_each_entry_safe(iocb, next_iocb, 10657 &pring->txq, list) { 10658 if (iocb->vport != vport) 10659 continue; 10660 list_move_tail(&iocb->list, &completions); 10661 } 10662 list_for_each_entry_safe(iocb, next_iocb, 10663 &pring->txcmplq, list) { 10664 if (iocb->vport != vport) 10665 continue; 10666 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10667 } 10668 pring->flag = prev_pring_flag; 10669 } 10670 } else { 10671 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10672 pring = qp->pring; 10673 if (!pring) 10674 continue; 10675 if (pring == phba->sli4_hba.els_wq->pring) { 10676 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10677 /* Set the lpfc data pending flag */ 10678 set_bit(LPFC_DATA_READY, &phba->data_flags); 10679 } 10680 prev_pring_flag = pring->flag; 10681 spin_lock_irq(&pring->ring_lock); 10682 list_for_each_entry_safe(iocb, next_iocb, 10683 &pring->txq, list) { 10684 if (iocb->vport != vport) 10685 continue; 10686 list_move_tail(&iocb->list, &completions); 10687 } 10688 spin_unlock_irq(&pring->ring_lock); 10689 list_for_each_entry_safe(iocb, next_iocb, 10690 &pring->txcmplq, list) { 10691 if (iocb->vport != vport) 10692 continue; 10693 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10694 } 10695 pring->flag = prev_pring_flag; 10696 } 10697 } 10698 spin_unlock_irqrestore(&phba->hbalock, flags); 10699 10700 /* Cancel all the IOCBs from the completions list */ 10701 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10702 IOERR_SLI_DOWN); 10703 return 1; 10704 } 10705 10706 /** 10707 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10708 * @phba: Pointer to HBA context object. 10709 * 10710 * This function cleans up all iocb, buffers, mailbox commands 10711 * while shutting down the HBA. This function is called with no 10712 * lock held and always returns 1. 10713 * This function does the following to cleanup driver resources: 10714 * - Free discovery resources for each virtual port 10715 * - Cleanup any pending fabric iocbs 10716 * - Iterate through the iocb txq and free each entry 10717 * in the list. 10718 * - Free up any buffer posted to the HBA 10719 * - Free mailbox commands in the mailbox queue. 10720 **/ 10721 int 10722 lpfc_sli_hba_down(struct lpfc_hba *phba) 10723 { 10724 LIST_HEAD(completions); 10725 struct lpfc_sli *psli = &phba->sli; 10726 struct lpfc_queue *qp = NULL; 10727 struct lpfc_sli_ring *pring; 10728 struct lpfc_dmabuf *buf_ptr; 10729 unsigned long flags = 0; 10730 int i; 10731 10732 /* Shutdown the mailbox command sub-system */ 10733 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10734 10735 lpfc_hba_down_prep(phba); 10736 10737 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10738 local_bh_disable(); 10739 10740 lpfc_fabric_abort_hba(phba); 10741 10742 spin_lock_irqsave(&phba->hbalock, flags); 10743 10744 /* 10745 * Error everything on the txq since these iocbs 10746 * have not been given to the FW yet. 
10747 */ 10748 if (phba->sli_rev != LPFC_SLI_REV4) { 10749 for (i = 0; i < psli->num_rings; i++) { 10750 pring = &psli->sli3_ring[i]; 10751 /* Only slow rings */ 10752 if (pring->ringno == LPFC_ELS_RING) { 10753 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10754 /* Set the lpfc data pending flag */ 10755 set_bit(LPFC_DATA_READY, &phba->data_flags); 10756 } 10757 list_splice_init(&pring->txq, &completions); 10758 } 10759 } else { 10760 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10761 pring = qp->pring; 10762 if (!pring) 10763 continue; 10764 spin_lock(&pring->ring_lock); 10765 list_splice_init(&pring->txq, &completions); 10766 spin_unlock(&pring->ring_lock); 10767 if (pring == phba->sli4_hba.els_wq->pring) { 10768 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10769 /* Set the lpfc data pending flag */ 10770 set_bit(LPFC_DATA_READY, &phba->data_flags); 10771 } 10772 } 10773 } 10774 spin_unlock_irqrestore(&phba->hbalock, flags); 10775 10776 /* Cancel all the IOCBs from the completions list */ 10777 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10778 IOERR_SLI_DOWN); 10779 10780 spin_lock_irqsave(&phba->hbalock, flags); 10781 list_splice_init(&phba->elsbuf, &completions); 10782 phba->elsbuf_cnt = 0; 10783 phba->elsbuf_prev_cnt = 0; 10784 spin_unlock_irqrestore(&phba->hbalock, flags); 10785 10786 while (!list_empty(&completions)) { 10787 list_remove_head(&completions, buf_ptr, 10788 struct lpfc_dmabuf, list); 10789 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10790 kfree(buf_ptr); 10791 } 10792 10793 /* Enable softirqs again, done with phba->hbalock */ 10794 local_bh_enable(); 10795 10796 /* Return any active mbox cmds */ 10797 del_timer_sync(&psli->mbox_tmo); 10798 10799 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10800 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10801 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10802 10803 return 1; 10804 } 10805 10806 /** 10807 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10808 * @srcp: Source memory pointer. 10809 * @destp: Destination memory pointer. 10810 * @cnt: Number of words required to be copied. 10811 * 10812 * This function is used for copying data between driver memory 10813 * and the SLI memory. This function also changes the endianness 10814 * of each word if native endianness is different from SLI 10815 * endianness. This function can be called with or without 10816 * lock. 10817 **/ 10818 void 10819 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10820 { 10821 uint32_t *src = srcp; 10822 uint32_t *dest = destp; 10823 uint32_t ldata; 10824 int i; 10825 10826 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10827 ldata = *src; 10828 ldata = le32_to_cpu(ldata); 10829 *dest = ldata; 10830 src++; 10831 dest++; 10832 } 10833 } 10834 10835 10836 /** 10837 * lpfc_sli_bemem_bcopy - SLI memory copy function 10838 * @srcp: Source memory pointer. 10839 * @destp: Destination memory pointer. 10840 * @cnt: Number of words required to be copied. 10841 * 10842 * This function is used for copying data between a data structure 10843 * with big endian representation to local endianness. 10844 * This function can be called with or without lock. 
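 *
 * Note that @cnt is advanced in steps of sizeof(uint32_t), so callers
 * pass a byte count that should be a whole number of 32-bit words. A
 * minimal, hypothetical use (the source buffer name is illustrative):
 *
 *	uint32_t cpu_copy[16];
 *
 *	lpfc_sli_bemem_bcopy(be_src, cpu_copy, sizeof(cpu_copy));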
10845 **/ 10846 void 10847 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 10848 { 10849 uint32_t *src = srcp; 10850 uint32_t *dest = destp; 10851 uint32_t ldata; 10852 int i; 10853 10854 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 10855 ldata = *src; 10856 ldata = be32_to_cpu(ldata); 10857 *dest = ldata; 10858 src++; 10859 dest++; 10860 } 10861 } 10862 10863 /** 10864 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 10865 * @phba: Pointer to HBA context object. 10866 * @pring: Pointer to driver SLI ring object. 10867 * @mp: Pointer to driver buffer object. 10868 * 10869 * This function is called with no lock held. 10870 * It always return zero after adding the buffer to the postbufq 10871 * buffer list. 10872 **/ 10873 int 10874 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10875 struct lpfc_dmabuf *mp) 10876 { 10877 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 10878 later */ 10879 spin_lock_irq(&phba->hbalock); 10880 list_add_tail(&mp->list, &pring->postbufq); 10881 pring->postbufq_cnt++; 10882 spin_unlock_irq(&phba->hbalock); 10883 return 0; 10884 } 10885 10886 /** 10887 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 10888 * @phba: Pointer to HBA context object. 10889 * 10890 * When HBQ is enabled, buffers are searched based on tags. This function 10891 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 10892 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 10893 * does not conflict with tags of buffer posted for unsolicited events. 10894 * The function returns the allocated tag. The function is called with 10895 * no locks held. 10896 **/ 10897 uint32_t 10898 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 10899 { 10900 spin_lock_irq(&phba->hbalock); 10901 phba->buffer_tag_count++; 10902 /* 10903 * Always set the QUE_BUFTAG_BIT to distiguish between 10904 * a tag assigned by HBQ. 10905 */ 10906 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 10907 spin_unlock_irq(&phba->hbalock); 10908 return phba->buffer_tag_count; 10909 } 10910 10911 /** 10912 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 10913 * @phba: Pointer to HBA context object. 10914 * @pring: Pointer to driver SLI ring object. 10915 * @tag: Buffer tag. 10916 * 10917 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 10918 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 10919 * iocb is posted to the response ring with the tag of the buffer. 10920 * This function searches the pring->postbufq list using the tag 10921 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 10922 * iocb. If the buffer is found then lpfc_dmabuf object of the 10923 * buffer is returned to the caller else NULL is returned. 10924 * This function is called with no lock held. 
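 *
 * A minimal, hypothetical use in a response handler (iocb parsing and
 * error handling are omitted; consuming mp->virt is up to the caller):
 *
 *	struct lpfc_dmabuf *mp;
 *
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *	if (!mp)
 *		return;
 *	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 *	kfree(mp);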
10925 **/ 10926 struct lpfc_dmabuf * 10927 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10928 uint32_t tag) 10929 { 10930 struct lpfc_dmabuf *mp, *next_mp; 10931 struct list_head *slp = &pring->postbufq; 10932 10933 /* Search postbufq, from the beginning, looking for a match on tag */ 10934 spin_lock_irq(&phba->hbalock); 10935 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10936 if (mp->buffer_tag == tag) { 10937 list_del_init(&mp->list); 10938 pring->postbufq_cnt--; 10939 spin_unlock_irq(&phba->hbalock); 10940 return mp; 10941 } 10942 } 10943 10944 spin_unlock_irq(&phba->hbalock); 10945 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10946 "0402 Cannot find virtual addr for buffer tag on " 10947 "ring %d Data x%lx x%px x%px x%x\n", 10948 pring->ringno, (unsigned long) tag, 10949 slp->next, slp->prev, pring->postbufq_cnt); 10950 10951 return NULL; 10952 } 10953 10954 /** 10955 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10956 * @phba: Pointer to HBA context object. 10957 * @pring: Pointer to driver SLI ring object. 10958 * @phys: DMA address of the buffer. 10959 * 10960 * This function searches the buffer list using the dma_address 10961 * of unsolicited event to find the driver's lpfc_dmabuf object 10962 * corresponding to the dma_address. The function returns the 10963 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10964 * This function is called by the ct and els unsolicited event 10965 * handlers to get the buffer associated with the unsolicited 10966 * event. 10967 * 10968 * This function is called with no lock held. 10969 **/ 10970 struct lpfc_dmabuf * 10971 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10972 dma_addr_t phys) 10973 { 10974 struct lpfc_dmabuf *mp, *next_mp; 10975 struct list_head *slp = &pring->postbufq; 10976 10977 /* Search postbufq, from the beginning, looking for a match on phys */ 10978 spin_lock_irq(&phba->hbalock); 10979 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10980 if (mp->phys == phys) { 10981 list_del_init(&mp->list); 10982 pring->postbufq_cnt--; 10983 spin_unlock_irq(&phba->hbalock); 10984 return mp; 10985 } 10986 } 10987 10988 spin_unlock_irq(&phba->hbalock); 10989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10990 "0410 Cannot find virtual addr for mapped buf on " 10991 "ring %d Data x%llx x%px x%px x%x\n", 10992 pring->ringno, (unsigned long long)phys, 10993 slp->next, slp->prev, pring->postbufq_cnt); 10994 return NULL; 10995 } 10996 10997 /** 10998 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 10999 * @phba: Pointer to HBA context object. 11000 * @cmdiocb: Pointer to driver command iocb object. 11001 * @rspiocb: Pointer to driver response iocb object. 11002 * 11003 * This function is the completion handler for the abort iocbs for 11004 * ELS commands. This function is called from the ELS ring event 11005 * handler with no lock held. This function frees memory resources 11006 * associated with the abort iocb. 11007 **/ 11008 static void 11009 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11010 struct lpfc_iocbq *rspiocb) 11011 { 11012 IOCB_t *irsp = &rspiocb->iocb; 11013 uint16_t abort_iotag, abort_context; 11014 struct lpfc_iocbq *abort_iocb = NULL; 11015 11016 if (irsp->ulpStatus) { 11017 11018 /* 11019 * Assume that the port already completed and returned, or 11020 * will return the iocb. Just Log the message. 
11021 */ 11022 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 11023 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 11024 11025 spin_lock_irq(&phba->hbalock); 11026 if (phba->sli_rev < LPFC_SLI_REV4) { 11027 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 11028 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11029 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 11030 spin_unlock_irq(&phba->hbalock); 11031 goto release_iocb; 11032 } 11033 if (abort_iotag != 0 && 11034 abort_iotag <= phba->sli.last_iotag) 11035 abort_iocb = 11036 phba->sli.iocbq_lookup[abort_iotag]; 11037 } else 11038 /* For sli4 the abort_tag is the XRI, 11039 * so the abort routine puts the iotag of the iocb 11040 * being aborted in the context field of the abort 11041 * IOCB. 11042 */ 11043 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11044 11045 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11046 "0327 Cannot abort els iocb x%px " 11047 "with tag %x context %x, abort status %x, " 11048 "abort code %x\n", 11049 abort_iocb, abort_iotag, abort_context, 11050 irsp->ulpStatus, irsp->un.ulpWord[4]); 11051 11052 spin_unlock_irq(&phba->hbalock); 11053 if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 11054 irsp->un.ulpWord[4] == IOERR_SLI_ABORTED) 11055 lpfc_sli_release_iocbq(phba, abort_iocb); 11056 } 11057 release_iocb: 11058 lpfc_sli_release_iocbq(phba, cmdiocb); 11059 return; 11060 } 11061 11062 /** 11063 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11064 * @phba: Pointer to HBA context object. 11065 * @cmdiocb: Pointer to driver command iocb object. 11066 * @rspiocb: Pointer to driver response iocb object. 11067 * 11068 * The function is called from SLI ring event handler with no 11069 * lock held. This function is the completion handler for ELS commands 11070 * which are aborted. The function frees memory resources used for 11071 * the aborted ELS commands. 11072 **/ 11073 static void 11074 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11075 struct lpfc_iocbq *rspiocb) 11076 { 11077 IOCB_t *irsp = &rspiocb->iocb; 11078 11079 /* ELS cmd tag <ulpIoTag> completes */ 11080 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11081 "0139 Ignoring ELS cmd tag x%x completion Data: " 11082 "x%x x%x x%x\n", 11083 irsp->ulpIoTag, irsp->ulpStatus, 11084 irsp->un.ulpWord[4], irsp->ulpTimeout); 11085 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11086 lpfc_ct_free_iocb(phba, cmdiocb); 11087 else 11088 lpfc_els_free_iocb(phba, cmdiocb); 11089 return; 11090 } 11091 11092 /** 11093 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11094 * @phba: Pointer to HBA context object. 11095 * @pring: Pointer to driver SLI ring object. 11096 * @cmdiocb: Pointer to driver command iocb object. 11097 * 11098 * This function issues an abort iocb for the provided command iocb down to 11099 * the port. Other than the case the outstanding command iocb is an abort 11100 * request, this function issues abort out unconditionally. This function is 11101 * called with hbalock held. The function returns 0 when it fails due to 11102 * memory allocation failure or when the command iocb is an abort request. 
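 *
 * Illustrative calling pattern for the public wrapper below, mirroring
 * lpfc_sli_host_down() above (locals are as declared there and the
 * hbalock protects the txcmplq walk):
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);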
11103 **/ 11104 static int 11105 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11106 struct lpfc_iocbq *cmdiocb) 11107 { 11108 struct lpfc_vport *vport = cmdiocb->vport; 11109 struct lpfc_iocbq *abtsiocbp; 11110 IOCB_t *icmd = NULL; 11111 IOCB_t *iabt = NULL; 11112 int retval; 11113 unsigned long iflags; 11114 struct lpfc_nodelist *ndlp; 11115 11116 lockdep_assert_held(&phba->hbalock); 11117 11118 /* 11119 * There are certain command types we don't want to abort. And we 11120 * don't want to abort commands that are already in the process of 11121 * being aborted. 11122 */ 11123 icmd = &cmdiocb->iocb; 11124 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11125 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11126 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11127 return 0; 11128 11129 /* issue ABTS for this IOCB based on iotag */ 11130 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11131 if (abtsiocbp == NULL) 11132 return 0; 11133 11134 /* This signals the response to set the correct status 11135 * before calling the completion handler 11136 */ 11137 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11138 11139 iabt = &abtsiocbp->iocb; 11140 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 11141 iabt->un.acxri.abortContextTag = icmd->ulpContext; 11142 if (phba->sli_rev == LPFC_SLI_REV4) { 11143 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 11144 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 11145 } else { 11146 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 11147 if (pring->ringno == LPFC_ELS_RING) { 11148 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); 11149 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; 11150 } 11151 } 11152 iabt->ulpLe = 1; 11153 iabt->ulpClass = icmd->ulpClass; 11154 11155 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11156 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 11157 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 11158 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 11159 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 11160 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 11161 11162 if (phba->link_state >= LPFC_LINK_UP) 11163 iabt->ulpCommand = CMD_ABORT_XRI_CN; 11164 else 11165 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 11166 11167 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 11168 abtsiocbp->vport = vport; 11169 11170 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 11171 "0339 Abort xri x%x, original iotag x%x, " 11172 "abort cmd iotag x%x\n", 11173 iabt->un.acxri.abortIoTag, 11174 iabt->un.acxri.abortContextTag, 11175 abtsiocbp->iotag); 11176 11177 if (phba->sli_rev == LPFC_SLI_REV4) { 11178 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 11179 if (unlikely(pring == NULL)) 11180 return 0; 11181 /* Note: both hbalock and ring_lock need to be set here */ 11182 spin_lock_irqsave(&pring->ring_lock, iflags); 11183 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11184 abtsiocbp, 0); 11185 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11186 } else { 11187 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11188 abtsiocbp, 0); 11189 } 11190 11191 if (retval) 11192 __lpfc_sli_release_iocbq(phba, abtsiocbp); 11193 11194 /* 11195 * Caller to this routine should check for IOCB_ERROR 11196 * and handle it properly. This routine no longer removes 11197 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11198 */ 11199 return retval; 11200 } 11201 11202 /** 11203 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 11204 * @phba: Pointer to HBA context object. 11205 * @pring: Pointer to driver SLI ring object. 
11206 * @cmdiocb: Pointer to driver command iocb object. 11207 * 11208 * This function issues an abort iocb for the provided command iocb. In case 11209 * of unloading, the abort iocb will not be issued to commands on the ELS 11210 * ring. Instead, the callback function shall be changed to those commands 11211 * so that nothing happens when them finishes. This function is called with 11212 * hbalock held. The function returns 0 when the command iocb is an abort 11213 * request. 11214 **/ 11215 int 11216 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11217 struct lpfc_iocbq *cmdiocb) 11218 { 11219 struct lpfc_vport *vport = cmdiocb->vport; 11220 int retval = IOCB_ERROR; 11221 IOCB_t *icmd = NULL; 11222 11223 lockdep_assert_held(&phba->hbalock); 11224 11225 /* 11226 * There are certain command types we don't want to abort. And we 11227 * don't want to abort commands that are already in the process of 11228 * being aborted. 11229 */ 11230 icmd = &cmdiocb->iocb; 11231 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11232 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11233 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11234 return 0; 11235 11236 if (!pring) { 11237 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11238 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11239 else 11240 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11241 goto abort_iotag_exit; 11242 } 11243 11244 /* 11245 * If we're unloading, don't abort iocb on the ELS ring, but change 11246 * the callback so that nothing happens when it finishes. 11247 */ 11248 if ((vport->load_flag & FC_UNLOADING) && 11249 (pring->ringno == LPFC_ELS_RING)) { 11250 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11251 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11252 else 11253 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11254 goto abort_iotag_exit; 11255 } 11256 11257 /* Now, we try to issue the abort to the cmdiocb out */ 11258 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11259 11260 abort_iotag_exit: 11261 /* 11262 * Caller to this routine should check for IOCB_ERROR 11263 * and handle it properly. This routine no longer removes 11264 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11265 */ 11266 return retval; 11267 } 11268 11269 /** 11270 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11271 * @phba: pointer to lpfc HBA data structure. 11272 * 11273 * This routine will abort all pending and outstanding iocbs to an HBA. 11274 **/ 11275 void 11276 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11277 { 11278 struct lpfc_sli *psli = &phba->sli; 11279 struct lpfc_sli_ring *pring; 11280 struct lpfc_queue *qp = NULL; 11281 int i; 11282 11283 if (phba->sli_rev != LPFC_SLI_REV4) { 11284 for (i = 0; i < psli->num_rings; i++) { 11285 pring = &psli->sli3_ring[i]; 11286 lpfc_sli_abort_iocb_ring(phba, pring); 11287 } 11288 return; 11289 } 11290 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11291 pring = qp->pring; 11292 if (!pring) 11293 continue; 11294 lpfc_sli_abort_iocb_ring(phba, pring); 11295 } 11296 } 11297 11298 /** 11299 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11300 * @iocbq: Pointer to driver iocb object. 11301 * @vport: Pointer to driver virtual port object. 11302 * @tgt_id: SCSI ID of the target. 11303 * @lun_id: LUN ID of the scsi device. 11304 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11305 * 11306 * This function acts as an iocb filter for functions which abort or count 11307 * all FCP iocbs pending on a lun/SCSI target/SCSI host. 
It will return 11308 * 0 if the filtering criteria is met for the given iocb and will return 11309 * 1 if the filtering criteria is not met. 11310 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11311 * given iocb is for the SCSI device specified by vport, tgt_id and 11312 * lun_id parameter. 11313 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11314 * given iocb is for the SCSI target specified by vport and tgt_id 11315 * parameters. 11316 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11317 * given iocb is for the SCSI host associated with the given vport. 11318 * This function is called with no locks held. 11319 **/ 11320 static int 11321 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11322 uint16_t tgt_id, uint64_t lun_id, 11323 lpfc_ctx_cmd ctx_cmd) 11324 { 11325 struct lpfc_io_buf *lpfc_cmd; 11326 int rc = 1; 11327 11328 if (iocbq->vport != vport) 11329 return rc; 11330 11331 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11332 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11333 return rc; 11334 11335 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11336 11337 if (lpfc_cmd->pCmd == NULL) 11338 return rc; 11339 11340 switch (ctx_cmd) { 11341 case LPFC_CTX_LUN: 11342 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11343 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11344 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11345 rc = 0; 11346 break; 11347 case LPFC_CTX_TGT: 11348 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11349 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11350 rc = 0; 11351 break; 11352 case LPFC_CTX_HOST: 11353 rc = 0; 11354 break; 11355 default: 11356 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11357 __func__, ctx_cmd); 11358 break; 11359 } 11360 11361 return rc; 11362 } 11363 11364 /** 11365 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11366 * @vport: Pointer to virtual port. 11367 * @tgt_id: SCSI ID of the target. 11368 * @lun_id: LUN ID of the scsi device. 11369 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11370 * 11371 * This function returns number of FCP commands pending for the vport. 11372 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11373 * commands pending on the vport associated with SCSI device specified 11374 * by tgt_id and lun_id parameters. 11375 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11376 * commands pending on the vport associated with SCSI target specified 11377 * by tgt_id parameter. 11378 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11379 * commands pending on the vport. 11380 * This function returns the number of iocbs which satisfy the filter. 11381 * This function is called without any lock held. 
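 *
 * A hypothetical wait-for-drain loop built on this count (the deadline
 * handling is illustrative, not taken from the driver):
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, deadline))
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));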
11382 **/ 11383 int 11384 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11385 lpfc_ctx_cmd ctx_cmd) 11386 { 11387 struct lpfc_hba *phba = vport->phba; 11388 struct lpfc_iocbq *iocbq; 11389 int sum, i; 11390 11391 spin_lock_irq(&phba->hbalock); 11392 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11393 iocbq = phba->sli.iocbq_lookup[i]; 11394 11395 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11396 ctx_cmd) == 0) 11397 sum++; 11398 } 11399 spin_unlock_irq(&phba->hbalock); 11400 11401 return sum; 11402 } 11403 11404 /** 11405 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11406 * @phba: Pointer to HBA context object 11407 * @cmdiocb: Pointer to command iocb object. 11408 * @rspiocb: Pointer to response iocb object. 11409 * 11410 * This function is called when an aborted FCP iocb completes. This 11411 * function is called by the ring event handler with no lock held. 11412 * This function frees the iocb. 11413 **/ 11414 void 11415 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11416 struct lpfc_iocbq *rspiocb) 11417 { 11418 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11419 "3096 ABORT_XRI_CN completing on rpi x%x " 11420 "original iotag x%x, abort cmd iotag x%x " 11421 "status 0x%x, reason 0x%x\n", 11422 cmdiocb->iocb.un.acxri.abortContextTag, 11423 cmdiocb->iocb.un.acxri.abortIoTag, 11424 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11425 rspiocb->iocb.un.ulpWord[4]); 11426 lpfc_sli_release_iocbq(phba, cmdiocb); 11427 return; 11428 } 11429 11430 /** 11431 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11432 * @vport: Pointer to virtual port. 11433 * @pring: Pointer to driver SLI ring object. 11434 * @tgt_id: SCSI ID of the target. 11435 * @lun_id: LUN ID of the scsi device. 11436 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11437 * 11438 * This function sends an abort command for every SCSI command 11439 * associated with the given virtual port pending on the ring 11440 * filtered by lpfc_sli_validate_fcp_iocb function. 11441 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11442 * FCP iocbs associated with lun specified by tgt_id and lun_id 11443 * parameters 11444 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11445 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11446 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11447 * FCP iocbs associated with virtual port. 11448 * This function returns number of iocbs it failed to abort. 11449 * This function is called with no locks held. 11450 **/ 11451 int 11452 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11453 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11454 { 11455 struct lpfc_hba *phba = vport->phba; 11456 struct lpfc_iocbq *iocbq; 11457 struct lpfc_iocbq *abtsiocb; 11458 struct lpfc_sli_ring *pring_s4; 11459 IOCB_t *cmd = NULL; 11460 int errcnt = 0, ret_val = 0; 11461 int i; 11462 11463 /* all I/Os are in process of being flushed */ 11464 if (phba->hba_flag & HBA_IOQ_FLUSH) 11465 return errcnt; 11466 11467 for (i = 1; i <= phba->sli.last_iotag; i++) { 11468 iocbq = phba->sli.iocbq_lookup[i]; 11469 11470 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11471 abort_cmd) != 0) 11472 continue; 11473 11474 /* 11475 * If the iocbq is already being aborted, don't take a second 11476 * action, but do count it. 
11477 */ 11478 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11479 continue; 11480 11481 /* issue ABTS for this IOCB based on iotag */ 11482 abtsiocb = lpfc_sli_get_iocbq(phba); 11483 if (abtsiocb == NULL) { 11484 errcnt++; 11485 continue; 11486 } 11487 11488 /* indicate the IO is being aborted by the driver. */ 11489 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11490 11491 cmd = &iocbq->iocb; 11492 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11493 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11494 if (phba->sli_rev == LPFC_SLI_REV4) 11495 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11496 else 11497 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11498 abtsiocb->iocb.ulpLe = 1; 11499 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11500 abtsiocb->vport = vport; 11501 11502 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11503 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11504 if (iocbq->iocb_flag & LPFC_IO_FCP) 11505 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11506 if (iocbq->iocb_flag & LPFC_IO_FOF) 11507 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11508 11509 if (lpfc_is_link_up(phba)) 11510 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11511 else 11512 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11513 11514 /* Setup callback routine and issue the command. */ 11515 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11516 if (phba->sli_rev == LPFC_SLI_REV4) { 11517 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11518 if (!pring_s4) 11519 continue; 11520 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11521 abtsiocb, 0); 11522 } else 11523 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11524 abtsiocb, 0); 11525 if (ret_val == IOCB_ERROR) { 11526 lpfc_sli_release_iocbq(phba, abtsiocb); 11527 errcnt++; 11528 continue; 11529 } 11530 } 11531 11532 return errcnt; 11533 } 11534 11535 /** 11536 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11537 * @vport: Pointer to virtual port. 11538 * @pring: Pointer to driver SLI ring object. 11539 * @tgt_id: SCSI ID of the target. 11540 * @lun_id: LUN ID of the scsi device. 11541 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11542 * 11543 * This function sends an abort command for every SCSI command 11544 * associated with the given virtual port pending on the ring 11545 * filtered by lpfc_sli_validate_fcp_iocb function. 11546 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11547 * FCP iocbs associated with lun specified by tgt_id and lun_id 11548 * parameters 11549 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11550 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11551 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11552 * FCP iocbs associated with virtual port. 11553 * This function returns number of iocbs it aborted . 11554 * This function is called with no locks held right after a taskmgmt 11555 * command is sent. 
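 *
 * A hypothetical post-task-management sketch (the polling policy is
 * illustrative): issue the aborts, then let the caller poll
 * lpfc_sli_sum_iocb() until the matching I/Os drain:
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *				      LPFC_CTX_TGT);
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_TGT))
 *		msleep(20);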
11556 **/ 11557 int 11558 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11559 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11560 { 11561 struct lpfc_hba *phba = vport->phba; 11562 struct lpfc_io_buf *lpfc_cmd; 11563 struct lpfc_iocbq *abtsiocbq; 11564 struct lpfc_nodelist *ndlp; 11565 struct lpfc_iocbq *iocbq; 11566 IOCB_t *icmd; 11567 int sum, i, ret_val; 11568 unsigned long iflags; 11569 struct lpfc_sli_ring *pring_s4 = NULL; 11570 11571 spin_lock_irqsave(&phba->hbalock, iflags); 11572 11573 /* all I/Os are in process of being flushed */ 11574 if (phba->hba_flag & HBA_IOQ_FLUSH) { 11575 spin_unlock_irqrestore(&phba->hbalock, iflags); 11576 return 0; 11577 } 11578 sum = 0; 11579 11580 for (i = 1; i <= phba->sli.last_iotag; i++) { 11581 iocbq = phba->sli.iocbq_lookup[i]; 11582 11583 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11584 cmd) != 0) 11585 continue; 11586 11587 /* Guard against IO completion being called at same time */ 11588 lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); 11589 spin_lock(&lpfc_cmd->buf_lock); 11590 11591 if (!lpfc_cmd->pCmd) { 11592 spin_unlock(&lpfc_cmd->buf_lock); 11593 continue; 11594 } 11595 11596 if (phba->sli_rev == LPFC_SLI_REV4) { 11597 pring_s4 = 11598 phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; 11599 if (!pring_s4) { 11600 spin_unlock(&lpfc_cmd->buf_lock); 11601 continue; 11602 } 11603 /* Note: both hbalock and ring_lock must be set here */ 11604 spin_lock(&pring_s4->ring_lock); 11605 } 11606 11607 /* 11608 * If the iocbq is already being aborted, don't take a second 11609 * action, but do count it. 11610 */ 11611 if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) || 11612 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { 11613 if (phba->sli_rev == LPFC_SLI_REV4) 11614 spin_unlock(&pring_s4->ring_lock); 11615 spin_unlock(&lpfc_cmd->buf_lock); 11616 continue; 11617 } 11618 11619 /* issue ABTS for this IOCB based on iotag */ 11620 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11621 if (!abtsiocbq) { 11622 if (phba->sli_rev == LPFC_SLI_REV4) 11623 spin_unlock(&pring_s4->ring_lock); 11624 spin_unlock(&lpfc_cmd->buf_lock); 11625 continue; 11626 } 11627 11628 icmd = &iocbq->iocb; 11629 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11630 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11631 if (phba->sli_rev == LPFC_SLI_REV4) 11632 abtsiocbq->iocb.un.acxri.abortIoTag = 11633 iocbq->sli4_xritag; 11634 else 11635 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11636 abtsiocbq->iocb.ulpLe = 1; 11637 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11638 abtsiocbq->vport = vport; 11639 11640 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11641 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11642 if (iocbq->iocb_flag & LPFC_IO_FCP) 11643 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11644 if (iocbq->iocb_flag & LPFC_IO_FOF) 11645 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11646 11647 ndlp = lpfc_cmd->rdata->pnode; 11648 11649 if (lpfc_is_link_up(phba) && 11650 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11651 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11652 else 11653 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11654 11655 /* Setup callback routine and issue the command. */ 11656 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11657 11658 /* 11659 * Indicate the IO is being aborted by the driver and set 11660 * the caller's flag into the aborted IO. 
11661 */ 11662 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11663 11664 if (phba->sli_rev == LPFC_SLI_REV4) { 11665 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11666 abtsiocbq, 0); 11667 spin_unlock(&pring_s4->ring_lock); 11668 } else { 11669 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11670 abtsiocbq, 0); 11671 } 11672 11673 spin_unlock(&lpfc_cmd->buf_lock); 11674 11675 if (ret_val == IOCB_ERROR) 11676 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11677 else 11678 sum++; 11679 } 11680 spin_unlock_irqrestore(&phba->hbalock, iflags); 11681 return sum; 11682 } 11683 11684 /** 11685 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11686 * @phba: Pointer to HBA context object. 11687 * @cmdiocbq: Pointer to command iocb. 11688 * @rspiocbq: Pointer to response iocb. 11689 * 11690 * This function is the completion handler for iocbs issued using 11691 * lpfc_sli_issue_iocb_wait function. This function is called by the 11692 * ring event handler function without any lock held. This function 11693 * can be called from both worker thread context and interrupt 11694 * context. This function also can be called from other thread which 11695 * cleans up the SLI layer objects. 11696 * This function copy the contents of the response iocb to the 11697 * response iocb memory object provided by the caller of 11698 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11699 * sleeps for the iocb completion. 11700 **/ 11701 static void 11702 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11703 struct lpfc_iocbq *cmdiocbq, 11704 struct lpfc_iocbq *rspiocbq) 11705 { 11706 wait_queue_head_t *pdone_q; 11707 unsigned long iflags; 11708 struct lpfc_io_buf *lpfc_cmd; 11709 11710 spin_lock_irqsave(&phba->hbalock, iflags); 11711 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11712 11713 /* 11714 * A time out has occurred for the iocb. If a time out 11715 * completion handler has been supplied, call it. Otherwise, 11716 * just free the iocbq. 11717 */ 11718 11719 spin_unlock_irqrestore(&phba->hbalock, iflags); 11720 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11721 cmdiocbq->wait_iocb_cmpl = NULL; 11722 if (cmdiocbq->iocb_cmpl) 11723 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11724 else 11725 lpfc_sli_release_iocbq(phba, cmdiocbq); 11726 return; 11727 } 11728 11729 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11730 if (cmdiocbq->context2 && rspiocbq) 11731 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11732 &rspiocbq->iocb, sizeof(IOCB_t)); 11733 11734 /* Set the exchange busy flag for task management commands */ 11735 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11736 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11737 lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, 11738 cur_iocbq); 11739 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11740 } 11741 11742 pdone_q = cmdiocbq->context_un.wait_queue; 11743 if (pdone_q) 11744 wake_up(pdone_q); 11745 spin_unlock_irqrestore(&phba->hbalock, iflags); 11746 return; 11747 } 11748 11749 /** 11750 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11751 * @phba: Pointer to HBA context object.. 11752 * @piocbq: Pointer to command iocb. 11753 * @flag: Flag to test. 11754 * 11755 * This routine grabs the hbalock and then test the iocb_flag to 11756 * see if the passed in flag is set. 11757 * Returns: 11758 * 1 if flag is set. 11759 * 0 if flag is not set. 
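 *
 * Typical use is as the condition of a timed wait, as in
 * lpfc_sli_issue_iocb_wait() below:
 *
 *	timeleft = wait_event_timeout(done_q,
 *			lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
 *			timeout_req);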
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;

}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number on which to issue the iocb.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completions for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * must be NULL or it is an error.
11836 */ 11837 if (prspiocbq) { 11838 if (piocb->context2) 11839 return IOCB_ERROR; 11840 piocb->context2 = prspiocbq; 11841 } 11842 11843 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11844 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11845 piocb->context_un.wait_queue = &done_q; 11846 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11847 11848 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11849 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11850 return IOCB_ERROR; 11851 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11852 writel(creg_val, phba->HCregaddr); 11853 readl(phba->HCregaddr); /* flush */ 11854 } 11855 11856 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11857 SLI_IOCB_RET_IOCB); 11858 if (retval == IOCB_SUCCESS) { 11859 timeout_req = msecs_to_jiffies(timeout * 1000); 11860 timeleft = wait_event_timeout(done_q, 11861 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11862 timeout_req); 11863 spin_lock_irqsave(&phba->hbalock, iflags); 11864 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11865 11866 /* 11867 * IOCB timed out. Inform the wake iocb wait 11868 * completion function and set local status 11869 */ 11870 11871 iocb_completed = false; 11872 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11873 } 11874 spin_unlock_irqrestore(&phba->hbalock, iflags); 11875 if (iocb_completed) { 11876 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11877 "0331 IOCB wake signaled\n"); 11878 /* Note: we are not indicating if the IOCB has a success 11879 * status or not - that's for the caller to check. 11880 * IOCB_SUCCESS means just that the command was sent and 11881 * completed. Not that it completed successfully. 11882 * */ 11883 } else if (timeleft == 0) { 11884 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11885 "0338 IOCB wait timeout error - no " 11886 "wake response Data x%x\n", timeout); 11887 retval = IOCB_TIMEDOUT; 11888 } else { 11889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11890 "0330 IOCB wake NOT set, " 11891 "Data x%x x%lx\n", 11892 timeout, (timeleft / jiffies)); 11893 retval = IOCB_TIMEDOUT; 11894 } 11895 } else if (retval == IOCB_BUSY) { 11896 if (phba->cfg_log_verbose & LOG_SLI) { 11897 list_for_each_entry(iocb, &pring->txq, list) { 11898 txq_cnt++; 11899 } 11900 list_for_each_entry(iocb, &pring->txcmplq, list) { 11901 txcmplq_cnt++; 11902 } 11903 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11904 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11905 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11906 } 11907 return retval; 11908 } else { 11909 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11910 "0332 IOCB wait issue failed, Data x%x\n", 11911 retval); 11912 retval = IOCB_ERROR; 11913 } 11914 11915 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11916 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11917 return IOCB_ERROR; 11918 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11919 writel(creg_val, phba->HCregaddr); 11920 readl(phba->HCregaddr); /* flush */ 11921 } 11922 11923 if (prspiocbq) 11924 piocb->context2 = NULL; 11925 11926 piocb->context_un.wait_queue = NULL; 11927 piocb->iocb_cmpl = NULL; 11928 return retval; 11929 } 11930 11931 /** 11932 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11933 * @phba: Pointer to HBA context object. 11934 * @pmboxq: Pointer to driver mailbox object. 11935 * @timeout: Timeout in number of seconds. 11936 * 11937 * This function issues the mailbox to firmware and waits for the 11938 * mailbox command to complete. If the mailbox command is not 11939 * completed within timeout seconds, it returns MBX_TIMEOUT. 
11940 * The function waits for the mailbox completion using an 11941 * interruptible wait. If the thread is woken up due to a 11942 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11943 * should not free the mailbox resources, if this function returns 11944 * MBX_TIMEOUT. 11945 * This function will sleep while waiting for mailbox completion. 11946 * So, this function should not be called from any context which 11947 * does not allow sleeping. Due to the same reason, this function 11948 * cannot be called with interrupt disabled. 11949 * This function assumes that the mailbox completion occurs while 11950 * this function sleep. So, this function cannot be called from 11951 * the worker thread which processes mailbox completion. 11952 * This function is called in the context of HBA management 11953 * applications. 11954 * This function returns MBX_SUCCESS when successful. 11955 * This function is called with no lock held. 11956 **/ 11957 int 11958 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 11959 uint32_t timeout) 11960 { 11961 struct completion mbox_done; 11962 int retval; 11963 unsigned long flag; 11964 11965 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 11966 /* setup wake call as IOCB callback */ 11967 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 11968 11969 /* setup context3 field to pass wait_queue pointer to wake function */ 11970 init_completion(&mbox_done); 11971 pmboxq->context3 = &mbox_done; 11972 /* now issue the command */ 11973 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 11974 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 11975 wait_for_completion_timeout(&mbox_done, 11976 msecs_to_jiffies(timeout * 1000)); 11977 11978 spin_lock_irqsave(&phba->hbalock, flag); 11979 pmboxq->context3 = NULL; 11980 /* 11981 * if LPFC_MBX_WAKE flag is set the mailbox is completed 11982 * else do not free the resources. 11983 */ 11984 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 11985 retval = MBX_SUCCESS; 11986 } else { 11987 retval = MBX_TIMEOUT; 11988 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11989 } 11990 spin_unlock_irqrestore(&phba->hbalock, flag); 11991 } 11992 return retval; 11993 } 11994 11995 /** 11996 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 11997 * @phba: Pointer to HBA context. 11998 * 11999 * This function is called to shutdown the driver's mailbox sub-system. 12000 * It first marks the mailbox sub-system is in a block state to prevent 12001 * the asynchronous mailbox command from issued off the pending mailbox 12002 * command queue. If the mailbox command sub-system shutdown is due to 12003 * HBA error conditions such as EEH or ERATT, this routine shall invoke 12004 * the mailbox sub-system flush routine to forcefully bring down the 12005 * mailbox sub-system. Otherwise, if it is due to normal condition (such 12006 * as with offline or HBA function reset), this routine will wait for the 12007 * outstanding mailbox command to complete before invoking the mailbox 12008 * sub-system flush routine to gracefully bring down mailbox sub-system. 
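 *
 * The @mbx_action argument selects between these behaviors:
 * LPFC_MBX_WAIT waits for the active command as described above, while
 * LPFC_MBX_NO_WAIT flushes after a short delay. Typical invocations
 * (the wait form is what lpfc_sli_hba_down() above uses):
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);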
12009 **/ 12010 void 12011 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 12012 { 12013 struct lpfc_sli *psli = &phba->sli; 12014 unsigned long timeout; 12015 12016 if (mbx_action == LPFC_MBX_NO_WAIT) { 12017 /* delay 100ms for port state */ 12018 msleep(100); 12019 lpfc_sli_mbox_sys_flush(phba); 12020 return; 12021 } 12022 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 12023 12024 /* Disable softirqs, including timers from obtaining phba->hbalock */ 12025 local_bh_disable(); 12026 12027 spin_lock_irq(&phba->hbalock); 12028 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 12029 12030 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 12031 /* Determine how long we might wait for the active mailbox 12032 * command to be gracefully completed by firmware. 12033 */ 12034 if (phba->sli.mbox_active) 12035 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 12036 phba->sli.mbox_active) * 12037 1000) + jiffies; 12038 spin_unlock_irq(&phba->hbalock); 12039 12040 /* Enable softirqs again, done with phba->hbalock */ 12041 local_bh_enable(); 12042 12043 while (phba->sli.mbox_active) { 12044 /* Check active mailbox complete status every 2ms */ 12045 msleep(2); 12046 if (time_after(jiffies, timeout)) 12047 /* Timeout, let the mailbox flush routine to 12048 * forcefully release active mailbox command 12049 */ 12050 break; 12051 } 12052 } else { 12053 spin_unlock_irq(&phba->hbalock); 12054 12055 /* Enable softirqs again, done with phba->hbalock */ 12056 local_bh_enable(); 12057 } 12058 12059 lpfc_sli_mbox_sys_flush(phba); 12060 } 12061 12062 /** 12063 * lpfc_sli_eratt_read - read sli-3 error attention events 12064 * @phba: Pointer to HBA context. 12065 * 12066 * This function is called to read the SLI3 device error attention registers 12067 * for possible error attention events. The caller must hold the hostlock 12068 * with spin_lock_irq(). 12069 * 12070 * This function returns 1 when there is Error Attention in the Host Attention 12071 * Register and returns 0 otherwise. 12072 **/ 12073 static int 12074 lpfc_sli_eratt_read(struct lpfc_hba *phba) 12075 { 12076 uint32_t ha_copy; 12077 12078 /* Read chip Host Attention (HA) register */ 12079 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12080 goto unplug_err; 12081 12082 if (ha_copy & HA_ERATT) { 12083 /* Read host status register to retrieve error event */ 12084 if (lpfc_sli_read_hs(phba)) 12085 goto unplug_err; 12086 12087 /* Check if there is a deferred error condition is active */ 12088 if ((HS_FFER1 & phba->work_hs) && 12089 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12090 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 12091 phba->hba_flag |= DEFER_ERATT; 12092 /* Clear all interrupt enable conditions */ 12093 writel(0, phba->HCregaddr); 12094 readl(phba->HCregaddr); 12095 } 12096 12097 /* Set the driver HA work bitmap */ 12098 phba->work_ha |= HA_ERATT; 12099 /* Indicate polling handles this ERATT */ 12100 phba->hba_flag |= HBA_ERATT_HANDLED; 12101 return 1; 12102 } 12103 return 0; 12104 12105 unplug_err: 12106 /* Set the driver HS work bitmap */ 12107 phba->work_hs |= UNPLUG_ERR; 12108 /* Set the driver HA work bitmap */ 12109 phba->work_ha |= HA_ERATT; 12110 /* Indicate polling handles this ERATT */ 12111 phba->hba_flag |= HBA_ERATT_HANDLED; 12112 return 1; 12113 } 12114 12115 /** 12116 * lpfc_sli4_eratt_read - read sli-4 error attention events 12117 * @phba: Pointer to HBA context. 12118 * 12119 * This function is called to read the SLI4 device error attention registers 12120 * for possible error attention events. 
The caller must hold the hostlock 12121 * with spin_lock_irq(). 12122 * 12123 * This function returns 1 when there is Error Attention in the Host Attention 12124 * Register and returns 0 otherwise. 12125 **/ 12126 static int 12127 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12128 { 12129 uint32_t uerr_sta_hi, uerr_sta_lo; 12130 uint32_t if_type, portsmphr; 12131 struct lpfc_register portstat_reg; 12132 12133 /* 12134 * For now, use the SLI4 device internal unrecoverable error 12135 * registers for error attention. This can be changed later. 12136 */ 12137 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12138 switch (if_type) { 12139 case LPFC_SLI_INTF_IF_TYPE_0: 12140 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12141 &uerr_sta_lo) || 12142 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12143 &uerr_sta_hi)) { 12144 phba->work_hs |= UNPLUG_ERR; 12145 phba->work_ha |= HA_ERATT; 12146 phba->hba_flag |= HBA_ERATT_HANDLED; 12147 return 1; 12148 } 12149 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12150 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12152 "1423 HBA Unrecoverable error: " 12153 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12154 "ue_mask_lo_reg=0x%x, " 12155 "ue_mask_hi_reg=0x%x\n", 12156 uerr_sta_lo, uerr_sta_hi, 12157 phba->sli4_hba.ue_mask_lo, 12158 phba->sli4_hba.ue_mask_hi); 12159 phba->work_status[0] = uerr_sta_lo; 12160 phba->work_status[1] = uerr_sta_hi; 12161 phba->work_ha |= HA_ERATT; 12162 phba->hba_flag |= HBA_ERATT_HANDLED; 12163 return 1; 12164 } 12165 break; 12166 case LPFC_SLI_INTF_IF_TYPE_2: 12167 case LPFC_SLI_INTF_IF_TYPE_6: 12168 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12169 &portstat_reg.word0) || 12170 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12171 &portsmphr)){ 12172 phba->work_hs |= UNPLUG_ERR; 12173 phba->work_ha |= HA_ERATT; 12174 phba->hba_flag |= HBA_ERATT_HANDLED; 12175 return 1; 12176 } 12177 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12178 phba->work_status[0] = 12179 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12180 phba->work_status[1] = 12181 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12182 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12183 "2885 Port Status Event: " 12184 "port status reg 0x%x, " 12185 "port smphr reg 0x%x, " 12186 "error 1=0x%x, error 2=0x%x\n", 12187 portstat_reg.word0, 12188 portsmphr, 12189 phba->work_status[0], 12190 phba->work_status[1]); 12191 phba->work_ha |= HA_ERATT; 12192 phba->hba_flag |= HBA_ERATT_HANDLED; 12193 return 1; 12194 } 12195 break; 12196 case LPFC_SLI_INTF_IF_TYPE_1: 12197 default: 12198 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12199 "2886 HBA Error Attention on unsupported " 12200 "if type %d.", if_type); 12201 return 1; 12202 } 12203 12204 return 0; 12205 } 12206 12207 /** 12208 * lpfc_sli_check_eratt - check error attention events 12209 * @phba: Pointer to HBA context. 12210 * 12211 * This function is called from timer soft interrupt context to check HBA's 12212 * error attention register bit for error attention events. 12213 * 12214 * This function returns 1 when there is Error Attention in the Host Attention 12215 * Register and returns 0 otherwise. 12216 **/ 12217 int 12218 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12219 { 12220 uint32_t ha_copy; 12221 12222 /* If somebody is waiting to handle an eratt, don't process it 12223 * here. The brdkill function will do this. 
12224 */ 12225 if (phba->link_flag & LS_IGNORE_ERATT) 12226 return 0; 12227 12228 /* Check if interrupt handler handles this ERATT */ 12229 spin_lock_irq(&phba->hbalock); 12230 if (phba->hba_flag & HBA_ERATT_HANDLED) { 12231 /* Interrupt handler has handled ERATT */ 12232 spin_unlock_irq(&phba->hbalock); 12233 return 0; 12234 } 12235 12236 /* 12237 * If there is deferred error attention, do not check for error 12238 * attention 12239 */ 12240 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12241 spin_unlock_irq(&phba->hbalock); 12242 return 0; 12243 } 12244 12245 /* If PCI channel is offline, don't process it */ 12246 if (unlikely(pci_channel_offline(phba->pcidev))) { 12247 spin_unlock_irq(&phba->hbalock); 12248 return 0; 12249 } 12250 12251 switch (phba->sli_rev) { 12252 case LPFC_SLI_REV2: 12253 case LPFC_SLI_REV3: 12254 /* Read chip Host Attention (HA) register */ 12255 ha_copy = lpfc_sli_eratt_read(phba); 12256 break; 12257 case LPFC_SLI_REV4: 12258 /* Read device Uncoverable Error (UERR) registers */ 12259 ha_copy = lpfc_sli4_eratt_read(phba); 12260 break; 12261 default: 12262 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12263 "0299 Invalid SLI revision (%d)\n", 12264 phba->sli_rev); 12265 ha_copy = 0; 12266 break; 12267 } 12268 spin_unlock_irq(&phba->hbalock); 12269 12270 return ha_copy; 12271 } 12272 12273 /** 12274 * lpfc_intr_state_check - Check device state for interrupt handling 12275 * @phba: Pointer to HBA context. 12276 * 12277 * This inline routine checks whether a device or its PCI slot is in a state 12278 * that the interrupt should be handled. 12279 * 12280 * This function returns 0 if the device or the PCI slot is in a state that 12281 * interrupt should be handled, otherwise -EIO. 12282 */ 12283 static inline int 12284 lpfc_intr_state_check(struct lpfc_hba *phba) 12285 { 12286 /* If the pci channel is offline, ignore all the interrupts */ 12287 if (unlikely(pci_channel_offline(phba->pcidev))) 12288 return -EIO; 12289 12290 /* Update device level interrupt statistics */ 12291 phba->sli.slistat.sli_intr++; 12292 12293 /* Ignore all interrupts during initialization. */ 12294 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 12295 return -EIO; 12296 12297 return 0; 12298 } 12299 12300 /** 12301 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 12302 * @irq: Interrupt number. 12303 * @dev_id: The device context pointer. 12304 * 12305 * This function is directly called from the PCI layer as an interrupt 12306 * service routine when device with SLI-3 interface spec is enabled with 12307 * MSI-X multi-message interrupt mode and there are slow-path events in 12308 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 12309 * interrupt mode, this function is called as part of the device-level 12310 * interrupt handler. When the PCI slot is in error recovery or the HBA 12311 * is undergoing initialization, the interrupt handler will not process 12312 * the interrupt. The link attention and ELS ring attention events are 12313 * handled by the worker thread. The interrupt handler signals the worker 12314 * thread and returns for these events. This function is called without 12315 * any lock held. It gets the hbalock to access and update SLI data 12316 * structures. 12317 * 12318 * This function returns IRQ_HANDLED when interrupt is handled else it 12319 * returns IRQ_NONE. 
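 *
 * As an orientation sketch (the literal dispatch code lives in
 * lpfc_sli_intr_handler() further below), the device-level handler only
 * invokes this routine when mailbox, link-attention, error-attention or
 * ELS-ring bits are set in the saved host attention value:
 *
 *	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
 *	status2 = (phba->ha_copy & (HA_RXMASK << (4 * LPFC_ELS_RING)))
 *			>> (4 * LPFC_ELS_RING);
 *	if (status1 || (status2 & HA_RXMASK))
 *		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);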
12320 **/ 12321 irqreturn_t 12322 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12323 { 12324 struct lpfc_hba *phba; 12325 uint32_t ha_copy, hc_copy; 12326 uint32_t work_ha_copy; 12327 unsigned long status; 12328 unsigned long iflag; 12329 uint32_t control; 12330 12331 MAILBOX_t *mbox, *pmbox; 12332 struct lpfc_vport *vport; 12333 struct lpfc_nodelist *ndlp; 12334 struct lpfc_dmabuf *mp; 12335 LPFC_MBOXQ_t *pmb; 12336 int rc; 12337 12338 /* 12339 * Get the driver's phba structure from the dev_id and 12340 * assume the HBA is not interrupting. 12341 */ 12342 phba = (struct lpfc_hba *)dev_id; 12343 12344 if (unlikely(!phba)) 12345 return IRQ_NONE; 12346 12347 /* 12348 * Stuff needs to be attented to when this function is invoked as an 12349 * individual interrupt handler in MSI-X multi-message interrupt mode 12350 */ 12351 if (phba->intr_type == MSIX) { 12352 /* Check device state for handling interrupt */ 12353 if (lpfc_intr_state_check(phba)) 12354 return IRQ_NONE; 12355 /* Need to read HA REG for slow-path events */ 12356 spin_lock_irqsave(&phba->hbalock, iflag); 12357 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12358 goto unplug_error; 12359 /* If somebody is waiting to handle an eratt don't process it 12360 * here. The brdkill function will do this. 12361 */ 12362 if (phba->link_flag & LS_IGNORE_ERATT) 12363 ha_copy &= ~HA_ERATT; 12364 /* Check the need for handling ERATT in interrupt handler */ 12365 if (ha_copy & HA_ERATT) { 12366 if (phba->hba_flag & HBA_ERATT_HANDLED) 12367 /* ERATT polling has handled ERATT */ 12368 ha_copy &= ~HA_ERATT; 12369 else 12370 /* Indicate interrupt handler handles ERATT */ 12371 phba->hba_flag |= HBA_ERATT_HANDLED; 12372 } 12373 12374 /* 12375 * If there is deferred error attention, do not check for any 12376 * interrupt. 12377 */ 12378 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12379 spin_unlock_irqrestore(&phba->hbalock, iflag); 12380 return IRQ_NONE; 12381 } 12382 12383 /* Clear up only attention source related to slow-path */ 12384 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12385 goto unplug_error; 12386 12387 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12388 HC_LAINT_ENA | HC_ERINT_ENA), 12389 phba->HCregaddr); 12390 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12391 phba->HAregaddr); 12392 writel(hc_copy, phba->HCregaddr); 12393 readl(phba->HAregaddr); /* flush */ 12394 spin_unlock_irqrestore(&phba->hbalock, iflag); 12395 } else 12396 ha_copy = phba->ha_copy; 12397 12398 work_ha_copy = ha_copy & phba->work_ha_mask; 12399 12400 if (work_ha_copy) { 12401 if (work_ha_copy & HA_LATT) { 12402 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12403 /* 12404 * Turn off Link Attention interrupts 12405 * until CLEAR_LA done 12406 */ 12407 spin_lock_irqsave(&phba->hbalock, iflag); 12408 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12409 if (lpfc_readl(phba->HCregaddr, &control)) 12410 goto unplug_error; 12411 control &= ~HC_LAINT_ENA; 12412 writel(control, phba->HCregaddr); 12413 readl(phba->HCregaddr); /* flush */ 12414 spin_unlock_irqrestore(&phba->hbalock, iflag); 12415 } 12416 else 12417 work_ha_copy &= ~HA_LATT; 12418 } 12419 12420 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12421 /* 12422 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12423 * the only slow ring. 
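			 * Each ring owns a four-bit attention field in the
			 * HA register, so a ring's receive-attention bits
			 * are isolated by masking with HA_RXMASK shifted by
			 * (4 * ring number) and shifting the result back
			 * down, as done for LPFC_ELS_RING below.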
12424 */ 12425 status = (work_ha_copy & 12426 (HA_RXMASK << (4*LPFC_ELS_RING))); 12427 status >>= (4*LPFC_ELS_RING); 12428 if (status & HA_RXMASK) { 12429 spin_lock_irqsave(&phba->hbalock, iflag); 12430 if (lpfc_readl(phba->HCregaddr, &control)) 12431 goto unplug_error; 12432 12433 lpfc_debugfs_slow_ring_trc(phba, 12434 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12435 control, status, 12436 (uint32_t)phba->sli.slistat.sli_intr); 12437 12438 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12439 lpfc_debugfs_slow_ring_trc(phba, 12440 "ISR Disable ring:" 12441 "pwork:x%x hawork:x%x wait:x%x", 12442 phba->work_ha, work_ha_copy, 12443 (uint32_t)((unsigned long) 12444 &phba->work_waitq)); 12445 12446 control &= 12447 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12448 writel(control, phba->HCregaddr); 12449 readl(phba->HCregaddr); /* flush */ 12450 } 12451 else { 12452 lpfc_debugfs_slow_ring_trc(phba, 12453 "ISR slow ring: pwork:" 12454 "x%x hawork:x%x wait:x%x", 12455 phba->work_ha, work_ha_copy, 12456 (uint32_t)((unsigned long) 12457 &phba->work_waitq)); 12458 } 12459 spin_unlock_irqrestore(&phba->hbalock, iflag); 12460 } 12461 } 12462 spin_lock_irqsave(&phba->hbalock, iflag); 12463 if (work_ha_copy & HA_ERATT) { 12464 if (lpfc_sli_read_hs(phba)) 12465 goto unplug_error; 12466 /* 12467 * Check if there is a deferred error condition 12468 * is active 12469 */ 12470 if ((HS_FFER1 & phba->work_hs) && 12471 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12472 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12473 phba->work_hs)) { 12474 phba->hba_flag |= DEFER_ERATT; 12475 /* Clear all interrupt enable conditions */ 12476 writel(0, phba->HCregaddr); 12477 readl(phba->HCregaddr); 12478 } 12479 } 12480 12481 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12482 pmb = phba->sli.mbox_active; 12483 pmbox = &pmb->u.mb; 12484 mbox = phba->mbox; 12485 vport = pmb->vport; 12486 12487 /* First check out the status word */ 12488 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12489 if (pmbox->mbxOwner != OWN_HOST) { 12490 spin_unlock_irqrestore(&phba->hbalock, iflag); 12491 /* 12492 * Stray Mailbox Interrupt, mbxCommand <cmd> 12493 * mbxStatus <status> 12494 */ 12495 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12496 LOG_SLI, 12497 "(%d):0304 Stray Mailbox " 12498 "Interrupt mbxCommand x%x " 12499 "mbxStatus x%x\n", 12500 (vport ? vport->vpi : 0), 12501 pmbox->mbxCommand, 12502 pmbox->mbxStatus); 12503 /* clear mailbox attention bit */ 12504 work_ha_copy &= ~HA_MBATT; 12505 } else { 12506 phba->sli.mbox_active = NULL; 12507 spin_unlock_irqrestore(&phba->hbalock, iflag); 12508 phba->last_completion_time = jiffies; 12509 del_timer(&phba->sli.mbox_tmo); 12510 if (pmb->mbox_cmpl) { 12511 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12512 MAILBOX_CMD_SIZE); 12513 if (pmb->out_ext_byte_len && 12514 pmb->ctx_buf) 12515 lpfc_sli_pcimem_bcopy( 12516 phba->mbox_ext, 12517 pmb->ctx_buf, 12518 pmb->out_ext_byte_len); 12519 } 12520 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12521 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12522 12523 lpfc_debugfs_disc_trc(vport, 12524 LPFC_DISC_TRC_MBOX_VPORT, 12525 "MBOX dflt rpi: : " 12526 "status:x%x rpi:x%x", 12527 (uint32_t)pmbox->mbxStatus, 12528 pmbox->un.varWords[0], 0); 12529 12530 if (!pmbox->mbxStatus) { 12531 mp = (struct lpfc_dmabuf *) 12532 (pmb->ctx_buf); 12533 ndlp = (struct lpfc_nodelist *) 12534 pmb->ctx_ndlp; 12535 12536 /* Reg_LOGIN of dflt RPI was 12537 * successful. new lets get 12538 * rid of the RPI using the 12539 * same mbox buffer. 
12540 */ 12541 lpfc_unreg_login(phba, 12542 vport->vpi, 12543 pmbox->un.varWords[0], 12544 pmb); 12545 pmb->mbox_cmpl = 12546 lpfc_mbx_cmpl_dflt_rpi; 12547 pmb->ctx_buf = mp; 12548 pmb->ctx_ndlp = ndlp; 12549 pmb->vport = vport; 12550 rc = lpfc_sli_issue_mbox(phba, 12551 pmb, 12552 MBX_NOWAIT); 12553 if (rc != MBX_BUSY) 12554 lpfc_printf_log(phba, 12555 KERN_ERR, 12556 LOG_MBOX | LOG_SLI, 12557 "0350 rc should have" 12558 "been MBX_BUSY\n"); 12559 if (rc != MBX_NOT_FINISHED) 12560 goto send_current_mbox; 12561 } 12562 } 12563 spin_lock_irqsave( 12564 &phba->pport->work_port_lock, 12565 iflag); 12566 phba->pport->work_port_events &= 12567 ~WORKER_MBOX_TMO; 12568 spin_unlock_irqrestore( 12569 &phba->pport->work_port_lock, 12570 iflag); 12571 lpfc_mbox_cmpl_put(phba, pmb); 12572 } 12573 } else 12574 spin_unlock_irqrestore(&phba->hbalock, iflag); 12575 12576 if ((work_ha_copy & HA_MBATT) && 12577 (phba->sli.mbox_active == NULL)) { 12578 send_current_mbox: 12579 /* Process next mailbox command if there is one */ 12580 do { 12581 rc = lpfc_sli_issue_mbox(phba, NULL, 12582 MBX_NOWAIT); 12583 } while (rc == MBX_NOT_FINISHED); 12584 if (rc != MBX_SUCCESS) 12585 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12586 LOG_SLI, "0349 rc should be " 12587 "MBX_SUCCESS\n"); 12588 } 12589 12590 spin_lock_irqsave(&phba->hbalock, iflag); 12591 phba->work_ha |= work_ha_copy; 12592 spin_unlock_irqrestore(&phba->hbalock, iflag); 12593 lpfc_worker_wake_up(phba); 12594 } 12595 return IRQ_HANDLED; 12596 unplug_error: 12597 spin_unlock_irqrestore(&phba->hbalock, iflag); 12598 return IRQ_HANDLED; 12599 12600 } /* lpfc_sli_sp_intr_handler */ 12601 12602 /** 12603 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12604 * @irq: Interrupt number. 12605 * @dev_id: The device context pointer. 12606 * 12607 * This function is directly called from the PCI layer as an interrupt 12608 * service routine when device with SLI-3 interface spec is enabled with 12609 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12610 * ring event in the HBA. However, when the device is enabled with either 12611 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12612 * device-level interrupt handler. When the PCI slot is in error recovery 12613 * or the HBA is undergoing initialization, the interrupt handler will not 12614 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12615 * the intrrupt context. This function is called without any lock held. 12616 * It gets the hbalock to access and update SLI data structures. 12617 * 12618 * This function returns IRQ_HANDLED when interrupt is handled else it 12619 * returns IRQ_NONE. 12620 **/ 12621 irqreturn_t 12622 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12623 { 12624 struct lpfc_hba *phba; 12625 uint32_t ha_copy; 12626 unsigned long status; 12627 unsigned long iflag; 12628 struct lpfc_sli_ring *pring; 12629 12630 /* Get the driver's phba structure from the dev_id and 12631 * assume the HBA is not interrupting. 
12632 */ 12633 phba = (struct lpfc_hba *) dev_id; 12634 12635 if (unlikely(!phba)) 12636 return IRQ_NONE; 12637 12638 /* 12639 * Stuff needs to be attented to when this function is invoked as an 12640 * individual interrupt handler in MSI-X multi-message interrupt mode 12641 */ 12642 if (phba->intr_type == MSIX) { 12643 /* Check device state for handling interrupt */ 12644 if (lpfc_intr_state_check(phba)) 12645 return IRQ_NONE; 12646 /* Need to read HA REG for FCP ring and other ring events */ 12647 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12648 return IRQ_HANDLED; 12649 /* Clear up only attention source related to fast-path */ 12650 spin_lock_irqsave(&phba->hbalock, iflag); 12651 /* 12652 * If there is deferred error attention, do not check for 12653 * any interrupt. 12654 */ 12655 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12656 spin_unlock_irqrestore(&phba->hbalock, iflag); 12657 return IRQ_NONE; 12658 } 12659 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12660 phba->HAregaddr); 12661 readl(phba->HAregaddr); /* flush */ 12662 spin_unlock_irqrestore(&phba->hbalock, iflag); 12663 } else 12664 ha_copy = phba->ha_copy; 12665 12666 /* 12667 * Process all events on FCP ring. Take the optimized path for FCP IO. 12668 */ 12669 ha_copy &= ~(phba->work_ha_mask); 12670 12671 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12672 status >>= (4*LPFC_FCP_RING); 12673 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12674 if (status & HA_RXMASK) 12675 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12676 12677 if (phba->cfg_multi_ring_support == 2) { 12678 /* 12679 * Process all events on extra ring. Take the optimized path 12680 * for extra ring IO. 12681 */ 12682 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12683 status >>= (4*LPFC_EXTRA_RING); 12684 if (status & HA_RXMASK) { 12685 lpfc_sli_handle_fast_ring_event(phba, 12686 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12687 status); 12688 } 12689 } 12690 return IRQ_HANDLED; 12691 } /* lpfc_sli_fp_intr_handler */ 12692 12693 /** 12694 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12695 * @irq: Interrupt number. 12696 * @dev_id: The device context pointer. 12697 * 12698 * This function is the HBA device-level interrupt handler to device with 12699 * SLI-3 interface spec, called from the PCI layer when either MSI or 12700 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12701 * requires driver attention. This function invokes the slow-path interrupt 12702 * attention handling function and fast-path interrupt attention handling 12703 * function in turn to process the relevant HBA attention events. This 12704 * function is called without any lock held. It gets the hbalock to access 12705 * and update SLI data structures. 12706 * 12707 * This function returns IRQ_HANDLED when interrupt is handled, else it 12708 * returns IRQ_NONE. 12709 **/ 12710 irqreturn_t 12711 lpfc_sli_intr_handler(int irq, void *dev_id) 12712 { 12713 struct lpfc_hba *phba; 12714 irqreturn_t sp_irq_rc, fp_irq_rc; 12715 unsigned long status1, status2; 12716 uint32_t hc_copy; 12717 12718 /* 12719 * Get the driver's phba structure from the dev_id and 12720 * assume the HBA is not interrupting. 
12721 */ 12722 phba = (struct lpfc_hba *) dev_id; 12723 12724 if (unlikely(!phba)) 12725 return IRQ_NONE; 12726 12727 /* Check device state for handling interrupt */ 12728 if (lpfc_intr_state_check(phba)) 12729 return IRQ_NONE; 12730 12731 spin_lock(&phba->hbalock); 12732 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12733 spin_unlock(&phba->hbalock); 12734 return IRQ_HANDLED; 12735 } 12736 12737 if (unlikely(!phba->ha_copy)) { 12738 spin_unlock(&phba->hbalock); 12739 return IRQ_NONE; 12740 } else if (phba->ha_copy & HA_ERATT) { 12741 if (phba->hba_flag & HBA_ERATT_HANDLED) 12742 /* ERATT polling has handled ERATT */ 12743 phba->ha_copy &= ~HA_ERATT; 12744 else 12745 /* Indicate interrupt handler handles ERATT */ 12746 phba->hba_flag |= HBA_ERATT_HANDLED; 12747 } 12748 12749 /* 12750 * If there is deferred error attention, do not check for any interrupt. 12751 */ 12752 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12753 spin_unlock(&phba->hbalock); 12754 return IRQ_NONE; 12755 } 12756 12757 /* Clear attention sources except link and error attentions */ 12758 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12759 spin_unlock(&phba->hbalock); 12760 return IRQ_HANDLED; 12761 } 12762 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12763 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12764 phba->HCregaddr); 12765 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12766 writel(hc_copy, phba->HCregaddr); 12767 readl(phba->HAregaddr); /* flush */ 12768 spin_unlock(&phba->hbalock); 12769 12770 /* 12771 * Invokes slow-path host attention interrupt handling as appropriate. 12772 */ 12773 12774 /* status of events with mailbox and link attention */ 12775 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12776 12777 /* status of events with ELS ring */ 12778 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12779 status2 >>= (4*LPFC_ELS_RING); 12780 12781 if (status1 || (status2 & HA_RXMASK)) 12782 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12783 else 12784 sp_irq_rc = IRQ_NONE; 12785 12786 /* 12787 * Invoke fast-path host attention interrupt handling as appropriate. 12788 */ 12789 12790 /* status of events with FCP ring */ 12791 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12792 status1 >>= (4*LPFC_FCP_RING); 12793 12794 /* status of events with extra ring */ 12795 if (phba->cfg_multi_ring_support == 2) { 12796 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12797 status2 >>= (4*LPFC_EXTRA_RING); 12798 } else 12799 status2 = 0; 12800 12801 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12802 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12803 else 12804 fp_irq_rc = IRQ_NONE; 12805 12806 /* Return device-level interrupt handling status */ 12807 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12808 } /* lpfc_sli_intr_handler */ 12809 12810 /** 12811 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12812 * @phba: pointer to lpfc hba data structure. 12813 * 12814 * This routine is invoked by the worker thread to process all the pending 12815 * SLI4 els abort xri events. 
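 * It runs with no lock held; hbalock is taken only long enough to clear
 * ELS_XRI_ABORT_EVENT and to unlink each event from
 * sp_els_xri_aborted_work_queue before the event is passed to
 * lpfc_sli4_els_xri_aborted() and released back to the free pool.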
12816 **/ 12817 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12818 { 12819 struct lpfc_cq_event *cq_event; 12820 12821 /* First, declare the els xri abort event has been handled */ 12822 spin_lock_irq(&phba->hbalock); 12823 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12824 spin_unlock_irq(&phba->hbalock); 12825 /* Now, handle all the els xri abort events */ 12826 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12827 /* Get the first event from the head of the event queue */ 12828 spin_lock_irq(&phba->hbalock); 12829 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12830 cq_event, struct lpfc_cq_event, list); 12831 spin_unlock_irq(&phba->hbalock); 12832 /* Notify aborted XRI for ELS work queue */ 12833 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12834 /* Free the event processed back to the free pool */ 12835 lpfc_sli4_cq_event_release(phba, cq_event); 12836 } 12837 } 12838 12839 /** 12840 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12841 * @phba: pointer to lpfc hba data structure 12842 * @pIocbIn: pointer to the rspiocbq 12843 * @pIocbOut: pointer to the cmdiocbq 12844 * @wcqe: pointer to the complete wcqe 12845 * 12846 * This routine transfers the fields of a command iocbq to a response iocbq 12847 * by copying all the IOCB fields from command iocbq and transferring the 12848 * completion status information from the complete wcqe. 12849 **/ 12850 static void 12851 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12852 struct lpfc_iocbq *pIocbIn, 12853 struct lpfc_iocbq *pIocbOut, 12854 struct lpfc_wcqe_complete *wcqe) 12855 { 12856 int numBdes, i; 12857 unsigned long iflags; 12858 uint32_t status, max_response; 12859 struct lpfc_dmabuf *dmabuf; 12860 struct ulp_bde64 *bpl, bde; 12861 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12862 12863 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12864 sizeof(struct lpfc_iocbq) - offset); 12865 /* Map WCQE parameters into irspiocb parameters */ 12866 status = bf_get(lpfc_wcqe_c_status, wcqe); 12867 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12868 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12869 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12870 pIocbIn->iocb.un.fcpi.fcpi_parm = 12871 pIocbOut->iocb.un.fcpi.fcpi_parm - 12872 wcqe->total_data_placed; 12873 else 12874 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12875 else { 12876 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12877 switch (pIocbOut->iocb.ulpCommand) { 12878 case CMD_ELS_REQUEST64_CR: 12879 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12880 bpl = (struct ulp_bde64 *)dmabuf->virt; 12881 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12882 max_response = bde.tus.f.bdeSize; 12883 break; 12884 case CMD_GEN_REQUEST64_CR: 12885 max_response = 0; 12886 if (!pIocbOut->context3) 12887 break; 12888 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12889 sizeof(struct ulp_bde64); 12890 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12891 bpl = (struct ulp_bde64 *)dmabuf->virt; 12892 for (i = 0; i < numBdes; i++) { 12893 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12894 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12895 max_response += bde.tus.f.bdeSize; 12896 } 12897 break; 12898 default: 12899 max_response = wcqe->total_data_placed; 12900 break; 12901 } 12902 if (max_response < wcqe->total_data_placed) 12903 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12904 else 12905 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12906 wcqe->total_data_placed; 12907 } 
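	/* For non-FCP commands, bdl.bdeSize in the pseudo response now holds
	 * the lesser of the BDE-derived maximum response length and the
	 * total data the hardware actually placed.
	 */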

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq that carries a copy of the
 *	work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
12965 **/ 12966 static struct lpfc_iocbq * 12967 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 12968 struct lpfc_iocbq *irspiocbq) 12969 { 12970 struct lpfc_sli_ring *pring; 12971 struct lpfc_iocbq *cmdiocbq; 12972 struct lpfc_wcqe_complete *wcqe; 12973 unsigned long iflags; 12974 12975 pring = lpfc_phba_elsring(phba); 12976 if (unlikely(!pring)) 12977 return NULL; 12978 12979 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 12980 pring->stats.iocb_event++; 12981 /* Look up the ELS command IOCB and create pseudo response IOCB */ 12982 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12983 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12984 if (unlikely(!cmdiocbq)) { 12985 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12986 "0386 ELS complete with no corresponding " 12987 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 12988 wcqe->word0, wcqe->total_data_placed, 12989 wcqe->parameter, wcqe->word3); 12990 lpfc_sli_release_iocbq(phba, irspiocbq); 12991 return NULL; 12992 } 12993 12994 spin_lock_irqsave(&pring->ring_lock, iflags); 12995 /* Put the iocb back on the txcmplq */ 12996 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 12997 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12998 12999 /* Fake the irspiocbq and copy necessary response information */ 13000 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 13001 13002 return irspiocbq; 13003 } 13004 13005 inline struct lpfc_cq_event * 13006 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 13007 { 13008 struct lpfc_cq_event *cq_event; 13009 13010 /* Allocate a new internal CQ_EVENT entry */ 13011 cq_event = lpfc_sli4_cq_event_alloc(phba); 13012 if (!cq_event) { 13013 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13014 "0602 Failed to alloc CQ_EVENT entry\n"); 13015 return NULL; 13016 } 13017 13018 /* Move the CQE into the event */ 13019 memcpy(&cq_event->cqe, entry, size); 13020 return cq_event; 13021 } 13022 13023 /** 13024 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 13025 * @phba: Pointer to HBA context object. 13026 * @cqe: Pointer to mailbox completion queue entry. 13027 * 13028 * This routine process a mailbox completion queue entry with asynchrous 13029 * event. 13030 * 13031 * Return: true if work posted to worker thread, otherwise false. 13032 **/ 13033 static bool 13034 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13035 { 13036 struct lpfc_cq_event *cq_event; 13037 unsigned long iflags; 13038 13039 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13040 "0392 Async Event: word0:x%x, word1:x%x, " 13041 "word2:x%x, word3:x%x\n", mcqe->word0, 13042 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 13043 13044 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 13045 if (!cq_event) 13046 return false; 13047 spin_lock_irqsave(&phba->hbalock, iflags); 13048 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 13049 /* Set the async event flag */ 13050 phba->hba_flag |= ASYNC_EVENT; 13051 spin_unlock_irqrestore(&phba->hbalock, iflags); 13052 13053 return true; 13054 } 13055 13056 /** 13057 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 13058 * @phba: Pointer to HBA context object. 13059 * @cqe: Pointer to mailbox completion queue entry. 13060 * 13061 * This routine process a mailbox completion queue entry with mailbox 13062 * completion event. 13063 * 13064 * Return: true if work posted to worker thread, otherwise false. 
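 * No lock is held on entry; hbalock is taken internally to detach the
 * active mailbox command and again when the mailbox posting token is
 * released, before the worker thread is woken to post the next pending
 * mailbox command.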
13065 **/ 13066 static bool 13067 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 13068 { 13069 uint32_t mcqe_status; 13070 MAILBOX_t *mbox, *pmbox; 13071 struct lpfc_mqe *mqe; 13072 struct lpfc_vport *vport; 13073 struct lpfc_nodelist *ndlp; 13074 struct lpfc_dmabuf *mp; 13075 unsigned long iflags; 13076 LPFC_MBOXQ_t *pmb; 13077 bool workposted = false; 13078 int rc; 13079 13080 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 13081 if (!bf_get(lpfc_trailer_completed, mcqe)) 13082 goto out_no_mqe_complete; 13083 13084 /* Get the reference to the active mbox command */ 13085 spin_lock_irqsave(&phba->hbalock, iflags); 13086 pmb = phba->sli.mbox_active; 13087 if (unlikely(!pmb)) { 13088 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 13089 "1832 No pending MBOX command to handle\n"); 13090 spin_unlock_irqrestore(&phba->hbalock, iflags); 13091 goto out_no_mqe_complete; 13092 } 13093 spin_unlock_irqrestore(&phba->hbalock, iflags); 13094 mqe = &pmb->u.mqe; 13095 pmbox = (MAILBOX_t *)&pmb->u.mqe; 13096 mbox = phba->mbox; 13097 vport = pmb->vport; 13098 13099 /* Reset heartbeat timer */ 13100 phba->last_completion_time = jiffies; 13101 del_timer(&phba->sli.mbox_tmo); 13102 13103 /* Move mbox data to caller's mailbox region, do endian swapping */ 13104 if (pmb->mbox_cmpl && mbox) 13105 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 13106 13107 /* 13108 * For mcqe errors, conditionally move a modified error code to 13109 * the mbox so that the error will not be missed. 13110 */ 13111 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 13112 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 13113 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 13114 bf_set(lpfc_mqe_status, mqe, 13115 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 13116 } 13117 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 13118 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 13119 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 13120 "MBOX dflt rpi: status:x%x rpi:x%x", 13121 mcqe_status, 13122 pmbox->un.varWords[0], 0); 13123 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 13124 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); 13125 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp; 13126 /* Reg_LOGIN of dflt RPI was successful. Now lets get 13127 * RID of the PPI using the same mbox buffer. 
13128 */ 13129 lpfc_unreg_login(phba, vport->vpi, 13130 pmbox->un.varWords[0], pmb); 13131 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13132 pmb->ctx_buf = mp; 13133 pmb->ctx_ndlp = ndlp; 13134 pmb->vport = vport; 13135 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13136 if (rc != MBX_BUSY) 13137 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13138 LOG_SLI, "0385 rc should " 13139 "have been MBX_BUSY\n"); 13140 if (rc != MBX_NOT_FINISHED) 13141 goto send_current_mbox; 13142 } 13143 } 13144 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13145 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13146 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13147 13148 /* There is mailbox completion work to do */ 13149 spin_lock_irqsave(&phba->hbalock, iflags); 13150 __lpfc_mbox_cmpl_put(phba, pmb); 13151 phba->work_ha |= HA_MBATT; 13152 spin_unlock_irqrestore(&phba->hbalock, iflags); 13153 workposted = true; 13154 13155 send_current_mbox: 13156 spin_lock_irqsave(&phba->hbalock, iflags); 13157 /* Release the mailbox command posting token */ 13158 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13159 /* Setting active mailbox pointer need to be in sync to flag clear */ 13160 phba->sli.mbox_active = NULL; 13161 spin_unlock_irqrestore(&phba->hbalock, iflags); 13162 /* Wake up worker thread to post the next pending mailbox command */ 13163 lpfc_worker_wake_up(phba); 13164 out_no_mqe_complete: 13165 if (bf_get(lpfc_trailer_consumed, mcqe)) 13166 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13167 return workposted; 13168 } 13169 13170 /** 13171 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13172 * @phba: Pointer to HBA context object. 13173 * @cqe: Pointer to mailbox completion queue entry. 13174 * 13175 * This routine process a mailbox completion queue entry, it invokes the 13176 * proper mailbox complete handling or asynchrous event handling routine 13177 * according to the MCQE's async bit. 13178 * 13179 * Return: true if work posted to worker thread, otherwise false. 13180 **/ 13181 static bool 13182 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13183 struct lpfc_cqe *cqe) 13184 { 13185 struct lpfc_mcqe mcqe; 13186 bool workposted; 13187 13188 cq->CQ_mbox++; 13189 13190 /* Copy the mailbox MCQE and convert endian order as needed */ 13191 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13192 13193 /* Invoke the proper event handling routine */ 13194 if (!bf_get(lpfc_trailer_async, &mcqe)) 13195 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13196 else 13197 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13198 return workposted; 13199 } 13200 13201 /** 13202 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13203 * @phba: Pointer to HBA context object. 13204 * @cq: Pointer to associated CQ 13205 * @wcqe: Pointer to work-queue completion queue entry. 13206 * 13207 * This routine handles an ELS work-queue completion event. 13208 * 13209 * Return: true if work posted to worker thread, otherwise false. 
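 * The WCQE is only copied into a spare iocbq and queued on
 * sp_queue_event here; the pseudo response iocb itself is built later by
 * lpfc_sli4_els_wcqe_to_rspiocbq() when the worker thread drains the
 * queued event.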
13210 **/ 13211 static bool 13212 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13213 struct lpfc_wcqe_complete *wcqe) 13214 { 13215 struct lpfc_iocbq *irspiocbq; 13216 unsigned long iflags; 13217 struct lpfc_sli_ring *pring = cq->pring; 13218 int txq_cnt = 0; 13219 int txcmplq_cnt = 0; 13220 int fcp_txcmplq_cnt = 0; 13221 13222 /* Check for response status */ 13223 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13224 /* Log the error status */ 13225 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13226 "0357 ELS CQE error: status=x%x: " 13227 "CQE: %08x %08x %08x %08x\n", 13228 bf_get(lpfc_wcqe_c_status, wcqe), 13229 wcqe->word0, wcqe->total_data_placed, 13230 wcqe->parameter, wcqe->word3); 13231 } 13232 13233 /* Get an irspiocbq for later ELS response processing use */ 13234 irspiocbq = lpfc_sli_get_iocbq(phba); 13235 if (!irspiocbq) { 13236 if (!list_empty(&pring->txq)) 13237 txq_cnt++; 13238 if (!list_empty(&pring->txcmplq)) 13239 txcmplq_cnt++; 13240 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13241 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13242 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 13243 txq_cnt, phba->iocb_cnt, 13244 fcp_txcmplq_cnt, 13245 txcmplq_cnt); 13246 return false; 13247 } 13248 13249 /* Save off the slow-path queue event for work thread to process */ 13250 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13251 spin_lock_irqsave(&phba->hbalock, iflags); 13252 list_add_tail(&irspiocbq->cq_event.list, 13253 &phba->sli4_hba.sp_queue_event); 13254 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13255 spin_unlock_irqrestore(&phba->hbalock, iflags); 13256 13257 return true; 13258 } 13259 13260 /** 13261 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13262 * @phba: Pointer to HBA context object. 13263 * @wcqe: Pointer to work-queue completion queue entry. 13264 * 13265 * This routine handles slow-path WQ entry consumed event by invoking the 13266 * proper WQ release routine to the slow-path WQ. 13267 **/ 13268 static void 13269 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13270 struct lpfc_wcqe_release *wcqe) 13271 { 13272 /* sanity check on queue memory */ 13273 if (unlikely(!phba->sli4_hba.els_wq)) 13274 return; 13275 /* Check for the slow-path ELS work queue */ 13276 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13277 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13278 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13279 else 13280 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13281 "2579 Slow-path wqe consume event carries " 13282 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13283 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13284 phba->sli4_hba.els_wq->queue_id); 13285 } 13286 13287 /** 13288 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13289 * @phba: Pointer to HBA context object. 13290 * @cq: Pointer to a WQ completion queue. 13291 * @wcqe: Pointer to work-queue completion queue entry. 13292 * 13293 * This routine handles an XRI abort event. 13294 * 13295 * Return: true if work posted to worker thread, otherwise false. 
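 * For LPFC_IO completion queues the abort is handled inline (including
 * the NVMET notification when NVMET is enabled) and no work is posted;
 * for ELS and NVME LS the event is queued on
 * sp_els_xri_aborted_work_queue and ELS_XRI_ABORT_EVENT is set for the
 * worker thread.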
13296 **/ 13297 static bool 13298 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13299 struct lpfc_queue *cq, 13300 struct sli4_wcqe_xri_aborted *wcqe) 13301 { 13302 bool workposted = false; 13303 struct lpfc_cq_event *cq_event; 13304 unsigned long iflags; 13305 13306 switch (cq->subtype) { 13307 case LPFC_IO: 13308 lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); 13309 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { 13310 /* Notify aborted XRI for NVME work queue */ 13311 if (phba->nvmet_support) 13312 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13313 } 13314 workposted = false; 13315 break; 13316 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13317 case LPFC_ELS: 13318 cq_event = lpfc_cq_event_setup( 13319 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13320 if (!cq_event) 13321 return false; 13322 cq_event->hdwq = cq->hdwq; 13323 spin_lock_irqsave(&phba->hbalock, iflags); 13324 list_add_tail(&cq_event->list, 13325 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13326 /* Set the els xri abort event flag */ 13327 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13328 spin_unlock_irqrestore(&phba->hbalock, iflags); 13329 workposted = true; 13330 break; 13331 default: 13332 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13333 "0603 Invalid CQ subtype %d: " 13334 "%08x %08x %08x %08x\n", 13335 cq->subtype, wcqe->word0, wcqe->parameter, 13336 wcqe->word2, wcqe->word3); 13337 workposted = false; 13338 break; 13339 } 13340 return workposted; 13341 } 13342 13343 #define FC_RCTL_MDS_DIAGS 0xF4 13344 13345 /** 13346 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13347 * @phba: Pointer to HBA context object. 13348 * @rcqe: Pointer to receive-queue completion queue entry. 13349 * 13350 * This routine process a receive-queue completion queue entry. 13351 * 13352 * Return: true if work posted to worker thread, otherwise false. 
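 * Only the default hdr_rq/dat_rq pair is serviced here; MDS diagnostic
 * and unsolicited data frames are handed straight to
 * lpfc_sli4_handle_mds_loopback(), while successfully received frames
 * are queued on sp_queue_event for the worker thread.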
13353 **/ 13354 static bool 13355 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 13356 { 13357 bool workposted = false; 13358 struct fc_frame_header *fc_hdr; 13359 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 13360 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 13361 struct lpfc_nvmet_tgtport *tgtp; 13362 struct hbq_dmabuf *dma_buf; 13363 uint32_t status, rq_id; 13364 unsigned long iflags; 13365 13366 /* sanity check on queue memory */ 13367 if (unlikely(!hrq) || unlikely(!drq)) 13368 return workposted; 13369 13370 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13371 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13372 else 13373 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13374 if (rq_id != hrq->queue_id) 13375 goto out; 13376 13377 status = bf_get(lpfc_rcqe_status, rcqe); 13378 switch (status) { 13379 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13380 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13381 "2537 Receive Frame Truncated!!\n"); 13382 /* fall through */ 13383 case FC_STATUS_RQ_SUCCESS: 13384 spin_lock_irqsave(&phba->hbalock, iflags); 13385 lpfc_sli4_rq_release(hrq, drq); 13386 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 13387 if (!dma_buf) { 13388 hrq->RQ_no_buf_found++; 13389 spin_unlock_irqrestore(&phba->hbalock, iflags); 13390 goto out; 13391 } 13392 hrq->RQ_rcv_buf++; 13393 hrq->RQ_buf_posted--; 13394 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 13395 13396 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13397 13398 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 13399 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 13400 spin_unlock_irqrestore(&phba->hbalock, iflags); 13401 /* Handle MDS Loopback frames */ 13402 lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); 13403 break; 13404 } 13405 13406 /* save off the frame for the work thread to process */ 13407 list_add_tail(&dma_buf->cq_event.list, 13408 &phba->sli4_hba.sp_queue_event); 13409 /* Frame received */ 13410 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13411 spin_unlock_irqrestore(&phba->hbalock, iflags); 13412 workposted = true; 13413 break; 13414 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13415 if (phba->nvmet_support) { 13416 tgtp = phba->targetport->private; 13417 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13418 "6402 RQE Error x%x, posted %d err_cnt " 13419 "%d: %x %x %x\n", 13420 status, hrq->RQ_buf_posted, 13421 hrq->RQ_no_posted_buf, 13422 atomic_read(&tgtp->rcv_fcp_cmd_in), 13423 atomic_read(&tgtp->rcv_fcp_cmd_out), 13424 atomic_read(&tgtp->xmt_fcp_release)); 13425 } 13426 /* fallthrough */ 13427 13428 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13429 hrq->RQ_no_posted_buf++; 13430 /* Post more buffers if possible */ 13431 spin_lock_irqsave(&phba->hbalock, iflags); 13432 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 13433 spin_unlock_irqrestore(&phba->hbalock, iflags); 13434 workposted = true; 13435 break; 13436 } 13437 out: 13438 return workposted; 13439 } 13440 13441 /** 13442 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 13443 * @phba: Pointer to HBA context object. 13444 * @cq: Pointer to the completion queue. 13445 * @cqe: Pointer to a completion queue entry. 13446 * 13447 * This routine process a slow-path work-queue or receive queue completion queue 13448 * entry. 13449 * 13450 * Return: true if work posted to worker thread, otherwise false. 
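 * The CQE is first copied out (and byte swapped if needed) with
 * lpfc_sli4_pcimem_bcopy() and then dispatched on its lpfc_cqe_code
 * field to the ELS WQ-completion, WQ-release, XRI-abort or receive-queue
 * handlers above.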
13451 **/ 13452 static bool 13453 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13454 struct lpfc_cqe *cqe) 13455 { 13456 struct lpfc_cqe cqevt; 13457 bool workposted = false; 13458 13459 /* Copy the work queue CQE and convert endian order if needed */ 13460 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13461 13462 /* Check and process for different type of WCQE and dispatch */ 13463 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13464 case CQE_CODE_COMPL_WQE: 13465 /* Process the WQ/RQ complete event */ 13466 phba->last_completion_time = jiffies; 13467 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13468 (struct lpfc_wcqe_complete *)&cqevt); 13469 break; 13470 case CQE_CODE_RELEASE_WQE: 13471 /* Process the WQ release event */ 13472 lpfc_sli4_sp_handle_rel_wcqe(phba, 13473 (struct lpfc_wcqe_release *)&cqevt); 13474 break; 13475 case CQE_CODE_XRI_ABORTED: 13476 /* Process the WQ XRI abort event */ 13477 phba->last_completion_time = jiffies; 13478 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13479 (struct sli4_wcqe_xri_aborted *)&cqevt); 13480 break; 13481 case CQE_CODE_RECEIVE: 13482 case CQE_CODE_RECEIVE_V1: 13483 /* Process the RQ event */ 13484 phba->last_completion_time = jiffies; 13485 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13486 (struct lpfc_rcqe *)&cqevt); 13487 break; 13488 default: 13489 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13490 "0388 Not a valid WCQE code: x%x\n", 13491 bf_get(lpfc_cqe_code, &cqevt)); 13492 break; 13493 } 13494 return workposted; 13495 } 13496 13497 /** 13498 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13499 * @phba: Pointer to HBA context object. 13500 * @eqe: Pointer to fast-path event queue entry. 13501 * 13502 * This routine process a event queue entry from the slow-path event queue. 13503 * It will check the MajorCode and MinorCode to determine this is for a 13504 * completion event on a completion queue, if not, an error shall be logged 13505 * and just return. Otherwise, it will get to the corresponding completion 13506 * queue and process all the entries on that completion queue, rearm the 13507 * completion queue, and then return. 13508 * 13509 **/ 13510 static void 13511 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13512 struct lpfc_queue *speq) 13513 { 13514 struct lpfc_queue *cq = NULL, *childq; 13515 uint16_t cqid; 13516 13517 /* Get the reference to the corresponding CQ */ 13518 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13519 13520 list_for_each_entry(childq, &speq->child_list, list) { 13521 if (childq->queue_id == cqid) { 13522 cq = childq; 13523 break; 13524 } 13525 } 13526 if (unlikely(!cq)) { 13527 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13528 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13529 "0365 Slow-path CQ identifier " 13530 "(%d) does not exist\n", cqid); 13531 return; 13532 } 13533 13534 /* Save EQ associated with this CQ */ 13535 cq->assoc_qp = speq; 13536 13537 if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) 13538 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13539 "0390 Cannot schedule soft IRQ " 13540 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13541 cqid, cq->queue_id, raw_smp_processor_id()); 13542 } 13543 13544 /** 13545 * __lpfc_sli4_process_cq - Process elements of a CQ 13546 * @phba: Pointer to HBA context object. 
13547 * @cq: Pointer to CQ to be processed 13548 * @handler: Routine to process each cqe 13549 * @delay: Pointer to usdelay to set in case of rescheduling of the handler 13550 * 13551 * This routine processes completion queue entries in a CQ. While a valid 13552 * queue element is found, the handler is called. During processing checks 13553 * are made for periodic doorbell writes to let the hardware know of 13554 * element consumption. 13555 * 13556 * If the max limit on cqes to process is hit, or there are no more valid 13557 * entries, the loop stops. If we processed a sufficient number of elements, 13558 * meaning there is sufficient load, rather than rearming and generating 13559 * another interrupt, a cq rescheduling delay will be set. A delay of 0 13560 * indicates no rescheduling. 13561 * 13562 * Returns True if work scheduled, False otherwise. 13563 **/ 13564 static bool 13565 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, 13566 bool (*handler)(struct lpfc_hba *, struct lpfc_queue *, 13567 struct lpfc_cqe *), unsigned long *delay) 13568 { 13569 struct lpfc_cqe *cqe; 13570 bool workposted = false; 13571 int count = 0, consumed = 0; 13572 bool arm = true; 13573 13574 /* default - no reschedule */ 13575 *delay = 0; 13576 13577 if (cmpxchg(&cq->queue_claimed, 0, 1) != 0) 13578 goto rearm_and_exit; 13579 13580 /* Process all the entries to the CQ */ 13581 cq->q_flag = 0; 13582 cqe = lpfc_sli4_cq_get(cq); 13583 while (cqe) { 13584 workposted |= handler(phba, cq, cqe); 13585 __lpfc_sli4_consume_cqe(phba, cq, cqe); 13586 13587 consumed++; 13588 if (!(++count % cq->max_proc_limit)) 13589 break; 13590 13591 if (!(count % cq->notify_interval)) { 13592 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13593 LPFC_QUEUE_NOARM); 13594 consumed = 0; 13595 } 13596 13597 if (count == LPFC_NVMET_CQ_NOTIFY) 13598 cq->q_flag |= HBA_NVMET_CQ_NOTIFY; 13599 13600 cqe = lpfc_sli4_cq_get(cq); 13601 } 13602 if (count >= phba->cfg_cq_poll_threshold) { 13603 *delay = 1; 13604 arm = false; 13605 } 13606 13607 /* Track the max number of CQEs processed in 1 EQ */ 13608 if (count > cq->CQ_max_cqe) 13609 cq->CQ_max_cqe = count; 13610 13611 cq->assoc_qp->EQ_cqe_cnt += count; 13612 13613 /* Catch the no cq entry condition */ 13614 if (unlikely(count == 0)) 13615 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13616 "0369 No entry from completion queue " 13617 "qid=%d\n", cq->queue_id); 13618 13619 cq->queue_claimed = 0; 13620 13621 rearm_and_exit: 13622 phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, 13623 arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM); 13624 13625 return workposted; 13626 } 13627 13628 /** 13629 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13630 * @cq: pointer to CQ to process 13631 * 13632 * This routine calls the cq processing routine with a handler specific 13633 * to the type of queue bound to it. 13634 * 13635 * The CQ routine returns two values: the first is the calling status, 13636 * which indicates whether work was queued to the background discovery 13637 * thread. If true, the routine should wakeup the discovery thread; 13638 * the second is the delay parameter. If non-zero, rather than rearming 13639 * the CQ and yet another interrupt, the CQ handler should be queued so 13640 * that it is processed in a subsequent polling action. The value of 13641 * the delay indicates when to reschedule it. 
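 * When the handler reports a non-zero delay, this routine re-queues the
 * CQ on the same CPU via queue_delayed_work_on() on &cq->sched_spwork,
 * which comes back here through lpfc_sli4_dly_sp_process_cq().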
13642 **/ 13643 static void 13644 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) 13645 { 13646 struct lpfc_hba *phba = cq->phba; 13647 unsigned long delay; 13648 bool workposted = false; 13649 13650 /* Process and rearm the CQ */ 13651 switch (cq->type) { 13652 case LPFC_MCQ: 13653 workposted |= __lpfc_sli4_process_cq(phba, cq, 13654 lpfc_sli4_sp_handle_mcqe, 13655 &delay); 13656 break; 13657 case LPFC_WCQ: 13658 if (cq->subtype == LPFC_IO) 13659 workposted |= __lpfc_sli4_process_cq(phba, cq, 13660 lpfc_sli4_fp_handle_cqe, 13661 &delay); 13662 else 13663 workposted |= __lpfc_sli4_process_cq(phba, cq, 13664 lpfc_sli4_sp_handle_cqe, 13665 &delay); 13666 break; 13667 default: 13668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13669 "0370 Invalid completion queue type (%d)\n", 13670 cq->type); 13671 return; 13672 } 13673 13674 if (delay) { 13675 if (!queue_delayed_work_on(cq->chann, phba->wq, 13676 &cq->sched_spwork, delay)) 13677 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13678 "0394 Cannot schedule soft IRQ " 13679 "for cqid=%d on CPU %d\n", 13680 cq->queue_id, cq->chann); 13681 } 13682 13683 /* wake up worker thread if there are works to be done */ 13684 if (workposted) 13685 lpfc_worker_wake_up(phba); 13686 } 13687 13688 /** 13689 * lpfc_sli4_sp_process_cq - slow-path work handler when started by 13690 * interrupt 13691 * @work: pointer to work element 13692 * 13693 * translates from the work handler and calls the slow-path handler. 13694 **/ 13695 static void 13696 lpfc_sli4_sp_process_cq(struct work_struct *work) 13697 { 13698 struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork); 13699 13700 __lpfc_sli4_sp_process_cq(cq); 13701 } 13702 13703 /** 13704 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer 13705 * @work: pointer to work element 13706 * 13707 * translates from the work handler and calls the slow-path handler. 13708 **/ 13709 static void 13710 lpfc_sli4_dly_sp_process_cq(struct work_struct *work) 13711 { 13712 struct lpfc_queue *cq = container_of(to_delayed_work(work), 13713 struct lpfc_queue, sched_spwork); 13714 13715 __lpfc_sli4_sp_process_cq(cq); 13716 } 13717 13718 /** 13719 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13720 * @phba: Pointer to HBA context object. 13721 * @cq: Pointer to associated CQ 13722 * @wcqe: Pointer to work-queue completion queue entry. 13723 * 13724 * This routine process a fast-path work queue completion entry from fast-path 13725 * event queue for FCP command response completion. 13726 **/ 13727 static void 13728 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13729 struct lpfc_wcqe_complete *wcqe) 13730 { 13731 struct lpfc_sli_ring *pring = cq->pring; 13732 struct lpfc_iocbq *cmdiocbq; 13733 struct lpfc_iocbq irspiocbq; 13734 unsigned long iflags; 13735 13736 /* Check for response status */ 13737 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13738 /* If resource errors reported from HBA, reduce queue 13739 * depth of the SCSI device. 
13740 */ 13741 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13742 IOSTAT_LOCAL_REJECT)) && 13743 ((wcqe->parameter & IOERR_PARAM_MASK) == 13744 IOERR_NO_RESOURCES)) 13745 phba->lpfc_rampdown_queue_depth(phba); 13746 13747 /* Log the error status */ 13748 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13749 "0373 FCP CQE error: status=x%x: " 13750 "CQE: %08x %08x %08x %08x\n", 13751 bf_get(lpfc_wcqe_c_status, wcqe), 13752 wcqe->word0, wcqe->total_data_placed, 13753 wcqe->parameter, wcqe->word3); 13754 } 13755 13756 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13757 spin_lock_irqsave(&pring->ring_lock, iflags); 13758 pring->stats.iocb_event++; 13759 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13760 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13761 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13762 if (unlikely(!cmdiocbq)) { 13763 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13764 "0374 FCP complete with no corresponding " 13765 "cmdiocb: iotag (%d)\n", 13766 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13767 return; 13768 } 13769 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13770 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13771 #endif 13772 if (cmdiocbq->iocb_cmpl == NULL) { 13773 if (cmdiocbq->wqe_cmpl) { 13774 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13775 spin_lock_irqsave(&phba->hbalock, iflags); 13776 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13777 spin_unlock_irqrestore(&phba->hbalock, iflags); 13778 } 13779 13780 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13781 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13782 return; 13783 } 13784 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13785 "0375 FCP cmdiocb not callback function " 13786 "iotag: (%d)\n", 13787 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13788 return; 13789 } 13790 13791 /* Fake the irspiocb and copy necessary response information */ 13792 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13793 13794 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13795 spin_lock_irqsave(&phba->hbalock, iflags); 13796 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13797 spin_unlock_irqrestore(&phba->hbalock, iflags); 13798 } 13799 13800 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13801 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13802 } 13803 13804 /** 13805 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13806 * @phba: Pointer to HBA context object. 13807 * @cq: Pointer to completion queue. 13808 * @wcqe: Pointer to work-queue completion queue entry. 13809 * 13810 * This routine handles an fast-path WQ entry consumed event by invoking the 13811 * proper WQ release routine to the slow-path WQ. 
13812 **/ 13813 static void 13814 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13815 struct lpfc_wcqe_release *wcqe) 13816 { 13817 struct lpfc_queue *childwq; 13818 bool wqid_matched = false; 13819 uint16_t hba_wqid; 13820 13821 /* Check for fast-path FCP work queue release */ 13822 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13823 list_for_each_entry(childwq, &cq->child_list, list) { 13824 if (childwq->queue_id == hba_wqid) { 13825 lpfc_sli4_wq_release(childwq, 13826 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13827 if (childwq->q_flag & HBA_NVMET_WQFULL) 13828 lpfc_nvmet_wqfull_process(phba, childwq); 13829 wqid_matched = true; 13830 break; 13831 } 13832 } 13833 /* Report warning log message if no match found */ 13834 if (wqid_matched != true) 13835 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13836 "2580 Fast-path wqe consume event carries " 13837 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13838 } 13839 13840 /** 13841 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13842 * @phba: Pointer to HBA context object. 13843 * @rcqe: Pointer to receive-queue completion queue entry. 13844 * 13845 * This routine process a receive-queue completion queue entry. 13846 * 13847 * Return: true if work posted to worker thread, otherwise false. 13848 **/ 13849 static bool 13850 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13851 struct lpfc_rcqe *rcqe) 13852 { 13853 bool workposted = false; 13854 struct lpfc_queue *hrq; 13855 struct lpfc_queue *drq; 13856 struct rqb_dmabuf *dma_buf; 13857 struct fc_frame_header *fc_hdr; 13858 struct lpfc_nvmet_tgtport *tgtp; 13859 uint32_t status, rq_id; 13860 unsigned long iflags; 13861 uint32_t fctl, idx; 13862 13863 if ((phba->nvmet_support == 0) || 13864 (phba->sli4_hba.nvmet_cqset == NULL)) 13865 return workposted; 13866 13867 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13868 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13869 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13870 13871 /* sanity check on queue memory */ 13872 if (unlikely(!hrq) || unlikely(!drq)) 13873 return workposted; 13874 13875 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13876 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13877 else 13878 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13879 13880 if ((phba->nvmet_support == 0) || 13881 (rq_id != hrq->queue_id)) 13882 return workposted; 13883 13884 status = bf_get(lpfc_rcqe_status, rcqe); 13885 switch (status) { 13886 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13887 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13888 "6126 Receive Frame Truncated!!\n"); 13889 /* fall through */ 13890 case FC_STATUS_RQ_SUCCESS: 13891 spin_lock_irqsave(&phba->hbalock, iflags); 13892 lpfc_sli4_rq_release(hrq, drq); 13893 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13894 if (!dma_buf) { 13895 hrq->RQ_no_buf_found++; 13896 spin_unlock_irqrestore(&phba->hbalock, iflags); 13897 goto out; 13898 } 13899 spin_unlock_irqrestore(&phba->hbalock, iflags); 13900 hrq->RQ_rcv_buf++; 13901 hrq->RQ_buf_posted--; 13902 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13903 13904 /* Just some basic sanity checks on FCP Command frame */ 13905 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13906 fc_hdr->fh_f_ctl[1] << 8 | 13907 fc_hdr->fh_f_ctl[2]); 13908 if (((fctl & 13909 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13910 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13911 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13912 goto drop; 13913 13914 if (fc_hdr->fh_type == 
FC_TYPE_FCP) { 13915 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13916 lpfc_nvmet_unsol_fcp_event( 13917 phba, idx, dma_buf, cq->isr_timestamp, 13918 cq->q_flag & HBA_NVMET_CQ_NOTIFY); 13919 return false; 13920 } 13921 drop: 13922 lpfc_rq_buf_free(phba, &dma_buf->hbuf); 13923 break; 13924 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13925 if (phba->nvmet_support) { 13926 tgtp = phba->targetport->private; 13927 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13928 "6401 RQE Error x%x, posted %d err_cnt " 13929 "%d: %x %x %x\n", 13930 status, hrq->RQ_buf_posted, 13931 hrq->RQ_no_posted_buf, 13932 atomic_read(&tgtp->rcv_fcp_cmd_in), 13933 atomic_read(&tgtp->rcv_fcp_cmd_out), 13934 atomic_read(&tgtp->xmt_fcp_release)); 13935 } 13936 /* fallthrough */ 13937 13938 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13939 hrq->RQ_no_posted_buf++; 13940 /* Post more buffers if possible */ 13941 break; 13942 } 13943 out: 13944 return workposted; 13945 } 13946 13947 /** 13948 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13949 * @phba: adapter with cq 13950 * @cq: Pointer to the completion queue. 13951 * @eqe: Pointer to fast-path completion queue entry. 13952 * 13953 * This routine process a fast-path work queue completion entry from fast-path 13954 * event queue for FCP command response completion. 13955 * 13956 * Return: true if work posted to worker thread, otherwise false. 13957 **/ 13958 static bool 13959 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13960 struct lpfc_cqe *cqe) 13961 { 13962 struct lpfc_wcqe_release wcqe; 13963 bool workposted = false; 13964 13965 /* Copy the work queue CQE and convert endian order if needed */ 13966 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13967 13968 /* Check and process for different type of WCQE and dispatch */ 13969 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13970 case CQE_CODE_COMPL_WQE: 13971 case CQE_CODE_NVME_ERSP: 13972 cq->CQ_wq++; 13973 /* Process the WQ complete event */ 13974 phba->last_completion_time = jiffies; 13975 if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) 13976 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13977 (struct lpfc_wcqe_complete *)&wcqe); 13978 break; 13979 case CQE_CODE_RELEASE_WQE: 13980 cq->CQ_release_wqe++; 13981 /* Process the WQ release event */ 13982 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 13983 (struct lpfc_wcqe_release *)&wcqe); 13984 break; 13985 case CQE_CODE_XRI_ABORTED: 13986 cq->CQ_xri_aborted++; 13987 /* Process the WQ XRI abort event */ 13988 phba->last_completion_time = jiffies; 13989 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13990 (struct sli4_wcqe_xri_aborted *)&wcqe); 13991 break; 13992 case CQE_CODE_RECEIVE_V1: 13993 case CQE_CODE_RECEIVE: 13994 phba->last_completion_time = jiffies; 13995 if (cq->subtype == LPFC_NVMET) { 13996 workposted = lpfc_sli4_nvmet_handle_rcqe( 13997 phba, cq, (struct lpfc_rcqe *)&wcqe); 13998 } 13999 break; 14000 default: 14001 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14002 "0144 Not a valid CQE code: x%x\n", 14003 bf_get(lpfc_wcqe_c_code, &wcqe)); 14004 break; 14005 } 14006 return workposted; 14007 } 14008 14009 /** 14010 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 14011 * @phba: Pointer to HBA context object. 14012 * @eqe: Pointer to fast-path event queue entry. 14013 * 14014 * This routine process a event queue entry from the fast-path event queue. 
14015 * It will check the MajorCode and MinorCode to determine this is for a 14016 * completion event on a completion queue, if not, an error shall be logged 14017 * and just return. Otherwise, it will get to the corresponding completion 14018 * queue and process all the entries on the completion queue, rearm the 14019 * completion queue, and then return. 14020 **/ 14021 static void 14022 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, 14023 struct lpfc_eqe *eqe) 14024 { 14025 struct lpfc_queue *cq = NULL; 14026 uint32_t qidx = eq->hdwq; 14027 uint16_t cqid, id; 14028 14029 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14030 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14031 "0366 Not a valid completion " 14032 "event: majorcode=x%x, minorcode=x%x\n", 14033 bf_get_le32(lpfc_eqe_major_code, eqe), 14034 bf_get_le32(lpfc_eqe_minor_code, eqe)); 14035 return; 14036 } 14037 14038 /* Get the reference to the corresponding CQ */ 14039 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14040 14041 /* Use the fast lookup method first */ 14042 if (cqid <= phba->sli4_hba.cq_max) { 14043 cq = phba->sli4_hba.cq_lookup[cqid]; 14044 if (cq) 14045 goto work_cq; 14046 } 14047 14048 /* Next check for NVMET completion */ 14049 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 14050 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 14051 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 14052 /* Process NVMET unsol rcv */ 14053 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 14054 goto process_cq; 14055 } 14056 } 14057 14058 if (phba->sli4_hba.nvmels_cq && 14059 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 14060 /* Process NVME unsol rcv */ 14061 cq = phba->sli4_hba.nvmels_cq; 14062 } 14063 14064 /* Otherwise this is a Slow path event */ 14065 if (cq == NULL) { 14066 lpfc_sli4_sp_handle_eqe(phba, eqe, 14067 phba->sli4_hba.hdwq[qidx].hba_eq); 14068 return; 14069 } 14070 14071 process_cq: 14072 if (unlikely(cqid != cq->queue_id)) { 14073 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14074 "0368 Miss-matched fast-path completion " 14075 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14076 cqid, cq->queue_id); 14077 return; 14078 } 14079 14080 work_cq: 14081 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS) 14082 if (phba->ktime_on) 14083 cq->isr_timestamp = ktime_get_ns(); 14084 else 14085 cq->isr_timestamp = 0; 14086 #endif 14087 if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) 14088 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14089 "0363 Cannot schedule soft IRQ " 14090 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14091 cqid, cq->queue_id, raw_smp_processor_id()); 14092 } 14093 14094 /** 14095 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 14096 * @cq: Pointer to CQ to be processed 14097 * 14098 * This routine calls the cq processing routine with the handler for 14099 * fast path CQEs. 14100 * 14101 * The CQ routine returns two values: the first is the calling status, 14102 * which indicates whether work was queued to the background discovery 14103 * thread. If true, the routine should wakeup the discovery thread; 14104 * the second is the delay parameter. If non-zero, rather than rearming 14105 * the CQ and yet another interrupt, the CQ handler should be queued so 14106 * that it is processed in a subsequent polling action. The value of 14107 * the delay indicates when to reschedule it. 
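 *
 * For reference, the work items that reach this handler are wired up in
 * lpfc_sli4_queue_alloc():
 *
 *	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
 *	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
 *
 * so the interrupt path queues cq->irqwork and the delayed (polling) path
 * queues cq->sched_irqwork, and both end up here.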
14108 **/
14109 static void
14110 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
14111 {
14112 	struct lpfc_hba *phba = cq->phba;
14113 	unsigned long delay;
14114 	bool workposted = false;
14115 
14116 	/* process and rearm the CQ */
14117 	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
14118 					     &delay);
14119 
14120 	if (delay) {
14121 		if (!queue_delayed_work_on(cq->chann, phba->wq,
14122 					   &cq->sched_irqwork, delay))
14123 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14124 				"0367 Cannot schedule soft IRQ "
14125 				"for cqid=%d on CPU %d\n",
14126 				cq->queue_id, cq->chann);
14127 	}
14128 
14129 	/* wake up worker thread if there are works to be done */
14130 	if (workposted)
14131 		lpfc_worker_wake_up(phba);
14132 }
14133 
14134 /**
14135  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
14136  * interrupt
14137  * @work: pointer to work element
14138  *
14139  * Translates from the work element to the owning queue and calls the
14140  * fast-path handler.
14140  **/
14141 static void
14142 lpfc_sli4_hba_process_cq(struct work_struct *work)
14143 {
14144 	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
14145 
14146 	__lpfc_sli4_hba_process_cq(cq);
14147 }
14148 
14149 /**
14150  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
14151  * @work: pointer to work element
14152  *
14153  * Translates from the work element to the owning queue and calls the
14154  * fast-path handler.
14154  **/
14155 static void
14156 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
14157 {
14158 	struct lpfc_queue *cq = container_of(to_delayed_work(work),
14159 					struct lpfc_queue, sched_irqwork);
14160 
14161 	__lpfc_sli4_hba_process_cq(cq);
14162 }
14163 
14164 /**
14165  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
14166  * @irq: Interrupt number.
14167  * @dev_id: The device context pointer.
14168  *
14169  * This function is directly called from the PCI layer as an interrupt
14170  * service routine when device with SLI-4 interface spec is enabled with
14171  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
14172  * ring event in the HBA. However, when the device is enabled with either
14173  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
14174  * device-level interrupt handler. When the PCI slot is in error recovery
14175  * or the HBA is undergoing initialization, the interrupt handler will not
14176  * process the interrupt. The SCSI FCP fast-path ring events are handled in
14177  * the interrupt context. This function is called without any lock held.
14178  * It gets the hbalock to access and update SLI data structures. Note that
14179  * the FCP EQs map one-to-one onto FCP CQs, such that the FCP EQ index is
14180  * equal to the FCP CQ index.
14181  *
14182  * The link attention and ELS ring attention events are handled
14183  * by the worker thread. The interrupt handler signals the worker thread
14184  * and returns for these events. This function is called without any lock
14185  * held. It gets the hbalock to access and update SLI data structures.
14186  *
14187  * This function returns IRQ_HANDLED when the interrupt is handled, else it
14188  * returns IRQ_NONE.
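 *
 * Rough sketch of how this handler would typically be registered (an
 * assumption for illustration; the actual registration is done in the
 * driver's initialization path, and "name" is a placeholder):
 *
 *	int rc = request_irq(pci_irq_vector(phba->pcidev, idx),
 *			     lpfc_sli4_hba_intr_handler, 0, name,
 *			     &phba->sli4_hba.hba_eq_hdl[idx]);
 *
 * The dev_id must be the per-vector lpfc_hba_eq_hdl entry, because this
 * handler recovers the phba pointer and the EQ index from it.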
14189 **/ 14190 irqreturn_t 14191 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14192 { 14193 struct lpfc_hba *phba; 14194 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14195 struct lpfc_queue *fpeq; 14196 unsigned long iflag; 14197 int ecount = 0; 14198 int hba_eqidx; 14199 struct lpfc_eq_intr_info *eqi; 14200 uint32_t icnt; 14201 14202 /* Get the driver's phba structure from the dev_id */ 14203 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14204 phba = hba_eq_hdl->phba; 14205 hba_eqidx = hba_eq_hdl->idx; 14206 14207 if (unlikely(!phba)) 14208 return IRQ_NONE; 14209 if (unlikely(!phba->sli4_hba.hdwq)) 14210 return IRQ_NONE; 14211 14212 /* Get to the EQ struct associated with this vector */ 14213 fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq; 14214 if (unlikely(!fpeq)) 14215 return IRQ_NONE; 14216 14217 /* Check device state for handling interrupt */ 14218 if (unlikely(lpfc_intr_state_check(phba))) { 14219 /* Check again for link_state with lock held */ 14220 spin_lock_irqsave(&phba->hbalock, iflag); 14221 if (phba->link_state < LPFC_LINK_DOWN) 14222 /* Flush, clear interrupt, and rearm the EQ */ 14223 lpfc_sli4_eq_flush(phba, fpeq); 14224 spin_unlock_irqrestore(&phba->hbalock, iflag); 14225 return IRQ_NONE; 14226 } 14227 14228 eqi = phba->sli4_hba.eq_info; 14229 icnt = this_cpu_inc_return(eqi->icnt); 14230 fpeq->last_cpu = raw_smp_processor_id(); 14231 14232 if (icnt > LPFC_EQD_ISR_TRIGGER && 14233 phba->cfg_irq_chann == 1 && 14234 phba->cfg_auto_imax && 14235 fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && 14236 phba->sli.sli_flag & LPFC_SLI_USE_EQDR) 14237 lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); 14238 14239 /* process and rearm the EQ */ 14240 ecount = lpfc_sli4_process_eq(phba, fpeq); 14241 14242 if (unlikely(ecount == 0)) { 14243 fpeq->EQ_no_entry++; 14244 if (phba->intr_type == MSIX) 14245 /* MSI-X treated interrupt served as no EQ share INT */ 14246 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14247 "0358 MSI-X interrupt with no EQE\n"); 14248 else 14249 /* Non MSI-X treated on interrupt as EQ share INT */ 14250 return IRQ_NONE; 14251 } 14252 14253 return IRQ_HANDLED; 14254 } /* lpfc_sli4_fp_intr_handler */ 14255 14256 /** 14257 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14258 * @irq: Interrupt number. 14259 * @dev_id: The device context pointer. 14260 * 14261 * This function is the device-level interrupt handler to device with SLI-4 14262 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14263 * interrupt mode is enabled and there is an event in the HBA which requires 14264 * driver attention. This function invokes the slow-path interrupt attention 14265 * handling function and fast-path interrupt attention handling function in 14266 * turn to process the relevant HBA attention events. This function is called 14267 * without any lock held. It gets the hbalock to access and update SLI data 14268 * structures. 14269 * 14270 * This function returns IRQ_HANDLED when interrupt is handled, else it 14271 * returns IRQ_NONE. 14272 **/ 14273 irqreturn_t 14274 lpfc_sli4_intr_handler(int irq, void *dev_id) 14275 { 14276 struct lpfc_hba *phba; 14277 irqreturn_t hba_irq_rc; 14278 bool hba_handled = false; 14279 int qidx; 14280 14281 /* Get the driver's phba structure from the dev_id */ 14282 phba = (struct lpfc_hba *)dev_id; 14283 14284 if (unlikely(!phba)) 14285 return IRQ_NONE; 14286 14287 /* 14288 * Invoke fast-path host attention interrupt handling as appropriate. 
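	 * In MSI or INTx mode there is a single interrupt for the whole HBA,
	 * so every EQ handle is polled in turn below; the result is
	 * IRQ_HANDLED if any per-EQ handler consumed work.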
 */
14290 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
14291 		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
14292 					&phba->sli4_hba.hba_eq_hdl[qidx]);
14293 		if (hba_irq_rc == IRQ_HANDLED)
14294 			hba_handled |= true;
14295 	}
14296 
14297 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
14298 } /* lpfc_sli4_intr_handler */
14299 
14300 /**
14301  * lpfc_sli4_queue_free - free a queue structure and associated memory
14302  * @queue: The queue structure to free.
14303  *
14304  * This function frees a queue structure and the DMAable memory used for
14305  * the host resident queue. This function must be called after destroying the
14306  * queue on the HBA.
14307  **/
14308 void
14309 lpfc_sli4_queue_free(struct lpfc_queue *queue)
14310 {
14311 	struct lpfc_dmabuf *dmabuf;
14312 
14313 	if (!queue)
14314 		return;
14315 
14316 	if (!list_empty(&queue->wq_list))
14317 		list_del(&queue->wq_list);
14318 
14319 	while (!list_empty(&queue->page_list)) {
14320 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
14321 				 list);
14322 		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
14323 				  dmabuf->virt, dmabuf->phys);
14324 		kfree(dmabuf);
14325 	}
14326 	if (queue->rqbp) {
14327 		lpfc_free_rq_buffer(queue->phba, queue);
14328 		kfree(queue->rqbp);
14329 	}
14330 
14331 	if (!list_empty(&queue->cpu_list))
14332 		list_del(&queue->cpu_list);
14333 
14334 	kfree(queue);
14335 	return;
14336 }
14337 
14338 /**
14339  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
14340  * @phba: The HBA that this queue is being created on.
14341  * @page_size: The size of a queue page.
14342  * @entry_size: The size of each queue entry for this queue.
14343  * @entry_count: The number of entries that this queue will handle.
14344  * @cpu: The cpu that will primarily utilize this queue.
14345  *
14346  * This function allocates a queue structure and the DMAable memory used for
14347  * the host resident queue. This function must be called before creating the
14348  * queue on the HBA.
14349  **/
14350 struct lpfc_queue *
14351 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
14352 		      uint32_t entry_size, uint32_t entry_count, int cpu)
14353 {
14354 	struct lpfc_queue *queue;
14355 	struct lpfc_dmabuf *dmabuf;
14356 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14357 	uint16_t x, pgcnt;
14358 
14359 	if (!phba->sli4_hba.pc_sli4_params.supported)
14360 		hw_page_size = page_size;
14361 
14362 	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
14363 
14364 	/* If needed, adjust the page count to match the max the adapter supports */
14365 	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
14366 		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
14367 
14368 	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
14369 			     GFP_KERNEL, cpu_to_node(cpu));
14370 	if (!queue)
14371 		return NULL;
14372 
14373 	INIT_LIST_HEAD(&queue->list);
14374 	INIT_LIST_HEAD(&queue->wq_list);
14375 	INIT_LIST_HEAD(&queue->wqfull_list);
14376 	INIT_LIST_HEAD(&queue->page_list);
14377 	INIT_LIST_HEAD(&queue->child_list);
14378 	INIT_LIST_HEAD(&queue->cpu_list);
14379 
14380 	/* Set queue parameters now. If the system cannot provide memory
14381 	 * resources, the free routine needs to know what was allocated.
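	 *
	 * Worked example (illustrative numbers): with 64-byte entries, 512
	 * entries and a 4096-byte hw_page_size,
	 *	pgcnt = ALIGN(64 * 512, 4096) / 4096 = 8 pages,
	 * and entry_cnt_per_pg below becomes 4096 / 64 = 64 entries per page.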
 */
14383 	queue->page_count = pgcnt;
14384 	queue->q_pgs = (void **)&queue[1];
14385 	queue->entry_cnt_per_pg = hw_page_size / entry_size;
14386 	queue->entry_size = entry_size;
14387 	queue->entry_count = entry_count;
14388 	queue->page_size = hw_page_size;
14389 	queue->phba = phba;
14390 
14391 	for (x = 0; x < queue->page_count; x++) {
14392 		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
14393 				      dev_to_node(&phba->pcidev->dev));
14394 		if (!dmabuf)
14395 			goto out_fail;
14396 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14397 						  hw_page_size, &dmabuf->phys,
14398 						  GFP_KERNEL);
14399 		if (!dmabuf->virt) {
14400 			kfree(dmabuf);
14401 			goto out_fail;
14402 		}
14403 		dmabuf->buffer_tag = x;
14404 		list_add_tail(&dmabuf->list, &queue->page_list);
14405 		/* use lpfc_sli4_qe to index a particular entry in this page */
14406 		queue->q_pgs[x] = dmabuf->virt;
14407 	}
14408 	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
14409 	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
14410 	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
14411 	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
14412 
14413 	/* notify_interval will be set during q creation */
14414 
14415 	return queue;
14416 out_fail:
14417 	lpfc_sli4_queue_free(queue);
14418 	return NULL;
14419 }
14420 
14421 /**
14422  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
14423  * @phba: HBA structure that indicates port to create a queue on.
14424  * @pci_barset: PCI BAR set flag.
14425  *
14426  * This function returns the host memory address for the specified PCI BAR
14427  * set, provided the BAR has already been iomapped. The returned host
14428  * memory address can be NULL.
14429  */
14430 static void __iomem *
14431 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
14432 {
14433 	if (!phba->pcidev)
14434 		return NULL;
14435 
14436 	switch (pci_barset) {
14437 	case WQ_PCI_BAR_0_AND_1:
14438 		return phba->pci_bar0_memmap_p;
14439 	case WQ_PCI_BAR_2_AND_3:
14440 		return phba->pci_bar2_memmap_p;
14441 	case WQ_PCI_BAR_4_AND_5:
14442 		return phba->pci_bar4_memmap_p;
14443 	default:
14444 		break;
14445 	}
14446 	return NULL;
14447 }
14448 
14449 /**
14450  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
14451  * @phba: HBA structure that EQs are on.
14452  * @startq: The starting EQ index to modify.
14453  * @numq: The number of EQs (consecutive indexes) to modify.
14454  * @usdelay: amount of delay, in microseconds.
14455  *
14456  * This function revises the EQ delay on 1 or more EQs. The EQ delay
14457  * is set either by writing to a register (if supported by the SLI Port)
14458  * or by mailbox command. The mailbox command allows several EQs to be
14459  * updated at once.
14460  *
14461  * The @phba struct is used to send a mailbox command to the HBA. The @startq
14462  * value is the starting EQ index to change. The @numq value specifies how
14463  * many consecutive EQ indexes, starting at @startq, are to be changed. The
14464  * mailbox command is issued in polled mode, so this function waits for it
14465  * to finish before returning.
14466  *
14467  * This function does not return a status. Failure to allocate the mailbox
14468  * or to issue the command is reported in the driver log. Note: on a mailbox
14469  * failure, some EQs may still have had their delay multiplier changed.
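 *
 * Illustrative usage (assumed values): to request roughly 16us of interrupt
 * coalescing on every EQ,
 *
 *	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
 *
 * The register path applies @usdelay immediately; on the mailbox path the
 * delay multiplier is derived from @usdelay (roughly
 * usdelay * LPFC_DMULT_CONST / LPFC_SEC_TO_USEC, capped at LPFC_DMULT_MAX).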
14471 **/ 14472 void 14473 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14474 uint32_t numq, uint32_t usdelay) 14475 { 14476 struct lpfc_mbx_modify_eq_delay *eq_delay; 14477 LPFC_MBOXQ_t *mbox; 14478 struct lpfc_queue *eq; 14479 int cnt = 0, rc, length; 14480 uint32_t shdr_status, shdr_add_status; 14481 uint32_t dmult; 14482 int qidx; 14483 union lpfc_sli4_cfg_shdr *shdr; 14484 14485 if (startq >= phba->cfg_irq_chann) 14486 return; 14487 14488 if (usdelay > 0xFFFF) { 14489 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME, 14490 "6429 usdelay %d too large. Scaled down to " 14491 "0xFFFF.\n", usdelay); 14492 usdelay = 0xFFFF; 14493 } 14494 14495 /* set values by EQ_DELAY register if supported */ 14496 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14497 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14498 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 14499 if (!eq) 14500 continue; 14501 14502 lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay); 14503 14504 if (++cnt >= numq) 14505 break; 14506 } 14507 return; 14508 } 14509 14510 /* Otherwise, set values by mailbox cmd */ 14511 14512 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14513 if (!mbox) { 14514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME, 14515 "6428 Failed allocating mailbox cmd buffer." 14516 " EQ delay was not set.\n"); 14517 return; 14518 } 14519 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14520 sizeof(struct lpfc_sli4_cfg_mhdr)); 14521 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14522 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14523 length, LPFC_SLI4_MBX_EMBED); 14524 eq_delay = &mbox->u.mqe.un.eq_delay; 14525 14526 /* Calculate delay multiper from maximum interrupt per second */ 14527 dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC; 14528 if (dmult) 14529 dmult--; 14530 if (dmult > LPFC_DMULT_MAX) 14531 dmult = LPFC_DMULT_MAX; 14532 14533 for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) { 14534 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq; 14535 if (!eq) 14536 continue; 14537 eq->q_mode = usdelay; 14538 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14539 eq_delay->u.request.eq[cnt].phase = 0; 14540 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14541 14542 if (++cnt >= numq) 14543 break; 14544 } 14545 eq_delay->u.request.num_eq = cnt; 14546 14547 mbox->vport = phba->pport; 14548 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14549 mbox->ctx_buf = NULL; 14550 mbox->ctx_ndlp = NULL; 14551 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14552 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14553 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14554 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14555 if (shdr_status || shdr_add_status || rc) { 14556 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14557 "2512 MODIFY_EQ_DELAY mailbox failed with " 14558 "status x%x add_status x%x, mbx status x%x\n", 14559 shdr_status, shdr_add_status, rc); 14560 } 14561 mempool_free(mbox, phba->mbox_mem_pool); 14562 return; 14563 } 14564 14565 /** 14566 * lpfc_eq_create - Create an Event Queue on the HBA 14567 * @phba: HBA structure that indicates port to create a queue on. 14568 * @eq: The queue structure to use to create the event queue. 14569 * @imax: The maximum interrupt per second limit. 14570 * 14571 * This function creates an event queue, as detailed in @eq, on a port, 14572 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 
14573 * 14574 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14575 * is used to get the entry count and entry size that are necessary to 14576 * determine the number of pages to allocate and use for this queue. This 14577 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14578 * event queue. This function is asynchronous and will wait for the mailbox 14579 * command to finish before continuing. 14580 * 14581 * On success this function will return a zero. If unable to allocate enough 14582 * memory this function will return -ENOMEM. If the queue create mailbox command 14583 * fails this function will return -ENXIO. 14584 **/ 14585 int 14586 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14587 { 14588 struct lpfc_mbx_eq_create *eq_create; 14589 LPFC_MBOXQ_t *mbox; 14590 int rc, length, status = 0; 14591 struct lpfc_dmabuf *dmabuf; 14592 uint32_t shdr_status, shdr_add_status; 14593 union lpfc_sli4_cfg_shdr *shdr; 14594 uint16_t dmult; 14595 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14596 14597 /* sanity check on queue memory */ 14598 if (!eq) 14599 return -ENODEV; 14600 if (!phba->sli4_hba.pc_sli4_params.supported) 14601 hw_page_size = SLI4_PAGE_SIZE; 14602 14603 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14604 if (!mbox) 14605 return -ENOMEM; 14606 length = (sizeof(struct lpfc_mbx_eq_create) - 14607 sizeof(struct lpfc_sli4_cfg_mhdr)); 14608 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14609 LPFC_MBOX_OPCODE_EQ_CREATE, 14610 length, LPFC_SLI4_MBX_EMBED); 14611 eq_create = &mbox->u.mqe.un.eq_create; 14612 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14613 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14614 eq->page_count); 14615 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14616 LPFC_EQE_SIZE); 14617 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14618 14619 /* Use version 2 of CREATE_EQ if eqav is set */ 14620 if (phba->sli4_hba.pc_sli4_params.eqav) { 14621 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14622 LPFC_Q_CREATE_VERSION_2); 14623 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14624 phba->sli4_hba.pc_sli4_params.eqav); 14625 } 14626 14627 /* don't setup delay multiplier using EQ_CREATE */ 14628 dmult = 0; 14629 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14630 dmult); 14631 switch (eq->entry_count) { 14632 default: 14633 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14634 "0360 Unsupported EQ count. 
(%d)\n", 14635 eq->entry_count); 14636 if (eq->entry_count < 256) { 14637 status = -EINVAL; 14638 goto out; 14639 } 14640 /* fall through - otherwise default to smallest count */ 14641 case 256: 14642 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14643 LPFC_EQ_CNT_256); 14644 break; 14645 case 512: 14646 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14647 LPFC_EQ_CNT_512); 14648 break; 14649 case 1024: 14650 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14651 LPFC_EQ_CNT_1024); 14652 break; 14653 case 2048: 14654 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14655 LPFC_EQ_CNT_2048); 14656 break; 14657 case 4096: 14658 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14659 LPFC_EQ_CNT_4096); 14660 break; 14661 } 14662 list_for_each_entry(dmabuf, &eq->page_list, list) { 14663 memset(dmabuf->virt, 0, hw_page_size); 14664 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14665 putPaddrLow(dmabuf->phys); 14666 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14667 putPaddrHigh(dmabuf->phys); 14668 } 14669 mbox->vport = phba->pport; 14670 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14671 mbox->ctx_buf = NULL; 14672 mbox->ctx_ndlp = NULL; 14673 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14674 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14675 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14676 if (shdr_status || shdr_add_status || rc) { 14677 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14678 "2500 EQ_CREATE mailbox failed with " 14679 "status x%x add_status x%x, mbx status x%x\n", 14680 shdr_status, shdr_add_status, rc); 14681 status = -ENXIO; 14682 } 14683 eq->type = LPFC_EQ; 14684 eq->subtype = LPFC_NONE; 14685 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14686 if (eq->queue_id == 0xFFFF) 14687 status = -ENXIO; 14688 eq->host_index = 0; 14689 eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL; 14690 eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT; 14691 out: 14692 mempool_free(mbox, phba->mbox_mem_pool); 14693 return status; 14694 } 14695 14696 /** 14697 * lpfc_cq_create - Create a Completion Queue on the HBA 14698 * @phba: HBA structure that indicates port to create a queue on. 14699 * @cq: The queue structure to use to create the completion queue. 14700 * @eq: The event queue to bind this completion queue to. 14701 * 14702 * This function creates a completion queue, as detailed in @wq, on a port, 14703 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14704 * 14705 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14706 * is used to get the entry count and entry size that are necessary to 14707 * determine the number of pages to allocate and use for this queue. The @eq 14708 * is used to indicate which event queue to bind this completion queue to. This 14709 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14710 * completion queue. This function is asynchronous and will wait for the mailbox 14711 * command to finish before continuing. 14712 * 14713 * On success this function will return a zero. If unable to allocate enough 14714 * memory this function will return -ENOMEM. If the queue create mailbox command 14715 * fails this function will return -ENXIO. 
14716 **/ 14717 int 14718 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14719 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14720 { 14721 struct lpfc_mbx_cq_create *cq_create; 14722 struct lpfc_dmabuf *dmabuf; 14723 LPFC_MBOXQ_t *mbox; 14724 int rc, length, status = 0; 14725 uint32_t shdr_status, shdr_add_status; 14726 union lpfc_sli4_cfg_shdr *shdr; 14727 14728 /* sanity check on queue memory */ 14729 if (!cq || !eq) 14730 return -ENODEV; 14731 14732 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14733 if (!mbox) 14734 return -ENOMEM; 14735 length = (sizeof(struct lpfc_mbx_cq_create) - 14736 sizeof(struct lpfc_sli4_cfg_mhdr)); 14737 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14738 LPFC_MBOX_OPCODE_CQ_CREATE, 14739 length, LPFC_SLI4_MBX_EMBED); 14740 cq_create = &mbox->u.mqe.un.cq_create; 14741 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14742 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14743 cq->page_count); 14744 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14745 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14746 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14747 phba->sli4_hba.pc_sli4_params.cqv); 14748 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14749 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14750 (cq->page_size / SLI4_PAGE_SIZE)); 14751 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14752 eq->queue_id); 14753 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14754 phba->sli4_hba.pc_sli4_params.cqav); 14755 } else { 14756 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14757 eq->queue_id); 14758 } 14759 switch (cq->entry_count) { 14760 case 2048: 14761 case 4096: 14762 if (phba->sli4_hba.pc_sli4_params.cqv == 14763 LPFC_Q_CREATE_VERSION_2) { 14764 cq_create->u.request.context.lpfc_cq_context_count = 14765 cq->entry_count; 14766 bf_set(lpfc_cq_context_count, 14767 &cq_create->u.request.context, 14768 LPFC_CQ_CNT_WORD7); 14769 break; 14770 } 14771 /* fall through */ 14772 default: 14773 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14774 "0361 Unsupported CQ count: " 14775 "entry cnt %d sz %d pg cnt %d\n", 14776 cq->entry_count, cq->entry_size, 14777 cq->page_count); 14778 if (cq->entry_count < 256) { 14779 status = -EINVAL; 14780 goto out; 14781 } 14782 /* fall through - otherwise default to smallest count */ 14783 case 256: 14784 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14785 LPFC_CQ_CNT_256); 14786 break; 14787 case 512: 14788 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14789 LPFC_CQ_CNT_512); 14790 break; 14791 case 1024: 14792 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14793 LPFC_CQ_CNT_1024); 14794 break; 14795 } 14796 list_for_each_entry(dmabuf, &cq->page_list, list) { 14797 memset(dmabuf->virt, 0, cq->page_size); 14798 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14799 putPaddrLow(dmabuf->phys); 14800 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14801 putPaddrHigh(dmabuf->phys); 14802 } 14803 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14804 14805 /* The IOCTL status is embedded in the mailbox subheader. 
 */
14806 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14807 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14808 	if (shdr_status || shdr_add_status || rc) {
14809 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14810 				"2501 CQ_CREATE mailbox failed with "
14811 				"status x%x add_status x%x, mbx status x%x\n",
14812 				shdr_status, shdr_add_status, rc);
14813 		status = -ENXIO;
14814 		goto out;
14815 	}
14816 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14817 	if (cq->queue_id == 0xFFFF) {
14818 		status = -ENXIO;
14819 		goto out;
14820 	}
14821 	/* link the cq onto the parent eq child list */
14822 	list_add_tail(&cq->list, &eq->child_list);
14823 	/* Set up completion queue's type and subtype */
14824 	cq->type = type;
14825 	cq->subtype = subtype;
14826 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14827 	cq->assoc_qid = eq->queue_id;
14828 	cq->assoc_qp = eq;
14829 	cq->host_index = 0;
14830 	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
14831 	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
14832 
14833 	if (cq->queue_id > phba->sli4_hba.cq_max)
14834 		phba->sli4_hba.cq_max = cq->queue_id;
14835 out:
14836 	mempool_free(mbox, phba->mbox_mem_pool);
14837 	return status;
14838 }
14839 
14840 /**
14841  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14842  * @phba: HBA structure that indicates port to create a queue on.
14843  * @cqp: The queue structure array to use to create the completion queues.
14844  * @hdwq: The hardware queue array with the EQs to bind completion queues to.
14845  *
14846  * This function creates a set of completion queues to support MRQ, as
14847  * detailed in @cqp, on a port described by @phba, by sending a
14848  * CREATE_CQ_SET mailbox command to the HBA.
14849  *
14850  * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
14851  * entry is used to get the entry count and entry size that are necessary to
14852  * determine the number of pages to allocate and use for that queue. The EQs
14853  * in @hdwq indicate which event queue to bind each completion queue to. This
14854  * function will send the CREATE_CQ_SET mailbox command to the HBA to set up
14855  * the completion queues. The mailbox command is issued in polled mode, so
14856  * this function waits for it to finish before continuing.
14857  *
14858  * On success this function will return a zero. If unable to allocate enough
14859  * memory this function will return -ENOMEM. If the queue create mailbox command
14860  * fails this function will return -ENXIO.
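 *
 * Note that the CREATE_CQ_SET response carries a single base queue id; the
 * driver then assigns cqp[idx]->queue_id = base_id + idx for each of the
 * phba->cfg_nvmet_mrq queues created (see the end of this function).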
14861 **/ 14862 int 14863 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14864 struct lpfc_sli4_hdw_queue *hdwq, uint32_t type, 14865 uint32_t subtype) 14866 { 14867 struct lpfc_queue *cq; 14868 struct lpfc_queue *eq; 14869 struct lpfc_mbx_cq_create_set *cq_set; 14870 struct lpfc_dmabuf *dmabuf; 14871 LPFC_MBOXQ_t *mbox; 14872 int rc, length, alloclen, status = 0; 14873 int cnt, idx, numcq, page_idx = 0; 14874 uint32_t shdr_status, shdr_add_status; 14875 union lpfc_sli4_cfg_shdr *shdr; 14876 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14877 14878 /* sanity check on queue memory */ 14879 numcq = phba->cfg_nvmet_mrq; 14880 if (!cqp || !hdwq || !numcq) 14881 return -ENODEV; 14882 14883 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14884 if (!mbox) 14885 return -ENOMEM; 14886 14887 length = sizeof(struct lpfc_mbx_cq_create_set); 14888 length += ((numcq * cqp[0]->page_count) * 14889 sizeof(struct dma_address)); 14890 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14891 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14892 LPFC_SLI4_MBX_NEMBED); 14893 if (alloclen < length) { 14894 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14895 "3098 Allocated DMA memory size (%d) is " 14896 "less than the requested DMA memory size " 14897 "(%d)\n", alloclen, length); 14898 status = -ENOMEM; 14899 goto out; 14900 } 14901 cq_set = mbox->sge_array->addr[0]; 14902 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14903 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14904 14905 for (idx = 0; idx < numcq; idx++) { 14906 cq = cqp[idx]; 14907 eq = hdwq[idx].hba_eq; 14908 if (!cq || !eq) { 14909 status = -ENOMEM; 14910 goto out; 14911 } 14912 if (!phba->sli4_hba.pc_sli4_params.supported) 14913 hw_page_size = cq->page_size; 14914 14915 switch (idx) { 14916 case 0: 14917 bf_set(lpfc_mbx_cq_create_set_page_size, 14918 &cq_set->u.request, 14919 (hw_page_size / SLI4_PAGE_SIZE)); 14920 bf_set(lpfc_mbx_cq_create_set_num_pages, 14921 &cq_set->u.request, cq->page_count); 14922 bf_set(lpfc_mbx_cq_create_set_evt, 14923 &cq_set->u.request, 1); 14924 bf_set(lpfc_mbx_cq_create_set_valid, 14925 &cq_set->u.request, 1); 14926 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14927 &cq_set->u.request, 0); 14928 bf_set(lpfc_mbx_cq_create_set_num_cq, 14929 &cq_set->u.request, numcq); 14930 bf_set(lpfc_mbx_cq_create_set_autovalid, 14931 &cq_set->u.request, 14932 phba->sli4_hba.pc_sli4_params.cqav); 14933 switch (cq->entry_count) { 14934 case 2048: 14935 case 4096: 14936 if (phba->sli4_hba.pc_sli4_params.cqv == 14937 LPFC_Q_CREATE_VERSION_2) { 14938 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14939 &cq_set->u.request, 14940 cq->entry_count); 14941 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14942 &cq_set->u.request, 14943 LPFC_CQ_CNT_WORD7); 14944 break; 14945 } 14946 /* fall through */ 14947 default: 14948 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14949 "3118 Bad CQ count. 
(%d)\n", 14950 cq->entry_count); 14951 if (cq->entry_count < 256) { 14952 status = -EINVAL; 14953 goto out; 14954 } 14955 /* fall through - otherwise default to smallest */ 14956 case 256: 14957 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14958 &cq_set->u.request, LPFC_CQ_CNT_256); 14959 break; 14960 case 512: 14961 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14962 &cq_set->u.request, LPFC_CQ_CNT_512); 14963 break; 14964 case 1024: 14965 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14966 &cq_set->u.request, LPFC_CQ_CNT_1024); 14967 break; 14968 } 14969 bf_set(lpfc_mbx_cq_create_set_eq_id0, 14970 &cq_set->u.request, eq->queue_id); 14971 break; 14972 case 1: 14973 bf_set(lpfc_mbx_cq_create_set_eq_id1, 14974 &cq_set->u.request, eq->queue_id); 14975 break; 14976 case 2: 14977 bf_set(lpfc_mbx_cq_create_set_eq_id2, 14978 &cq_set->u.request, eq->queue_id); 14979 break; 14980 case 3: 14981 bf_set(lpfc_mbx_cq_create_set_eq_id3, 14982 &cq_set->u.request, eq->queue_id); 14983 break; 14984 case 4: 14985 bf_set(lpfc_mbx_cq_create_set_eq_id4, 14986 &cq_set->u.request, eq->queue_id); 14987 break; 14988 case 5: 14989 bf_set(lpfc_mbx_cq_create_set_eq_id5, 14990 &cq_set->u.request, eq->queue_id); 14991 break; 14992 case 6: 14993 bf_set(lpfc_mbx_cq_create_set_eq_id6, 14994 &cq_set->u.request, eq->queue_id); 14995 break; 14996 case 7: 14997 bf_set(lpfc_mbx_cq_create_set_eq_id7, 14998 &cq_set->u.request, eq->queue_id); 14999 break; 15000 case 8: 15001 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15002 &cq_set->u.request, eq->queue_id); 15003 break; 15004 case 9: 15005 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15006 &cq_set->u.request, eq->queue_id); 15007 break; 15008 case 10: 15009 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15010 &cq_set->u.request, eq->queue_id); 15011 break; 15012 case 11: 15013 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15014 &cq_set->u.request, eq->queue_id); 15015 break; 15016 case 12: 15017 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15018 &cq_set->u.request, eq->queue_id); 15019 break; 15020 case 13: 15021 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15022 &cq_set->u.request, eq->queue_id); 15023 break; 15024 case 14: 15025 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15026 &cq_set->u.request, eq->queue_id); 15027 break; 15028 case 15: 15029 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15030 &cq_set->u.request, eq->queue_id); 15031 break; 15032 } 15033 15034 /* link the cq onto the parent eq child list */ 15035 list_add_tail(&cq->list, &eq->child_list); 15036 /* Set up completion queue's type and subtype */ 15037 cq->type = type; 15038 cq->subtype = subtype; 15039 cq->assoc_qid = eq->queue_id; 15040 cq->assoc_qp = eq; 15041 cq->host_index = 0; 15042 cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL; 15043 cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, 15044 cq->entry_count); 15045 cq->chann = idx; 15046 15047 rc = 0; 15048 list_for_each_entry(dmabuf, &cq->page_list, list) { 15049 memset(dmabuf->virt, 0, hw_page_size); 15050 cnt = page_idx + dmabuf->buffer_tag; 15051 cq_set->u.request.page[cnt].addr_lo = 15052 putPaddrLow(dmabuf->phys); 15053 cq_set->u.request.page[cnt].addr_hi = 15054 putPaddrHigh(dmabuf->phys); 15055 rc++; 15056 } 15057 page_idx += rc; 15058 } 15059 15060 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15061 15062 /* The IOCTL status is embedded in the mailbox subheader. 
 */
15063 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15064 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15065 	if (shdr_status || shdr_add_status || rc) {
15066 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15067 				"3119 CQ_CREATE_SET mailbox failed with "
15068 				"status x%x add_status x%x, mbx status x%x\n",
15069 				shdr_status, shdr_add_status, rc);
15070 		status = -ENXIO;
15071 		goto out;
15072 	}
15073 	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
15074 	if (rc == 0xFFFF) {
15075 		status = -ENXIO;
15076 		goto out;
15077 	}
15078 
15079 	for (idx = 0; idx < numcq; idx++) {
15080 		cq = cqp[idx];
15081 		cq->queue_id = rc + idx;
15082 		if (cq->queue_id > phba->sli4_hba.cq_max)
15083 			phba->sli4_hba.cq_max = cq->queue_id;
15084 	}
15085 
15086 out:
15087 	lpfc_sli4_mbox_cmd_free(phba, mbox);
15088 	return status;
15089 }
15090 
15091 /**
15092  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
15093  * @phba: HBA structure that indicates port to create a queue on.
15094  * @mq: The queue structure to use to create the mailbox queue.
15095  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
15096  * @cq: The completion queue to associate with this mq.
15097  *
15098  * This function provides fallback (fb) functionality when the
15099  * mq_create_ext fails on older FW generations. Its purpose is identical
15100  * to mq_create_ext otherwise.
15101  *
15102  * This routine cannot fail as all attributes were previously accessed and
15103  * initialized in mq_create_ext.
15104  **/
15105 static void
15106 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
15107 		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
15108 {
15109 	struct lpfc_mbx_mq_create *mq_create;
15110 	struct lpfc_dmabuf *dmabuf;
15111 	int length;
15112 
15113 	length = (sizeof(struct lpfc_mbx_mq_create) -
15114 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15115 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15116 			 LPFC_MBOX_OPCODE_MQ_CREATE,
15117 			 length, LPFC_SLI4_MBX_EMBED);
15118 	mq_create = &mbox->u.mqe.un.mq_create;
15119 	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
15120 	       mq->page_count);
15121 	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
15122 	       cq->queue_id);
15123 	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
15124 	switch (mq->entry_count) {
15125 	case 16:
15126 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15127 		       LPFC_MQ_RING_SIZE_16);
15128 		break;
15129 	case 32:
15130 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15131 		       LPFC_MQ_RING_SIZE_32);
15132 		break;
15133 	case 64:
15134 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15135 		       LPFC_MQ_RING_SIZE_64);
15136 		break;
15137 	case 128:
15138 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
15139 		       LPFC_MQ_RING_SIZE_128);
15140 		break;
15141 	}
15142 	list_for_each_entry(dmabuf, &mq->page_list, list) {
15143 		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15144 			putPaddrLow(dmabuf->phys);
15145 		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15146 			putPaddrHigh(dmabuf->phys);
15147 	}
15148 }
15149 
15150 /**
15151  * lpfc_mq_create - Create a mailbox Queue on the HBA
15152  * @phba: HBA structure that indicates port to create a queue on.
15153  * @mq: The queue structure to use to create the mailbox queue.
15154  * @cq: The completion queue to associate with this mq.
15155  * @subtype: The queue's subtype.
15156 * 15157 * This function creates a mailbox queue, as detailed in @mq, on a port, 15158 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 15159 * 15160 * The @phba struct is used to send mailbox command to HBA. The @cq struct 15161 * is used to get the entry count and entry size that are necessary to 15162 * determine the number of pages to allocate and use for this queue. This 15163 * function will send the MQ_CREATE mailbox command to the HBA to setup the 15164 * mailbox queue. This function is asynchronous and will wait for the mailbox 15165 * command to finish before continuing. 15166 * 15167 * On success this function will return a zero. If unable to allocate enough 15168 * memory this function will return -ENOMEM. If the queue create mailbox command 15169 * fails this function will return -ENXIO. 15170 **/ 15171 int32_t 15172 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 15173 struct lpfc_queue *cq, uint32_t subtype) 15174 { 15175 struct lpfc_mbx_mq_create *mq_create; 15176 struct lpfc_mbx_mq_create_ext *mq_create_ext; 15177 struct lpfc_dmabuf *dmabuf; 15178 LPFC_MBOXQ_t *mbox; 15179 int rc, length, status = 0; 15180 uint32_t shdr_status, shdr_add_status; 15181 union lpfc_sli4_cfg_shdr *shdr; 15182 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15183 15184 /* sanity check on queue memory */ 15185 if (!mq || !cq) 15186 return -ENODEV; 15187 if (!phba->sli4_hba.pc_sli4_params.supported) 15188 hw_page_size = SLI4_PAGE_SIZE; 15189 15190 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15191 if (!mbox) 15192 return -ENOMEM; 15193 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 15194 sizeof(struct lpfc_sli4_cfg_mhdr)); 15195 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15196 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15197 length, LPFC_SLI4_MBX_EMBED); 15198 15199 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15200 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15201 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15202 &mq_create_ext->u.request, mq->page_count); 15203 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15204 &mq_create_ext->u.request, 1); 15205 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15206 &mq_create_ext->u.request, 1); 15207 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15208 &mq_create_ext->u.request, 1); 15209 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15210 &mq_create_ext->u.request, 1); 15211 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15212 &mq_create_ext->u.request, 1); 15213 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15214 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15215 phba->sli4_hba.pc_sli4_params.mqv); 15216 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15217 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15218 cq->queue_id); 15219 else 15220 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15221 cq->queue_id); 15222 switch (mq->entry_count) { 15223 default: 15224 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15225 "0362 Unsupported MQ count. 
(%d)\n", 15226 mq->entry_count); 15227 if (mq->entry_count < 16) { 15228 status = -EINVAL; 15229 goto out; 15230 } 15231 /* fall through - otherwise default to smallest count */ 15232 case 16: 15233 bf_set(lpfc_mq_context_ring_size, 15234 &mq_create_ext->u.request.context, 15235 LPFC_MQ_RING_SIZE_16); 15236 break; 15237 case 32: 15238 bf_set(lpfc_mq_context_ring_size, 15239 &mq_create_ext->u.request.context, 15240 LPFC_MQ_RING_SIZE_32); 15241 break; 15242 case 64: 15243 bf_set(lpfc_mq_context_ring_size, 15244 &mq_create_ext->u.request.context, 15245 LPFC_MQ_RING_SIZE_64); 15246 break; 15247 case 128: 15248 bf_set(lpfc_mq_context_ring_size, 15249 &mq_create_ext->u.request.context, 15250 LPFC_MQ_RING_SIZE_128); 15251 break; 15252 } 15253 list_for_each_entry(dmabuf, &mq->page_list, list) { 15254 memset(dmabuf->virt, 0, hw_page_size); 15255 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15256 putPaddrLow(dmabuf->phys); 15257 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15258 putPaddrHigh(dmabuf->phys); 15259 } 15260 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15261 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15262 &mq_create_ext->u.response); 15263 if (rc != MBX_SUCCESS) { 15264 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15265 "2795 MQ_CREATE_EXT failed with " 15266 "status x%x. Failback to MQ_CREATE.\n", 15267 rc); 15268 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15269 mq_create = &mbox->u.mqe.un.mq_create; 15270 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15271 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15272 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15273 &mq_create->u.response); 15274 } 15275 15276 /* The IOCTL status is embedded in the mailbox subheader. */ 15277 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15278 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15279 if (shdr_status || shdr_add_status || rc) { 15280 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15281 "2502 MQ_CREATE mailbox failed with " 15282 "status x%x add_status x%x, mbx status x%x\n", 15283 shdr_status, shdr_add_status, rc); 15284 status = -ENXIO; 15285 goto out; 15286 } 15287 if (mq->queue_id == 0xFFFF) { 15288 status = -ENXIO; 15289 goto out; 15290 } 15291 mq->type = LPFC_MQ; 15292 mq->assoc_qid = cq->queue_id; 15293 mq->subtype = subtype; 15294 mq->host_index = 0; 15295 mq->hba_index = 0; 15296 15297 /* link the mq onto the parent cq child list */ 15298 list_add_tail(&mq->list, &cq->child_list); 15299 out: 15300 mempool_free(mbox, phba->mbox_mem_pool); 15301 return status; 15302 } 15303 15304 /** 15305 * lpfc_wq_create - Create a Work Queue on the HBA 15306 * @phba: HBA structure that indicates port to create a queue on. 15307 * @wq: The queue structure to use to create the work queue. 15308 * @cq: The completion queue to bind this work queue to. 15309 * @subtype: The subtype of the work queue indicating its functionality. 15310 * 15311 * This function creates a work queue, as detailed in @wq, on a port, described 15312 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15313 * 15314 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15315 * is used to get the entry count and entry size that are necessary to 15316 * determine the number of pages to allocate and use for this queue. The @cq 15317 * is used to indicate which completion queue to bind this work queue to. This 15318 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15319 * work queue. 
This function is asynchronous and will wait for the mailbox 15320 * command to finish before continuing. 15321 * 15322 * On success this function will return a zero. If unable to allocate enough 15323 * memory this function will return -ENOMEM. If the queue create mailbox command 15324 * fails this function will return -ENXIO. 15325 **/ 15326 int 15327 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 15328 struct lpfc_queue *cq, uint32_t subtype) 15329 { 15330 struct lpfc_mbx_wq_create *wq_create; 15331 struct lpfc_dmabuf *dmabuf; 15332 LPFC_MBOXQ_t *mbox; 15333 int rc, length, status = 0; 15334 uint32_t shdr_status, shdr_add_status; 15335 union lpfc_sli4_cfg_shdr *shdr; 15336 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15337 struct dma_address *page; 15338 void __iomem *bar_memmap_p; 15339 uint32_t db_offset; 15340 uint16_t pci_barset; 15341 uint8_t dpp_barset; 15342 uint32_t dpp_offset; 15343 unsigned long pg_addr; 15344 uint8_t wq_create_version; 15345 15346 /* sanity check on queue memory */ 15347 if (!wq || !cq) 15348 return -ENODEV; 15349 if (!phba->sli4_hba.pc_sli4_params.supported) 15350 hw_page_size = wq->page_size; 15351 15352 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15353 if (!mbox) 15354 return -ENOMEM; 15355 length = (sizeof(struct lpfc_mbx_wq_create) - 15356 sizeof(struct lpfc_sli4_cfg_mhdr)); 15357 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15358 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 15359 length, LPFC_SLI4_MBX_EMBED); 15360 wq_create = &mbox->u.mqe.un.wq_create; 15361 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 15362 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 15363 wq->page_count); 15364 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 15365 cq->queue_id); 15366 15367 /* wqv is the earliest version supported, NOT the latest */ 15368 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15369 phba->sli4_hba.pc_sli4_params.wqv); 15370 15371 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 15372 (wq->page_size > SLI4_PAGE_SIZE)) 15373 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15374 else 15375 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15376 15377 15378 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 15379 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15380 else 15381 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15382 15383 switch (wq_create_version) { 15384 case LPFC_Q_CREATE_VERSION_1: 15385 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 15386 wq->entry_count); 15387 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15388 LPFC_Q_CREATE_VERSION_1); 15389 15390 switch (wq->entry_size) { 15391 default: 15392 case 64: 15393 bf_set(lpfc_mbx_wq_create_wqe_size, 15394 &wq_create->u.request_1, 15395 LPFC_WQ_WQE_SIZE_64); 15396 break; 15397 case 128: 15398 bf_set(lpfc_mbx_wq_create_wqe_size, 15399 &wq_create->u.request_1, 15400 LPFC_WQ_WQE_SIZE_128); 15401 break; 15402 } 15403 /* Request DPP by default */ 15404 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 15405 bf_set(lpfc_mbx_wq_create_page_size, 15406 &wq_create->u.request_1, 15407 (wq->page_size / SLI4_PAGE_SIZE)); 15408 page = wq_create->u.request_1.page; 15409 break; 15410 default: 15411 page = wq_create->u.request.page; 15412 break; 15413 } 15414 15415 list_for_each_entry(dmabuf, &wq->page_list, list) { 15416 memset(dmabuf->virt, 0, hw_page_size); 15417 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 15418 page[dmabuf->buffer_tag].addr_hi = 
putPaddrHigh(dmabuf->phys); 15419 } 15420 15421 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15422 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15423 15424 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15425 /* The IOCTL status is embedded in the mailbox subheader. */ 15426 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15427 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15428 if (shdr_status || shdr_add_status || rc) { 15429 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15430 "2503 WQ_CREATE mailbox failed with " 15431 "status x%x add_status x%x, mbx status x%x\n", 15432 shdr_status, shdr_add_status, rc); 15433 status = -ENXIO; 15434 goto out; 15435 } 15436 15437 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15438 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15439 &wq_create->u.response); 15440 else 15441 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15442 &wq_create->u.response_1); 15443 15444 if (wq->queue_id == 0xFFFF) { 15445 status = -ENXIO; 15446 goto out; 15447 } 15448 15449 wq->db_format = LPFC_DB_LIST_FORMAT; 15450 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15451 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15452 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15453 &wq_create->u.response); 15454 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15455 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15456 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15457 "3265 WQ[%d] doorbell format " 15458 "not supported: x%x\n", 15459 wq->queue_id, wq->db_format); 15460 status = -EINVAL; 15461 goto out; 15462 } 15463 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15464 &wq_create->u.response); 15465 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15466 pci_barset); 15467 if (!bar_memmap_p) { 15468 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15469 "3263 WQ[%d] failed to memmap " 15470 "pci barset:x%x\n", 15471 wq->queue_id, pci_barset); 15472 status = -ENOMEM; 15473 goto out; 15474 } 15475 db_offset = wq_create->u.response.doorbell_offset; 15476 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15477 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15478 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15479 "3252 WQ[%d] doorbell offset " 15480 "not supported: x%x\n", 15481 wq->queue_id, db_offset); 15482 status = -EINVAL; 15483 goto out; 15484 } 15485 wq->db_regaddr = bar_memmap_p + db_offset; 15486 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15487 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15488 "format:x%x\n", wq->queue_id, 15489 pci_barset, db_offset, wq->db_format); 15490 } else 15491 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15492 } else { 15493 /* Check if DPP was honored by the firmware */ 15494 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15495 &wq_create->u.response_1); 15496 if (wq->dpp_enable) { 15497 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15498 &wq_create->u.response_1); 15499 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15500 pci_barset); 15501 if (!bar_memmap_p) { 15502 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15503 "3267 WQ[%d] failed to memmap " 15504 "pci barset:x%x\n", 15505 wq->queue_id, pci_barset); 15506 status = -ENOMEM; 15507 goto out; 15508 } 15509 db_offset = wq_create->u.response_1.doorbell_offset; 15510 wq->db_regaddr = bar_memmap_p + db_offset; 15511 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15512 &wq_create->u.response_1); 15513 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15514 &wq_create->u.response_1); 15515 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15516 dpp_barset); 15517 if 
(!bar_memmap_p) { 15518 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15519 "3268 WQ[%d] failed to memmap " 15520 "pci barset:x%x\n", 15521 wq->queue_id, dpp_barset); 15522 status = -ENOMEM; 15523 goto out; 15524 } 15525 dpp_offset = wq_create->u.response_1.dpp_offset; 15526 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15527 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15528 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15529 "dpp_id:x%x dpp_barset:x%x " 15530 "dpp_offset:x%x\n", 15531 wq->queue_id, pci_barset, db_offset, 15532 wq->dpp_id, dpp_barset, dpp_offset); 15533 15534 /* Enable combined writes for DPP aperture */ 15535 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15536 #ifdef CONFIG_X86 15537 rc = set_memory_wc(pg_addr, 1); 15538 if (rc) { 15539 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15540 "3272 Cannot setup Combined " 15541 "Write on WQ[%d] - disable DPP\n", 15542 wq->queue_id); 15543 phba->cfg_enable_dpp = 0; 15544 } 15545 #else 15546 phba->cfg_enable_dpp = 0; 15547 #endif 15548 } else 15549 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15550 } 15551 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15552 if (wq->pring == NULL) { 15553 status = -ENOMEM; 15554 goto out; 15555 } 15556 wq->type = LPFC_WQ; 15557 wq->assoc_qid = cq->queue_id; 15558 wq->subtype = subtype; 15559 wq->host_index = 0; 15560 wq->hba_index = 0; 15561 wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; 15562 15563 /* link the wq onto the parent cq child list */ 15564 list_add_tail(&wq->list, &cq->child_list); 15565 out: 15566 mempool_free(mbox, phba->mbox_mem_pool); 15567 return status; 15568 } 15569 15570 /** 15571 * lpfc_rq_create - Create a Receive Queue on the HBA 15572 * @phba: HBA structure that indicates port to create a queue on. 15573 * @hrq: The queue structure to use to create the header receive queue. 15574 * @drq: The queue structure to use to create the data receive queue. 15575 * @cq: The completion queue to bind this work queue to. 15576 * 15577 * This function creates a receive buffer queue pair , as detailed in @hrq and 15578 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15579 * to the HBA. 15580 * 15581 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15582 * struct is used to get the entry count that is necessary to determine the 15583 * number of pages to use for this queue. The @cq is used to indicate which 15584 * completion queue to bind received buffers that are posted to these queues to. 15585 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15586 * receive queue pair. This function is asynchronous and will wait for the 15587 * mailbox command to finish before continuing. 15588 * 15589 * On success this function will return a zero. If unable to allocate enough 15590 * memory this function will return -ENOMEM. If the queue create mailbox command 15591 * fails this function will return -ENXIO. 
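 *
 * A minimal usage sketch (illustrative only; the queue structures and the
 * subtype value are assumed to have been set up elsewhere in the driver,
 * and the local names are hypothetical):
 *
 *      rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, subtype);
 *      if (rc)
 *              return rc;
 *
 * Note that @hrq and @drq must have the same entry_count, otherwise this
 * function returns -EINVAL before any mailbox command is issued.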
15592 **/ 15593 int 15594 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15595 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15596 { 15597 struct lpfc_mbx_rq_create *rq_create; 15598 struct lpfc_dmabuf *dmabuf; 15599 LPFC_MBOXQ_t *mbox; 15600 int rc, length, status = 0; 15601 uint32_t shdr_status, shdr_add_status; 15602 union lpfc_sli4_cfg_shdr *shdr; 15603 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15604 void __iomem *bar_memmap_p; 15605 uint32_t db_offset; 15606 uint16_t pci_barset; 15607 15608 /* sanity check on queue memory */ 15609 if (!hrq || !drq || !cq) 15610 return -ENODEV; 15611 if (!phba->sli4_hba.pc_sli4_params.supported) 15612 hw_page_size = SLI4_PAGE_SIZE; 15613 15614 if (hrq->entry_count != drq->entry_count) 15615 return -EINVAL; 15616 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15617 if (!mbox) 15618 return -ENOMEM; 15619 length = (sizeof(struct lpfc_mbx_rq_create) - 15620 sizeof(struct lpfc_sli4_cfg_mhdr)); 15621 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15622 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15623 length, LPFC_SLI4_MBX_EMBED); 15624 rq_create = &mbox->u.mqe.un.rq_create; 15625 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15626 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15627 phba->sli4_hba.pc_sli4_params.rqv); 15628 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15629 bf_set(lpfc_rq_context_rqe_count_1, 15630 &rq_create->u.request.context, 15631 hrq->entry_count); 15632 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15633 bf_set(lpfc_rq_context_rqe_size, 15634 &rq_create->u.request.context, 15635 LPFC_RQE_SIZE_8); 15636 bf_set(lpfc_rq_context_page_size, 15637 &rq_create->u.request.context, 15638 LPFC_RQ_PAGE_SIZE_4096); 15639 } else { 15640 switch (hrq->entry_count) { 15641 default: 15642 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15643 "2535 Unsupported RQ count. (%d)\n", 15644 hrq->entry_count); 15645 if (hrq->entry_count < 512) { 15646 status = -EINVAL; 15647 goto out; 15648 } 15649 /* fall through - otherwise default to smallest count */ 15650 case 512: 15651 bf_set(lpfc_rq_context_rqe_count, 15652 &rq_create->u.request.context, 15653 LPFC_RQ_RING_SIZE_512); 15654 break; 15655 case 1024: 15656 bf_set(lpfc_rq_context_rqe_count, 15657 &rq_create->u.request.context, 15658 LPFC_RQ_RING_SIZE_1024); 15659 break; 15660 case 2048: 15661 bf_set(lpfc_rq_context_rqe_count, 15662 &rq_create->u.request.context, 15663 LPFC_RQ_RING_SIZE_2048); 15664 break; 15665 case 4096: 15666 bf_set(lpfc_rq_context_rqe_count, 15667 &rq_create->u.request.context, 15668 LPFC_RQ_RING_SIZE_4096); 15669 break; 15670 } 15671 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15672 LPFC_HDR_BUF_SIZE); 15673 } 15674 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15675 cq->queue_id); 15676 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15677 hrq->page_count); 15678 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15679 memset(dmabuf->virt, 0, hw_page_size); 15680 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15681 putPaddrLow(dmabuf->phys); 15682 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15683 putPaddrHigh(dmabuf->phys); 15684 } 15685 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15686 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15687 15688 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15689 /* The IOCTL status is embedded in the mailbox subheader. 
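Any nonzero generic status, additional status, or mailbox return code below is treated as a failure of the header RQ create.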
*/ 15690 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15691 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15692 if (shdr_status || shdr_add_status || rc) { 15693 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15694 "2504 RQ_CREATE mailbox failed with " 15695 "status x%x add_status x%x, mbx status x%x\n", 15696 shdr_status, shdr_add_status, rc); 15697 status = -ENXIO; 15698 goto out; 15699 } 15700 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15701 if (hrq->queue_id == 0xFFFF) { 15702 status = -ENXIO; 15703 goto out; 15704 } 15705 15706 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15707 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15708 &rq_create->u.response); 15709 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15710 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15711 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15712 "3262 RQ [%d] doorbell format not " 15713 "supported: x%x\n", hrq->queue_id, 15714 hrq->db_format); 15715 status = -EINVAL; 15716 goto out; 15717 } 15718 15719 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15720 &rq_create->u.response); 15721 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15722 if (!bar_memmap_p) { 15723 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15724 "3269 RQ[%d] failed to memmap pci " 15725 "barset:x%x\n", hrq->queue_id, 15726 pci_barset); 15727 status = -ENOMEM; 15728 goto out; 15729 } 15730 15731 db_offset = rq_create->u.response.doorbell_offset; 15732 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15733 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15734 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15735 "3270 RQ[%d] doorbell offset not " 15736 "supported: x%x\n", hrq->queue_id, 15737 db_offset); 15738 status = -EINVAL; 15739 goto out; 15740 } 15741 hrq->db_regaddr = bar_memmap_p + db_offset; 15742 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15743 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15744 "format:x%x\n", hrq->queue_id, pci_barset, 15745 db_offset, hrq->db_format); 15746 } else { 15747 hrq->db_format = LPFC_DB_RING_FORMAT; 15748 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15749 } 15750 hrq->type = LPFC_HRQ; 15751 hrq->assoc_qid = cq->queue_id; 15752 hrq->subtype = subtype; 15753 hrq->host_index = 0; 15754 hrq->hba_index = 0; 15755 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15756 15757 /* now create the data queue */ 15758 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15759 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15760 length, LPFC_SLI4_MBX_EMBED); 15761 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15762 phba->sli4_hba.pc_sli4_params.rqv); 15763 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15764 bf_set(lpfc_rq_context_rqe_count_1, 15765 &rq_create->u.request.context, hrq->entry_count); 15766 if (subtype == LPFC_NVMET) 15767 rq_create->u.request.context.buffer_size = 15768 LPFC_NVMET_DATA_BUF_SIZE; 15769 else 15770 rq_create->u.request.context.buffer_size = 15771 LPFC_DATA_BUF_SIZE; 15772 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15773 LPFC_RQE_SIZE_8); 15774 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15775 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15776 } else { 15777 switch (drq->entry_count) { 15778 default: 15779 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15780 "2536 Unsupported RQ count. 
(%d)\n", 15781 drq->entry_count); 15782 if (drq->entry_count < 512) { 15783 status = -EINVAL; 15784 goto out; 15785 } 15786 /* fall through - otherwise default to smallest count */ 15787 case 512: 15788 bf_set(lpfc_rq_context_rqe_count, 15789 &rq_create->u.request.context, 15790 LPFC_RQ_RING_SIZE_512); 15791 break; 15792 case 1024: 15793 bf_set(lpfc_rq_context_rqe_count, 15794 &rq_create->u.request.context, 15795 LPFC_RQ_RING_SIZE_1024); 15796 break; 15797 case 2048: 15798 bf_set(lpfc_rq_context_rqe_count, 15799 &rq_create->u.request.context, 15800 LPFC_RQ_RING_SIZE_2048); 15801 break; 15802 case 4096: 15803 bf_set(lpfc_rq_context_rqe_count, 15804 &rq_create->u.request.context, 15805 LPFC_RQ_RING_SIZE_4096); 15806 break; 15807 } 15808 if (subtype == LPFC_NVMET) 15809 bf_set(lpfc_rq_context_buf_size, 15810 &rq_create->u.request.context, 15811 LPFC_NVMET_DATA_BUF_SIZE); 15812 else 15813 bf_set(lpfc_rq_context_buf_size, 15814 &rq_create->u.request.context, 15815 LPFC_DATA_BUF_SIZE); 15816 } 15817 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15818 cq->queue_id); 15819 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15820 drq->page_count); 15821 list_for_each_entry(dmabuf, &drq->page_list, list) { 15822 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15823 putPaddrLow(dmabuf->phys); 15824 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15825 putPaddrHigh(dmabuf->phys); 15826 } 15827 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15828 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15829 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15830 /* The IOCTL status is embedded in the mailbox subheader. */ 15831 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15832 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15833 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15834 if (shdr_status || shdr_add_status || rc) { 15835 status = -ENXIO; 15836 goto out; 15837 } 15838 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15839 if (drq->queue_id == 0xFFFF) { 15840 status = -ENXIO; 15841 goto out; 15842 } 15843 drq->type = LPFC_DRQ; 15844 drq->assoc_qid = cq->queue_id; 15845 drq->subtype = subtype; 15846 drq->host_index = 0; 15847 drq->hba_index = 0; 15848 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 15849 15850 /* link the header and data RQs onto the parent cq child list */ 15851 list_add_tail(&hrq->list, &cq->child_list); 15852 list_add_tail(&drq->list, &cq->child_list); 15853 15854 out: 15855 mempool_free(mbox, phba->mbox_mem_pool); 15856 return status; 15857 } 15858 15859 /** 15860 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15861 * @phba: HBA structure that indicates port to create a queue on. 15862 * @hrqp: The queue structure array to use to create the header receive queues. 15863 * @drqp: The queue structure array to use to create the data receive queues. 15864 * @cqp: The completion queue array to bind these receive queues to. 15865 * 15866 * This function creates a receive buffer queue pair , as detailed in @hrq and 15867 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15868 * to the HBA. 15869 * 15870 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15871 * struct is used to get the entry count that is necessary to determine the 15872 * number of pages to use for this queue. The @cq is used to indicate which 15873 * completion queue to bind received buffers that are posted to these queues to. 
15874 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15875 * receive queue pair. This function is asynchronous and will wait for the 15876 * mailbox command to finish before continuing. 15877 * 15878 * On success this function will return a zero. If unable to allocate enough 15879 * memory this function will return -ENOMEM. If the queue create mailbox command 15880 * fails this function will return -ENXIO. 15881 **/ 15882 int 15883 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15884 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15885 uint32_t subtype) 15886 { 15887 struct lpfc_queue *hrq, *drq, *cq; 15888 struct lpfc_mbx_rq_create_v2 *rq_create; 15889 struct lpfc_dmabuf *dmabuf; 15890 LPFC_MBOXQ_t *mbox; 15891 int rc, length, alloclen, status = 0; 15892 int cnt, idx, numrq, page_idx = 0; 15893 uint32_t shdr_status, shdr_add_status; 15894 union lpfc_sli4_cfg_shdr *shdr; 15895 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15896 15897 numrq = phba->cfg_nvmet_mrq; 15898 /* sanity check on array memory */ 15899 if (!hrqp || !drqp || !cqp || !numrq) 15900 return -ENODEV; 15901 if (!phba->sli4_hba.pc_sli4_params.supported) 15902 hw_page_size = SLI4_PAGE_SIZE; 15903 15904 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15905 if (!mbox) 15906 return -ENOMEM; 15907 15908 length = sizeof(struct lpfc_mbx_rq_create_v2); 15909 length += ((2 * numrq * hrqp[0]->page_count) * 15910 sizeof(struct dma_address)); 15911 15912 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15913 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15914 LPFC_SLI4_MBX_NEMBED); 15915 if (alloclen < length) { 15916 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15917 "3099 Allocated DMA memory size (%d) is " 15918 "less than the requested DMA memory size " 15919 "(%d)\n", alloclen, length); 15920 status = -ENOMEM; 15921 goto out; 15922 } 15923 15924 15925 15926 rq_create = mbox->sge_array->addr[0]; 15927 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15928 15929 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15930 cnt = 0; 15931 15932 for (idx = 0; idx < numrq; idx++) { 15933 hrq = hrqp[idx]; 15934 drq = drqp[idx]; 15935 cq = cqp[idx]; 15936 15937 /* sanity check on queue memory */ 15938 if (!hrq || !drq || !cq) { 15939 status = -ENODEV; 15940 goto out; 15941 } 15942 15943 if (hrq->entry_count != drq->entry_count) { 15944 status = -EINVAL; 15945 goto out; 15946 } 15947 15948 if (idx == 0) { 15949 bf_set(lpfc_mbx_rq_create_num_pages, 15950 &rq_create->u.request, 15951 hrq->page_count); 15952 bf_set(lpfc_mbx_rq_create_rq_cnt, 15953 &rq_create->u.request, (numrq * 2)); 15954 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15955 1); 15956 bf_set(lpfc_rq_context_base_cq, 15957 &rq_create->u.request.context, 15958 cq->queue_id); 15959 bf_set(lpfc_rq_context_data_size, 15960 &rq_create->u.request.context, 15961 LPFC_NVMET_DATA_BUF_SIZE); 15962 bf_set(lpfc_rq_context_hdr_size, 15963 &rq_create->u.request.context, 15964 LPFC_HDR_BUF_SIZE); 15965 bf_set(lpfc_rq_context_rqe_count_1, 15966 &rq_create->u.request.context, 15967 hrq->entry_count); 15968 bf_set(lpfc_rq_context_rqe_size, 15969 &rq_create->u.request.context, 15970 LPFC_RQE_SIZE_8); 15971 bf_set(lpfc_rq_context_page_size, 15972 &rq_create->u.request.context, 15973 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15974 } 15975 rc = 0; 15976 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15977 memset(dmabuf->virt, 0, hw_page_size); 15978 cnt = page_idx + dmabuf->buffer_tag; 
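/* buffer_tag indexes the pages of this individual RQ, while page_idx
 * carries the running offset into the shared page array, so the header
 * and data pages of every RQ in the set land in consecutive slots of the
 * single RQ_CREATE request.
 */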
15979 rq_create->u.request.page[cnt].addr_lo = 15980 putPaddrLow(dmabuf->phys); 15981 rq_create->u.request.page[cnt].addr_hi = 15982 putPaddrHigh(dmabuf->phys); 15983 rc++; 15984 } 15985 page_idx += rc; 15986 15987 rc = 0; 15988 list_for_each_entry(dmabuf, &drq->page_list, list) { 15989 memset(dmabuf->virt, 0, hw_page_size); 15990 cnt = page_idx + dmabuf->buffer_tag; 15991 rq_create->u.request.page[cnt].addr_lo = 15992 putPaddrLow(dmabuf->phys); 15993 rq_create->u.request.page[cnt].addr_hi = 15994 putPaddrHigh(dmabuf->phys); 15995 rc++; 15996 } 15997 page_idx += rc; 15998 15999 hrq->db_format = LPFC_DB_RING_FORMAT; 16000 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16001 hrq->type = LPFC_HRQ; 16002 hrq->assoc_qid = cq->queue_id; 16003 hrq->subtype = subtype; 16004 hrq->host_index = 0; 16005 hrq->hba_index = 0; 16006 hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16007 16008 drq->db_format = LPFC_DB_RING_FORMAT; 16009 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16010 drq->type = LPFC_DRQ; 16011 drq->assoc_qid = cq->queue_id; 16012 drq->subtype = subtype; 16013 drq->host_index = 0; 16014 drq->hba_index = 0; 16015 drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL; 16016 16017 list_add_tail(&hrq->list, &cq->child_list); 16018 list_add_tail(&drq->list, &cq->child_list); 16019 } 16020 16021 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16022 /* The IOCTL status is embedded in the mailbox subheader. */ 16023 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16024 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16025 if (shdr_status || shdr_add_status || rc) { 16026 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16027 "3120 RQ_CREATE mailbox failed with " 16028 "status x%x add_status x%x, mbx status x%x\n", 16029 shdr_status, shdr_add_status, rc); 16030 status = -ENXIO; 16031 goto out; 16032 } 16033 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16034 if (rc == 0xFFFF) { 16035 status = -ENXIO; 16036 goto out; 16037 } 16038 16039 /* Initialize all RQs with associated queue id */ 16040 for (idx = 0; idx < numrq; idx++) { 16041 hrq = hrqp[idx]; 16042 hrq->queue_id = rc + (2 * idx); 16043 drq = drqp[idx]; 16044 drq->queue_id = rc + (2 * idx) + 1; 16045 } 16046 16047 out: 16048 lpfc_sli4_mbox_cmd_free(phba, mbox); 16049 return status; 16050 } 16051 16052 /** 16053 * lpfc_eq_destroy - Destroy an event Queue on the HBA 16054 * @eq: The queue structure associated with the queue to destroy. 16055 * 16056 * This function destroys a queue, as detailed in @eq by sending an mailbox 16057 * command, specific to the type of queue, to the HBA. 16058 * 16059 * The @eq struct is used to get the queue ID of the queue to destroy. 16060 * 16061 * On success this function will return a zero. If the queue destroy mailbox 16062 * command fails this function will return -ENXIO. 
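 *
 * Illustrative call (the local variable name is hypothetical):
 *
 *      rc = lpfc_eq_destroy(phba, eq);
 *
 * The queue is removed from any list it is on even when the destroy
 * mailbox command fails; only the returned status differs.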
16063 **/ 16064 int 16065 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 16066 { 16067 LPFC_MBOXQ_t *mbox; 16068 int rc, length, status = 0; 16069 uint32_t shdr_status, shdr_add_status; 16070 union lpfc_sli4_cfg_shdr *shdr; 16071 16072 /* sanity check on queue memory */ 16073 if (!eq) 16074 return -ENODEV; 16075 16076 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 16077 if (!mbox) 16078 return -ENOMEM; 16079 length = (sizeof(struct lpfc_mbx_eq_destroy) - 16080 sizeof(struct lpfc_sli4_cfg_mhdr)); 16081 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16082 LPFC_MBOX_OPCODE_EQ_DESTROY, 16083 length, LPFC_SLI4_MBX_EMBED); 16084 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 16085 eq->queue_id); 16086 mbox->vport = eq->phba->pport; 16087 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16088 16089 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 16090 /* The IOCTL status is embedded in the mailbox subheader. */ 16091 shdr = (union lpfc_sli4_cfg_shdr *) 16092 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 16093 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16094 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16095 if (shdr_status || shdr_add_status || rc) { 16096 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16097 "2505 EQ_DESTROY mailbox failed with " 16098 "status x%x add_status x%x, mbx status x%x\n", 16099 shdr_status, shdr_add_status, rc); 16100 status = -ENXIO; 16101 } 16102 16103 /* Remove eq from any list */ 16104 list_del_init(&eq->list); 16105 mempool_free(mbox, eq->phba->mbox_mem_pool); 16106 return status; 16107 } 16108 16109 /** 16110 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 16111 * @cq: The queue structure associated with the queue to destroy. 16112 * 16113 * This function destroys a queue, as detailed in @cq by sending an mailbox 16114 * command, specific to the type of queue, to the HBA. 16115 * 16116 * The @cq struct is used to get the queue ID of the queue to destroy. 16117 * 16118 * On success this function will return a zero. If the queue destroy mailbox 16119 * command fails this function will return -ENXIO. 16120 **/ 16121 int 16122 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 16123 { 16124 LPFC_MBOXQ_t *mbox; 16125 int rc, length, status = 0; 16126 uint32_t shdr_status, shdr_add_status; 16127 union lpfc_sli4_cfg_shdr *shdr; 16128 16129 /* sanity check on queue memory */ 16130 if (!cq) 16131 return -ENODEV; 16132 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 16133 if (!mbox) 16134 return -ENOMEM; 16135 length = (sizeof(struct lpfc_mbx_cq_destroy) - 16136 sizeof(struct lpfc_sli4_cfg_mhdr)); 16137 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16138 LPFC_MBOX_OPCODE_CQ_DESTROY, 16139 length, LPFC_SLI4_MBX_EMBED); 16140 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 16141 cq->queue_id); 16142 mbox->vport = cq->phba->pport; 16143 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16144 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 16145 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16146 shdr = (union lpfc_sli4_cfg_shdr *) 16147 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 16148 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16149 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16150 if (shdr_status || shdr_add_status || rc) { 16151 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16152 "2506 CQ_DESTROY mailbox failed with " 16153 "status x%x add_status x%x, mbx status x%x\n", 16154 shdr_status, shdr_add_status, rc); 16155 status = -ENXIO; 16156 } 16157 /* Remove cq from any list */ 16158 list_del_init(&cq->list); 16159 mempool_free(mbox, cq->phba->mbox_mem_pool); 16160 return status; 16161 } 16162 16163 /** 16164 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 16165 * @qm: The queue structure associated with the queue to destroy. 16166 * 16167 * This function destroys a queue, as detailed in @mq by sending an mailbox 16168 * command, specific to the type of queue, to the HBA. 16169 * 16170 * The @mq struct is used to get the queue ID of the queue to destroy. 16171 * 16172 * On success this function will return a zero. If the queue destroy mailbox 16173 * command fails this function will return -ENXIO. 16174 **/ 16175 int 16176 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 16177 { 16178 LPFC_MBOXQ_t *mbox; 16179 int rc, length, status = 0; 16180 uint32_t shdr_status, shdr_add_status; 16181 union lpfc_sli4_cfg_shdr *shdr; 16182 16183 /* sanity check on queue memory */ 16184 if (!mq) 16185 return -ENODEV; 16186 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 16187 if (!mbox) 16188 return -ENOMEM; 16189 length = (sizeof(struct lpfc_mbx_mq_destroy) - 16190 sizeof(struct lpfc_sli4_cfg_mhdr)); 16191 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16192 LPFC_MBOX_OPCODE_MQ_DESTROY, 16193 length, LPFC_SLI4_MBX_EMBED); 16194 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16195 mq->queue_id); 16196 mbox->vport = mq->phba->pport; 16197 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16198 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16199 /* The IOCTL status is embedded in the mailbox subheader. */ 16200 shdr = (union lpfc_sli4_cfg_shdr *) 16201 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16202 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16203 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16204 if (shdr_status || shdr_add_status || rc) { 16205 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16206 "2507 MQ_DESTROY mailbox failed with " 16207 "status x%x add_status x%x, mbx status x%x\n", 16208 shdr_status, shdr_add_status, rc); 16209 status = -ENXIO; 16210 } 16211 /* Remove mq from any list */ 16212 list_del_init(&mq->list); 16213 mempool_free(mbox, mq->phba->mbox_mem_pool); 16214 return status; 16215 } 16216 16217 /** 16218 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16219 * @wq: The queue structure associated with the queue to destroy. 16220 * 16221 * This function destroys a queue, as detailed in @wq by sending an mailbox 16222 * command, specific to the type of queue, to the HBA. 16223 * 16224 * The @wq struct is used to get the queue ID of the queue to destroy. 16225 * 16226 * On success this function will return a zero. If the queue destroy mailbox 16227 * command fails this function will return -ENXIO. 
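 *
 * Illustrative call (the local variable name is hypothetical):
 *
 *      rc = lpfc_wq_destroy(phba, wq);
 *
 * Besides issuing WQ_DESTROY, this also frees the SLI ring (wq->pring)
 * that lpfc_wq_create() allocated for the work queue, so the queue must
 * not be referenced afterwards.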
16228 **/ 16229 int 16230 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16231 { 16232 LPFC_MBOXQ_t *mbox; 16233 int rc, length, status = 0; 16234 uint32_t shdr_status, shdr_add_status; 16235 union lpfc_sli4_cfg_shdr *shdr; 16236 16237 /* sanity check on queue memory */ 16238 if (!wq) 16239 return -ENODEV; 16240 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16241 if (!mbox) 16242 return -ENOMEM; 16243 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16244 sizeof(struct lpfc_sli4_cfg_mhdr)); 16245 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16246 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16247 length, LPFC_SLI4_MBX_EMBED); 16248 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16249 wq->queue_id); 16250 mbox->vport = wq->phba->pport; 16251 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16252 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16253 shdr = (union lpfc_sli4_cfg_shdr *) 16254 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16255 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16256 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16257 if (shdr_status || shdr_add_status || rc) { 16258 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16259 "2508 WQ_DESTROY mailbox failed with " 16260 "status x%x add_status x%x, mbx status x%x\n", 16261 shdr_status, shdr_add_status, rc); 16262 status = -ENXIO; 16263 } 16264 /* Remove wq from any list */ 16265 list_del_init(&wq->list); 16266 kfree(wq->pring); 16267 wq->pring = NULL; 16268 mempool_free(mbox, wq->phba->mbox_mem_pool); 16269 return status; 16270 } 16271 16272 /** 16273 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16274 * @rq: The queue structure associated with the queue to destroy. 16275 * 16276 * This function destroys a queue, as detailed in @rq by sending an mailbox 16277 * command, specific to the type of queue, to the HBA. 16278 * 16279 * The @rq struct is used to get the queue ID of the queue to destroy. 16280 * 16281 * On success this function will return a zero. If the queue destroy mailbox 16282 * command fails this function will return -ENXIO. 16283 **/ 16284 int 16285 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16286 struct lpfc_queue *drq) 16287 { 16288 LPFC_MBOXQ_t *mbox; 16289 int rc, length, status = 0; 16290 uint32_t shdr_status, shdr_add_status; 16291 union lpfc_sli4_cfg_shdr *shdr; 16292 16293 /* sanity check on queue memory */ 16294 if (!hrq || !drq) 16295 return -ENODEV; 16296 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16297 if (!mbox) 16298 return -ENOMEM; 16299 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16300 sizeof(struct lpfc_sli4_cfg_mhdr)); 16301 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16302 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16303 length, LPFC_SLI4_MBX_EMBED); 16304 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16305 hrq->queue_id); 16306 mbox->vport = hrq->phba->pport; 16307 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16308 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16309 /* The IOCTL status is embedded in the mailbox subheader. 
*/
16310 shdr = (union lpfc_sli4_cfg_shdr *)
16311 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16312 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16313 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16314 if (shdr_status || shdr_add_status || rc) {
16315 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16316 "2509 RQ_DESTROY mailbox failed with "
16317 "status x%x add_status x%x, mbx status x%x\n",
16318 shdr_status, shdr_add_status, rc);
16319 if (rc != MBX_TIMEOUT)
16320 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16321 return -ENXIO;
16322 }
16323 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
16324 drq->queue_id);
16325 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
16326 shdr = (union lpfc_sli4_cfg_shdr *)
16327 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
16328 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16329 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16330 if (shdr_status || shdr_add_status || rc) {
16331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16332 "2510 RQ_DESTROY mailbox failed with "
16333 "status x%x add_status x%x, mbx status x%x\n",
16334 shdr_status, shdr_add_status, rc);
16335 status = -ENXIO;
16336 }
16337 list_del_init(&hrq->list);
16338 list_del_init(&drq->list);
16339 mempool_free(mbox, hrq->phba->mbox_mem_pool);
16340 return status;
16341 }
16342
16343 /**
16344 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16345 * @phba: pointer to lpfc hba data structure.
16346 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16347 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16348 * @xritag: the xritag that ties this io to the SGL pages.
16349 *
16350 * This routine will post the sgl pages for the IO that has the xritag
16351 * that is in the iocbq structure. The xritag is assigned during iocbq
16352 * creation and persists for as long as the driver is loaded.
16353 * If the caller has fewer than 256 scatter gather segments to map then
16354 * pdma_phys_addr1 should be 0.
16355 * If the caller needs to map more than 256 scatter gather segments then
16356 * pdma_phys_addr1 should be a valid physical address.
16357 * Physical addresses for SGLs must be 64-byte aligned.
16358 * If you are going to map 2 SGLs then the first one must have 256 entries
16359 * and the second SGL can have between 1 and 256 entries.
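 *
 * Illustrative single-page post (local names are hypothetical); when the
 * buffer fits in one SGL page the second page address is simply 0:
 *
 *      rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);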
16360 *
16361 * Return codes:
16362 * 0 - Success
16363 * -ENXIO, -ENOMEM - Failure
16364 **/
16365 int
16366 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
16367 dma_addr_t pdma_phys_addr0,
16368 dma_addr_t pdma_phys_addr1,
16369 uint16_t xritag)
16370 {
16371 struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
16372 LPFC_MBOXQ_t *mbox;
16373 int rc;
16374 uint32_t shdr_status, shdr_add_status;
16375 uint32_t mbox_tmo;
16376 union lpfc_sli4_cfg_shdr *shdr;
16377
16378 if (xritag == NO_XRI) {
16379 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16380 "0364 Invalid param:\n");
16381 return -EINVAL;
16382 }
16383
16384 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16385 if (!mbox)
16386 return -ENOMEM;
16387
16388 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16389 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
16390 sizeof(struct lpfc_mbx_post_sgl_pages) -
16391 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
16392
16393 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
16394 &mbox->u.mqe.un.post_sgl_pages;
16395 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
16396 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
16397
16398 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
16399 cpu_to_le32(putPaddrLow(pdma_phys_addr0));
16400 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
16401 cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
16402
16403 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
16404 cpu_to_le32(putPaddrLow(pdma_phys_addr1));
16405 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
16406 cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
16407 if (!phba->sli4_hba.intr_enable)
16408 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16409 else {
16410 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16411 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16412 }
16413 /* The IOCTL status is embedded in the mailbox subheader. */
16414 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
16415 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16417 if (rc != MBX_TIMEOUT)
16418 mempool_free(mbox, phba->mbox_mem_pool);
16419 if (shdr_status || shdr_add_status || rc) {
16420 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16421 "2511 POST_SGL mailbox failed with "
16422 "status x%x add_status x%x, mbx status x%x\n",
16423 shdr_status, shdr_add_status, rc);
16424 }
16425 return 0;
16426 }
16427
16428 /**
16429 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
16430 * @phba: pointer to lpfc hba data structure.
16431 *
16432 * This routine is invoked to allocate the next available xri from the
16433 * driver's xri bitmask. The xri is a logical index that starts at zero;
16434 * it stays allocated until it is explicitly released again with
16435 * lpfc_sli4_free_xri().
16436 *
16437 * Returns
16438 * An available xri in the range 0 <= xri < max_xri if successful
16439 * NO_XRI if no xris are available.
16440 **/
16441 static uint16_t
16442 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
16443 {
16444 unsigned long xri;
16445
16446 /*
16447 * Fetch the next logical xri. Because this index is logical,
16448 * the driver starts at 0 each time.
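 * The search and the bit set are done under the hbalock so that two
 * allocators cannot claim the same index; the matching
 * test_and_clear_bit() in __lpfc_sli4_free_xri() returns the index to
 * the pool.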
16449 */
16450 spin_lock_irq(&phba->hbalock);
16451 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
16452 phba->sli4_hba.max_cfg_param.max_xri, 0);
16453 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
16454 spin_unlock_irq(&phba->hbalock);
16455 return NO_XRI;
16456 } else {
16457 set_bit(xri, phba->sli4_hba.xri_bmask);
16458 phba->sli4_hba.max_cfg_param.xri_used++;
16459 }
16460 spin_unlock_irq(&phba->hbalock);
16461 return xri;
16462 }
16463
16464 /**
16465 * __lpfc_sli4_free_xri - Release an xri for reuse.
16466 * @phba: pointer to lpfc hba data structure.
16467 *
16468 * This routine is invoked to release an xri to the pool of
16469 * available xris maintained by the driver.
16470 **/
16471 static void
16472 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16473 {
16474 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
16475 phba->sli4_hba.max_cfg_param.xri_used--;
16476 }
16477 }
16478
16479 /**
16480 * lpfc_sli4_free_xri - Release an xri for reuse.
16481 * @phba: pointer to lpfc hba data structure.
16482 *
16483 * This routine is invoked to release an xri to the pool of
16484 * available xris maintained by the driver.
16485 **/
16486 void
16487 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
16488 {
16489 spin_lock_irq(&phba->hbalock);
16490 __lpfc_sli4_free_xri(phba, xri);
16491 spin_unlock_irq(&phba->hbalock);
16492 }
16493
16494 /**
16495 * lpfc_sli4_next_xritag - Get an xritag for the io
16496 * @phba: Pointer to HBA context object.
16497 *
16498 * This function gets an xritag for the iocb. If there is no unused xritag
16499 * it will return NO_XRI (0xffff).
16500 * The function returns the allocated xritag if successful, else returns
16501 * NO_XRI.
16502 * The caller is not required to hold any lock.
16503 **/
16504 uint16_t
16505 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
16506 {
16507 uint16_t xri_index;
16508
16509 xri_index = lpfc_sli4_alloc_xri(phba);
16510 if (xri_index == NO_XRI)
16511 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16512 "2004 Failed to allocate XRI.last XRITAG is %d"
16513 " Max XRI is %d, Used XRI is %d\n",
16514 xri_index,
16515 phba->sli4_hba.max_cfg_param.max_xri,
16516 phba->sli4_hba.max_cfg_param.xri_used);
16517 return xri_index;
16518 }
16519
16520 /**
16521 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
16522 * @phba: pointer to lpfc hba data structure.
16523 * @post_sgl_list: pointer to els sgl entry list.
16524 * @post_cnt: number of els sgl entries on the list.
16525 *
16526 * This routine is invoked to post a block of driver's sgl pages to the
16527 * HBA using a non-embedded mailbox command. No lock is held. This routine
16528 * is only called when the driver is loading and after all IO has been
16529 * stopped.
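 *
 * The whole request must fit in a single non-embedded mailbox page: the
 * required length is post_cnt * sizeof(struct sgl_page_pairs) plus the
 * configuration header, and anything larger than SLI4_PAGE_SIZE is
 * rejected with -ENOMEM. Assuming 16-byte page pairs (two 64-bit
 * addresses each), one command can therefore cover on the order of 250
 * ELS SGL entries.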
16530 **/ 16531 static int 16532 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16533 struct list_head *post_sgl_list, 16534 int post_cnt) 16535 { 16536 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16537 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16538 struct sgl_page_pairs *sgl_pg_pairs; 16539 void *viraddr; 16540 LPFC_MBOXQ_t *mbox; 16541 uint32_t reqlen, alloclen, pg_pairs; 16542 uint32_t mbox_tmo; 16543 uint16_t xritag_start = 0; 16544 int rc = 0; 16545 uint32_t shdr_status, shdr_add_status; 16546 union lpfc_sli4_cfg_shdr *shdr; 16547 16548 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16549 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16550 if (reqlen > SLI4_PAGE_SIZE) { 16551 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16552 "2559 Block sgl registration required DMA " 16553 "size (%d) great than a page\n", reqlen); 16554 return -ENOMEM; 16555 } 16556 16557 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16558 if (!mbox) 16559 return -ENOMEM; 16560 16561 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16562 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16563 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16564 LPFC_SLI4_MBX_NEMBED); 16565 16566 if (alloclen < reqlen) { 16567 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16568 "0285 Allocated DMA memory size (%d) is " 16569 "less than the requested DMA memory " 16570 "size (%d)\n", alloclen, reqlen); 16571 lpfc_sli4_mbox_cmd_free(phba, mbox); 16572 return -ENOMEM; 16573 } 16574 /* Set up the SGL pages in the non-embedded DMA pages */ 16575 viraddr = mbox->sge_array->addr[0]; 16576 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16577 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16578 16579 pg_pairs = 0; 16580 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16581 /* Set up the sge entry */ 16582 sgl_pg_pairs->sgl_pg0_addr_lo = 16583 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16584 sgl_pg_pairs->sgl_pg0_addr_hi = 16585 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16586 sgl_pg_pairs->sgl_pg1_addr_lo = 16587 cpu_to_le32(putPaddrLow(0)); 16588 sgl_pg_pairs->sgl_pg1_addr_hi = 16589 cpu_to_le32(putPaddrHigh(0)); 16590 16591 /* Keep the first xritag on the list */ 16592 if (pg_pairs == 0) 16593 xritag_start = sglq_entry->sli4_xritag; 16594 sgl_pg_pairs++; 16595 pg_pairs++; 16596 } 16597 16598 /* Complete initialization and perform endian conversion. */ 16599 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16600 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16601 sgl->word0 = cpu_to_le32(sgl->word0); 16602 16603 if (!phba->sli4_hba.intr_enable) 16604 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16605 else { 16606 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16607 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16608 } 16609 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16610 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16611 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16612 if (rc != MBX_TIMEOUT) 16613 lpfc_sli4_mbox_cmd_free(phba, mbox); 16614 if (shdr_status || shdr_add_status || rc) { 16615 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16616 "2513 POST_SGL_BLOCK mailbox command failed " 16617 "status x%x add_status x%x mbx status x%x\n", 16618 shdr_status, shdr_add_status, rc); 16619 rc = -ENXIO; 16620 } 16621 return rc; 16622 } 16623 16624 /** 16625 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware 16626 * @phba: pointer to lpfc hba data structure. 
16627 * @nblist: pointer to nvme buffer list. 16628 * @count: number of scsi buffers on the list. 16629 * 16630 * This routine is invoked to post a block of @count scsi sgl pages from a 16631 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command. 16632 * No Lock is held. 16633 * 16634 **/ 16635 static int 16636 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist, 16637 int count) 16638 { 16639 struct lpfc_io_buf *lpfc_ncmd; 16640 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16641 struct sgl_page_pairs *sgl_pg_pairs; 16642 void *viraddr; 16643 LPFC_MBOXQ_t *mbox; 16644 uint32_t reqlen, alloclen, pg_pairs; 16645 uint32_t mbox_tmo; 16646 uint16_t xritag_start = 0; 16647 int rc = 0; 16648 uint32_t shdr_status, shdr_add_status; 16649 dma_addr_t pdma_phys_bpl1; 16650 union lpfc_sli4_cfg_shdr *shdr; 16651 16652 /* Calculate the requested length of the dma memory */ 16653 reqlen = count * sizeof(struct sgl_page_pairs) + 16654 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16655 if (reqlen > SLI4_PAGE_SIZE) { 16656 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16657 "6118 Block sgl registration required DMA " 16658 "size (%d) great than a page\n", reqlen); 16659 return -ENOMEM; 16660 } 16661 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16662 if (!mbox) { 16663 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16664 "6119 Failed to allocate mbox cmd memory\n"); 16665 return -ENOMEM; 16666 } 16667 16668 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16669 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16670 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16671 reqlen, LPFC_SLI4_MBX_NEMBED); 16672 16673 if (alloclen < reqlen) { 16674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16675 "6120 Allocated DMA memory size (%d) is " 16676 "less than the requested DMA memory " 16677 "size (%d)\n", alloclen, reqlen); 16678 lpfc_sli4_mbox_cmd_free(phba, mbox); 16679 return -ENOMEM; 16680 } 16681 16682 /* Get the first SGE entry from the non-embedded DMA memory */ 16683 viraddr = mbox->sge_array->addr[0]; 16684 16685 /* Set up the SGL pages in the non-embedded DMA pages */ 16686 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16687 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16688 16689 pg_pairs = 0; 16690 list_for_each_entry(lpfc_ncmd, nblist, list) { 16691 /* Set up the sge entry */ 16692 sgl_pg_pairs->sgl_pg0_addr_lo = 16693 cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl)); 16694 sgl_pg_pairs->sgl_pg0_addr_hi = 16695 cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl)); 16696 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16697 pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl + 16698 SGL_PAGE_SIZE; 16699 else 16700 pdma_phys_bpl1 = 0; 16701 sgl_pg_pairs->sgl_pg1_addr_lo = 16702 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16703 sgl_pg_pairs->sgl_pg1_addr_hi = 16704 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16705 /* Keep the first xritag on the list */ 16706 if (pg_pairs == 0) 16707 xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag; 16708 sgl_pg_pairs++; 16709 pg_pairs++; 16710 } 16711 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16712 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16713 /* Perform endian conversion if necessary */ 16714 sgl->word0 = cpu_to_le32(sgl->word0); 16715 16716 if (!phba->sli4_hba.intr_enable) { 16717 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16718 } else { 16719 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16720 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16721 } 16722 shdr = (union 
lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr; 16723 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16724 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16725 if (rc != MBX_TIMEOUT) 16726 lpfc_sli4_mbox_cmd_free(phba, mbox); 16727 if (shdr_status || shdr_add_status || rc) { 16728 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16729 "6125 POST_SGL_BLOCK mailbox command failed " 16730 "status x%x add_status x%x mbx status x%x\n", 16731 shdr_status, shdr_add_status, rc); 16732 rc = -ENXIO; 16733 } 16734 return rc; 16735 } 16736 16737 /** 16738 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list 16739 * @phba: pointer to lpfc hba data structure. 16740 * @post_nblist: pointer to the nvme buffer list. 16741 * 16742 * This routine walks a list of nvme buffers that was passed in. It attempts 16743 * to construct blocks of nvme buffer sgls which contains contiguous xris and 16744 * uses the non-embedded SGL block post mailbox commands to post to the port. 16745 * For single NVME buffer sgl with non-contiguous xri, if any, it shall use 16746 * embedded SGL post mailbox command for posting. The @post_nblist passed in 16747 * must be local list, thus no lock is needed when manipulate the list. 16748 * 16749 * Returns: 0 = failure, non-zero number of successfully posted buffers. 16750 **/ 16751 int 16752 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba, 16753 struct list_head *post_nblist, int sb_count) 16754 { 16755 struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next; 16756 int status, sgl_size; 16757 int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; 16758 dma_addr_t pdma_phys_sgl1; 16759 int last_xritag = NO_XRI; 16760 int cur_xritag; 16761 LIST_HEAD(prep_nblist); 16762 LIST_HEAD(blck_nblist); 16763 LIST_HEAD(nvme_nblist); 16764 16765 /* sanity check */ 16766 if (sb_count <= 0) 16767 return -EINVAL; 16768 16769 sgl_size = phba->cfg_sg_dma_buf_size; 16770 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) { 16771 list_del_init(&lpfc_ncmd->list); 16772 block_cnt++; 16773 if ((last_xritag != NO_XRI) && 16774 (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) { 16775 /* a hole in xri block, form a sgl posting block */ 16776 list_splice_init(&prep_nblist, &blck_nblist); 16777 post_cnt = block_cnt - 1; 16778 /* prepare list for next posting block */ 16779 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16780 block_cnt = 1; 16781 } else { 16782 /* prepare list for next posting block */ 16783 list_add_tail(&lpfc_ncmd->list, &prep_nblist); 16784 /* enough sgls for non-embed sgl mbox command */ 16785 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 16786 list_splice_init(&prep_nblist, &blck_nblist); 16787 post_cnt = block_cnt; 16788 block_cnt = 0; 16789 } 16790 } 16791 num_posting++; 16792 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16793 16794 /* end of repost sgl list condition for NVME buffers */ 16795 if (num_posting == sb_count) { 16796 if (post_cnt == 0) { 16797 /* last sgl posting block */ 16798 list_splice_init(&prep_nblist, &blck_nblist); 16799 post_cnt = block_cnt; 16800 } else if (block_cnt == 1) { 16801 /* last single sgl with non-contiguous xri */ 16802 if (sgl_size > SGL_PAGE_SIZE) 16803 pdma_phys_sgl1 = 16804 lpfc_ncmd->dma_phys_sgl + 16805 SGL_PAGE_SIZE; 16806 else 16807 pdma_phys_sgl1 = 0; 16808 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag; 16809 status = lpfc_sli4_post_sgl( 16810 phba, lpfc_ncmd->dma_phys_sgl, 16811 pdma_phys_sgl1, cur_xritag); 16812 if (status) { 16813 /* Post error. Buffer unavailable. 
*/ 16814 lpfc_ncmd->flags |= 16815 LPFC_SBUF_NOT_POSTED; 16816 } else { 16817 /* Post success. Bffer available. */ 16818 lpfc_ncmd->flags &= 16819 ~LPFC_SBUF_NOT_POSTED; 16820 lpfc_ncmd->status = IOSTAT_SUCCESS; 16821 num_posted++; 16822 } 16823 /* success, put on NVME buffer sgl list */ 16824 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16825 } 16826 } 16827 16828 /* continue until a nembed page worth of sgls */ 16829 if (post_cnt == 0) 16830 continue; 16831 16832 /* post block of NVME buffer list sgls */ 16833 status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist, 16834 post_cnt); 16835 16836 /* don't reset xirtag due to hole in xri block */ 16837 if (block_cnt == 0) 16838 last_xritag = NO_XRI; 16839 16840 /* reset NVME buffer post count for next round of posting */ 16841 post_cnt = 0; 16842 16843 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */ 16844 while (!list_empty(&blck_nblist)) { 16845 list_remove_head(&blck_nblist, lpfc_ncmd, 16846 struct lpfc_io_buf, list); 16847 if (status) { 16848 /* Post error. Mark buffer unavailable. */ 16849 lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED; 16850 } else { 16851 /* Post success, Mark buffer available. */ 16852 lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED; 16853 lpfc_ncmd->status = IOSTAT_SUCCESS; 16854 num_posted++; 16855 } 16856 list_add_tail(&lpfc_ncmd->list, &nvme_nblist); 16857 } 16858 } 16859 /* Push NVME buffers with sgl posted to the available list */ 16860 lpfc_io_buf_replenish(phba, &nvme_nblist); 16861 16862 return num_posted; 16863 } 16864 16865 /** 16866 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16867 * @phba: pointer to lpfc_hba struct that the frame was received on 16868 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16869 * 16870 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16871 * valid type of frame that the LPFC driver will handle. This function will 16872 * return a zero if the frame is a valid frame or a non zero value when the 16873 * frame does not pass the check. 
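 *
 * For example, an unsolicited ELS request (fh_r_ctl of FC_RCTL_ELS_REQ
 * with fh_type of FC_TYPE_ELS) passes the check and 0 is returned, while
 * a frame carrying an unhandled type such as FC_TYPE_IP is dropped and 1
 * is returned.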
16874 **/ 16875 static int 16876 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16877 { 16878 /* make rctl_names static to save stack space */ 16879 struct fc_vft_header *fc_vft_hdr; 16880 uint32_t *header = (uint32_t *) fc_hdr; 16881 16882 #define FC_RCTL_MDS_DIAGS 0xF4 16883 16884 switch (fc_hdr->fh_r_ctl) { 16885 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16886 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16887 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16888 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16889 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16890 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16891 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16892 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16893 case FC_RCTL_ELS_REQ: /* extended link services request */ 16894 case FC_RCTL_ELS_REP: /* extended link services reply */ 16895 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16896 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16897 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16898 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16899 case FC_RCTL_BA_RMC: /* remove connection */ 16900 case FC_RCTL_BA_ACC: /* basic accept */ 16901 case FC_RCTL_BA_RJT: /* basic reject */ 16902 case FC_RCTL_BA_PRMT: 16903 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16904 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16905 case FC_RCTL_P_RJT: /* port reject */ 16906 case FC_RCTL_F_RJT: /* fabric reject */ 16907 case FC_RCTL_P_BSY: /* port busy */ 16908 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16909 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16910 case FC_RCTL_LCR: /* link credit reset */ 16911 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16912 case FC_RCTL_END: /* end */ 16913 break; 16914 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16915 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16916 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16917 return lpfc_fc_frame_check(phba, fc_hdr); 16918 default: 16919 goto drop; 16920 } 16921 16922 switch (fc_hdr->fh_type) { 16923 case FC_TYPE_BLS: 16924 case FC_TYPE_ELS: 16925 case FC_TYPE_FCP: 16926 case FC_TYPE_CT: 16927 case FC_TYPE_NVME: 16928 break; 16929 case FC_TYPE_IP: 16930 case FC_TYPE_ILS: 16931 default: 16932 goto drop; 16933 } 16934 16935 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16936 "2538 Received frame rctl:x%x, type:x%x, " 16937 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 16938 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16939 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16940 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16941 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16942 be32_to_cpu(header[6])); 16943 return 0; 16944 drop: 16945 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16946 "2539 Dropped frame rctl:x%x type:x%x\n", 16947 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16948 return 1; 16949 } 16950 16951 /** 16952 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16953 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16954 * 16955 * This function processes the FC header to retrieve the VFI from the VF 16956 * header, if one exists. This function will return the VFI if one exists 16957 * or 0 if no VSAN Header exists. 
16958 **/ 16959 static uint32_t 16960 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16961 { 16962 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16963 16964 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16965 return 0; 16966 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16967 } 16968 16969 /** 16970 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16971 * @phba: Pointer to the HBA structure to search for the vport on 16972 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16973 * @fcfi: The FC Fabric ID that the frame came from 16974 * 16975 * This function searches the @phba for a vport that matches the content of the 16976 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16977 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 16978 * returns the matching vport pointer or NULL if unable to match frame to a 16979 * vport. 16980 **/ 16981 static struct lpfc_vport * 16982 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 16983 uint16_t fcfi, uint32_t did) 16984 { 16985 struct lpfc_vport **vports; 16986 struct lpfc_vport *vport = NULL; 16987 int i; 16988 16989 if (did == Fabric_DID) 16990 return phba->pport; 16991 if ((phba->pport->fc_flag & FC_PT2PT) && 16992 !(phba->link_state == LPFC_HBA_READY)) 16993 return phba->pport; 16994 16995 vports = lpfc_create_vport_work_array(phba); 16996 if (vports != NULL) { 16997 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 16998 if (phba->fcf.fcfi == fcfi && 16999 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 17000 vports[i]->fc_myDID == did) { 17001 vport = vports[i]; 17002 break; 17003 } 17004 } 17005 } 17006 lpfc_destroy_vport_work_array(phba, vports); 17007 return vport; 17008 } 17009 17010 /** 17011 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 17012 * @vport: The vport to work on. 17013 * 17014 * This function updates the receive sequence time stamp for this vport. The 17015 * receive sequence time stamp indicates the time that the last frame of the 17016 * sequence that has been idle for the longest amount of time was received. 17017 * The driver uses this time stamp to indicate if any received sequences have 17018 * timed out. 17019 **/ 17020 static void 17021 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 17022 { 17023 struct lpfc_dmabuf *h_buf; 17024 struct hbq_dmabuf *dmabuf = NULL; 17025 17026 /* get the oldest sequence on the rcv list */ 17027 h_buf = list_get_first(&vport->rcv_buffer_list, 17028 struct lpfc_dmabuf, list); 17029 if (!h_buf) 17030 return; 17031 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17032 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 17033 } 17034 17035 /** 17036 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 17037 * @vport: The vport that the received sequences were sent to. 17038 * 17039 * This function cleans up all outstanding received sequences. This is called 17040 * by the driver when a link event or user action invalidates all the received 17041 * sequences.
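 * Both the header buffer and every data buffer linked to each partially assembled sequence are returned to the driver with lpfc_in_buf_free().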
17042 **/ 17043 void 17044 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 17045 { 17046 struct lpfc_dmabuf *h_buf, *hnext; 17047 struct lpfc_dmabuf *d_buf, *dnext; 17048 struct hbq_dmabuf *dmabuf = NULL; 17049 17050 /* start with the oldest sequence on the rcv list */ 17051 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17052 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17053 list_del_init(&dmabuf->hbuf.list); 17054 list_for_each_entry_safe(d_buf, dnext, 17055 &dmabuf->dbuf.list, list) { 17056 list_del_init(&d_buf->list); 17057 lpfc_in_buf_free(vport->phba, d_buf); 17058 } 17059 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17060 } 17061 } 17062 17063 /** 17064 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 17065 * @vport: The vport that the received sequences were sent to. 17066 * 17067 * This function determines whether any received sequences have timed out by 17068 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 17069 * indicates that there is at least one timed out sequence this routine will 17070 * go through the received sequences one at a time from most inactive to most 17071 * active to determine which ones need to be cleaned up. Once it has determined 17072 * that a sequence needs to be cleaned up it will simply free up the resources 17073 * without sending an abort. 17074 **/ 17075 void 17076 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 17077 { 17078 struct lpfc_dmabuf *h_buf, *hnext; 17079 struct lpfc_dmabuf *d_buf, *dnext; 17080 struct hbq_dmabuf *dmabuf = NULL; 17081 unsigned long timeout; 17082 int abort_count = 0; 17083 17084 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17085 vport->rcv_buffer_time_stamp); 17086 if (list_empty(&vport->rcv_buffer_list) || 17087 time_before(jiffies, timeout)) 17088 return; 17089 /* start with the oldest sequence on the rcv list */ 17090 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17091 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17092 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17093 dmabuf->time_stamp); 17094 if (time_before(jiffies, timeout)) 17095 break; 17096 abort_count++; 17097 list_del_init(&dmabuf->hbuf.list); 17098 list_for_each_entry_safe(d_buf, dnext, 17099 &dmabuf->dbuf.list, list) { 17100 list_del_init(&d_buf->list); 17101 lpfc_in_buf_free(vport->phba, d_buf); 17102 } 17103 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17104 } 17105 if (abort_count) 17106 lpfc_update_rcv_time_stamp(vport); 17107 } 17108 17109 /** 17110 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 17111 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 17112 * 17113 * This function searches through the existing incomplete sequences that have 17114 * been sent to this @vport. If the frame matches one of the incomplete 17115 * sequences then the dbuf in the @dmabuf is added to the list of frames that 17116 * make up that sequence. If no sequence is found that matches this frame then 17117 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 17118 * This function returns a pointer to the first dmabuf in the sequence list that 17119 * the frame was linked to. 
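 * A frame is matched to a pending sequence when its SEQ_ID, OX_ID, and 3-byte S_ID match those of the sequence's header buffer. Frames within a sequence are kept ordered by SEQ_CNT so that lpfc_seq_complete() can later verify there are no holes.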
17120 **/ 17121 static struct hbq_dmabuf * 17122 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17123 { 17124 struct fc_frame_header *new_hdr; 17125 struct fc_frame_header *temp_hdr; 17126 struct lpfc_dmabuf *d_buf; 17127 struct lpfc_dmabuf *h_buf; 17128 struct hbq_dmabuf *seq_dmabuf = NULL; 17129 struct hbq_dmabuf *temp_dmabuf = NULL; 17130 uint8_t found = 0; 17131 17132 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17133 dmabuf->time_stamp = jiffies; 17134 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17135 17136 /* Use the hdr_buf to find the sequence that this frame belongs to */ 17137 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17138 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17139 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17140 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17141 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17142 continue; 17143 /* found a pending sequence that matches this frame */ 17144 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17145 break; 17146 } 17147 if (!seq_dmabuf) { 17148 /* 17149 * This indicates first frame received for this sequence. 17150 * Queue the buffer on the vport's rcv_buffer_list. 17151 */ 17152 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 17153 lpfc_update_rcv_time_stamp(vport); 17154 return dmabuf; 17155 } 17156 temp_hdr = seq_dmabuf->hbuf.virt; 17157 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 17158 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 17159 list_del_init(&seq_dmabuf->hbuf.list); 17160 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 17161 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 17162 lpfc_update_rcv_time_stamp(vport); 17163 return dmabuf; 17164 } 17165 /* move this sequence to the tail to indicate a young sequence */ 17166 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 17167 seq_dmabuf->time_stamp = jiffies; 17168 lpfc_update_rcv_time_stamp(vport); 17169 if (list_empty(&seq_dmabuf->dbuf.list)) { 17170 temp_hdr = dmabuf->hbuf.virt; 17171 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 17172 return seq_dmabuf; 17173 } 17174 /* find the correct place in the sequence to insert this frame */ 17175 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 17176 while (!found) { 17177 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17178 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 17179 /* 17180 * If the frame's sequence count is greater than the frame on 17181 * the list then insert the frame right after this frame 17182 */ 17183 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 17184 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 17185 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 17186 found = 1; 17187 break; 17188 } 17189 17190 if (&d_buf->list == &seq_dmabuf->dbuf.list) 17191 break; 17192 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 17193 } 17194 17195 if (found) 17196 return seq_dmabuf; 17197 return NULL; 17198 } 17199 17200 /** 17201 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 17202 * @vport: pointer to a virtual port 17203 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17204 * 17205 * This function tries to abort the partially assembled sequence, described 17206 * by the information from the basic abort @dmabuf. It checks to see whether such a 17207 * partially assembled sequence is held by the driver. If so, it shall free up all 17208 * the frames from the partially assembled sequence.
17209 * 17210 * Return 17211 * true -- if there is a matching partially assembled sequence present and all 17212 * the frames were freed with the sequence; 17213 * false -- if there is no matching partially assembled sequence present so 17214 * nothing got aborted in the lower layer driver 17215 **/ 17216 static bool 17217 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 17218 struct hbq_dmabuf *dmabuf) 17219 { 17220 struct fc_frame_header *new_hdr; 17221 struct fc_frame_header *temp_hdr; 17222 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 17223 struct hbq_dmabuf *seq_dmabuf = NULL; 17224 17225 /* Use the hdr_buf to find the sequence that matches this frame */ 17226 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17227 INIT_LIST_HEAD(&dmabuf->hbuf.list); 17228 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17229 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17230 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17231 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17232 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17233 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17234 continue; 17235 /* found a pending sequence that matches this frame */ 17236 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17237 break; 17238 } 17239 17240 /* Free up all the frames from the partially assembled sequence */ 17241 if (seq_dmabuf) { 17242 list_for_each_entry_safe(d_buf, n_buf, 17243 &seq_dmabuf->dbuf.list, list) { 17244 list_del_init(&d_buf->list); 17245 lpfc_in_buf_free(vport->phba, d_buf); 17246 } 17247 return true; 17248 } 17249 return false; 17250 } 17251 17252 /** 17253 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 17254 * @vport: pointer to a virtual port 17255 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17256 * 17257 * This function tries to abort the assembled sequence at the upper level 17258 * protocol, described by the information from the basic abort @dmabuf. It 17259 * checks to see whether such a pending context exists at the upper level protocol. 17260 * If so, it shall clean up the pending context. 17261 * 17262 * Return 17263 * true -- if there is a matching pending context of the sequence cleaned 17264 * at ulp; 17265 * false -- if there is no matching pending context of the sequence present 17266 * at ulp. 17267 **/ 17268 static bool 17269 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17270 { 17271 struct lpfc_hba *phba = vport->phba; 17272 int handled; 17273 17274 /* Accepting abort at ulp with SLI4 only */ 17275 if (phba->sli_rev < LPFC_SLI_REV4) 17276 return false; 17277 17278 /* Let all interested upper level protocols handle the abort */ 17279 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 17280 if (handled) 17281 return true; 17282 17283 return false; 17284 } 17285 17286 /** 17287 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 17288 * @phba: Pointer to HBA context object. 17289 * @cmd_iocbq: pointer to the command iocbq structure. 17290 * @rsp_iocbq: pointer to the response iocbq structure. 17291 * 17292 * This function handles the sequence abort response iocb command complete 17293 * event. It properly releases the memory allocated to the sequence abort 17294 * accept iocb.
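 * The node reference taken when the BLS response was issued is dropped here and the command iocbq is returned to the pool; a non-zero ulpStatus in the response indicates the BLS ABORT RSP was not delivered to the remote node and is logged.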
17295 **/ 17296 static void 17297 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 17298 struct lpfc_iocbq *cmd_iocbq, 17299 struct lpfc_iocbq *rsp_iocbq) 17300 { 17301 struct lpfc_nodelist *ndlp; 17302 17303 if (cmd_iocbq) { 17304 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 17305 lpfc_nlp_put(ndlp); 17306 lpfc_nlp_not_used(ndlp); 17307 lpfc_sli_release_iocbq(phba, cmd_iocbq); 17308 } 17309 17310 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17311 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17312 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17313 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17314 rsp_iocbq->iocb.ulpStatus, 17315 rsp_iocbq->iocb.un.ulpWord[4]); 17316 } 17317 17318 /** 17319 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17320 * @phba: Pointer to HBA context object. 17321 * @xri: xri id in transaction. 17322 * 17323 * This function validates the xri maps to the known range of XRIs allocated an 17324 * used by the driver. 17325 **/ 17326 uint16_t 17327 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17328 uint16_t xri) 17329 { 17330 uint16_t i; 17331 17332 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17333 if (xri == phba->sli4_hba.xri_ids[i]) 17334 return i; 17335 } 17336 return NO_XRI; 17337 } 17338 17339 /** 17340 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17341 * @phba: Pointer to HBA context object. 17342 * @fc_hdr: pointer to a FC frame header. 17343 * 17344 * This function sends a basic response to a previous unsol sequence abort 17345 * event after aborting the sequence handling. 17346 **/ 17347 void 17348 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17349 struct fc_frame_header *fc_hdr, bool aborted) 17350 { 17351 struct lpfc_hba *phba = vport->phba; 17352 struct lpfc_iocbq *ctiocb = NULL; 17353 struct lpfc_nodelist *ndlp; 17354 uint16_t oxid, rxid, xri, lxri; 17355 uint32_t sid, fctl; 17356 IOCB_t *icmd; 17357 int rc; 17358 17359 if (!lpfc_is_link_up(phba)) 17360 return; 17361 17362 sid = sli4_sid_from_fc_hdr(fc_hdr); 17363 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17364 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17365 17366 ndlp = lpfc_findnode_did(vport, sid); 17367 if (!ndlp) { 17368 ndlp = lpfc_nlp_init(vport, sid); 17369 if (!ndlp) { 17370 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17371 "1268 Failed to allocate ndlp for " 17372 "oxid:x%x SID:x%x\n", oxid, sid); 17373 return; 17374 } 17375 /* Put ndlp onto pport node list */ 17376 lpfc_enqueue_node(vport, ndlp); 17377 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17378 /* re-setup ndlp without removing from node list */ 17379 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17380 if (!ndlp) { 17381 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17382 "3275 Failed to active ndlp found " 17383 "for oxid:x%x SID:x%x\n", oxid, sid); 17384 return; 17385 } 17386 } 17387 17388 /* Allocate buffer for rsp iocb */ 17389 ctiocb = lpfc_sli_get_iocbq(phba); 17390 if (!ctiocb) 17391 return; 17392 17393 /* Extract the F_CTL field from FC_HDR */ 17394 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17395 17396 icmd = &ctiocb->iocb; 17397 icmd->un.xseq64.bdl.bdeSize = 0; 17398 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17399 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17400 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17401 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17402 17403 /* Fill in the rest of iocb fields */ 17404 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17405 icmd->ulpBdeCount = 0; 17406 icmd->ulpLe = 1; 17407 icmd->ulpClass = CLASS3; 17408 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17409 ctiocb->context1 = lpfc_nlp_get(ndlp); 17410 17411 ctiocb->vport = phba->pport; 17412 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17413 ctiocb->sli4_lxritag = NO_XRI; 17414 ctiocb->sli4_xritag = NO_XRI; 17415 17416 if (fctl & FC_FC_EX_CTX) 17417 /* Exchange responder sent the abort so we 17418 * own the oxid. 17419 */ 17420 xri = oxid; 17421 else 17422 xri = rxid; 17423 lxri = lpfc_sli4_xri_inrange(phba, xri); 17424 if (lxri != NO_XRI) 17425 lpfc_set_rrq_active(phba, ndlp, lxri, 17426 (xri == oxid) ? rxid : oxid, 0); 17427 /* For BA_ABTS from exchange responder, if the logical xri with 17428 * the oxid maps to the FCP XRI range, the port no longer has 17429 * that exchange context, send a BLS_RJT. Override the IOCB for 17430 * a BA_RJT. 17431 */ 17432 if ((fctl & FC_FC_EX_CTX) && 17433 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17434 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17435 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17436 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17437 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17438 } 17439 17440 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17441 * the driver no longer has that exchange, send a BLS_RJT. Override 17442 * the IOCB for a BA_RJT. 17443 */ 17444 if (aborted == false) { 17445 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17446 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17447 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17448 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17449 } 17450 17451 if (fctl & FC_FC_EX_CTX) { 17452 /* ABTS sent by responder to CT exchange, construction 17453 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17454 * field and RX_ID from ABTS for RX_ID field. 17455 */ 17456 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17457 } else { 17458 /* ABTS sent by initiator to CT exchange, construction 17459 * of BA_ACC will need to allocate a new XRI as for the 17460 * XRI_TAG field. 17461 */ 17462 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17463 } 17464 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17465 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17466 17467 /* Xmit CT abts response on exchange <xid> */ 17468 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17469 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17470 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17471 17472 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17473 if (rc == IOCB_ERROR) { 17474 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17475 "2925 Failed to issue CT ABTS RSP x%x on " 17476 "xri x%x, Data x%x\n", 17477 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17478 phba->link_state); 17479 lpfc_nlp_put(ndlp); 17480 ctiocb->context1 = NULL; 17481 lpfc_sli_release_iocbq(phba, ctiocb); 17482 } 17483 } 17484 17485 /** 17486 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17487 * @vport: Pointer to the vport on which this sequence was received 17488 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17489 * 17490 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17491 * receive sequence is only partially assembed by the driver, it shall abort 17492 * the partially assembled frames for the sequence. 
Otherwise, if the 17493 * unsolicited receive sequence has been completely assembled and passed to 17494 * the Upper Layer Protocol (ULP), it then marks the per-oxid status to show the 17495 * unsolicited sequence has been aborted. After that, it will issue a basic 17496 * accept to accept the abort. 17497 **/ 17498 static void 17499 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 17500 struct hbq_dmabuf *dmabuf) 17501 { 17502 struct lpfc_hba *phba = vport->phba; 17503 struct fc_frame_header fc_hdr; 17504 uint32_t fctl; 17505 bool aborted; 17506 17507 /* Make a copy of fc_hdr before the dmabuf being released */ 17508 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 17509 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 17510 17511 if (fctl & FC_FC_EX_CTX) { 17512 /* ABTS by responder to exchange, no cleanup needed */ 17513 aborted = true; 17514 } else { 17515 /* ABTS by initiator to exchange, need to do cleanup */ 17516 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 17517 if (aborted == false) 17518 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 17519 } 17520 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17521 17522 if (phba->nvmet_support) { 17523 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); 17524 return; 17525 } 17526 17527 /* Respond with BA_ACC or BA_RJT accordingly */ 17528 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 17529 } 17530 17531 /** 17532 * lpfc_seq_complete - Indicates if a sequence is complete 17533 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17534 * 17535 * This function checks the sequence, starting with the frame described by 17536 * @dmabuf, to see if all the frames associated with this sequence are present. 17537 * The frames associated with this sequence are linked to the @dmabuf using the 17538 * dbuf list. This function looks for three major things. 1) That the first frame 17539 * has a sequence count of zero. 2) That there is a frame with the last frame of sequence bit 17540 * set. 3) That there are no holes in the sequence count. The function will 17541 * return 1 when the sequence is complete, otherwise it will return 0. 17542 **/ 17543 static int 17544 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 17545 { 17546 struct fc_frame_header *hdr; 17547 struct lpfc_dmabuf *d_buf; 17548 struct hbq_dmabuf *seq_dmabuf; 17549 uint32_t fctl; 17550 int seq_count = 0; 17551 17552 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17553 /* make sure first frame of sequence has a sequence count of zero */ 17554 if (hdr->fh_seq_cnt != seq_count) 17555 return 0; 17556 fctl = (hdr->fh_f_ctl[0] << 16 | 17557 hdr->fh_f_ctl[1] << 8 | 17558 hdr->fh_f_ctl[2]); 17559 /* If last frame of sequence we can return success. */ 17560 if (fctl & FC_FC_END_SEQ) 17561 return 1; 17562 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 17563 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17564 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17565 /* If there is a hole in the sequence count then fail. */ 17566 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 17567 return 0; 17568 fctl = (hdr->fh_f_ctl[0] << 16 | 17569 hdr->fh_f_ctl[1] << 8 | 17570 hdr->fh_f_ctl[2]); 17571 /* If last frame of sequence we can return success.
*/ 17572 if (fctl & FC_FC_END_SEQ) 17573 return 1; 17574 } 17575 return 0; 17576 } 17577 17578 /** 17579 * lpfc_prep_seq - Prep sequence for ULP processing 17580 * @vport: Pointer to the vport on which this sequence was received 17581 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence 17582 * 17583 * This function takes a sequence, described by a list of frames, and creates 17584 * a list of iocbq structures to describe the sequence. This iocbq list will be 17585 * issued to the generic unsolicited sequence handler. This routine 17586 * returns a pointer to the first iocbq in the list. If the function is unable 17587 * to allocate an iocbq then it throws out the received frames that were not 17588 * able to be described and returns a pointer to the first iocbq. If unable to 17589 * allocate any iocbqs (including the first) this function will return NULL. 17590 **/ 17591 static struct lpfc_iocbq * 17592 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 17593 { 17594 struct hbq_dmabuf *hbq_buf; 17595 struct lpfc_dmabuf *d_buf, *n_buf; 17596 struct lpfc_iocbq *first_iocbq, *iocbq; 17597 struct fc_frame_header *fc_hdr; 17598 uint32_t sid; 17599 uint32_t len, tot_len; 17600 struct ulp_bde64 *pbde; 17601 17602 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17603 /* remove from receive buffer list */ 17604 list_del_init(&seq_dmabuf->hbuf.list); 17605 lpfc_update_rcv_time_stamp(vport); 17606 /* get the Remote Port's SID */ 17607 sid = sli4_sid_from_fc_hdr(fc_hdr); 17608 tot_len = 0; 17609 /* Get an iocbq struct to fill in. */ 17610 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 17611 if (first_iocbq) { 17612 /* Initialize the first IOCB. */ 17613 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 17614 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 17615 first_iocbq->vport = vport; 17616 17617 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 17618 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 17619 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 17620 first_iocbq->iocb.un.rcvels.parmRo = 17621 sli4_did_from_fc_hdr(fc_hdr); 17622 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 17623 } else 17624 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 17625 first_iocbq->iocb.ulpContext = NO_XRI; 17626 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17627 be16_to_cpu(fc_hdr->fh_ox_id); 17628 /* iocbq is prepped for internal consumption. Physical vpi.
*/ 17629 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17630 vport->phba->vpi_ids[vport->vpi]; 17631 /* put the first buffer into the first IOCBq */ 17632 tot_len = bf_get(lpfc_rcqe_length, 17633 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17634 17635 first_iocbq->context2 = &seq_dmabuf->dbuf; 17636 first_iocbq->context3 = NULL; 17637 first_iocbq->iocb.ulpBdeCount = 1; 17638 if (tot_len > LPFC_DATA_BUF_SIZE) 17639 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17640 LPFC_DATA_BUF_SIZE; 17641 else 17642 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17643 17644 first_iocbq->iocb.un.rcvels.remoteID = sid; 17645 17646 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17647 } 17648 iocbq = first_iocbq; 17649 /* 17650 * Each IOCBq can have two Buffers assigned, so go through the list 17651 * of buffers for this sequence and save two buffers in each IOCBq 17652 */ 17653 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17654 if (!iocbq) { 17655 lpfc_in_buf_free(vport->phba, d_buf); 17656 continue; 17657 } 17658 if (!iocbq->context3) { 17659 iocbq->context3 = d_buf; 17660 iocbq->iocb.ulpBdeCount++; 17661 /* We need to get the size out of the right CQE */ 17662 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17663 len = bf_get(lpfc_rcqe_length, 17664 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17665 pbde = (struct ulp_bde64 *) 17666 &iocbq->iocb.unsli3.sli3Words[4]; 17667 if (len > LPFC_DATA_BUF_SIZE) 17668 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17669 else 17670 pbde->tus.f.bdeSize = len; 17671 17672 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17673 tot_len += len; 17674 } else { 17675 iocbq = lpfc_sli_get_iocbq(vport->phba); 17676 if (!iocbq) { 17677 if (first_iocbq) { 17678 first_iocbq->iocb.ulpStatus = 17679 IOSTAT_FCP_RSP_ERROR; 17680 first_iocbq->iocb.un.ulpWord[4] = 17681 IOERR_NO_RESOURCES; 17682 } 17683 lpfc_in_buf_free(vport->phba, d_buf); 17684 continue; 17685 } 17686 /* We need to get the size out of the right CQE */ 17687 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17688 len = bf_get(lpfc_rcqe_length, 17689 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17690 iocbq->context2 = d_buf; 17691 iocbq->context3 = NULL; 17692 iocbq->iocb.ulpBdeCount = 1; 17693 if (len > LPFC_DATA_BUF_SIZE) 17694 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17695 LPFC_DATA_BUF_SIZE; 17696 else 17697 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17698 17699 tot_len += len; 17700 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17701 17702 iocbq->iocb.un.rcvels.remoteID = sid; 17703 list_add_tail(&iocbq->list, &first_iocbq->list); 17704 } 17705 } 17706 return first_iocbq; 17707 } 17708 17709 static void 17710 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17711 struct hbq_dmabuf *seq_dmabuf) 17712 { 17713 struct fc_frame_header *fc_hdr; 17714 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17715 struct lpfc_hba *phba = vport->phba; 17716 17717 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17718 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17719 if (!iocbq) { 17720 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17721 "2707 Ring %d handler: Failed to allocate " 17722 "iocb Rctl x%x Type x%x received\n", 17723 LPFC_ELS_RING, 17724 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17725 return; 17726 } 17727 if (!lpfc_complete_unsol_iocb(phba, 17728 phba->sli4_hba.els_wq->pring, 17729 iocbq, fc_hdr->fh_r_ctl, 17730 fc_hdr->fh_type)) 17731 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17732 "2540 Ring %d handler: unexpected Rctl " 17733 "x%x Type x%x received\n", 17734 LPFC_ELS_RING, 17735 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17736 17737 /* Free iocb created in lpfc_prep_seq */ 17738 list_for_each_entry_safe(curr_iocb, next_iocb, 17739 &iocbq->list, list) { 17740 list_del_init(&curr_iocb->list); 17741 lpfc_sli_release_iocbq(phba, curr_iocb); 17742 } 17743 lpfc_sli_release_iocbq(phba, iocbq); 17744 } 17745 17746 static void 17747 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17748 struct lpfc_iocbq *rspiocb) 17749 { 17750 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17751 17752 if (pcmd && pcmd->virt) 17753 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17754 kfree(pcmd); 17755 lpfc_sli_release_iocbq(phba, cmdiocb); 17756 lpfc_drain_txq(phba); 17757 } 17758 17759 static void 17760 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17761 struct hbq_dmabuf *dmabuf) 17762 { 17763 struct fc_frame_header *fc_hdr; 17764 struct lpfc_hba *phba = vport->phba; 17765 struct lpfc_iocbq *iocbq = NULL; 17766 union lpfc_wqe *wqe; 17767 struct lpfc_dmabuf *pcmd = NULL; 17768 uint32_t frame_len; 17769 int rc; 17770 unsigned long iflags; 17771 17772 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17773 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17774 17775 /* Send the received frame back */ 17776 iocbq = lpfc_sli_get_iocbq(phba); 17777 if (!iocbq) { 17778 /* Queue cq event and wakeup worker thread to process it */ 17779 spin_lock_irqsave(&phba->hbalock, iflags); 17780 list_add_tail(&dmabuf->cq_event.list, 17781 &phba->sli4_hba.sp_queue_event); 17782 phba->hba_flag |= HBA_SP_QUEUE_EVT; 17783 spin_unlock_irqrestore(&phba->hbalock, iflags); 17784 lpfc_worker_wake_up(phba); 17785 return; 17786 } 17787 17788 /* Allocate buffer for command payload */ 17789 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17790 if (pcmd) 17791 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17792 &pcmd->phys); 17793 if (!pcmd || !pcmd->virt) 17794 goto exit; 17795 17796 INIT_LIST_HEAD(&pcmd->list); 17797 17798 /* copyin the payload */ 17799 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17800 17801 /* fill in BDE's for command */ 17802 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17803 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17804 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17805 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17806 17807 iocbq->context2 = pcmd; 17808 iocbq->vport = vport; 17809 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17810 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17811 17812 /* 17813 * Setup rest of the iocb as though it were a WQE 17814 * Build the SEND_FRAME WQE 17815 */ 17816 wqe = (union lpfc_wqe *)&iocbq->iocb; 17817 17818 wqe->send_frame.frame_len = frame_len; 17819 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17820 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17821 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17822 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17823 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17824 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17825 17826 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17827 iocbq->iocb.ulpLe = 1; 17828 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17829 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17830 if (rc == IOCB_ERROR) 17831 goto exit; 17832 17833 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17834 return; 17835 17836 exit: 17837 lpfc_printf_log(phba, 
KERN_WARNING, LOG_SLI, 17838 "2023 Unable to process MDS loopback frame\n"); 17839 if (pcmd && pcmd->virt) 17840 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17841 kfree(pcmd); 17842 if (iocbq) 17843 lpfc_sli_release_iocbq(phba, iocbq); 17844 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17845 } 17846 17847 /** 17848 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 17849 * @phba: Pointer to HBA context object. 17850 * 17851 * This function is called with no lock held. This function processes all 17852 * the received buffers and gives it to upper layers when a received buffer 17853 * indicates that it is the final frame in the sequence. The interrupt 17854 * service routine processes received buffers at interrupt contexts. 17855 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17856 * appropriate receive function when the final frame in a sequence is received. 17857 **/ 17858 void 17859 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17860 struct hbq_dmabuf *dmabuf) 17861 { 17862 struct hbq_dmabuf *seq_dmabuf; 17863 struct fc_frame_header *fc_hdr; 17864 struct lpfc_vport *vport; 17865 uint32_t fcfi; 17866 uint32_t did; 17867 17868 /* Process each received buffer */ 17869 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17870 17871 if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS || 17872 fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { 17873 vport = phba->pport; 17874 /* Handle MDS Loopback frames */ 17875 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17876 return; 17877 } 17878 17879 /* check to see if this a valid type of frame */ 17880 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17881 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17882 return; 17883 } 17884 17885 if ((bf_get(lpfc_cqe_code, 17886 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17887 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17888 &dmabuf->cq_event.cqe.rcqe_cmpl); 17889 else 17890 fcfi = bf_get(lpfc_rcqe_fcf_id, 17891 &dmabuf->cq_event.cqe.rcqe_cmpl); 17892 17893 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 17894 vport = phba->pport; 17895 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 17896 "2023 MDS Loopback %d bytes\n", 17897 bf_get(lpfc_rcqe_length, 17898 &dmabuf->cq_event.cqe.rcqe_cmpl)); 17899 /* Handle MDS Loopback frames */ 17900 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17901 return; 17902 } 17903 17904 /* d_id this frame is directed to */ 17905 did = sli4_did_from_fc_hdr(fc_hdr); 17906 17907 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17908 if (!vport) { 17909 /* throw out the frame */ 17910 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17911 return; 17912 } 17913 17914 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17915 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17916 (did != Fabric_DID)) { 17917 /* 17918 * Throw out the frame if we are not pt2pt. 17919 * The pt2pt protocol allows for discovery frames 17920 * to be received without a registered VPI. 
17921 */ 17922 if (!(vport->fc_flag & FC_PT2PT) || 17923 (phba->link_state == LPFC_HBA_READY)) { 17924 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17925 return; 17926 } 17927 } 17928 17929 /* Handle the basic abort sequence (BA_ABTS) event */ 17930 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17931 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17932 return; 17933 } 17934 17935 /* Link this frame */ 17936 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17937 if (!seq_dmabuf) { 17938 /* unable to add frame to vport - throw it out */ 17939 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17940 return; 17941 } 17942 /* If not last frame in sequence continue processing frames. */ 17943 if (!lpfc_seq_complete(seq_dmabuf)) 17944 return; 17945 17946 /* Send the complete sequence to the upper layer protocol */ 17947 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17948 } 17949 17950 /** 17951 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17952 * @phba: pointer to lpfc hba data structure. 17953 * 17954 * This routine is invoked to post rpi header templates to the 17955 * HBA consistent with the SLI-4 interface spec. This routine 17956 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17957 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17958 * 17959 * This routine does not require any locks. It's usage is expected 17960 * to be driver load or reset recovery when the driver is 17961 * sequential. 17962 * 17963 * Return codes 17964 * 0 - successful 17965 * -EIO - The mailbox failed to complete successfully. 17966 * When this error occurs, the driver is not guaranteed 17967 * to have any rpi regions posted to the device and 17968 * must either attempt to repost the regions or take a 17969 * fatal error. 17970 **/ 17971 int 17972 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17973 { 17974 struct lpfc_rpi_hdr *rpi_page; 17975 uint32_t rc = 0; 17976 uint16_t lrpi = 0; 17977 17978 /* SLI4 ports that support extents do not require RPI headers. */ 17979 if (!phba->sli4_hba.rpi_hdrs_in_use) 17980 goto exit; 17981 if (phba->sli4_hba.extents_in_use) 17982 return -EIO; 17983 17984 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17985 /* 17986 * Assign the rpi headers a physical rpi only if the driver 17987 * has not initialized those resources. A port reset only 17988 * needs the headers posted. 17989 */ 17990 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 17991 LPFC_RPI_RSRC_RDY) 17992 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17993 17994 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 17995 if (rc != MBX_SUCCESS) { 17996 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17997 "2008 Error %d posting all rpi " 17998 "headers\n", rc); 17999 rc = -EIO; 18000 break; 18001 } 18002 } 18003 18004 exit: 18005 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 18006 LPFC_RPI_RSRC_RDY); 18007 return rc; 18008 } 18009 18010 /** 18011 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 18012 * @phba: pointer to lpfc hba data structure. 18013 * @rpi_page: pointer to the rpi memory region. 18014 * 18015 * This routine is invoked to post a single rpi header to the 18016 * HBA consistent with the SLI-4 interface spec. This memory region 18017 * maps up to 64 rpi context regions. 18018 * 18019 * Return codes 18020 * 0 - successful 18021 * -ENOMEM - No available memory 18022 * -EIO - The mailbox failed to complete successfully. 
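 * The POST_HDR_TEMPLATE mailbox command is issued in polled mode and its completion status is taken from the SLI4 config sub-header; on success the driver's next_rpi value is advanced for subsequent postings.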
18023 **/ 18024 int 18025 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 18026 { 18027 LPFC_MBOXQ_t *mboxq; 18028 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 18029 uint32_t rc = 0; 18030 uint32_t shdr_status, shdr_add_status; 18031 union lpfc_sli4_cfg_shdr *shdr; 18032 18033 /* SLI4 ports that support extents do not require RPI headers. */ 18034 if (!phba->sli4_hba.rpi_hdrs_in_use) 18035 return rc; 18036 if (phba->sli4_hba.extents_in_use) 18037 return -EIO; 18038 18039 /* The port is notified of the header region via a mailbox command. */ 18040 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18041 if (!mboxq) { 18042 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18043 "2001 Unable to allocate memory for issuing " 18044 "SLI_CONFIG_SPECIAL mailbox command\n"); 18045 return -ENOMEM; 18046 } 18047 18048 /* Post all rpi memory regions to the port. */ 18049 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 18050 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18051 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 18052 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 18053 sizeof(struct lpfc_sli4_cfg_mhdr), 18054 LPFC_SLI4_MBX_EMBED); 18055 18056 18057 /* Post the physical rpi to the port for this rpi header. */ 18058 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 18059 rpi_page->start_rpi); 18060 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 18061 hdr_tmpl, rpi_page->page_count); 18062 18063 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 18064 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 18065 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 18066 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 18067 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18068 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18069 if (rc != MBX_TIMEOUT) 18070 mempool_free(mboxq, phba->mbox_mem_pool); 18071 if (shdr_status || shdr_add_status || rc) { 18072 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18073 "2514 POST_RPI_HDR mailbox failed with " 18074 "status x%x add_status x%x, mbx status x%x\n", 18075 shdr_status, shdr_add_status, rc); 18076 rc = -ENXIO; 18077 } else { 18078 /* 18079 * The next_rpi stores the next logical module-64 rpi value used 18080 * to post physical rpis in subsequent rpi postings. 18081 */ 18082 spin_lock_irq(&phba->hbalock); 18083 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 18084 spin_unlock_irq(&phba->hbalock); 18085 } 18086 return rc; 18087 } 18088 18089 /** 18090 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 18091 * @phba: pointer to lpfc hba data structure. 18092 * 18093 * This routine is invoked to post rpi header templates to the 18094 * HBA consistent with the SLI-4 interface spec. This routine 18095 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 18096 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 18097 * 18098 * Returns 18099 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 18100 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 18101 **/ 18102 int 18103 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 18104 { 18105 unsigned long rpi; 18106 uint16_t max_rpi, rpi_limit; 18107 uint16_t rpi_remaining, lrpi = 0; 18108 struct lpfc_rpi_hdr *rpi_hdr; 18109 unsigned long iflag; 18110 18111 /* 18112 * Fetch the next logical rpi. Because this index is logical, 18113 * the driver starts at 0 each time. 
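 * The bitmap search, bit set, and rpi accounting below are all performed while holding the hbalock.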
18114 */ 18115 spin_lock_irqsave(&phba->hbalock, iflag); 18116 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 18117 rpi_limit = phba->sli4_hba.next_rpi; 18118 18119 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 18120 if (rpi >= rpi_limit) 18121 rpi = LPFC_RPI_ALLOC_ERROR; 18122 else { 18123 set_bit(rpi, phba->sli4_hba.rpi_bmask); 18124 phba->sli4_hba.max_cfg_param.rpi_used++; 18125 phba->sli4_hba.rpi_count++; 18126 } 18127 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 18128 "0001 rpi:%x max:%x lim:%x\n", 18129 (int) rpi, max_rpi, rpi_limit); 18130 18131 /* 18132 * Don't try to allocate more rpi header regions if the device limit 18133 * has been exhausted. 18134 */ 18135 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 18136 (phba->sli4_hba.rpi_count >= max_rpi)) { 18137 spin_unlock_irqrestore(&phba->hbalock, iflag); 18138 return rpi; 18139 } 18140 18141 /* 18142 * RPI header postings are not required for SLI4 ports capable of 18143 * extents. 18144 */ 18145 if (!phba->sli4_hba.rpi_hdrs_in_use) { 18146 spin_unlock_irqrestore(&phba->hbalock, iflag); 18147 return rpi; 18148 } 18149 18150 /* 18151 * If the driver is running low on rpi resources, allocate another 18152 * page now. Note that the next_rpi value is used because 18153 * it represents how many are actually in use whereas max_rpi notes 18154 * how many are supported max by the device. 18155 */ 18156 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 18157 spin_unlock_irqrestore(&phba->hbalock, iflag); 18158 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 18159 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 18160 if (!rpi_hdr) { 18161 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18162 "2002 Error Could not grow rpi " 18163 "count\n"); 18164 } else { 18165 lrpi = rpi_hdr->start_rpi; 18166 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 18167 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 18168 } 18169 } 18170 18171 return rpi; 18172 } 18173 18174 /** 18175 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18176 * @phba: pointer to lpfc hba data structure. 18177 * 18178 * This routine is invoked to release an rpi to the pool of 18179 * available rpis maintained by the driver. 18180 **/ 18181 static void 18182 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18183 { 18184 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 18185 phba->sli4_hba.rpi_count--; 18186 phba->sli4_hba.max_cfg_param.rpi_used--; 18187 } else { 18188 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 18189 "2016 rpi %x not inuse\n", 18190 rpi); 18191 } 18192 } 18193 18194 /** 18195 * lpfc_sli4_free_rpi - Release an rpi for reuse. 18196 * @phba: pointer to lpfc hba data structure. 18197 * 18198 * This routine is invoked to release an rpi to the pool of 18199 * available rpis maintained by the driver. 18200 **/ 18201 void 18202 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 18203 { 18204 spin_lock_irq(&phba->hbalock); 18205 __lpfc_sli4_free_rpi(phba, rpi); 18206 spin_unlock_irq(&phba->hbalock); 18207 } 18208 18209 /** 18210 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 18211 * @phba: pointer to lpfc hba data structure. 18212 * 18213 * This routine is invoked to remove the memory region that 18214 * provided rpi via a bitmask. 
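 * Both the rpi bitmask and the rpi_ids array are freed and the lpfc_rpi_rsrc_rdy flag in sli4_flags is cleared.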
18215 **/ 18216 void 18217 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 18218 { 18219 kfree(phba->sli4_hba.rpi_bmask); 18220 kfree(phba->sli4_hba.rpi_ids); 18221 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 18222 } 18223 18224 /** 18225 * lpfc_sli4_resume_rpi - Resume an rpi on a remote node 18226 * @ndlp: pointer to the remote node whose rpi is to be resumed. 18227 * 18228 * This routine is invoked to issue a RESUME_RPI mailbox command to the port 18229 * for the rpi of @ndlp, using @cmpl and @arg as the optional completion handler and its context. 18230 **/ 18231 int 18232 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 18233 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 18234 { 18235 LPFC_MBOXQ_t *mboxq; 18236 struct lpfc_hba *phba = ndlp->phba; 18237 int rc; 18238 18239 /* The port is notified of the rpi resume via a mailbox command. */ 18240 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18241 if (!mboxq) 18242 return -ENOMEM; 18243 18244 /* Construct the RESUME_RPI mailbox command. */ 18245 lpfc_resume_rpi(mboxq, ndlp); 18246 if (cmpl) { 18247 mboxq->mbox_cmpl = cmpl; 18248 mboxq->ctx_buf = arg; 18249 mboxq->ctx_ndlp = ndlp; 18250 } else 18251 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 18252 mboxq->vport = ndlp->vport; 18253 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18254 if (rc == MBX_NOT_FINISHED) { 18255 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18256 "2010 Resume RPI Mailbox failed " 18257 "status %d, mbxStatus x%x\n", rc, 18258 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18259 mempool_free(mboxq, phba->mbox_mem_pool); 18260 return -EIO; 18261 } 18262 return 0; 18263 } 18264 18265 /** 18266 * lpfc_sli4_init_vpi - Initialize a vpi with the port 18267 * @vport: Pointer to the vport for which the vpi is being initialized 18268 * 18269 * This routine is invoked to activate a vpi with the port. 18270 * 18271 * Returns: 18272 * 0 success 18273 * -Evalue otherwise 18274 **/ 18275 int 18276 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 18277 { 18278 LPFC_MBOXQ_t *mboxq; 18279 int rc = 0; 18280 int retval = MBX_SUCCESS; 18281 uint32_t mbox_tmo; 18282 struct lpfc_hba *phba = vport->phba; 18283 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18284 if (!mboxq) 18285 return -ENOMEM; 18286 lpfc_init_vpi(phba, mboxq, vport->vpi); 18287 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18288 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18289 if (rc != MBX_SUCCESS) { 18290 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18291 "2022 INIT VPI Mailbox failed " 18292 "status %d, mbxStatus x%x\n", rc, 18293 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18294 retval = -EIO; 18295 } 18296 if (rc != MBX_TIMEOUT) 18297 mempool_free(mboxq, vport->phba->mbox_mem_pool); 18298 18299 return retval; 18300 } 18301 18302 /** 18303 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 18304 * @phba: pointer to lpfc hba data structure. 18305 * @mboxq: Pointer to mailbox object. 18306 * 18307 * This routine is the completion handler for the nonembedded ADD_FCF_RECORD 18308 * mailbox command. It checks the completion status in the mailbox 18309 * subheader and then frees the mailbox resources. 18310 **/ 18311 static void 18312 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 18313 { 18314 void *virt_addr; 18315 union lpfc_sli4_cfg_shdr *shdr; 18316 uint32_t shdr_status, shdr_add_status; 18317 18318 virt_addr = mboxq->sge_array->addr[0]; 18319 /* The IOCTL status is embedded in the mailbox subheader.
*/ 18320 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18321 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18322 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18323 18324 if ((shdr_status || shdr_add_status) && 18325 (shdr_status != STATUS_FCF_IN_USE)) 18326 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18327 "2558 ADD_FCF_RECORD mailbox failed with " 18328 "status x%x add_status x%x\n", 18329 shdr_status, shdr_add_status); 18330 18331 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18332 } 18333 18334 /** 18335 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18336 * @phba: pointer to lpfc hba data structure. 18337 * @fcf_record: pointer to the initialized fcf record to add. 18338 * 18339 * This routine is invoked to manually add a single FCF record. The caller 18340 * must pass a completely initialized FCF_Record. This routine takes 18341 * care of the nonembedded mailbox operations. 18342 **/ 18343 int 18344 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18345 { 18346 int rc = 0; 18347 LPFC_MBOXQ_t *mboxq; 18348 uint8_t *bytep; 18349 void *virt_addr; 18350 struct lpfc_mbx_sge sge; 18351 uint32_t alloc_len, req_len; 18352 uint32_t fcfindex; 18353 18354 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18355 if (!mboxq) { 18356 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18357 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18358 return -ENOMEM; 18359 } 18360 18361 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18362 sizeof(uint32_t); 18363 18364 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18365 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18366 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18367 req_len, LPFC_SLI4_MBX_NEMBED); 18368 if (alloc_len < req_len) { 18369 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18370 "2523 Allocated DMA memory size (x%x) is " 18371 "less than the requested DMA memory " 18372 "size (x%x)\n", alloc_len, req_len); 18373 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18374 return -ENOMEM; 18375 } 18376 18377 /* 18378 * Get the first SGE entry from the non-embedded DMA memory. This 18379 * routine only uses a single SGE. 18380 */ 18381 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18382 virt_addr = mboxq->sge_array->addr[0]; 18383 /* 18384 * Configure the FCF record for FCFI 0. This is the driver's 18385 * hardcoded default and gets used in nonFIP mode. 18386 */ 18387 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18388 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18389 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18390 18391 /* 18392 * Copy the fcf_index and the FCF Record Data. The data starts after 18393 * the FCoE header plus word10. The data copy needs to be endian 18394 * correct. 18395 */ 18396 bytep += sizeof(uint32_t); 18397 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18398 mboxq->vport = phba->pport; 18399 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18400 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18401 if (rc == MBX_NOT_FINISHED) { 18402 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18403 "2515 ADD_FCF_RECORD mailbox failed with " 18404 "status 0x%x\n", rc); 18405 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18406 rc = -EIO; 18407 } else 18408 rc = 0; 18409 18410 return rc; 18411 } 18412 18413 /** 18414 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18415 * @phba: pointer to lpfc hba data structure. 
18416 * @fcf_record: pointer to the fcf record to write the default data. 18417 * @fcf_index: FCF table entry index. 18418 * 18419 * This routine is invoked to build the driver's default FCF record. The 18420 * values used are hardcoded. This routine handles memory initialization. 18421 * 18422 **/ 18423 void 18424 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 18425 struct fcf_record *fcf_record, 18426 uint16_t fcf_index) 18427 { 18428 memset(fcf_record, 0, sizeof(struct fcf_record)); 18429 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 18430 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 18431 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 18432 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 18433 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 18434 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 18435 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 18436 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 18437 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 18438 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 18439 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 18440 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 18441 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 18442 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 18443 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 18444 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 18445 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 18446 /* Set the VLAN bit map */ 18447 if (phba->valid_vlan) { 18448 fcf_record->vlan_bitmap[phba->vlan_id / 8] 18449 = 1 << (phba->vlan_id % 8); 18450 } 18451 } 18452 18453 /** 18454 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 18455 * @phba: pointer to lpfc hba data structure. 18456 * @fcf_index: FCF table entry offset. 18457 * 18458 * This routine is invoked to scan the entire FCF table by reading FCF 18459 * records and processing them one at a time starting from the @fcf_index 18460 * for initial FCF discovery or fast FCF failover rediscovery. 18461 * 18462 * Return 0 if the mailbox command is submitted successfully, non-zero 18463 * otherwise.
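 * The READ_FCF_RECORD mailbox command is issued asynchronously with lpfc_mbx_cmpl_fcf_scan_read_fcf_rec as its completion handler, and the FCF_TS_INPROG flag is set while the table scan is in progress.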
18464 **/ 18465 int 18466 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18467 { 18468 int rc = 0, error; 18469 LPFC_MBOXQ_t *mboxq; 18470 18471 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18472 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18473 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18474 if (!mboxq) { 18475 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18476 "2000 Failed to allocate mbox for " 18477 "READ_FCF cmd\n"); 18478 error = -ENOMEM; 18479 goto fail_fcf_scan; 18480 } 18481 /* Construct the read FCF record mailbox command */ 18482 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18483 if (rc) { 18484 error = -EINVAL; 18485 goto fail_fcf_scan; 18486 } 18487 /* Issue the mailbox command asynchronously */ 18488 mboxq->vport = phba->pport; 18489 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18490 18491 spin_lock_irq(&phba->hbalock); 18492 phba->hba_flag |= FCF_TS_INPROG; 18493 spin_unlock_irq(&phba->hbalock); 18494 18495 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18496 if (rc == MBX_NOT_FINISHED) 18497 error = -EIO; 18498 else { 18499 /* Reset eligible FCF count for new scan */ 18500 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18501 phba->fcf.eligible_fcf_cnt = 0; 18502 error = 0; 18503 } 18504 fail_fcf_scan: 18505 if (error) { 18506 if (mboxq) 18507 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18508 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18509 spin_lock_irq(&phba->hbalock); 18510 phba->hba_flag &= ~FCF_TS_INPROG; 18511 spin_unlock_irq(&phba->hbalock); 18512 } 18513 return error; 18514 } 18515 18516 /** 18517 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18518 * @phba: pointer to lpfc hba data structure. 18519 * @fcf_index: FCF table entry offset. 18520 * 18521 * This routine is invoked to read an FCF record indicated by @fcf_index 18522 * and to use it for FLOGI roundrobin FCF failover. 18523 * 18524 * Return 0 if the mailbox command is submitted successfully, none 0 18525 * otherwise. 18526 **/ 18527 int 18528 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18529 { 18530 int rc = 0, error; 18531 LPFC_MBOXQ_t *mboxq; 18532 18533 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18534 if (!mboxq) { 18535 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18536 "2763 Failed to allocate mbox for " 18537 "READ_FCF cmd\n"); 18538 error = -ENOMEM; 18539 goto fail_fcf_read; 18540 } 18541 /* Construct the read FCF record mailbox command */ 18542 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18543 if (rc) { 18544 error = -EINVAL; 18545 goto fail_fcf_read; 18546 } 18547 /* Issue the mailbox command asynchronously */ 18548 mboxq->vport = phba->pport; 18549 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18550 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18551 if (rc == MBX_NOT_FINISHED) 18552 error = -EIO; 18553 else 18554 error = 0; 18555 18556 fail_fcf_read: 18557 if (error && mboxq) 18558 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18559 return error; 18560 } 18561 18562 /** 18563 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18564 * @phba: pointer to lpfc hba data structure. 18565 * @fcf_index: FCF table entry offset. 18566 * 18567 * This routine is invoked to read an FCF record indicated by @fcf_index to 18568 * determine whether it's eligible for FLOGI roundrobin failover list. 18569 * 18570 * Return 0 if the mailbox command is submitted successfully, none 0 18571 * otherwise. 
18572 **/
18573 int
18574 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
18575 {
18576 int rc = 0, error;
18577 LPFC_MBOXQ_t *mboxq;
18578
18579 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18580 if (!mboxq) {
18581 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
18582 "2758 Failed to allocate mbox for "
18583 "READ_FCF cmd\n");
18584 error = -ENOMEM;
18585 goto fail_fcf_read;
18586 }
18587 /* Construct the read FCF record mailbox command */
18588 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
18589 if (rc) {
18590 error = -EINVAL;
18591 goto fail_fcf_read;
18592 }
18593 /* Issue the mailbox command asynchronously */
18594 mboxq->vport = phba->pport;
18595 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
18596 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
18597 if (rc == MBX_NOT_FINISHED)
18598 error = -EIO;
18599 else
18600 error = 0;
18601
18602 fail_fcf_read:
18603 if (error && mboxq)
18604 lpfc_sli4_mbox_cmd_free(phba, mboxq);
18605 return error;
18606 }
18607
18608 /**
18609 * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask with the next priority
18610 * @phba: pointer to the lpfc_hba struct for this port.
18611 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
18612 * routine when the rr_bmask is empty. The FCF indices are put into the
18613 * rr_bmask based on their priority level, starting from the highest priority
18614 * down to the lowest. The most likely FCF candidate will be in the highest
18615 * priority group. When this routine is called it searches the fcf_pri list for
18616 * the next lowest priority group and repopulates the rr_bmask with only those
18617 * fcf_indexes.
18618 * Returns:
18619 * 1 = success, 0 = failure
18620 **/
18621 static int
18622 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
18623 {
18624 uint16_t next_fcf_pri;
18625 uint16_t last_index;
18626 struct lpfc_fcf_pri *fcf_pri;
18627 int rc;
18628 int ret = 0;
18629
18630 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
18631 LPFC_SLI4_FCF_TBL_INDX_MAX);
18632 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18633 "3060 Last IDX %d\n", last_index);
18634
18635 /* Verify the priority list has 2 or more entries */
18636 spin_lock_irq(&phba->hbalock);
18637 if (list_empty(&phba->fcf.fcf_pri_list) ||
18638 list_is_singular(&phba->fcf.fcf_pri_list)) {
18639 spin_unlock_irq(&phba->hbalock);
18640 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18641 "3061 Last IDX %d\n", last_index);
18642 return 0; /* Empty rr list */
18643 }
18644 spin_unlock_irq(&phba->hbalock);
18645
18646 next_fcf_pri = 0;
18647 /*
18648 * Clear the rr_bmask and set all of the bits that are at this
18649 * priority.
18650 */
18651 memset(phba->fcf.fcf_rr_bmask, 0,
18652 sizeof(*phba->fcf.fcf_rr_bmask));
18653 spin_lock_irq(&phba->hbalock);
18654 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
18655 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
18656 continue;
18657 /*
18658 * The first priority whose FLOGI has not failed
18659 * will be the highest.
18660 */
18661 if (!next_fcf_pri)
18662 next_fcf_pri = fcf_pri->fcf_rec.priority;
18663 spin_unlock_irq(&phba->hbalock);
18664 if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
18665 rc = lpfc_sli4_fcf_rr_index_set(phba,
18666 fcf_pri->fcf_rec.fcf_index);
18667 if (rc)
18668 return 0;
18669 }
18670 spin_lock_irq(&phba->hbalock);
18671 }
18672 /*
18673 * If next_fcf_pri was not set above and the list is not empty, then
18674 * FLOGI has failed on all of them, so clear the FLOGI-failed flags
18675 * and start at the beginning.
18676 */ 18677 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18678 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18679 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18680 /* 18681 * the 1st priority that has not FLOGI failed 18682 * will be the highest. 18683 */ 18684 if (!next_fcf_pri) 18685 next_fcf_pri = fcf_pri->fcf_rec.priority; 18686 spin_unlock_irq(&phba->hbalock); 18687 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18688 rc = lpfc_sli4_fcf_rr_index_set(phba, 18689 fcf_pri->fcf_rec.fcf_index); 18690 if (rc) 18691 return 0; 18692 } 18693 spin_lock_irq(&phba->hbalock); 18694 } 18695 } else 18696 ret = 1; 18697 spin_unlock_irq(&phba->hbalock); 18698 18699 return ret; 18700 } 18701 /** 18702 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18703 * @phba: pointer to lpfc hba data structure. 18704 * 18705 * This routine is to get the next eligible FCF record index in a round 18706 * robin fashion. If the next eligible FCF record index equals to the 18707 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18708 * shall be returned, otherwise, the next eligible FCF record's index 18709 * shall be returned. 18710 **/ 18711 uint16_t 18712 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18713 { 18714 uint16_t next_fcf_index; 18715 18716 initial_priority: 18717 /* Search start from next bit of currently registered FCF index */ 18718 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18719 18720 next_priority: 18721 /* Determine the next fcf index to check */ 18722 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18723 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18724 LPFC_SLI4_FCF_TBL_INDX_MAX, 18725 next_fcf_index); 18726 18727 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18728 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18729 /* 18730 * If we have wrapped then we need to clear the bits that 18731 * have been tested so that we can detect when we should 18732 * change the priority level. 18733 */ 18734 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18735 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18736 } 18737 18738 18739 /* Check roundrobin failover list empty condition */ 18740 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18741 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18742 /* 18743 * If next fcf index is not found check if there are lower 18744 * Priority level fcf's in the fcf_priority list. 18745 * Set up the rr_bmask with all of the avaiable fcf bits 18746 * at that level and continue the selection process. 18747 */ 18748 if (lpfc_check_next_fcf_pri_level(phba)) 18749 goto initial_priority; 18750 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18751 "2844 No roundrobin failover FCF available\n"); 18752 18753 return LPFC_FCOE_FCF_NEXT_NONE; 18754 } 18755 18756 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18757 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18758 LPFC_FCF_FLOGI_FAILED) { 18759 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18760 return LPFC_FCOE_FCF_NEXT_NONE; 18761 18762 goto next_priority; 18763 } 18764 18765 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18766 "2845 Get next roundrobin failover FCF (x%x)\n", 18767 next_fcf_index); 18768 18769 return next_fcf_index; 18770 } 18771 18772 /** 18773 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18774 * @phba: pointer to lpfc hba data structure. 
18775 * 18776 * This routine sets the FCF record index in to the eligible bmask for 18777 * roundrobin failover search. It checks to make sure that the index 18778 * does not go beyond the range of the driver allocated bmask dimension 18779 * before setting the bit. 18780 * 18781 * Returns 0 if the index bit successfully set, otherwise, it returns 18782 * -EINVAL. 18783 **/ 18784 int 18785 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18786 { 18787 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18788 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18789 "2610 FCF (x%x) reached driver's book " 18790 "keeping dimension:x%x\n", 18791 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18792 return -EINVAL; 18793 } 18794 /* Set the eligible FCF record index bmask */ 18795 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18796 18797 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18798 "2790 Set FCF (x%x) to roundrobin FCF failover " 18799 "bmask\n", fcf_index); 18800 18801 return 0; 18802 } 18803 18804 /** 18805 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18806 * @phba: pointer to lpfc hba data structure. 18807 * 18808 * This routine clears the FCF record index from the eligible bmask for 18809 * roundrobin failover search. It checks to make sure that the index 18810 * does not go beyond the range of the driver allocated bmask dimension 18811 * before clearing the bit. 18812 **/ 18813 void 18814 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18815 { 18816 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18817 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18818 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18819 "2762 FCF (x%x) reached driver's book " 18820 "keeping dimension:x%x\n", 18821 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18822 return; 18823 } 18824 /* Clear the eligible FCF record index bmask */ 18825 spin_lock_irq(&phba->hbalock); 18826 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18827 list) { 18828 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18829 list_del_init(&fcf_pri->list); 18830 break; 18831 } 18832 } 18833 spin_unlock_irq(&phba->hbalock); 18834 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18835 18836 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18837 "2791 Clear FCF (x%x) from roundrobin failover " 18838 "bmask\n", fcf_index); 18839 } 18840 18841 /** 18842 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18843 * @phba: pointer to lpfc hba data structure. 18844 * 18845 * This routine is the completion routine for the rediscover FCF table mailbox 18846 * command. If the mailbox command returned failure, it will try to stop the 18847 * FCF rediscover wait timer. 
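 * @mbox: pointer to the completed REDISCOVER_FCF_TABLE mailbox command;
 * it is freed back to the mailbox mem_pool before this routine returns.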
18848 **/ 18849 static void 18850 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18851 { 18852 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18853 uint32_t shdr_status, shdr_add_status; 18854 18855 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18856 18857 shdr_status = bf_get(lpfc_mbox_hdr_status, 18858 &redisc_fcf->header.cfg_shdr.response); 18859 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18860 &redisc_fcf->header.cfg_shdr.response); 18861 if (shdr_status || shdr_add_status) { 18862 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18863 "2746 Requesting for FCF rediscovery failed " 18864 "status x%x add_status x%x\n", 18865 shdr_status, shdr_add_status); 18866 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18867 spin_lock_irq(&phba->hbalock); 18868 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18869 spin_unlock_irq(&phba->hbalock); 18870 /* 18871 * CVL event triggered FCF rediscover request failed, 18872 * last resort to re-try current registered FCF entry. 18873 */ 18874 lpfc_retry_pport_discovery(phba); 18875 } else { 18876 spin_lock_irq(&phba->hbalock); 18877 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18878 spin_unlock_irq(&phba->hbalock); 18879 /* 18880 * DEAD FCF event triggered FCF rediscover request 18881 * failed, last resort to fail over as a link down 18882 * to FCF registration. 18883 */ 18884 lpfc_sli4_fcf_dead_failthrough(phba); 18885 } 18886 } else { 18887 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18888 "2775 Start FCF rediscover quiescent timer\n"); 18889 /* 18890 * Start FCF rediscovery wait timer for pending FCF 18891 * before rescan FCF record table. 18892 */ 18893 lpfc_fcf_redisc_wait_start_timer(phba); 18894 } 18895 18896 mempool_free(mbox, phba->mbox_mem_pool); 18897 } 18898 18899 /** 18900 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18901 * @phba: pointer to lpfc hba data structure. 18902 * 18903 * This routine is invoked to request for rediscovery of the entire FCF table 18904 * by the port. 18905 **/ 18906 int 18907 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18908 { 18909 LPFC_MBOXQ_t *mbox; 18910 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18911 int rc, length; 18912 18913 /* Cancel retry delay timers to all vports before FCF rediscover */ 18914 lpfc_cancel_all_vport_retry_delay_timer(phba); 18915 18916 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18917 if (!mbox) { 18918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18919 "2745 Failed to allocate mbox for " 18920 "requesting FCF rediscover.\n"); 18921 return -ENOMEM; 18922 } 18923 18924 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18925 sizeof(struct lpfc_sli4_cfg_mhdr)); 18926 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18927 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18928 length, LPFC_SLI4_MBX_EMBED); 18929 18930 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18931 /* Set count to 0 for invalidating the entire FCF database */ 18932 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18933 18934 /* Issue the mailbox command asynchronously */ 18935 mbox->vport = phba->pport; 18936 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18937 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18938 18939 if (rc == MBX_NOT_FINISHED) { 18940 mempool_free(mbox, phba->mbox_mem_pool); 18941 return -EIO; 18942 } 18943 return 0; 18944 } 18945 18946 /** 18947 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18948 * @phba: pointer to lpfc hba data structure. 
18949 * 18950 * This function is the failover routine as a last resort to the FCF DEAD 18951 * event when driver failed to perform fast FCF failover. 18952 **/ 18953 void 18954 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18955 { 18956 uint32_t link_state; 18957 18958 /* 18959 * Last resort as FCF DEAD event failover will treat this as 18960 * a link down, but save the link state because we don't want 18961 * it to be changed to Link Down unless it is already down. 18962 */ 18963 link_state = phba->link_state; 18964 lpfc_linkdown(phba); 18965 phba->link_state = link_state; 18966 18967 /* Unregister FCF if no devices connected to it */ 18968 lpfc_unregister_unused_fcf(phba); 18969 } 18970 18971 /** 18972 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18973 * @phba: pointer to lpfc hba data structure. 18974 * @rgn23_data: pointer to configure region 23 data. 18975 * 18976 * This function gets SLI3 port configure region 23 data through memory dump 18977 * mailbox command. When it successfully retrieves data, the size of the data 18978 * will be returned, otherwise, 0 will be returned. 18979 **/ 18980 static uint32_t 18981 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18982 { 18983 LPFC_MBOXQ_t *pmb = NULL; 18984 MAILBOX_t *mb; 18985 uint32_t offset = 0; 18986 int rc; 18987 18988 if (!rgn23_data) 18989 return 0; 18990 18991 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18992 if (!pmb) { 18993 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18994 "2600 failed to allocate mailbox memory\n"); 18995 return 0; 18996 } 18997 mb = &pmb->u.mb; 18998 18999 do { 19000 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 19001 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 19002 19003 if (rc != MBX_SUCCESS) { 19004 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 19005 "2601 failed to read config " 19006 "region 23, rc 0x%x Status 0x%x\n", 19007 rc, mb->mbxStatus); 19008 mb->un.varDmp.word_cnt = 0; 19009 } 19010 /* 19011 * dump mem may return a zero when finished or we got a 19012 * mailbox error, either way we are done. 19013 */ 19014 if (mb->un.varDmp.word_cnt == 0) 19015 break; 19016 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 19017 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 19018 19019 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 19020 rgn23_data + offset, 19021 mb->un.varDmp.word_cnt); 19022 offset += mb->un.varDmp.word_cnt; 19023 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 19024 19025 mempool_free(pmb, phba->mbox_mem_pool); 19026 return offset; 19027 } 19028 19029 /** 19030 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 19031 * @phba: pointer to lpfc hba data structure. 19032 * @rgn23_data: pointer to configure region 23 data. 19033 * 19034 * This function gets SLI4 port configure region 23 data through memory dump 19035 * mailbox command. When it successfully retrieves data, the size of the data 19036 * will be returned, otherwise, 0 will be returned. 
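 *
 * Minimal caller sketch (illustrative; it mirrors lpfc_sli_read_link_ste()
 * below, which supplies a DMP_RGN23_SIZE byte buffer):
 *
 *	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	if (rgn23_data)
 *		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);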
19037 **/ 19038 static uint32_t 19039 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 19040 { 19041 LPFC_MBOXQ_t *mboxq = NULL; 19042 struct lpfc_dmabuf *mp = NULL; 19043 struct lpfc_mqe *mqe; 19044 uint32_t data_length = 0; 19045 int rc; 19046 19047 if (!rgn23_data) 19048 return 0; 19049 19050 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19051 if (!mboxq) { 19052 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19053 "3105 failed to allocate mailbox memory\n"); 19054 return 0; 19055 } 19056 19057 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 19058 goto out; 19059 mqe = &mboxq->u.mqe; 19060 mp = (struct lpfc_dmabuf *)mboxq->ctx_buf; 19061 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 19062 if (rc) 19063 goto out; 19064 data_length = mqe->un.mb_words[5]; 19065 if (data_length == 0) 19066 goto out; 19067 if (data_length > DMP_RGN23_SIZE) { 19068 data_length = 0; 19069 goto out; 19070 } 19071 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 19072 out: 19073 mempool_free(mboxq, phba->mbox_mem_pool); 19074 if (mp) { 19075 lpfc_mbuf_free(phba, mp->virt, mp->phys); 19076 kfree(mp); 19077 } 19078 return data_length; 19079 } 19080 19081 /** 19082 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 19083 * @phba: pointer to lpfc hba data structure. 19084 * 19085 * This function read region 23 and parse TLV for port status to 19086 * decide if the user disaled the port. If the TLV indicates the 19087 * port is disabled, the hba_flag is set accordingly. 19088 **/ 19089 void 19090 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 19091 { 19092 uint8_t *rgn23_data = NULL; 19093 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 19094 uint32_t offset = 0; 19095 19096 /* Get adapter Region 23 data */ 19097 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 19098 if (!rgn23_data) 19099 goto out; 19100 19101 if (phba->sli_rev < LPFC_SLI_REV4) 19102 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 19103 else { 19104 if_type = bf_get(lpfc_sli_intf_if_type, 19105 &phba->sli4_hba.sli_intf); 19106 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 19107 goto out; 19108 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 19109 } 19110 19111 if (!data_size) 19112 goto out; 19113 19114 /* Check the region signature first */ 19115 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 19116 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19117 "2619 Config region 23 has bad signature\n"); 19118 goto out; 19119 } 19120 offset += 4; 19121 19122 /* Check the data structure version */ 19123 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 19124 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 19125 "2620 Config region 23 has bad version\n"); 19126 goto out; 19127 } 19128 offset += 4; 19129 19130 /* Parse TLV entries in the region */ 19131 while (offset < data_size) { 19132 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 19133 break; 19134 /* 19135 * If the TLV is not driver specific TLV or driver id is 19136 * not linux driver id, skip the record. 19137 */ 19138 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 19139 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 19140 (rgn23_data[offset + 3] != 0)) { 19141 offset += rgn23_data[offset + 1] * 4 + 4; 19142 continue; 19143 } 19144 19145 /* Driver found a driver specific TLV in the config region */ 19146 sub_tlv_len = rgn23_data[offset + 1] * 4; 19147 offset += 4; 19148 tlv_offset = 0; 19149 19150 /* 19151 * Search for configured port state sub-TLV. 
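 * Each record is parsed as: byte 0 = type, byte 1 = length in words,
 * with a 4-byte header preceding the payload.  For a PORT_STE_TYPE
 * record, byte 2 being zero means the user disabled the link, and
 * LINK_DISABLED is set below.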
19152 */ 19153 while ((offset < data_size) && 19154 (tlv_offset < sub_tlv_len)) { 19155 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 19156 offset += 4; 19157 tlv_offset += 4; 19158 break; 19159 } 19160 if (rgn23_data[offset] != PORT_STE_TYPE) { 19161 offset += rgn23_data[offset + 1] * 4 + 4; 19162 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 19163 continue; 19164 } 19165 19166 /* This HBA contains PORT_STE configured */ 19167 if (!rgn23_data[offset + 2]) 19168 phba->hba_flag |= LINK_DISABLED; 19169 19170 goto out; 19171 } 19172 } 19173 19174 out: 19175 kfree(rgn23_data); 19176 return; 19177 } 19178 19179 /** 19180 * lpfc_wr_object - write an object to the firmware 19181 * @phba: HBA structure that indicates port to create a queue on. 19182 * @dmabuf_list: list of dmabufs to write to the port. 19183 * @size: the total byte value of the objects to write to the port. 19184 * @offset: the current offset to be used to start the transfer. 19185 * 19186 * This routine will create a wr_object mailbox command to send to the port. 19187 * the mailbox command will be constructed using the dma buffers described in 19188 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 19189 * BDEs that the imbedded mailbox can support. The @offset variable will be 19190 * used to indicate the starting offset of the transfer and will also return 19191 * the offset after the write object mailbox has completed. @size is used to 19192 * determine the end of the object and whether the eof bit should be set. 19193 * 19194 * Return 0 is successful and offset will contain the the new offset to use 19195 * for the next write. 19196 * Return negative value for error cases. 19197 **/ 19198 int 19199 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 19200 uint32_t size, uint32_t *offset) 19201 { 19202 struct lpfc_mbx_wr_object *wr_object; 19203 LPFC_MBOXQ_t *mbox; 19204 int rc = 0, i = 0; 19205 uint32_t shdr_status, shdr_add_status, shdr_change_status; 19206 uint32_t mbox_tmo; 19207 struct lpfc_dmabuf *dmabuf; 19208 uint32_t written = 0; 19209 bool check_change_status = false; 19210 19211 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 19212 if (!mbox) 19213 return -ENOMEM; 19214 19215 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 19216 LPFC_MBOX_OPCODE_WRITE_OBJECT, 19217 sizeof(struct lpfc_mbx_wr_object) - 19218 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 19219 19220 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 19221 wr_object->u.request.write_offset = *offset; 19222 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 19223 wr_object->u.request.object_name[0] = 19224 cpu_to_le32(wr_object->u.request.object_name[0]); 19225 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 19226 list_for_each_entry(dmabuf, dmabuf_list, list) { 19227 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 19228 break; 19229 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 19230 wr_object->u.request.bde[i].addrHigh = 19231 putPaddrHigh(dmabuf->phys); 19232 if (written + SLI4_PAGE_SIZE >= size) { 19233 wr_object->u.request.bde[i].tus.f.bdeSize = 19234 (size - written); 19235 written += (size - written); 19236 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 19237 bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1); 19238 check_change_status = true; 19239 } else { 19240 wr_object->u.request.bde[i].tus.f.bdeSize = 19241 SLI4_PAGE_SIZE; 19242 written += SLI4_PAGE_SIZE; 19243 } 19244 i++; 19245 } 19246 
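	/* All BDEs for this pass have been filled in; record how many were
	 * used and the total payload length before issuing the mailbox.
	 */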
wr_object->u.request.bde_count = i;
19247 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
19248 if (!phba->sli4_hba.intr_enable)
19249 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
19250 else {
19251 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
19252 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
19253 }
19254 /* The IOCTL status is embedded in the mailbox subheader. */
19255 shdr_status = bf_get(lpfc_mbox_hdr_status,
19256 &wr_object->header.cfg_shdr.response);
19257 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
19258 &wr_object->header.cfg_shdr.response);
19259 if (check_change_status) {
19260 shdr_change_status = bf_get(lpfc_wr_object_change_status,
19261 &wr_object->u.response);
19262 switch (shdr_change_status) {
19263 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
19264 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19265 "3198 Firmware write complete: System "
19266 "reboot required to instantiate\n");
19267 break;
19268 case (LPFC_CHANGE_STATUS_FW_RESET):
19269 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19270 "3199 Firmware write complete: Firmware"
19271 " reset required to instantiate\n");
19272 break;
19273 case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
19274 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19275 "3200 Firmware write complete: Port "
19276 "Migration or PCI Reset required to "
19277 "instantiate\n");
19278 break;
19279 case (LPFC_CHANGE_STATUS_PCI_RESET):
19280 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
19281 "3201 Firmware write complete: PCI "
19282 "Reset required to instantiate\n");
19283 break;
19284 default:
19285 break;
19286 }
19287 }
19288 if (rc != MBX_TIMEOUT)
19289 mempool_free(mbox, phba->mbox_mem_pool);
19290 if (shdr_status || shdr_add_status || rc) {
19291 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
19292 "3025 Write Object mailbox failed with "
19293 "status x%x add_status x%x, mbx status x%x\n",
19294 shdr_status, shdr_add_status, rc);
19295 rc = -ENXIO;
19296 *offset = shdr_add_status;
19297 } else
19298 *offset += wr_object->u.response.actual_write_length;
19299 return rc;
19300 }
19301
19302 /**
19303 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
19304 * @vport: pointer to vport data structure.
19305 *
19306 * This function iterates through the mailboxq and cleans up all REG_LOGIN
19307 * and REG_VPI mailbox commands associated with the vport. This function
19308 * is called when the driver wants to restart discovery of the vport due to
19309 * a Clear Virtual Link event.
19310 **/ 19311 void 19312 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19313 { 19314 struct lpfc_hba *phba = vport->phba; 19315 LPFC_MBOXQ_t *mb, *nextmb; 19316 struct lpfc_dmabuf *mp; 19317 struct lpfc_nodelist *ndlp; 19318 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19319 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19320 LIST_HEAD(mbox_cmd_list); 19321 uint8_t restart_loop; 19322 19323 /* Clean up internally queued mailbox commands with the vport */ 19324 spin_lock_irq(&phba->hbalock); 19325 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19326 if (mb->vport != vport) 19327 continue; 19328 19329 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19330 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19331 continue; 19332 19333 list_del(&mb->list); 19334 list_add_tail(&mb->list, &mbox_cmd_list); 19335 } 19336 /* Clean up active mailbox command with the vport */ 19337 mb = phba->sli.mbox_active; 19338 if (mb && (mb->vport == vport)) { 19339 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19340 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19341 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19342 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19343 act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19344 /* Put reference count for delayed processing */ 19345 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19346 /* Unregister the RPI when mailbox complete */ 19347 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19348 } 19349 } 19350 /* Cleanup any mailbox completions which are not yet processed */ 19351 do { 19352 restart_loop = 0; 19353 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19354 /* 19355 * If this mailox is already processed or it is 19356 * for another vport ignore it. 19357 */ 19358 if ((mb->vport != vport) || 19359 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19360 continue; 19361 19362 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19363 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19364 continue; 19365 19366 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19367 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19368 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19369 /* Unregister the RPI when mailbox complete */ 19370 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19371 restart_loop = 1; 19372 spin_unlock_irq(&phba->hbalock); 19373 spin_lock(shost->host_lock); 19374 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19375 spin_unlock(shost->host_lock); 19376 spin_lock_irq(&phba->hbalock); 19377 break; 19378 } 19379 } 19380 } while (restart_loop); 19381 19382 spin_unlock_irq(&phba->hbalock); 19383 19384 /* Release the cleaned-up mailbox commands */ 19385 while (!list_empty(&mbox_cmd_list)) { 19386 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19387 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19388 mp = (struct lpfc_dmabuf *)(mb->ctx_buf); 19389 if (mp) { 19390 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19391 kfree(mp); 19392 } 19393 mb->ctx_buf = NULL; 19394 ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp; 19395 mb->ctx_ndlp = NULL; 19396 if (ndlp) { 19397 spin_lock(shost->host_lock); 19398 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19399 spin_unlock(shost->host_lock); 19400 lpfc_nlp_put(ndlp); 19401 } 19402 } 19403 mempool_free(mb, phba->mbox_mem_pool); 19404 } 19405 19406 /* Release the ndlp with the cleaned-up active mailbox command */ 19407 if (act_mbx_ndlp) { 19408 spin_lock(shost->host_lock); 19409 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19410 spin_unlock(shost->host_lock); 19411 lpfc_nlp_put(act_mbx_ndlp); 19412 } 19413 } 19414 19415 /** 19416 * lpfc_drain_txq - Drain the txq 19417 * @phba: Pointer to 
HBA context object. 19418 * 19419 * This function attempt to submit IOCBs on the txq 19420 * to the adapter. For SLI4 adapters, the txq contains 19421 * ELS IOCBs that have been deferred because the there 19422 * are no SGLs. This congestion can occur with large 19423 * vport counts during node discovery. 19424 **/ 19425 19426 uint32_t 19427 lpfc_drain_txq(struct lpfc_hba *phba) 19428 { 19429 LIST_HEAD(completions); 19430 struct lpfc_sli_ring *pring; 19431 struct lpfc_iocbq *piocbq = NULL; 19432 unsigned long iflags = 0; 19433 char *fail_msg = NULL; 19434 struct lpfc_sglq *sglq; 19435 union lpfc_wqe128 wqe; 19436 uint32_t txq_cnt = 0; 19437 struct lpfc_queue *wq; 19438 19439 if (phba->link_flag & LS_MDS_LOOPBACK) { 19440 /* MDS WQE are posted only to first WQ*/ 19441 wq = phba->sli4_hba.hdwq[0].io_wq; 19442 if (unlikely(!wq)) 19443 return 0; 19444 pring = wq->pring; 19445 } else { 19446 wq = phba->sli4_hba.els_wq; 19447 if (unlikely(!wq)) 19448 return 0; 19449 pring = lpfc_phba_elsring(phba); 19450 } 19451 19452 if (unlikely(!pring) || list_empty(&pring->txq)) 19453 return 0; 19454 19455 spin_lock_irqsave(&pring->ring_lock, iflags); 19456 list_for_each_entry(piocbq, &pring->txq, list) { 19457 txq_cnt++; 19458 } 19459 19460 if (txq_cnt > pring->txq_max) 19461 pring->txq_max = txq_cnt; 19462 19463 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19464 19465 while (!list_empty(&pring->txq)) { 19466 spin_lock_irqsave(&pring->ring_lock, iflags); 19467 19468 piocbq = lpfc_sli_ringtx_get(phba, pring); 19469 if (!piocbq) { 19470 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19471 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19472 "2823 txq empty and txq_cnt is %d\n ", 19473 txq_cnt); 19474 break; 19475 } 19476 sglq = __lpfc_sli_get_els_sglq(phba, piocbq); 19477 if (!sglq) { 19478 __lpfc_sli_ringtx_put(phba, pring, piocbq); 19479 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19480 break; 19481 } 19482 txq_cnt--; 19483 19484 /* The xri and iocb resources secured, 19485 * attempt to issue request 19486 */ 19487 piocbq->sli4_lxritag = sglq->sli4_lxritag; 19488 piocbq->sli4_xritag = sglq->sli4_xritag; 19489 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 19490 fail_msg = "to convert bpl to sgl"; 19491 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 19492 fail_msg = "to convert iocb to wqe"; 19493 else if (lpfc_sli4_wq_put(wq, &wqe)) 19494 fail_msg = " - Wq is full"; 19495 else 19496 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 19497 19498 if (fail_msg) { 19499 /* Failed means we can't issue and need to cancel */ 19500 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 19501 "2822 IOCB failed %s iotag 0x%x " 19502 "xri 0x%x\n", 19503 fail_msg, 19504 piocbq->iotag, piocbq->sli4_xritag); 19505 list_add_tail(&piocbq->list, &completions); 19506 } 19507 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19508 } 19509 19510 /* Cancel all the IOCBs that cannot be issued */ 19511 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 19512 IOERR_SLI_ABORTED); 19513 19514 return txq_cnt; 19515 } 19516 19517 /** 19518 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl. 19519 * @phba: Pointer to HBA context object. 19520 * @pwqe: Pointer to command WQE. 19521 * @sglq: Pointer to the scatter gather queue object. 19522 * 19523 * This routine converts the bpl or bde that is in the WQE 19524 * to a sgl list for the sli4 hardware. The physical address 19525 * of the bpl/bde is converted back to a virtual address. 19526 * If the WQE contains a BPL then the list of BDE's is 19527 * converted to sli4_sge's. 
If the WQE contains a single 19528 * BDE then it is converted to a single sli_sge. 19529 * The WQE is still in cpu endianness so the contents of 19530 * the bpl can be used without byte swapping. 19531 * 19532 * Returns valid XRI = Success, NO_XRI = Failure. 19533 */ 19534 static uint16_t 19535 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19536 struct lpfc_sglq *sglq) 19537 { 19538 uint16_t xritag = NO_XRI; 19539 struct ulp_bde64 *bpl = NULL; 19540 struct ulp_bde64 bde; 19541 struct sli4_sge *sgl = NULL; 19542 struct lpfc_dmabuf *dmabuf; 19543 union lpfc_wqe128 *wqe; 19544 int numBdes = 0; 19545 int i = 0; 19546 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19547 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19548 uint32_t cmd; 19549 19550 if (!pwqeq || !sglq) 19551 return xritag; 19552 19553 sgl = (struct sli4_sge *)sglq->sgl; 19554 wqe = &pwqeq->wqe; 19555 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19556 19557 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19558 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19559 return sglq->sli4_xritag; 19560 numBdes = pwqeq->rsvd2; 19561 if (numBdes) { 19562 /* The addrHigh and addrLow fields within the WQE 19563 * have not been byteswapped yet so there is no 19564 * need to swap them back. 19565 */ 19566 if (pwqeq->context3) 19567 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19568 else 19569 return xritag; 19570 19571 bpl = (struct ulp_bde64 *)dmabuf->virt; 19572 if (!bpl) 19573 return xritag; 19574 19575 for (i = 0; i < numBdes; i++) { 19576 /* Should already be byte swapped. */ 19577 sgl->addr_hi = bpl->addrHigh; 19578 sgl->addr_lo = bpl->addrLow; 19579 19580 sgl->word2 = le32_to_cpu(sgl->word2); 19581 if ((i+1) == numBdes) 19582 bf_set(lpfc_sli4_sge_last, sgl, 1); 19583 else 19584 bf_set(lpfc_sli4_sge_last, sgl, 0); 19585 /* swap the size field back to the cpu so we 19586 * can assign it to the sgl. 19587 */ 19588 bde.tus.w = le32_to_cpu(bpl->tus.w); 19589 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19590 /* The offsets in the sgl need to be accumulated 19591 * separately for the request and reply lists. 19592 * The request is always first, the reply follows. 19593 */ 19594 switch (cmd) { 19595 case CMD_GEN_REQUEST64_WQE: 19596 /* add up the reply sg entries */ 19597 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19598 inbound++; 19599 /* first inbound? reset the offset */ 19600 if (inbound == 1) 19601 offset = 0; 19602 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19603 bf_set(lpfc_sli4_sge_type, sgl, 19604 LPFC_SGE_TYPE_DATA); 19605 offset += bde.tus.f.bdeSize; 19606 break; 19607 case CMD_FCP_TRSP64_WQE: 19608 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19609 bf_set(lpfc_sli4_sge_type, sgl, 19610 LPFC_SGE_TYPE_DATA); 19611 break; 19612 case CMD_FCP_TSEND64_WQE: 19613 case CMD_FCP_TRECEIVE64_WQE: 19614 bf_set(lpfc_sli4_sge_type, sgl, 19615 bpl->tus.f.bdeFlags); 19616 if (i < 3) 19617 offset = 0; 19618 else 19619 offset += bde.tus.f.bdeSize; 19620 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19621 break; 19622 } 19623 sgl->word2 = cpu_to_le32(sgl->word2); 19624 bpl++; 19625 sgl++; 19626 } 19627 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19628 /* The addrHigh and addrLow fields of the BDE have not 19629 * been byteswapped yet so they need to be swapped 19630 * before putting them in the sgl. 
19631 */ 19632 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19633 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19634 sgl->word2 = le32_to_cpu(sgl->word2); 19635 bf_set(lpfc_sli4_sge_last, sgl, 1); 19636 sgl->word2 = cpu_to_le32(sgl->word2); 19637 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19638 } 19639 return sglq->sli4_xritag; 19640 } 19641 19642 /** 19643 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19644 * @phba: Pointer to HBA context object. 19645 * @ring_number: Base sli ring number 19646 * @pwqe: Pointer to command WQE. 19647 **/ 19648 int 19649 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19650 struct lpfc_iocbq *pwqe) 19651 { 19652 union lpfc_wqe128 *wqe = &pwqe->wqe; 19653 struct lpfc_nvmet_rcv_ctx *ctxp; 19654 struct lpfc_queue *wq; 19655 struct lpfc_sglq *sglq; 19656 struct lpfc_sli_ring *pring; 19657 unsigned long iflags; 19658 uint32_t ret = 0; 19659 19660 /* NVME_LS and NVME_LS ABTS requests. */ 19661 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19662 pring = phba->sli4_hba.nvmels_wq->pring; 19663 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19664 qp, wq_access); 19665 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19666 if (!sglq) { 19667 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19668 return WQE_BUSY; 19669 } 19670 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19671 pwqe->sli4_xritag = sglq->sli4_xritag; 19672 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19673 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19674 return WQE_ERROR; 19675 } 19676 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19677 pwqe->sli4_xritag); 19678 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19679 if (ret) { 19680 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19681 return ret; 19682 } 19683 19684 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19685 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19686 return 0; 19687 } 19688 19689 /* NVME_FCREQ and NVME_ABTS requests */ 19690 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19691 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19692 wq = qp->io_wq; 19693 pring = wq->pring; 19694 19695 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 19696 19697 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19698 qp, wq_access); 19699 ret = lpfc_sli4_wq_put(wq, wqe); 19700 if (ret) { 19701 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19702 return ret; 19703 } 19704 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19705 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19706 return 0; 19707 } 19708 19709 /* NVMET requests */ 19710 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19711 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ 19712 wq = qp->io_wq; 19713 pring = wq->pring; 19714 19715 ctxp = pwqe->context2; 19716 sglq = ctxp->ctxbuf->sglq; 19717 if (pwqe->sli4_xritag == NO_XRI) { 19718 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19719 pwqe->sli4_xritag = sglq->sli4_xritag; 19720 } 19721 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19722 pwqe->sli4_xritag); 19723 bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); 19724 19725 lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, 19726 qp, wq_access); 19727 ret = lpfc_sli4_wq_put(wq, wqe); 19728 if (ret) { 19729 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19730 return ret; 19731 } 19732 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19733 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19734 return 0; 19735 } 19736 return WQE_ERROR; 19737 } 19738 19739 #ifdef LPFC_MXP_STAT 19740 /** 19741 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count 19742 * @phba: pointer to lpfc hba data structure. 19743 * @hwqid: belong to which HWQ. 19744 * 19745 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count 19746 * 15 seconds after a test case is running. 19747 * 19748 * The user should call lpfc_debugfs_multixripools_write before running a test 19749 * case to clear stat_snapshot_taken. Then the user starts a test case. During 19750 * test case is running, stat_snapshot_taken is incremented by 1 every time when 19751 * this routine is called from heartbeat timer. When stat_snapshot_taken is 19752 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken. 19753 **/ 19754 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) 19755 { 19756 struct lpfc_sli4_hdw_queue *qp; 19757 struct lpfc_multixri_pool *multixri_pool; 19758 struct lpfc_pvt_pool *pvt_pool; 19759 struct lpfc_pbl_pool *pbl_pool; 19760 u32 txcmplq_cnt; 19761 19762 qp = &phba->sli4_hba.hdwq[hwqid]; 19763 multixri_pool = qp->p_multixri_pool; 19764 if (!multixri_pool) 19765 return; 19766 19767 if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { 19768 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19769 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19770 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 19771 19772 multixri_pool->stat_pbl_count = pbl_pool->count; 19773 multixri_pool->stat_pvt_count = pvt_pool->count; 19774 multixri_pool->stat_busy_count = txcmplq_cnt; 19775 } 19776 19777 multixri_pool->stat_snapshot_taken++; 19778 } 19779 #endif 19780 19781 /** 19782 * lpfc_adjust_pvt_pool_count - Adjust private pool count 19783 * @phba: pointer to lpfc hba data structure. 19784 * @hwqid: belong to which HWQ. 19785 * 19786 * This routine moves some XRIs from private to public pool when private pool 19787 * is not busy. 19788 **/ 19789 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid) 19790 { 19791 struct lpfc_multixri_pool *multixri_pool; 19792 u32 io_req_count; 19793 u32 prev_io_req_count; 19794 19795 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 19796 if (!multixri_pool) 19797 return; 19798 io_req_count = multixri_pool->io_req_count; 19799 prev_io_req_count = multixri_pool->prev_io_req_count; 19800 19801 if (prev_io_req_count != io_req_count) { 19802 /* Private pool is busy */ 19803 multixri_pool->prev_io_req_count = io_req_count; 19804 } else { 19805 /* Private pool is not busy. 19806 * Move XRIs from private to public pool. 19807 */ 19808 lpfc_move_xri_pvt_to_pbl(phba, hwqid); 19809 } 19810 } 19811 19812 /** 19813 * lpfc_adjust_high_watermark - Adjust high watermark 19814 * @phba: pointer to lpfc hba data structure. 
19815 * @hwqid: belong to which HWQ. 19816 * 19817 * This routine sets high watermark as number of outstanding XRIs, 19818 * but make sure the new value is between xri_limit/2 and xri_limit. 19819 **/ 19820 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) 19821 { 19822 u32 new_watermark; 19823 u32 watermark_max; 19824 u32 watermark_min; 19825 u32 xri_limit; 19826 u32 txcmplq_cnt; 19827 u32 abts_io_bufs; 19828 struct lpfc_multixri_pool *multixri_pool; 19829 struct lpfc_sli4_hdw_queue *qp; 19830 19831 qp = &phba->sli4_hba.hdwq[hwqid]; 19832 multixri_pool = qp->p_multixri_pool; 19833 if (!multixri_pool) 19834 return; 19835 xri_limit = multixri_pool->xri_limit; 19836 19837 watermark_max = xri_limit; 19838 watermark_min = xri_limit / 2; 19839 19840 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 19841 abts_io_bufs = qp->abts_scsi_io_bufs; 19842 abts_io_bufs += qp->abts_nvme_io_bufs; 19843 19844 new_watermark = txcmplq_cnt + abts_io_bufs; 19845 new_watermark = min(watermark_max, new_watermark); 19846 new_watermark = max(watermark_min, new_watermark); 19847 multixri_pool->pvt_pool.high_watermark = new_watermark; 19848 19849 #ifdef LPFC_MXP_STAT 19850 multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm, 19851 new_watermark); 19852 #endif 19853 } 19854 19855 /** 19856 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool 19857 * @phba: pointer to lpfc hba data structure. 19858 * @hwqid: belong to which HWQ. 19859 * 19860 * This routine is called from hearbeat timer when pvt_pool is idle. 19861 * All free XRIs are moved from private to public pool on hwqid with 2 steps. 19862 * The first step moves (all - low_watermark) amount of XRIs. 19863 * The second step moves the rest of XRIs. 19864 **/ 19865 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid) 19866 { 19867 struct lpfc_pbl_pool *pbl_pool; 19868 struct lpfc_pvt_pool *pvt_pool; 19869 struct lpfc_sli4_hdw_queue *qp; 19870 struct lpfc_io_buf *lpfc_ncmd; 19871 struct lpfc_io_buf *lpfc_ncmd_next; 19872 unsigned long iflag; 19873 struct list_head tmp_list; 19874 u32 tmp_count; 19875 19876 qp = &phba->sli4_hba.hdwq[hwqid]; 19877 pbl_pool = &qp->p_multixri_pool->pbl_pool; 19878 pvt_pool = &qp->p_multixri_pool->pvt_pool; 19879 tmp_count = 0; 19880 19881 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool); 19882 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool); 19883 19884 if (pvt_pool->count > pvt_pool->low_watermark) { 19885 /* Step 1: move (all - low_watermark) from pvt_pool 19886 * to pbl_pool 19887 */ 19888 19889 /* Move low watermark of bufs from pvt_pool to tmp_list */ 19890 INIT_LIST_HEAD(&tmp_list); 19891 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, 19892 &pvt_pool->list, list) { 19893 list_move_tail(&lpfc_ncmd->list, &tmp_list); 19894 tmp_count++; 19895 if (tmp_count >= pvt_pool->low_watermark) 19896 break; 19897 } 19898 19899 /* Move all bufs from pvt_pool to pbl_pool */ 19900 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19901 19902 /* Move all bufs from tmp_list to pvt_pool */ 19903 list_splice(&tmp_list, &pvt_pool->list); 19904 19905 pbl_pool->count += (pvt_pool->count - tmp_count); 19906 pvt_pool->count = tmp_count; 19907 } else { 19908 /* Step 2: move the rest from pvt_pool to pbl_pool */ 19909 list_splice_init(&pvt_pool->list, &pbl_pool->list); 19910 pbl_pool->count += pvt_pool->count; 19911 pvt_pool->count = 0; 19912 } 19913 19914 spin_unlock(&pvt_pool->lock); 19915 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19916 } 19917 19918 /** 19919 * 
_lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19920 * @phba: pointer to lpfc hba data structure 19921 * @pbl_pool: specified public free XRI pool 19922 * @pvt_pool: specified private free XRI pool 19923 * @count: number of XRIs to move 19924 * 19925 * This routine tries to move some free common bufs from the specified pbl_pool 19926 * to the specified pvt_pool. It might move less than count XRIs if there's not 19927 * enough in public pool. 19928 * 19929 * Return: 19930 * true - if XRIs are successfully moved from the specified pbl_pool to the 19931 * specified pvt_pool 19932 * false - if the specified pbl_pool is empty or locked by someone else 19933 **/ 19934 static bool 19935 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, 19936 struct lpfc_pbl_pool *pbl_pool, 19937 struct lpfc_pvt_pool *pvt_pool, u32 count) 19938 { 19939 struct lpfc_io_buf *lpfc_ncmd; 19940 struct lpfc_io_buf *lpfc_ncmd_next; 19941 unsigned long iflag; 19942 int ret; 19943 19944 ret = spin_trylock_irqsave(&pbl_pool->lock, iflag); 19945 if (ret) { 19946 if (pbl_pool->count) { 19947 /* Move a batch of XRIs from public to private pool */ 19948 lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool); 19949 list_for_each_entry_safe(lpfc_ncmd, 19950 lpfc_ncmd_next, 19951 &pbl_pool->list, 19952 list) { 19953 list_move_tail(&lpfc_ncmd->list, 19954 &pvt_pool->list); 19955 pvt_pool->count++; 19956 pbl_pool->count--; 19957 count--; 19958 if (count == 0) 19959 break; 19960 } 19961 19962 spin_unlock(&pvt_pool->lock); 19963 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19964 return true; 19965 } 19966 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 19967 } 19968 19969 return false; 19970 } 19971 19972 /** 19973 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool 19974 * @phba: pointer to lpfc hba data structure. 19975 * @hwqid: belong to which HWQ. 19976 * @count: number of XRIs to move 19977 * 19978 * This routine tries to find some free common bufs in one of public pools with 19979 * Round Robin method. The search always starts from local hwqid, then the next 19980 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found, 19981 * a batch of free common bufs are moved to private pool on hwqid. 19982 * It might move less than count XRIs if there's not enough in public pool. 
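 *
 * Callers typically request XRI_BATCH entries at a time, e.g. from
 * lpfc_keep_pvt_pool_above_lowwm() when pvt_pool drops below its low
 * watermark, or from lpfc_get_io_buf_from_multixri_pools() when
 * pvt_pool is empty.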
19983 **/ 19984 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count) 19985 { 19986 struct lpfc_multixri_pool *multixri_pool; 19987 struct lpfc_multixri_pool *next_multixri_pool; 19988 struct lpfc_pvt_pool *pvt_pool; 19989 struct lpfc_pbl_pool *pbl_pool; 19990 struct lpfc_sli4_hdw_queue *qp; 19991 u32 next_hwqid; 19992 u32 hwq_count; 19993 int ret; 19994 19995 qp = &phba->sli4_hba.hdwq[hwqid]; 19996 multixri_pool = qp->p_multixri_pool; 19997 pvt_pool = &multixri_pool->pvt_pool; 19998 pbl_pool = &multixri_pool->pbl_pool; 19999 20000 /* Check if local pbl_pool is available */ 20001 ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count); 20002 if (ret) { 20003 #ifdef LPFC_MXP_STAT 20004 multixri_pool->local_pbl_hit_count++; 20005 #endif 20006 return; 20007 } 20008 20009 hwq_count = phba->cfg_hdw_queue; 20010 20011 /* Get the next hwqid which was found last time */ 20012 next_hwqid = multixri_pool->rrb_next_hwqid; 20013 20014 do { 20015 /* Go to next hwq */ 20016 next_hwqid = (next_hwqid + 1) % hwq_count; 20017 20018 next_multixri_pool = 20019 phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool; 20020 pbl_pool = &next_multixri_pool->pbl_pool; 20021 20022 /* Check if the public free xri pool is available */ 20023 ret = _lpfc_move_xri_pbl_to_pvt( 20024 phba, qp, pbl_pool, pvt_pool, count); 20025 20026 /* Exit while-loop if success or all hwqid are checked */ 20027 } while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid); 20028 20029 /* Starting point for the next time */ 20030 multixri_pool->rrb_next_hwqid = next_hwqid; 20031 20032 if (!ret) { 20033 /* stats: all public pools are empty*/ 20034 multixri_pool->pbl_empty_count++; 20035 } 20036 20037 #ifdef LPFC_MXP_STAT 20038 if (ret) { 20039 if (next_hwqid == hwqid) 20040 multixri_pool->local_pbl_hit_count++; 20041 else 20042 multixri_pool->other_pbl_hit_count++; 20043 } 20044 #endif 20045 } 20046 20047 /** 20048 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark 20049 * @phba: pointer to lpfc hba data structure. 20050 * @qp: belong to which HWQ. 20051 * 20052 * This routine get a batch of XRIs from pbl_pool if pvt_pool is less than 20053 * low watermark. 20054 **/ 20055 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid) 20056 { 20057 struct lpfc_multixri_pool *multixri_pool; 20058 struct lpfc_pvt_pool *pvt_pool; 20059 20060 multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool; 20061 pvt_pool = &multixri_pool->pvt_pool; 20062 20063 if (pvt_pool->count < pvt_pool->low_watermark) 20064 lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH); 20065 } 20066 20067 /** 20068 * lpfc_release_io_buf - Return one IO buf back to free pool 20069 * @phba: pointer to lpfc hba data structure. 20070 * @lpfc_ncmd: IO buf to be returned. 20071 * @qp: belong to which HWQ. 20072 * 20073 * This routine returns one IO buf back to free pool. If this is an urgent IO, 20074 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1, 20075 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and 20076 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to 20077 * lpfc_io_buf_list_put. 
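 *
 * Summary of the pool selection implemented below:
 *	expedite buffer                                  -> epd_pool
 *	pvt_pool under its low watermark, or total owned
 *	XRIs under xri_limit with pvt_pool under its
 *	high watermark                                   -> pvt_pool
 *	anything else                                    -> pbl_pool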
20078 **/ 20079 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, 20080 struct lpfc_sli4_hdw_queue *qp) 20081 { 20082 unsigned long iflag; 20083 struct lpfc_pbl_pool *pbl_pool; 20084 struct lpfc_pvt_pool *pvt_pool; 20085 struct lpfc_epd_pool *epd_pool; 20086 u32 txcmplq_cnt; 20087 u32 xri_owned; 20088 u32 xri_limit; 20089 u32 abts_io_bufs; 20090 20091 /* MUST zero fields if buffer is reused by another protocol */ 20092 lpfc_ncmd->nvmeCmd = NULL; 20093 lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; 20094 lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; 20095 20096 if (phba->cfg_xri_rebalancing) { 20097 if (lpfc_ncmd->expedite) { 20098 /* Return to expedite pool */ 20099 epd_pool = &phba->epd_pool; 20100 spin_lock_irqsave(&epd_pool->lock, iflag); 20101 list_add_tail(&lpfc_ncmd->list, &epd_pool->list); 20102 epd_pool->count++; 20103 spin_unlock_irqrestore(&epd_pool->lock, iflag); 20104 return; 20105 } 20106 20107 /* Avoid invalid access if an IO sneaks in and is being rejected 20108 * just _after_ xri pools are destroyed in lpfc_offline. 20109 * Nothing much can be done at this point. 20110 */ 20111 if (!qp->p_multixri_pool) 20112 return; 20113 20114 pbl_pool = &qp->p_multixri_pool->pbl_pool; 20115 pvt_pool = &qp->p_multixri_pool->pvt_pool; 20116 20117 txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; 20118 abts_io_bufs = qp->abts_scsi_io_bufs; 20119 abts_io_bufs += qp->abts_nvme_io_bufs; 20120 20121 xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; 20122 xri_limit = qp->p_multixri_pool->xri_limit; 20123 20124 #ifdef LPFC_MXP_STAT 20125 if (xri_owned <= xri_limit) 20126 qp->p_multixri_pool->below_limit_count++; 20127 else 20128 qp->p_multixri_pool->above_limit_count++; 20129 #endif 20130 20131 /* XRI goes to either public or private free xri pool 20132 * based on watermark and xri_limit 20133 */ 20134 if ((pvt_pool->count < pvt_pool->low_watermark) || 20135 (xri_owned < xri_limit && 20136 pvt_pool->count < pvt_pool->high_watermark)) { 20137 lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, 20138 qp, free_pvt_pool); 20139 list_add_tail(&lpfc_ncmd->list, 20140 &pvt_pool->list); 20141 pvt_pool->count++; 20142 spin_unlock_irqrestore(&pvt_pool->lock, iflag); 20143 } else { 20144 lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, 20145 qp, free_pub_pool); 20146 list_add_tail(&lpfc_ncmd->list, 20147 &pbl_pool->list); 20148 pbl_pool->count++; 20149 spin_unlock_irqrestore(&pbl_pool->lock, iflag); 20150 } 20151 } else { 20152 lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag, 20153 qp, free_xri); 20154 list_add_tail(&lpfc_ncmd->list, 20155 &qp->lpfc_io_buf_list_put); 20156 qp->put_io_bufs++; 20157 spin_unlock_irqrestore(&qp->io_buf_list_put_lock, 20158 iflag); 20159 } 20160 20161 if (phba->cfg_xpsgl && !phba->nvmet_support && 20162 !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) 20163 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); 20164 20165 if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) 20166 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); 20167 } 20168 20169 /** 20170 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool 20171 * @phba: pointer to lpfc hba data structure. 20172 * @pvt_pool: pointer to private pool data structure. 20173 * @ndlp: pointer to lpfc nodelist data structure. 20174 * 20175 * This routine tries to get one free IO buf from private pool. 
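 * Buffers whose XRI is still active on the @ndlp RRQ are skipped
 * (see lpfc_test_rrq_active()).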

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue data structure.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
                                  struct lpfc_sli4_hdw_queue *qp,
                                  struct lpfc_pvt_pool *pvt_pool,
                                  struct lpfc_nodelist *ndlp)
{
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;

        lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &pvt_pool->list, list) {
                if (lpfc_test_rrq_active(
                        phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
                        continue;
                list_del(&lpfc_ncmd->list);
                pvt_pool->count--;
                spin_unlock_irqrestore(&pvt_pool->lock, iflag);
                return lpfc_ncmd;
        }
        spin_unlock_irqrestore(&pvt_pool->lock, iflag);

        return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
        struct lpfc_io_buf *lpfc_ncmd;
        struct lpfc_io_buf *lpfc_ncmd_next;
        unsigned long iflag;
        struct lpfc_epd_pool *epd_pool;

        epd_pool = &phba->epd_pool;
        lpfc_ncmd = NULL;

        spin_lock_irqsave(&epd_pool->lock, iflag);
        if (epd_pool->count > 0) {
                list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                         &epd_pool->list, list) {
                        list_del(&lpfc_ncmd->list);
                        epd_pool->count--;
                        break;
                }
        }
        spin_unlock_irqrestore(&epd_pool->lock, iflag);

        return lpfc_ncmd;
}
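
/*
 * Note (descriptive comment only): the private-pool walk above skips any XRI
 * that still has an active RRQ against @ndlp; this only matters on the SCSI
 * side, since NVME callers pass ndlp == NULL and nothing is filtered out.
 * The expedite pool takes no ndlp argument and performs no such check.
 */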

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If the private free xri pool is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
                                    struct lpfc_nodelist *ndlp,
                                    int hwqid, int expedite)
{
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_multixri_pool *multixri_pool;
        struct lpfc_pvt_pool *pvt_pool;
        struct lpfc_io_buf *lpfc_ncmd;

        qp = &phba->sli4_hba.hdwq[hwqid];
        lpfc_ncmd = NULL;
        multixri_pool = qp->p_multixri_pool;
        pvt_pool = &multixri_pool->pvt_pool;
        multixri_pool->io_req_count++;

        /* If pvt_pool is empty, move some XRIs from public to private pool */
        if (pvt_pool->count == 0)
                lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

        /* Get one XRI from private free xri pool */
        lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

        if (lpfc_ncmd) {
                lpfc_ncmd->hdwq = qp;
                lpfc_ncmd->hdwq_no = hwqid;
        } else if (expedite) {
                /* If we fail to get one from pvt_pool and this is an expedite
                 * request, get one free xri from expedite pool.
                 */
                lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
        }

        return lpfc_ncmd;
}

static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
        struct lpfc_sli4_hdw_queue *qp;
        struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

        qp = &phba->sli4_hba.hdwq[idx];
        list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
                                 &qp->lpfc_io_buf_list_get, list) {
                if (lpfc_test_rrq_active(phba, ndlp,
                                         lpfc_cmd->cur_iocbq.sli4_lxritag))
                        continue;

                if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
                        continue;

                list_del_init(&lpfc_cmd->list);
                qp->get_io_bufs--;
                lpfc_cmd->hdwq = qp;
                lpfc_cmd->hdwq_no = idx;
                return lpfc_cmd;
        }
        return NULL;
}
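
/*
 * The non-rebalancing path below uses the classic two-list scheme: fast
 * allocations come from lpfc_io_buf_list_get, completions are parked on
 * lpfc_io_buf_list_put, and only when the get list runs dry are the two
 * lists spliced under both locks. A rough sketch of that refill step
 * (illustrative only):
 *
 *      spin_lock(&qp->io_buf_list_put_lock);
 *      list_splice(&qp->lpfc_io_buf_list_put, &qp->lpfc_io_buf_list_get);
 *      qp->get_io_bufs += qp->put_io_bufs;
 *      INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
 *      qp->put_io_bufs = 0;
 *      spin_unlock(&qp->io_buf_list_put_lock);
 */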

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * it removes an IO buffer from the head of @hwqid's io_buf_list and returns
 * it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
                                    struct lpfc_nodelist *ndlp,
                                    u32 hwqid, int expedite)
{
        struct lpfc_sli4_hdw_queue *qp;
        unsigned long iflag;
        struct lpfc_io_buf *lpfc_cmd;

        qp = &phba->sli4_hba.hdwq[hwqid];
        lpfc_cmd = NULL;

        if (phba->cfg_xri_rebalancing)
                lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
                        phba, ndlp, hwqid, expedite);
        else {
                lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
                                          qp, alloc_xri_get);
                if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
                        lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
                if (!lpfc_cmd) {
                        lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
                                          qp, alloc_xri_put);
                        list_splice(&qp->lpfc_io_buf_list_put,
                                    &qp->lpfc_io_buf_list_get);
                        qp->get_io_bufs += qp->put_io_bufs;
                        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
                        qp->put_io_bufs = 0;
                        spin_unlock(&qp->io_buf_list_put_lock);
                        if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
                            expedite)
                                lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
                }
                spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
        }

        return lpfc_cmd;
}

/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;
        struct sli4_hybrid_sgl *allocated_sgl = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->sgl_list;

        spin_lock_irq(&hdwq->hdwq_lock);

        if (likely(!list_empty(buf_list))) {
                /* break off 1 chunk from the sgl_list */
                list_for_each_entry_safe(list_entry, tmp,
                                         buf_list, list_node) {
                        list_move_tail(&list_entry->list_node,
                                       &lpfc_buf->dma_sgl_xtra_list);
                        break;
                }
        } else {
                /* allocate more */
                spin_unlock_irq(&hdwq->hdwq_lock);
                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
                                   cpu_to_node(smp_processor_id()));
                if (!tmp) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8353 error kmalloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        return NULL;
                }

                tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
                                              GFP_ATOMIC, &tmp->dma_phys_sgl);
                if (!tmp->dma_sgl) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8354 error pool_alloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        kfree(tmp);
                        return NULL;
                }

                spin_lock_irq(&hdwq->hdwq_lock);
                list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
        }

        allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
                                        struct sli4_hybrid_sgl,
                                        list_node);

        spin_unlock_irq(&hdwq->hdwq_lock);

        return allocated_sgl;
}
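
/*
 * Typical pairing for the hybrid SGL chunks (illustrative comment only,
 * error handling trimmed): a caller that needs more SGEs than the embedded
 * SGL provides grabs an extra chunk while building the IO and lets the
 * release path hand it back, e.g.
 *
 *      sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
 *      if (!sgl)
 *              return -ENOMEM;
 *      ... chain sgl->dma_sgl / sgl->dma_phys_sgl into the WQE ...
 *      lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
 *
 * lpfc_release_io_buf() returns any attached chunks through
 * lpfc_put_sgl_per_hdwq() before the IO buf is reused.
 */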

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
        int rc = 0;
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->sgl_list;

        spin_lock_irq(&hdwq->hdwq_lock);

        if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
                list_for_each_entry_safe(list_entry, tmp,
                                         &lpfc_buf->dma_sgl_xtra_list,
                                         list_node) {
                        list_move_tail(&list_entry->list_node,
                                       buf_list);
                }
        } else {
                rc = -EINVAL;
        }

        spin_unlock_irq(&hdwq->hdwq_lock);
        return rc;
}

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq SGL chunk pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
                       struct lpfc_sli4_hdw_queue *hdwq)
{
        struct list_head *buf_list = &hdwq->sgl_list;
        struct sli4_hybrid_sgl *list_entry = NULL;
        struct sli4_hybrid_sgl *tmp = NULL;

        spin_lock_irq(&hdwq->hdwq_lock);

        /* Free sgl pool */
        list_for_each_entry_safe(list_entry, tmp,
                                 buf_list, list_node) {
                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                              list_entry->dma_sgl,
                              list_entry->dma_phys_sgl);
                list_del(&list_entry->list_node);
                kfree(list_entry);
        }

        spin_unlock_irq(&hdwq->hdwq_lock);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                              struct lpfc_io_buf *lpfc_buf)
{
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;
        struct fcp_cmd_rsp_buf *allocated_buf = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;

        spin_lock_irq(&hdwq->hdwq_lock);

        if (likely(!list_empty(buf_list))) {
                /* break off 1 chunk from the list */
                list_for_each_entry_safe(list_entry, tmp,
                                         buf_list,
                                         list_node) {
                        list_move_tail(&list_entry->list_node,
                                       &lpfc_buf->dma_cmd_rsp_list);
                        break;
                }
        } else {
                /* allocate more */
                spin_unlock_irq(&hdwq->hdwq_lock);
                tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
                                   cpu_to_node(smp_processor_id()));
                if (!tmp) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8355 error kmalloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        return NULL;
                }

                tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
                                               GFP_ATOMIC,
                                               &tmp->fcp_cmd_rsp_dma_handle);

                if (!tmp->fcp_cmnd) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "8356 error pool_alloc memory for HDWQ "
                                        "%d %s\n",
                                        lpfc_buf->hdwq_no, __func__);
                        kfree(tmp);
                        return NULL;
                }

                tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
                                                  sizeof(struct fcp_cmnd));

                spin_lock_irq(&hdwq->hdwq_lock);
                list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
        }

        allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
                                        struct fcp_cmd_rsp_buf,
                                        list_node);

        spin_unlock_irq(&hdwq->hdwq_lock);

        return allocated_buf;
}

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
 *
 * Return codes:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                              struct lpfc_io_buf *lpfc_buf)
{
        int rc = 0;
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;
        struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;

        spin_lock_irq(&hdwq->hdwq_lock);

        if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
                list_for_each_entry_safe(list_entry, tmp,
                                         &lpfc_buf->dma_cmd_rsp_list,
                                         list_node) {
                        list_move_tail(&list_entry->list_node,
                                       buf_list);
                }
        } else {
                rc = -EINVAL;
        }

        spin_unlock_irq(&hdwq->hdwq_lock);
        return rc;
}

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return codes:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
                               struct lpfc_sli4_hdw_queue *hdwq)
{
        struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
        struct fcp_cmd_rsp_buf *list_entry = NULL;
        struct fcp_cmd_rsp_buf *tmp = NULL;

        spin_lock_irq(&hdwq->hdwq_lock);

        /* Free cmd_rsp buf pool */
        list_for_each_entry_safe(list_entry, tmp,
                                 buf_list,
                                 list_node) {
                dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
                              list_entry->fcp_cmnd,
                              list_entry->fcp_cmd_rsp_dma_handle);
                list_del(&list_entry->list_node);
                kfree(list_entry);
        }

        spin_unlock_irq(&hdwq->hdwq_lock);
}
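
/*
 * Lifecycle summary for the per-hdwq CMD/RSP buffers (descriptive comment
 * only): lpfc_get_cmd_rsp_buf_per_hdwq() attaches a DMA-able FCP_CMND/FCP_RSP
 * pair to an IO buf while the command is built, lpfc_release_io_buf() hands
 * it back through lpfc_put_cmd_rsp_buf_per_hdwq(), and
 * lpfc_free_cmd_rsp_buf_per_hdwq() drains the pool when the hdwq resources
 * are cleaned up.
 */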