/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will
 * return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
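 *
 * Illustrative call pattern (a sketch only, not a specific call site in this
 * file; wq, wqe and iflags are assumed locals): the caller builds the WQE and
 * posts it while holding the hbalock, handling a full queue:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		(queue is full; retry later or fail the request)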
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
				     q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
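 *
 * Typical consumer loop (illustrative sketch only; eq and eqe are assumed
 * locals of the caller):
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		(process the event queue entry)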
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;


	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
inline void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
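 *
 * Illustrative use (sketch only): after servicing the EQEs fetched from @q,
 * pop them back to the HBA and re-arm the queue:
 *
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);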
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_if6_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_if6_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.eqav) {
			temp_eqe = q->qe[q->host_index].eqe;
			bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_hba *phba;
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	phba = q->phba;
	cqe = q->qe[q->hba_index].cqe;

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;
	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !q->hba_index)
		q->qe_valid = (q->qe_valid) ? 0 : 1;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_if6_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_if6_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_hba *phba;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	phba = q->phba;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		if (!phba->sli4_hba.pc_sli4_params.cqav) {
			temp_qe = q->qe[q->host_index].cqe;
			bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		}
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entries.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on the queues then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = hq->qe[hq_put_index].rqe;
	temp_drqe = dq->qe[dq_put_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did){
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)

{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
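 *
 * Illustrative use (sketch only; a non-zero return means no RRQ tracking
 * was set up for this exchange):
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq))
 *		(RRQ could not be set active for this exchange)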
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty, it succeeds and returns a pointer to the newly
 * allocated sglq object; otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	lockdep_assert_held(&phba->hbalock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty, it succeeds and returns a pointer to the newly
 * allocated sglq object; otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	lockdep_assert_held(&phba->hbalock);

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}


/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}

/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}

/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}

/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
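 *
 * Illustrative use (sketch only): a zero return means no iotag could be
 * assigned and the request should be failed:
 *
 *	iotag = lpfc_sli_next_iotag(phba, iocbq);
 *	if (!iotag)
 *		(fail the request; zero is not a valid iotag)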
1704 **/ 1705 uint16_t 1706 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1707 { 1708 struct lpfc_iocbq **new_arr; 1709 struct lpfc_iocbq **old_arr; 1710 size_t new_len; 1711 struct lpfc_sli *psli = &phba->sli; 1712 uint16_t iotag; 1713 1714 spin_lock_irq(&phba->hbalock); 1715 iotag = psli->last_iotag; 1716 if(++iotag < psli->iocbq_lookup_len) { 1717 psli->last_iotag = iotag; 1718 psli->iocbq_lookup[iotag] = iocbq; 1719 spin_unlock_irq(&phba->hbalock); 1720 iocbq->iotag = iotag; 1721 return iotag; 1722 } else if (psli->iocbq_lookup_len < (0xffff 1723 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1724 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1725 spin_unlock_irq(&phba->hbalock); 1726 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *), 1727 GFP_KERNEL); 1728 if (new_arr) { 1729 spin_lock_irq(&phba->hbalock); 1730 old_arr = psli->iocbq_lookup; 1731 if (new_len <= psli->iocbq_lookup_len) { 1732 /* highly unprobable case */ 1733 kfree(new_arr); 1734 iotag = psli->last_iotag; 1735 if(++iotag < psli->iocbq_lookup_len) { 1736 psli->last_iotag = iotag; 1737 psli->iocbq_lookup[iotag] = iocbq; 1738 spin_unlock_irq(&phba->hbalock); 1739 iocbq->iotag = iotag; 1740 return iotag; 1741 } 1742 spin_unlock_irq(&phba->hbalock); 1743 return 0; 1744 } 1745 if (psli->iocbq_lookup) 1746 memcpy(new_arr, old_arr, 1747 ((psli->last_iotag + 1) * 1748 sizeof (struct lpfc_iocbq *))); 1749 psli->iocbq_lookup = new_arr; 1750 psli->iocbq_lookup_len = new_len; 1751 psli->last_iotag = iotag; 1752 psli->iocbq_lookup[iotag] = iocbq; 1753 spin_unlock_irq(&phba->hbalock); 1754 iocbq->iotag = iotag; 1755 kfree(old_arr); 1756 return iotag; 1757 } 1758 } else 1759 spin_unlock_irq(&phba->hbalock); 1760 1761 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1762 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1763 psli->last_iotag); 1764 1765 return 0; 1766 } 1767 1768 /** 1769 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1770 * @phba: Pointer to HBA context object. 1771 * @pring: Pointer to driver SLI ring object. 1772 * @iocb: Pointer to iocb slot in the ring. 1773 * @nextiocb: Pointer to driver iocb object which need to be 1774 * posted to firmware. 1775 * 1776 * This function is called with hbalock held to post a new iocb to 1777 * the firmware. This function copies the new iocb to ring iocb slot and 1778 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1779 * a completion call back for this iocb else the function will free the 1780 * iocb object. 1781 **/ 1782 static void 1783 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1784 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1785 { 1786 lockdep_assert_held(&phba->hbalock); 1787 /* 1788 * Set up an iotag 1789 */ 1790 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1791 1792 1793 if (pring->ringno == LPFC_ELS_RING) { 1794 lpfc_debugfs_slow_ring_trc(phba, 1795 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1796 *(((uint32_t *) &nextiocb->iocb) + 4), 1797 *(((uint32_t *) &nextiocb->iocb) + 6), 1798 *(((uint32_t *) &nextiocb->iocb) + 7)); 1799 } 1800 1801 /* 1802 * Issue iocb command to adapter 1803 */ 1804 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1805 wmb(); 1806 pring->stats.iocb_cmd++; 1807 1808 /* 1809 * If there is no completion routine to call, we can release the 1810 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1811 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
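 * IOCBs that do have a completion routine stay tracked on the txcmplq
 * (via lpfc_sli_ringtxcmpl_put) until their response arrives.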
1812 */ 1813 if (nextiocb->iocb_cmpl) 1814 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1815 else 1816 __lpfc_sli_release_iocbq(phba, nextiocb); 1817 1818 /* 1819 * Let the HBA know what IOCB slot will be the next one the 1820 * driver will put a command into. 1821 */ 1822 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1823 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1824 } 1825 1826 /** 1827 * lpfc_sli_update_full_ring - Update the chip attention register 1828 * @phba: Pointer to HBA context object. 1829 * @pring: Pointer to driver SLI ring object. 1830 * 1831 * The caller is not required to hold any lock for calling this function. 1832 * This function updates the chip attention bits for the ring to inform firmware 1833 * that there are pending work to be done for this ring and requests an 1834 * interrupt when there is space available in the ring. This function is 1835 * called when the driver is unable to post more iocbs to the ring due 1836 * to unavailability of space in the ring. 1837 **/ 1838 static void 1839 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1840 { 1841 int ringno = pring->ringno; 1842 1843 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1844 1845 wmb(); 1846 1847 /* 1848 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1849 * The HBA will tell us when an IOCB entry is available. 1850 */ 1851 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1852 readl(phba->CAregaddr); /* flush */ 1853 1854 pring->stats.iocb_cmd_full++; 1855 } 1856 1857 /** 1858 * lpfc_sli_update_ring - Update chip attention register 1859 * @phba: Pointer to HBA context object. 1860 * @pring: Pointer to driver SLI ring object. 1861 * 1862 * This function updates the chip attention register bit for the 1863 * given ring to inform HBA that there is more work to be done 1864 * in this ring. The caller is not required to hold any lock. 1865 **/ 1866 static void 1867 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1868 { 1869 int ringno = pring->ringno; 1870 1871 /* 1872 * Tell the HBA that there is work to do in this ring. 1873 */ 1874 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1875 wmb(); 1876 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1877 readl(phba->CAregaddr); /* flush */ 1878 } 1879 } 1880 1881 /** 1882 * lpfc_sli_resume_iocb - Process iocbs in the txq 1883 * @phba: Pointer to HBA context object. 1884 * @pring: Pointer to driver SLI ring object. 1885 * 1886 * This function is called with hbalock held to post pending iocbs 1887 * in the txq to the firmware. This function is called when driver 1888 * detects space available in the ring. 1889 **/ 1890 static void 1891 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1892 { 1893 IOCB_t *iocb; 1894 struct lpfc_iocbq *nextiocb; 1895 1896 lockdep_assert_held(&phba->hbalock); 1897 1898 /* 1899 * Check to see if: 1900 * (a) there is anything on the txq to send 1901 * (b) link is up 1902 * (c) link attention events can be processed (fcp ring only) 1903 * (d) IOCB processing is not blocked by the outstanding mbox command. 
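 * If the submit loop below drains the txq before the ring fills,
 * lpfc_sli_update_ring() simply notifies the port of the new work; if it
 * stops because no command slot is left, lpfc_sli_update_full_ring()
 * additionally asks the port to interrupt when entries free up.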
1904 */ 1905 1906 if (lpfc_is_link_up(phba) && 1907 (!list_empty(&pring->txq)) && 1908 (pring->ringno != LPFC_FCP_RING || 1909 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1910 1911 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1912 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1913 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1914 1915 if (iocb) 1916 lpfc_sli_update_ring(phba, pring); 1917 else 1918 lpfc_sli_update_full_ring(phba, pring); 1919 } 1920 1921 return; 1922 } 1923 1924 /** 1925 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1926 * @phba: Pointer to HBA context object. 1927 * @hbqno: HBQ number. 1928 * 1929 * This function is called with hbalock held to get the next 1930 * available slot for the given HBQ. If there is free slot 1931 * available for the HBQ it will return pointer to the next available 1932 * HBQ entry else it will return NULL. 1933 **/ 1934 static struct lpfc_hbq_entry * 1935 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1936 { 1937 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1938 1939 lockdep_assert_held(&phba->hbalock); 1940 1941 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1942 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1943 hbqp->next_hbqPutIdx = 0; 1944 1945 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1946 uint32_t raw_index = phba->hbq_get[hbqno]; 1947 uint32_t getidx = le32_to_cpu(raw_index); 1948 1949 hbqp->local_hbqGetIdx = getidx; 1950 1951 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1952 lpfc_printf_log(phba, KERN_ERR, 1953 LOG_SLI | LOG_VPORT, 1954 "1802 HBQ %d: local_hbqGetIdx " 1955 "%u is > than hbqp->entry_count %u\n", 1956 hbqno, hbqp->local_hbqGetIdx, 1957 hbqp->entry_count); 1958 1959 phba->link_state = LPFC_HBA_ERROR; 1960 return NULL; 1961 } 1962 1963 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1964 return NULL; 1965 } 1966 1967 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1968 hbqp->hbqPutIdx; 1969 } 1970 1971 /** 1972 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1973 * @phba: Pointer to HBA context object. 1974 * 1975 * This function is called with no lock held to free all the 1976 * hbq buffers while uninitializing the SLI interface. It also 1977 * frees the HBQ buffers returned by the firmware but not yet 1978 * processed by the upper layers. 1979 **/ 1980 void 1981 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1982 { 1983 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1984 struct hbq_dmabuf *hbq_buf; 1985 unsigned long flags; 1986 int i, hbq_count; 1987 1988 hbq_count = lpfc_sli_hbq_count(); 1989 /* Return all memory used by all HBQs */ 1990 spin_lock_irqsave(&phba->hbalock, flags); 1991 for (i = 0; i < hbq_count; ++i) { 1992 list_for_each_entry_safe(dmabuf, next_dmabuf, 1993 &phba->hbqs[i].hbq_buffer_list, list) { 1994 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1995 list_del(&hbq_buf->dbuf.list); 1996 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1997 } 1998 phba->hbqs[i].buffer_count = 0; 1999 } 2000 2001 /* Mark the HBQs not in use */ 2002 phba->hbq_in_use = 0; 2003 spin_unlock_irqrestore(&phba->hbalock, flags); 2004 } 2005 2006 /** 2007 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 2008 * @phba: Pointer to HBA context object. 2009 * @hbqno: HBQ number. 2010 * @hbq_buf: Pointer to HBQ buffer. 2011 * 2012 * This function is called with the hbalock held to post a 2013 * hbq buffer to the firmware. If the function finds an empty 2014 * slot in the HBQ, it will post the buffer. 
The function will return
2015 * zero if it successfully posts the buffer,
2016 * else it will return an error.
2017 **/
2018 static int
2019 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2020 struct hbq_dmabuf *hbq_buf)
2021 {
2022 lockdep_assert_held(&phba->hbalock);
2023 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2024 }
2025 
2026 /**
2027 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2028 * @phba: Pointer to HBA context object.
2029 * @hbqno: HBQ number.
2030 * @hbq_buf: Pointer to HBQ buffer.
2031 *
2032 * This function is called with the hbalock held to post a hbq buffer to the
2033 * firmware. If the function finds an empty slot in the HBQ, it will post the
2034 * buffer and place it on the hbq_buffer_list. The function will return zero if
2035 * it successfully posts the buffer, else it will return an error.
2036 **/
2037 static int
2038 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2039 struct hbq_dmabuf *hbq_buf)
2040 {
2041 struct lpfc_hbq_entry *hbqe;
2042 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2043 
2044 lockdep_assert_held(&phba->hbalock);
2045 /* Get next HBQ entry slot to use */
2046 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2047 if (hbqe) {
2048 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2049 
2050 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2051 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2052 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2053 hbqe->bde.tus.f.bdeFlags = 0;
2054 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2055 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2056 /* Sync SLIM */
2057 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2058 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2059 /* flush */
2060 readl(phba->hbq_put + hbqno);
2061 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2062 return 0;
2063 } else
2064 return -ENOMEM;
2065 }
2066 
2067 /**
2068 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2069 * @phba: Pointer to HBA context object.
2070 * @hbqno: HBQ number.
2071 * @hbq_buf: Pointer to HBQ buffer.
2072 *
2073 * This function is called with the hbalock held to post an RQE to the SLI4
2074 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2075 * the hbq_buffer_list and return zero, otherwise it will return an error.
2076 **/
2077 static int
2078 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2079 struct hbq_dmabuf *hbq_buf)
2080 {
2081 int rc;
2082 struct lpfc_rqe hrqe;
2083 struct lpfc_rqe drqe;
2084 struct lpfc_queue *hrq;
2085 struct lpfc_queue *drq;
2086 
2087 if (hbqno != LPFC_ELS_HBQ)
2088 return 1;
2089 hrq = phba->sli4_hba.hdr_rq;
2090 drq = phba->sli4_hba.dat_rq;
2091 
2092 lockdep_assert_held(&phba->hbalock);
2093 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2094 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2095 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2096 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2097 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2098 if (rc < 0)
2099 return rc;
2100 hbq_buf->tag = (rc | (hbqno << 16));
2101 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2102 return 0;
2103 }
2104 
2105 /* HBQ for ELS and CT traffic.
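 * Only a single HBQ is defined in this driver: lpfc_els_hbq below sizes it
 * at 256 entries, seeds it with 40 buffers and replenishes it 40 at a time.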
*/ 2106 static struct lpfc_hbq_init lpfc_els_hbq = { 2107 .rn = 1, 2108 .entry_count = 256, 2109 .mask_count = 0, 2110 .profile = 0, 2111 .ring_mask = (1 << LPFC_ELS_RING), 2112 .buffer_count = 0, 2113 .init_count = 40, 2114 .add_count = 40, 2115 }; 2116 2117 /* Array of HBQs */ 2118 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 2119 &lpfc_els_hbq, 2120 }; 2121 2122 /** 2123 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 2124 * @phba: Pointer to HBA context object. 2125 * @hbqno: HBQ number. 2126 * @count: Number of HBQ buffers to be posted. 2127 * 2128 * This function is called with no lock held to post more hbq buffers to the 2129 * given HBQ. The function returns the number of HBQ buffers successfully 2130 * posted. 2131 **/ 2132 static int 2133 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 2134 { 2135 uint32_t i, posted = 0; 2136 unsigned long flags; 2137 struct hbq_dmabuf *hbq_buffer; 2138 LIST_HEAD(hbq_buf_list); 2139 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 2140 return 0; 2141 2142 if ((phba->hbqs[hbqno].buffer_count + count) > 2143 lpfc_hbq_defs[hbqno]->entry_count) 2144 count = lpfc_hbq_defs[hbqno]->entry_count - 2145 phba->hbqs[hbqno].buffer_count; 2146 if (!count) 2147 return 0; 2148 /* Allocate HBQ entries */ 2149 for (i = 0; i < count; i++) { 2150 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 2151 if (!hbq_buffer) 2152 break; 2153 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 2154 } 2155 /* Check whether HBQ is still in use */ 2156 spin_lock_irqsave(&phba->hbalock, flags); 2157 if (!phba->hbq_in_use) 2158 goto err; 2159 while (!list_empty(&hbq_buf_list)) { 2160 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2161 dbuf.list); 2162 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 2163 (hbqno << 16)); 2164 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 2165 phba->hbqs[hbqno].buffer_count++; 2166 posted++; 2167 } else 2168 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2169 } 2170 spin_unlock_irqrestore(&phba->hbalock, flags); 2171 return posted; 2172 err: 2173 spin_unlock_irqrestore(&phba->hbalock, flags); 2174 while (!list_empty(&hbq_buf_list)) { 2175 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 2176 dbuf.list); 2177 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2178 } 2179 return 0; 2180 } 2181 2182 /** 2183 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 2184 * @phba: Pointer to HBA context object. 2185 * @qno: HBQ number. 2186 * 2187 * This function posts more buffers to the HBQ. This function 2188 * is called with no lock held. The function returns the number of HBQ entries 2189 * successfully allocated. 2190 **/ 2191 int 2192 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 2193 { 2194 if (phba->sli_rev == LPFC_SLI_REV4) 2195 return 0; 2196 else 2197 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2198 lpfc_hbq_defs[qno]->add_count); 2199 } 2200 2201 /** 2202 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2203 * @phba: Pointer to HBA context object. 2204 * @qno: HBQ queue number. 2205 * 2206 * This function is called from SLI initialization code path with 2207 * no lock held to post initial HBQ buffers to firmware. The 2208 * function returns the number of HBQ entries successfully allocated. 
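 * On SLI4 ports the full entry_count is posted (the buffers land on the
 * header/data RQ pair), while SLI3 ports start with the smaller init_count
 * and are topped up later through lpfc_sli_hbqbuf_add_hbqs().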
2209 **/ 2210 static int 2211 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2212 { 2213 if (phba->sli_rev == LPFC_SLI_REV4) 2214 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2215 lpfc_hbq_defs[qno]->entry_count); 2216 else 2217 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2218 lpfc_hbq_defs[qno]->init_count); 2219 } 2220 2221 /** 2222 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2223 * @phba: Pointer to HBA context object. 2224 * @hbqno: HBQ number. 2225 * 2226 * This function removes the first hbq buffer on an hbq list and returns a 2227 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2228 **/ 2229 static struct hbq_dmabuf * 2230 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2231 { 2232 struct lpfc_dmabuf *d_buf; 2233 2234 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2235 if (!d_buf) 2236 return NULL; 2237 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2238 } 2239 2240 /** 2241 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2242 * @phba: Pointer to HBA context object. 2243 * @hbqno: HBQ number. 2244 * 2245 * This function removes the first RQ buffer on an RQ buffer list and returns a 2246 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2247 **/ 2248 static struct rqb_dmabuf * 2249 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2250 { 2251 struct lpfc_dmabuf *h_buf; 2252 struct lpfc_rqb *rqbp; 2253 2254 rqbp = hrq->rqbp; 2255 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2256 struct lpfc_dmabuf, list); 2257 if (!h_buf) 2258 return NULL; 2259 rqbp->buffer_count--; 2260 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2261 } 2262 2263 /** 2264 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2265 * @phba: Pointer to HBA context object. 2266 * @tag: Tag of the hbq buffer. 2267 * 2268 * This function searches for the hbq buffer associated with the given tag in 2269 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2270 * otherwise it returns NULL. 2271 **/ 2272 static struct hbq_dmabuf * 2273 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2274 { 2275 struct lpfc_dmabuf *d_buf; 2276 struct hbq_dmabuf *hbq_buf; 2277 uint32_t hbqno; 2278 2279 hbqno = tag >> 16; 2280 if (hbqno >= LPFC_MAX_HBQS) 2281 return NULL; 2282 2283 spin_lock_irq(&phba->hbalock); 2284 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2285 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2286 if (hbq_buf->tag == tag) { 2287 spin_unlock_irq(&phba->hbalock); 2288 return hbq_buf; 2289 } 2290 } 2291 spin_unlock_irq(&phba->hbalock); 2292 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2293 "1803 Bad hbq tag. Data: x%x x%x\n", 2294 tag, phba->hbqs[tag >> 16].buffer_count); 2295 return NULL; 2296 } 2297 2298 /** 2299 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2300 * @phba: Pointer to HBA context object. 2301 * @hbq_buffer: Pointer to HBQ buffer. 2302 * 2303 * This function is called with hbalock. This function gives back 2304 * the hbq buffer to firmware. If the HBQ does not have space to 2305 * post the buffer, it will free the buffer. 
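 * The owning HBQ is recovered from the buffer tag, which carries the HBQ
 * number in its upper 16 bits (hbqno = tag >> 16).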
2306 **/ 2307 void 2308 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2309 { 2310 uint32_t hbqno; 2311 2312 if (hbq_buffer) { 2313 hbqno = hbq_buffer->tag >> 16; 2314 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2315 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2316 } 2317 } 2318 2319 /** 2320 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2321 * @mbxCommand: mailbox command code. 2322 * 2323 * This function is called by the mailbox event handler function to verify 2324 * that the completed mailbox command is a legitimate mailbox command. If the 2325 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2326 * and the mailbox event handler will take the HBA offline. 2327 **/ 2328 static int 2329 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2330 { 2331 uint8_t ret; 2332 2333 switch (mbxCommand) { 2334 case MBX_LOAD_SM: 2335 case MBX_READ_NV: 2336 case MBX_WRITE_NV: 2337 case MBX_WRITE_VPARMS: 2338 case MBX_RUN_BIU_DIAG: 2339 case MBX_INIT_LINK: 2340 case MBX_DOWN_LINK: 2341 case MBX_CONFIG_LINK: 2342 case MBX_CONFIG_RING: 2343 case MBX_RESET_RING: 2344 case MBX_READ_CONFIG: 2345 case MBX_READ_RCONFIG: 2346 case MBX_READ_SPARM: 2347 case MBX_READ_STATUS: 2348 case MBX_READ_RPI: 2349 case MBX_READ_XRI: 2350 case MBX_READ_REV: 2351 case MBX_READ_LNK_STAT: 2352 case MBX_REG_LOGIN: 2353 case MBX_UNREG_LOGIN: 2354 case MBX_CLEAR_LA: 2355 case MBX_DUMP_MEMORY: 2356 case MBX_DUMP_CONTEXT: 2357 case MBX_RUN_DIAGS: 2358 case MBX_RESTART: 2359 case MBX_UPDATE_CFG: 2360 case MBX_DOWN_LOAD: 2361 case MBX_DEL_LD_ENTRY: 2362 case MBX_RUN_PROGRAM: 2363 case MBX_SET_MASK: 2364 case MBX_SET_VARIABLE: 2365 case MBX_UNREG_D_ID: 2366 case MBX_KILL_BOARD: 2367 case MBX_CONFIG_FARP: 2368 case MBX_BEACON: 2369 case MBX_LOAD_AREA: 2370 case MBX_RUN_BIU_DIAG64: 2371 case MBX_CONFIG_PORT: 2372 case MBX_READ_SPARM64: 2373 case MBX_READ_RPI64: 2374 case MBX_REG_LOGIN64: 2375 case MBX_READ_TOPOLOGY: 2376 case MBX_WRITE_WWN: 2377 case MBX_SET_DEBUG: 2378 case MBX_LOAD_EXP_ROM: 2379 case MBX_ASYNCEVT_ENABLE: 2380 case MBX_REG_VPI: 2381 case MBX_UNREG_VPI: 2382 case MBX_HEARTBEAT: 2383 case MBX_PORT_CAPABILITIES: 2384 case MBX_PORT_IOV_CONTROL: 2385 case MBX_SLI4_CONFIG: 2386 case MBX_SLI4_REQ_FTRS: 2387 case MBX_REG_FCFI: 2388 case MBX_UNREG_FCFI: 2389 case MBX_REG_VFI: 2390 case MBX_UNREG_VFI: 2391 case MBX_INIT_VPI: 2392 case MBX_INIT_VFI: 2393 case MBX_RESUME_RPI: 2394 case MBX_READ_EVENT_LOG_STATUS: 2395 case MBX_READ_EVENT_LOG: 2396 case MBX_SECURITY_MGMT: 2397 case MBX_AUTH_PORT: 2398 case MBX_ACCESS_VDATA: 2399 ret = mbxCommand; 2400 break; 2401 default: 2402 ret = MBX_SHUTDOWN; 2403 break; 2404 } 2405 return ret; 2406 } 2407 2408 /** 2409 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2410 * @phba: Pointer to HBA context object. 2411 * @pmboxq: Pointer to mailbox command. 2412 * 2413 * This is completion handler function for mailbox commands issued from 2414 * lpfc_sli_issue_mbox_wait function. This function is called by the 2415 * mailbox event handler function with no lock held. This function 2416 * will wake up thread waiting on the wait queue pointed by context1 2417 * of the mailbox. 2418 **/ 2419 void 2420 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2421 { 2422 unsigned long drvr_flag; 2423 struct completion *pmbox_done; 2424 2425 /* 2426 * If pmbox_done is empty, the driver thread gave up waiting and 2427 * continued running. 
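 * The completion pointer in context3 is therefore sampled under the hbalock
 * and complete() is only called if it is still set.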
2428 */ 2429 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2430 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2431 pmbox_done = (struct completion *)pmboxq->context3; 2432 if (pmbox_done) 2433 complete(pmbox_done); 2434 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2435 return; 2436 } 2437 2438 2439 /** 2440 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2441 * @phba: Pointer to HBA context object. 2442 * @pmb: Pointer to mailbox object. 2443 * 2444 * This function is the default mailbox completion handler. It 2445 * frees the memory resources associated with the completed mailbox 2446 * command. If the completed command is a REG_LOGIN mailbox command, 2447 * this function will issue a UREG_LOGIN to re-claim the RPI. 2448 **/ 2449 void 2450 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2451 { 2452 struct lpfc_vport *vport = pmb->vport; 2453 struct lpfc_dmabuf *mp; 2454 struct lpfc_nodelist *ndlp; 2455 struct Scsi_Host *shost; 2456 uint16_t rpi, vpi; 2457 int rc; 2458 2459 mp = (struct lpfc_dmabuf *) (pmb->context1); 2460 2461 if (mp) { 2462 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2463 kfree(mp); 2464 } 2465 2466 /* 2467 * If a REG_LOGIN succeeded after node is destroyed or node 2468 * is in re-discovery driver need to cleanup the RPI. 2469 */ 2470 if (!(phba->pport->load_flag & FC_UNLOADING) && 2471 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2472 !pmb->u.mb.mbxStatus) { 2473 rpi = pmb->u.mb.un.varWords[0]; 2474 vpi = pmb->u.mb.un.varRegLogin.vpi; 2475 lpfc_unreg_login(phba, vpi, rpi, pmb); 2476 pmb->vport = vport; 2477 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2478 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2479 if (rc != MBX_NOT_FINISHED) 2480 return; 2481 } 2482 2483 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2484 !(phba->pport->load_flag & FC_UNLOADING) && 2485 !pmb->u.mb.mbxStatus) { 2486 shost = lpfc_shost_from_vport(vport); 2487 spin_lock_irq(shost->host_lock); 2488 vport->vpi_state |= LPFC_VPI_REGISTERED; 2489 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2490 spin_unlock_irq(shost->host_lock); 2491 } 2492 2493 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2494 ndlp = (struct lpfc_nodelist *)pmb->context2; 2495 lpfc_nlp_put(ndlp); 2496 pmb->context2 = NULL; 2497 } 2498 2499 /* Check security permission status on INIT_LINK mailbox command */ 2500 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2501 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2502 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2503 "2860 SLI authentication is required " 2504 "for INIT_LINK but has not done yet\n"); 2505 2506 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2507 lpfc_sli4_mbox_cmd_free(phba, pmb); 2508 else 2509 mempool_free(pmb, phba->mbox_mem_pool); 2510 } 2511 /** 2512 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2513 * @phba: Pointer to HBA context object. 2514 * @pmb: Pointer to mailbox object. 2515 * 2516 * This function is the unreg rpi mailbox completion handler. It 2517 * frees the memory resources associated with the completed mailbox 2518 * command. An additional refrenece is put on the ndlp to prevent 2519 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2520 * the unreg mailbox command completes, this routine puts the 2521 * reference back. 
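 * The reference is dropped only when the completed command is UNREG_LOGIN
 * on an SLI4 port with interface type 2 or later.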
2522 * 2523 **/ 2524 void 2525 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2526 { 2527 struct lpfc_vport *vport = pmb->vport; 2528 struct lpfc_nodelist *ndlp; 2529 2530 ndlp = pmb->context1; 2531 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2532 if (phba->sli_rev == LPFC_SLI_REV4 && 2533 (bf_get(lpfc_sli_intf_if_type, 2534 &phba->sli4_hba.sli_intf) >= 2535 LPFC_SLI_INTF_IF_TYPE_2)) { 2536 if (ndlp) { 2537 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 2538 "0010 UNREG_LOGIN vpi:%x " 2539 "rpi:%x DID:%x map:%x %p\n", 2540 vport->vpi, ndlp->nlp_rpi, 2541 ndlp->nlp_DID, 2542 ndlp->nlp_usg_map, ndlp); 2543 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2544 lpfc_nlp_put(ndlp); 2545 } 2546 } 2547 } 2548 2549 mempool_free(pmb, phba->mbox_mem_pool); 2550 } 2551 2552 /** 2553 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2554 * @phba: Pointer to HBA context object. 2555 * 2556 * This function is called with no lock held. This function processes all 2557 * the completed mailbox commands and gives it to upper layers. The interrupt 2558 * service routine processes mailbox completion interrupt and adds completed 2559 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2560 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2561 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2562 * function returns the mailbox commands to the upper layer by calling the 2563 * completion handler function of each mailbox. 2564 **/ 2565 int 2566 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2567 { 2568 MAILBOX_t *pmbox; 2569 LPFC_MBOXQ_t *pmb; 2570 int rc; 2571 LIST_HEAD(cmplq); 2572 2573 phba->sli.slistat.mbox_event++; 2574 2575 /* Get all completed mailboxe buffers into the cmplq */ 2576 spin_lock_irq(&phba->hbalock); 2577 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2578 spin_unlock_irq(&phba->hbalock); 2579 2580 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2581 do { 2582 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2583 if (pmb == NULL) 2584 break; 2585 2586 pmbox = &pmb->u.mb; 2587 2588 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2589 if (pmb->vport) { 2590 lpfc_debugfs_disc_trc(pmb->vport, 2591 LPFC_DISC_TRC_MBOX_VPORT, 2592 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2593 (uint32_t)pmbox->mbxCommand, 2594 pmbox->un.varWords[0], 2595 pmbox->un.varWords[1]); 2596 } 2597 else { 2598 lpfc_debugfs_disc_trc(phba->pport, 2599 LPFC_DISC_TRC_MBOX, 2600 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2601 (uint32_t)pmbox->mbxCommand, 2602 pmbox->un.varWords[0], 2603 pmbox->un.varWords[1]); 2604 } 2605 } 2606 2607 /* 2608 * It is a fatal error if unknown mbox command completion. 2609 */ 2610 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2611 MBX_SHUTDOWN) { 2612 /* Unknown mailbox command compl */ 2613 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2614 "(%d):0323 Unknown Mailbox command " 2615 "x%x (x%x/x%x) Cmpl\n", 2616 pmb->vport ? 
pmb->vport->vpi : 0, 2617 pmbox->mbxCommand, 2618 lpfc_sli_config_mbox_subsys_get(phba, 2619 pmb), 2620 lpfc_sli_config_mbox_opcode_get(phba, 2621 pmb)); 2622 phba->link_state = LPFC_HBA_ERROR; 2623 phba->work_hs = HS_FFER3; 2624 lpfc_handle_eratt(phba); 2625 continue; 2626 } 2627 2628 if (pmbox->mbxStatus) { 2629 phba->sli.slistat.mbox_stat_err++; 2630 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2631 /* Mbox cmd cmpl error - RETRYing */ 2632 lpfc_printf_log(phba, KERN_INFO, 2633 LOG_MBOX | LOG_SLI, 2634 "(%d):0305 Mbox cmd cmpl " 2635 "error - RETRYing Data: x%x " 2636 "(x%x/x%x) x%x x%x x%x\n", 2637 pmb->vport ? pmb->vport->vpi : 0, 2638 pmbox->mbxCommand, 2639 lpfc_sli_config_mbox_subsys_get(phba, 2640 pmb), 2641 lpfc_sli_config_mbox_opcode_get(phba, 2642 pmb), 2643 pmbox->mbxStatus, 2644 pmbox->un.varWords[0], 2645 pmb->vport->port_state); 2646 pmbox->mbxStatus = 0; 2647 pmbox->mbxOwner = OWN_HOST; 2648 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2649 if (rc != MBX_NOT_FINISHED) 2650 continue; 2651 } 2652 } 2653 2654 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2655 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2656 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2657 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2658 "x%x x%x x%x\n", 2659 pmb->vport ? pmb->vport->vpi : 0, 2660 pmbox->mbxCommand, 2661 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2662 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2663 pmb->mbox_cmpl, 2664 *((uint32_t *) pmbox), 2665 pmbox->un.varWords[0], 2666 pmbox->un.varWords[1], 2667 pmbox->un.varWords[2], 2668 pmbox->un.varWords[3], 2669 pmbox->un.varWords[4], 2670 pmbox->un.varWords[5], 2671 pmbox->un.varWords[6], 2672 pmbox->un.varWords[7], 2673 pmbox->un.varWords[8], 2674 pmbox->un.varWords[9], 2675 pmbox->un.varWords[10]); 2676 2677 if (pmb->mbox_cmpl) 2678 pmb->mbox_cmpl(phba,pmb); 2679 } while (1); 2680 return 0; 2681 } 2682 2683 /** 2684 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2685 * @phba: Pointer to HBA context object. 2686 * @pring: Pointer to driver SLI ring object. 2687 * @tag: buffer tag. 2688 * 2689 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2690 * is set in the tag the buffer is posted for a particular exchange, 2691 * the function will return the buffer without replacing the buffer. 2692 * If the buffer is for unsolicited ELS or CT traffic, this function 2693 * returns the buffer and also posts another buffer to the firmware. 2694 **/ 2695 static struct lpfc_dmabuf * 2696 lpfc_sli_get_buff(struct lpfc_hba *phba, 2697 struct lpfc_sli_ring *pring, 2698 uint32_t tag) 2699 { 2700 struct hbq_dmabuf *hbq_entry; 2701 2702 if (tag & QUE_BUFTAG_BIT) 2703 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2704 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2705 if (!hbq_entry) 2706 return NULL; 2707 return &hbq_entry->dbuf; 2708 } 2709 2710 /** 2711 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2712 * @phba: Pointer to HBA context object. 2713 * @pring: Pointer to driver SLI ring object. 2714 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2715 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2716 * @fch_type: the type for the first frame of the sequence. 2717 * 2718 * This function is called with no lock held. This function uses the r_ctl and 2719 * type of the received sequence to find the correct callback function to call 2720 * to process the sequence. 
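 * Sequences of FC-4 type FC_TYPE_NVME are handed straight to
 * lpfc_nvmet_unsol_ls_event(); everything else is matched against the ring's
 * prt[] rctl/type table (or the single profiled entry, if one is configured).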
2721 **/ 2722 static int 2723 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2724 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2725 uint32_t fch_type) 2726 { 2727 int i; 2728 2729 switch (fch_type) { 2730 case FC_TYPE_NVME: 2731 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2732 return 1; 2733 default: 2734 break; 2735 } 2736 2737 /* unSolicited Responses */ 2738 if (pring->prt[0].profile) { 2739 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2740 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2741 saveq); 2742 return 1; 2743 } 2744 /* We must search, based on rctl / type 2745 for the right routine */ 2746 for (i = 0; i < pring->num_mask; i++) { 2747 if ((pring->prt[i].rctl == fch_r_ctl) && 2748 (pring->prt[i].type == fch_type)) { 2749 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2750 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2751 (phba, pring, saveq); 2752 return 1; 2753 } 2754 } 2755 return 0; 2756 } 2757 2758 /** 2759 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2760 * @phba: Pointer to HBA context object. 2761 * @pring: Pointer to driver SLI ring object. 2762 * @saveq: Pointer to the unsolicited iocb. 2763 * 2764 * This function is called with no lock held by the ring event handler 2765 * when there is an unsolicited iocb posted to the response ring by the 2766 * firmware. This function gets the buffer associated with the iocbs 2767 * and calls the event handler for the ring. This function handles both 2768 * qring buffers and hbq buffers. 2769 * When the function returns 1 the caller can free the iocb object otherwise 2770 * upper layer functions will free the iocb objects. 2771 **/ 2772 static int 2773 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2774 struct lpfc_iocbq *saveq) 2775 { 2776 IOCB_t * irsp; 2777 WORD5 * w5p; 2778 uint32_t Rctl, Type; 2779 struct lpfc_iocbq *iocbq; 2780 struct lpfc_dmabuf *dmzbuf; 2781 2782 irsp = &(saveq->iocb); 2783 2784 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2785 if (pring->lpfc_sli_rcv_async_status) 2786 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2787 else 2788 lpfc_printf_log(phba, 2789 KERN_WARNING, 2790 LOG_SLI, 2791 "0316 Ring %d handler: unexpected " 2792 "ASYNC_STATUS iocb received evt_code " 2793 "0x%x\n", 2794 pring->ringno, 2795 irsp->un.asyncstat.evt_code); 2796 return 1; 2797 } 2798 2799 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2800 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2801 if (irsp->ulpBdeCount > 0) { 2802 dmzbuf = lpfc_sli_get_buff(phba, pring, 2803 irsp->un.ulpWord[3]); 2804 lpfc_in_buf_free(phba, dmzbuf); 2805 } 2806 2807 if (irsp->ulpBdeCount > 1) { 2808 dmzbuf = lpfc_sli_get_buff(phba, pring, 2809 irsp->unsli3.sli3Words[3]); 2810 lpfc_in_buf_free(phba, dmzbuf); 2811 } 2812 2813 if (irsp->ulpBdeCount > 2) { 2814 dmzbuf = lpfc_sli_get_buff(phba, pring, 2815 irsp->unsli3.sli3Words[7]); 2816 lpfc_in_buf_free(phba, dmzbuf); 2817 } 2818 2819 return 1; 2820 } 2821 2822 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2823 if (irsp->ulpBdeCount != 0) { 2824 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2825 irsp->un.ulpWord[3]); 2826 if (!saveq->context2) 2827 lpfc_printf_log(phba, 2828 KERN_ERR, 2829 LOG_SLI, 2830 "0341 Ring %d Cannot find buffer for " 2831 "an unsolicited iocb. 
tag 0x%x\n", 2832 pring->ringno, 2833 irsp->un.ulpWord[3]); 2834 } 2835 if (irsp->ulpBdeCount == 2) { 2836 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2837 irsp->unsli3.sli3Words[7]); 2838 if (!saveq->context3) 2839 lpfc_printf_log(phba, 2840 KERN_ERR, 2841 LOG_SLI, 2842 "0342 Ring %d Cannot find buffer for an" 2843 " unsolicited iocb. tag 0x%x\n", 2844 pring->ringno, 2845 irsp->unsli3.sli3Words[7]); 2846 } 2847 list_for_each_entry(iocbq, &saveq->list, list) { 2848 irsp = &(iocbq->iocb); 2849 if (irsp->ulpBdeCount != 0) { 2850 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2851 irsp->un.ulpWord[3]); 2852 if (!iocbq->context2) 2853 lpfc_printf_log(phba, 2854 KERN_ERR, 2855 LOG_SLI, 2856 "0343 Ring %d Cannot find " 2857 "buffer for an unsolicited iocb" 2858 ". tag 0x%x\n", pring->ringno, 2859 irsp->un.ulpWord[3]); 2860 } 2861 if (irsp->ulpBdeCount == 2) { 2862 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2863 irsp->unsli3.sli3Words[7]); 2864 if (!iocbq->context3) 2865 lpfc_printf_log(phba, 2866 KERN_ERR, 2867 LOG_SLI, 2868 "0344 Ring %d Cannot find " 2869 "buffer for an unsolicited " 2870 "iocb. tag 0x%x\n", 2871 pring->ringno, 2872 irsp->unsli3.sli3Words[7]); 2873 } 2874 } 2875 } 2876 if (irsp->ulpBdeCount != 0 && 2877 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2878 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2879 int found = 0; 2880 2881 /* search continue save q for same XRI */ 2882 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2883 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2884 saveq->iocb.unsli3.rcvsli3.ox_id) { 2885 list_add_tail(&saveq->list, &iocbq->list); 2886 found = 1; 2887 break; 2888 } 2889 } 2890 if (!found) 2891 list_add_tail(&saveq->clist, 2892 &pring->iocb_continue_saveq); 2893 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2894 list_del_init(&iocbq->clist); 2895 saveq = iocbq; 2896 irsp = &(saveq->iocb); 2897 } else 2898 return 0; 2899 } 2900 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2901 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2902 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2903 Rctl = FC_RCTL_ELS_REQ; 2904 Type = FC_TYPE_ELS; 2905 } else { 2906 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2907 Rctl = w5p->hcsw.Rctl; 2908 Type = w5p->hcsw.Type; 2909 2910 /* Firmware Workaround */ 2911 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2912 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2913 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2914 Rctl = FC_RCTL_ELS_REQ; 2915 Type = FC_TYPE_ELS; 2916 w5p->hcsw.Rctl = Rctl; 2917 w5p->hcsw.Type = Type; 2918 } 2919 } 2920 2921 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2922 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2923 "0313 Ring %d handler: unexpected Rctl x%x " 2924 "Type x%x received\n", 2925 pring->ringno, Rctl, Type); 2926 2927 return 1; 2928 } 2929 2930 /** 2931 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2932 * @phba: Pointer to HBA context object. 2933 * @pring: Pointer to driver SLI ring object. 2934 * @prspiocb: Pointer to response iocb object. 2935 * 2936 * This function looks up the iocb_lookup table to get the command iocb 2937 * corresponding to the given response iocb using the iotag of the 2938 * response iocb. This function is called with the hbalock held 2939 * for sli3 devices or the ring_lock for sli4 devices. 2940 * This function returns the command iocb object if it finds the command 2941 * iocb else returns NULL. 
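 * The lookup is a direct index into phba->sli.iocbq_lookup[] by the response
 * iotag; the command iocb is returned only if it is still marked
 * LPFC_IO_ON_TXCMPLQ, in which case it is removed from the txcmplq first.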
2942 **/ 2943 static struct lpfc_iocbq * 2944 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2945 struct lpfc_sli_ring *pring, 2946 struct lpfc_iocbq *prspiocb) 2947 { 2948 struct lpfc_iocbq *cmd_iocb = NULL; 2949 uint16_t iotag; 2950 lockdep_assert_held(&phba->hbalock); 2951 2952 iotag = prspiocb->iocb.ulpIoTag; 2953 2954 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2955 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2956 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2957 /* remove from txcmpl queue list */ 2958 list_del_init(&cmd_iocb->list); 2959 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2960 return cmd_iocb; 2961 } 2962 } 2963 2964 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2965 "0317 iotag x%x is out of " 2966 "range: max iotag x%x wd0 x%x\n", 2967 iotag, phba->sli.last_iotag, 2968 *(((uint32_t *) &prspiocb->iocb) + 7)); 2969 return NULL; 2970 } 2971 2972 /** 2973 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2974 * @phba: Pointer to HBA context object. 2975 * @pring: Pointer to driver SLI ring object. 2976 * @iotag: IOCB tag. 2977 * 2978 * This function looks up the iocb_lookup table to get the command iocb 2979 * corresponding to the given iotag. This function is called with the 2980 * hbalock held. 2981 * This function returns the command iocb object if it finds the command 2982 * iocb else returns NULL. 2983 **/ 2984 static struct lpfc_iocbq * 2985 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2986 struct lpfc_sli_ring *pring, uint16_t iotag) 2987 { 2988 struct lpfc_iocbq *cmd_iocb = NULL; 2989 2990 lockdep_assert_held(&phba->hbalock); 2991 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2992 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2993 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2994 /* remove from txcmpl queue list */ 2995 list_del_init(&cmd_iocb->list); 2996 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2997 return cmd_iocb; 2998 } 2999 } 3000 3001 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3002 "0372 iotag x%x lookup error: max iotag (x%x) " 3003 "iocb_flag x%x\n", 3004 iotag, phba->sli.last_iotag, 3005 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 3006 return NULL; 3007 } 3008 3009 /** 3010 * lpfc_sli_process_sol_iocb - process solicited iocb completion 3011 * @phba: Pointer to HBA context object. 3012 * @pring: Pointer to driver SLI ring object. 3013 * @saveq: Pointer to the response iocb to be processed. 3014 * 3015 * This function is called by the ring event handler for non-fcp 3016 * rings when there is a new response iocb in the response ring. 3017 * The caller is not required to hold any locks. This function 3018 * gets the command iocb associated with the response iocb and 3019 * calls the completion handler for the command iocb. If there 3020 * is no completion handler, the function will free the resources 3021 * associated with command iocb. If the response iocb is for 3022 * an already aborted command iocb, the status of the completion 3023 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 3024 * This function always returns 1. 
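 * The command lookup below takes the ring_lock on SLI4 ports and the hbalock
 * on SLI3 ports; aborted ELS commands are additionally flagged
 * LPFC_DELAY_MEM_FREE so their buffers are not freed while the firmware may
 * still be DMAing the payload.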
3025 **/ 3026 static int 3027 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3028 struct lpfc_iocbq *saveq) 3029 { 3030 struct lpfc_iocbq *cmdiocbp; 3031 int rc = 1; 3032 unsigned long iflag; 3033 3034 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 3035 if (phba->sli_rev == LPFC_SLI_REV4) 3036 spin_lock_irqsave(&pring->ring_lock, iflag); 3037 else 3038 spin_lock_irqsave(&phba->hbalock, iflag); 3039 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 3040 if (phba->sli_rev == LPFC_SLI_REV4) 3041 spin_unlock_irqrestore(&pring->ring_lock, iflag); 3042 else 3043 spin_unlock_irqrestore(&phba->hbalock, iflag); 3044 3045 if (cmdiocbp) { 3046 if (cmdiocbp->iocb_cmpl) { 3047 /* 3048 * If an ELS command failed send an event to mgmt 3049 * application. 3050 */ 3051 if (saveq->iocb.ulpStatus && 3052 (pring->ringno == LPFC_ELS_RING) && 3053 (cmdiocbp->iocb.ulpCommand == 3054 CMD_ELS_REQUEST64_CR)) 3055 lpfc_send_els_failure_event(phba, 3056 cmdiocbp, saveq); 3057 3058 /* 3059 * Post all ELS completions to the worker thread. 3060 * All other are passed to the completion callback. 3061 */ 3062 if (pring->ringno == LPFC_ELS_RING) { 3063 if ((phba->sli_rev < LPFC_SLI_REV4) && 3064 (cmdiocbp->iocb_flag & 3065 LPFC_DRIVER_ABORTED)) { 3066 spin_lock_irqsave(&phba->hbalock, 3067 iflag); 3068 cmdiocbp->iocb_flag &= 3069 ~LPFC_DRIVER_ABORTED; 3070 spin_unlock_irqrestore(&phba->hbalock, 3071 iflag); 3072 saveq->iocb.ulpStatus = 3073 IOSTAT_LOCAL_REJECT; 3074 saveq->iocb.un.ulpWord[4] = 3075 IOERR_SLI_ABORTED; 3076 3077 /* Firmware could still be in progress 3078 * of DMAing payload, so don't free data 3079 * buffer till after a hbeat. 3080 */ 3081 spin_lock_irqsave(&phba->hbalock, 3082 iflag); 3083 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 3084 spin_unlock_irqrestore(&phba->hbalock, 3085 iflag); 3086 } 3087 if (phba->sli_rev == LPFC_SLI_REV4) { 3088 if (saveq->iocb_flag & 3089 LPFC_EXCHANGE_BUSY) { 3090 /* Set cmdiocb flag for the 3091 * exchange busy so sgl (xri) 3092 * will not be released until 3093 * the abort xri is received 3094 * from hba. 3095 */ 3096 spin_lock_irqsave( 3097 &phba->hbalock, iflag); 3098 cmdiocbp->iocb_flag |= 3099 LPFC_EXCHANGE_BUSY; 3100 spin_unlock_irqrestore( 3101 &phba->hbalock, iflag); 3102 } 3103 if (cmdiocbp->iocb_flag & 3104 LPFC_DRIVER_ABORTED) { 3105 /* 3106 * Clear LPFC_DRIVER_ABORTED 3107 * bit in case it was driver 3108 * initiated abort. 3109 */ 3110 spin_lock_irqsave( 3111 &phba->hbalock, iflag); 3112 cmdiocbp->iocb_flag &= 3113 ~LPFC_DRIVER_ABORTED; 3114 spin_unlock_irqrestore( 3115 &phba->hbalock, iflag); 3116 cmdiocbp->iocb.ulpStatus = 3117 IOSTAT_LOCAL_REJECT; 3118 cmdiocbp->iocb.un.ulpWord[4] = 3119 IOERR_ABORT_REQUESTED; 3120 /* 3121 * For SLI4, irsiocb contains 3122 * NO_XRI in sli_xritag, it 3123 * shall not affect releasing 3124 * sgl (xri) process. 3125 */ 3126 saveq->iocb.ulpStatus = 3127 IOSTAT_LOCAL_REJECT; 3128 saveq->iocb.un.ulpWord[4] = 3129 IOERR_SLI_ABORTED; 3130 spin_lock_irqsave( 3131 &phba->hbalock, iflag); 3132 saveq->iocb_flag |= 3133 LPFC_DELAY_MEM_FREE; 3134 spin_unlock_irqrestore( 3135 &phba->hbalock, iflag); 3136 } 3137 } 3138 } 3139 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 3140 } else 3141 lpfc_sli_release_iocbq(phba, cmdiocbp); 3142 } else { 3143 /* 3144 * Unknown initiating command based on the response iotag. 3145 * This could be the case on the ELS ring because of 3146 * lpfc_els_abort(). 
3147 */ 3148 if (pring->ringno != LPFC_ELS_RING) { 3149 /* 3150 * Ring <ringno> handler: unexpected completion IoTag 3151 * <IoTag> 3152 */ 3153 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3154 "0322 Ring %d handler: " 3155 "unexpected completion IoTag x%x " 3156 "Data: x%x x%x x%x x%x\n", 3157 pring->ringno, 3158 saveq->iocb.ulpIoTag, 3159 saveq->iocb.ulpStatus, 3160 saveq->iocb.un.ulpWord[4], 3161 saveq->iocb.ulpCommand, 3162 saveq->iocb.ulpContext); 3163 } 3164 } 3165 3166 return rc; 3167 } 3168 3169 /** 3170 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 3171 * @phba: Pointer to HBA context object. 3172 * @pring: Pointer to driver SLI ring object. 3173 * 3174 * This function is called from the iocb ring event handlers when 3175 * put pointer is ahead of the get pointer for a ring. This function signal 3176 * an error attention condition to the worker thread and the worker 3177 * thread will transition the HBA to offline state. 3178 **/ 3179 static void 3180 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3181 { 3182 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3183 /* 3184 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3185 * rsp ring <portRspMax> 3186 */ 3187 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3188 "0312 Ring %d handler: portRspPut %d " 3189 "is bigger than rsp ring %d\n", 3190 pring->ringno, le32_to_cpu(pgp->rspPutInx), 3191 pring->sli.sli3.numRiocb); 3192 3193 phba->link_state = LPFC_HBA_ERROR; 3194 3195 /* 3196 * All error attention handlers are posted to 3197 * worker thread 3198 */ 3199 phba->work_ha |= HA_ERATT; 3200 phba->work_hs = HS_FFER3; 3201 3202 lpfc_worker_wake_up(phba); 3203 3204 return; 3205 } 3206 3207 /** 3208 * lpfc_poll_eratt - Error attention polling timer timeout handler 3209 * @ptr: Pointer to address of HBA context object. 3210 * 3211 * This function is invoked by the Error Attention polling timer when the 3212 * timer times out. It will check the SLI Error Attention register for 3213 * possible attention events. If so, it will post an Error Attention event 3214 * and wake up worker thread to process it. Otherwise, it will set up the 3215 * Error Attention polling timer for the next poll. 3216 **/ 3217 void lpfc_poll_eratt(struct timer_list *t) 3218 { 3219 struct lpfc_hba *phba; 3220 uint32_t eratt = 0; 3221 uint64_t sli_intr, cnt; 3222 3223 phba = from_timer(phba, t, eratt_poll); 3224 3225 /* Here we will also keep track of interrupts per sec of the hba */ 3226 sli_intr = phba->sli.slistat.sli_intr; 3227 3228 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3229 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3230 sli_intr); 3231 else 3232 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3233 3234 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3235 do_div(cnt, phba->eratt_poll_interval); 3236 phba->sli.slistat.sli_ips = cnt; 3237 3238 phba->sli.slistat.sli_prev_intr = sli_intr; 3239 3240 /* Check chip HA register for error event */ 3241 eratt = lpfc_sli_check_eratt(phba); 3242 3243 if (eratt) 3244 /* Tell the worker thread there is work to do */ 3245 lpfc_worker_wake_up(phba); 3246 else 3247 /* Restart the timer for next eratt poll */ 3248 mod_timer(&phba->eratt_poll, 3249 jiffies + 3250 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3251 return; 3252 } 3253 3254 3255 /** 3256 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3257 * @phba: Pointer to HBA context object. 
3258 * @pring: Pointer to driver SLI ring object. 3259 * @mask: Host attention register mask for this ring. 3260 * 3261 * This function is called from the interrupt context when there is a ring 3262 * event for the fcp ring. The caller does not hold any lock. 3263 * The function processes each response iocb in the response ring until it 3264 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3265 * LE bit set. The function will call the completion handler of the command iocb 3266 * if the response iocb indicates a completion for a command iocb or it is 3267 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3268 * function if this is an unsolicited iocb. 3269 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3270 * to check it explicitly. 3271 */ 3272 int 3273 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3274 struct lpfc_sli_ring *pring, uint32_t mask) 3275 { 3276 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3277 IOCB_t *irsp = NULL; 3278 IOCB_t *entry = NULL; 3279 struct lpfc_iocbq *cmdiocbq = NULL; 3280 struct lpfc_iocbq rspiocbq; 3281 uint32_t status; 3282 uint32_t portRspPut, portRspMax; 3283 int rc = 1; 3284 lpfc_iocb_type type; 3285 unsigned long iflag; 3286 uint32_t rsp_cmpl = 0; 3287 3288 spin_lock_irqsave(&phba->hbalock, iflag); 3289 pring->stats.iocb_event++; 3290 3291 /* 3292 * The next available response entry should never exceed the maximum 3293 * entries. If it does, treat it as an adapter hardware error. 3294 */ 3295 portRspMax = pring->sli.sli3.numRiocb; 3296 portRspPut = le32_to_cpu(pgp->rspPutInx); 3297 if (unlikely(portRspPut >= portRspMax)) { 3298 lpfc_sli_rsp_pointers_error(phba, pring); 3299 spin_unlock_irqrestore(&phba->hbalock, iflag); 3300 return 1; 3301 } 3302 if (phba->fcp_ring_in_use) { 3303 spin_unlock_irqrestore(&phba->hbalock, iflag); 3304 return 1; 3305 } else 3306 phba->fcp_ring_in_use = 1; 3307 3308 rmb(); 3309 while (pring->sli.sli3.rspidx != portRspPut) { 3310 /* 3311 * Fetch an entry off the ring and copy it into a local data 3312 * structure. The copy involves a byte-swap since the 3313 * network byte order and pci byte orders are different. 3314 */ 3315 entry = lpfc_resp_iocb(phba, pring); 3316 phba->last_completion_time = jiffies; 3317 3318 if (++pring->sli.sli3.rspidx >= portRspMax) 3319 pring->sli.sli3.rspidx = 0; 3320 3321 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3322 (uint32_t *) &rspiocbq.iocb, 3323 phba->iocb_rsp_size); 3324 INIT_LIST_HEAD(&(rspiocbq.list)); 3325 irsp = &rspiocbq.iocb; 3326 3327 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3328 pring->stats.iocb_rsp++; 3329 rsp_cmpl++; 3330 3331 if (unlikely(irsp->ulpStatus)) { 3332 /* 3333 * If resource errors reported from HBA, reduce 3334 * queuedepths of the SCSI device. 
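 * (The hbalock is dropped around the call to lpfc_rampdown_queue_depth()
 * below and re-taken afterwards.)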
3335 */ 3336 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3337 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3338 IOERR_NO_RESOURCES)) { 3339 spin_unlock_irqrestore(&phba->hbalock, iflag); 3340 phba->lpfc_rampdown_queue_depth(phba); 3341 spin_lock_irqsave(&phba->hbalock, iflag); 3342 } 3343 3344 /* Rsp ring <ringno> error: IOCB */ 3345 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3346 "0336 Rsp Ring %d error: IOCB Data: " 3347 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3348 pring->ringno, 3349 irsp->un.ulpWord[0], 3350 irsp->un.ulpWord[1], 3351 irsp->un.ulpWord[2], 3352 irsp->un.ulpWord[3], 3353 irsp->un.ulpWord[4], 3354 irsp->un.ulpWord[5], 3355 *(uint32_t *)&irsp->un1, 3356 *((uint32_t *)&irsp->un1 + 1)); 3357 } 3358 3359 switch (type) { 3360 case LPFC_ABORT_IOCB: 3361 case LPFC_SOL_IOCB: 3362 /* 3363 * Idle exchange closed via ABTS from port. No iocb 3364 * resources need to be recovered. 3365 */ 3366 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3367 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3368 "0333 IOCB cmd 0x%x" 3369 " processed. Skipping" 3370 " completion\n", 3371 irsp->ulpCommand); 3372 break; 3373 } 3374 3375 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3376 &rspiocbq); 3377 if (unlikely(!cmdiocbq)) 3378 break; 3379 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3380 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3381 if (cmdiocbq->iocb_cmpl) { 3382 spin_unlock_irqrestore(&phba->hbalock, iflag); 3383 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3384 &rspiocbq); 3385 spin_lock_irqsave(&phba->hbalock, iflag); 3386 } 3387 break; 3388 case LPFC_UNSOL_IOCB: 3389 spin_unlock_irqrestore(&phba->hbalock, iflag); 3390 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3391 spin_lock_irqsave(&phba->hbalock, iflag); 3392 break; 3393 default: 3394 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3395 char adaptermsg[LPFC_MAX_ADPTMSG]; 3396 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3397 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3398 MAX_MSG_DATA); 3399 dev_warn(&((phba->pcidev)->dev), 3400 "lpfc%d: %s\n", 3401 phba->brd_no, adaptermsg); 3402 } else { 3403 /* Unknown IOCB command */ 3404 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3405 "0334 Unknown IOCB command " 3406 "Data: x%x, x%x x%x x%x x%x\n", 3407 type, irsp->ulpCommand, 3408 irsp->ulpStatus, 3409 irsp->ulpIoTag, 3410 irsp->ulpContext); 3411 } 3412 break; 3413 } 3414 3415 /* 3416 * The response IOCB has been processed. Update the ring 3417 * pointer in SLIM. If the port response put pointer has not 3418 * been updated, sync the pgp->rspPutInx and fetch the new port 3419 * response put pointer. 
3420 */ 3421 writel(pring->sli.sli3.rspidx, 3422 &phba->host_gp[pring->ringno].rspGetInx); 3423 3424 if (pring->sli.sli3.rspidx == portRspPut) 3425 portRspPut = le32_to_cpu(pgp->rspPutInx); 3426 } 3427 3428 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3429 pring->stats.iocb_rsp_full++; 3430 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3431 writel(status, phba->CAregaddr); 3432 readl(phba->CAregaddr); 3433 } 3434 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3435 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3436 pring->stats.iocb_cmd_empty++; 3437 3438 /* Force update of the local copy of cmdGetInx */ 3439 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3440 lpfc_sli_resume_iocb(phba, pring); 3441 3442 if ((pring->lpfc_sli_cmd_available)) 3443 (pring->lpfc_sli_cmd_available) (phba, pring); 3444 3445 } 3446 3447 phba->fcp_ring_in_use = 0; 3448 spin_unlock_irqrestore(&phba->hbalock, iflag); 3449 return rc; 3450 } 3451 3452 /** 3453 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3454 * @phba: Pointer to HBA context object. 3455 * @pring: Pointer to driver SLI ring object. 3456 * @rspiocbp: Pointer to driver response IOCB object. 3457 * 3458 * This function is called from the worker thread when there is a slow-path 3459 * response IOCB to process. This function chains all the response iocbs until 3460 * seeing the iocb with the LE bit set. The function will call 3461 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3462 * completion of a command iocb. The function will call the 3463 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3464 * The function frees the resources or calls the completion handler if this 3465 * iocb is an abort completion. The function returns NULL when the response 3466 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3467 * this function shall chain the iocb on to the iocb_continueq and return the 3468 * response iocb passed in. 3469 **/ 3470 static struct lpfc_iocbq * 3471 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3472 struct lpfc_iocbq *rspiocbp) 3473 { 3474 struct lpfc_iocbq *saveq; 3475 struct lpfc_iocbq *cmdiocbp; 3476 struct lpfc_iocbq *next_iocb; 3477 IOCB_t *irsp = NULL; 3478 uint32_t free_saveq; 3479 uint8_t iocb_cmd_type; 3480 lpfc_iocb_type type; 3481 unsigned long iflag; 3482 int rc; 3483 3484 spin_lock_irqsave(&phba->hbalock, iflag); 3485 /* First add the response iocb to the countinueq list */ 3486 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3487 pring->iocb_continueq_cnt++; 3488 3489 /* Now, determine whether the list is completed for processing */ 3490 irsp = &rspiocbp->iocb; 3491 if (irsp->ulpLe) { 3492 /* 3493 * By default, the driver expects to free all resources 3494 * associated with this iocb completion. 3495 */ 3496 free_saveq = 1; 3497 saveq = list_get_first(&pring->iocb_continueq, 3498 struct lpfc_iocbq, list); 3499 irsp = &(saveq->iocb); 3500 list_del_init(&pring->iocb_continueq); 3501 pring->iocb_continueq_cnt = 0; 3502 3503 pring->stats.iocb_rsp++; 3504 3505 /* 3506 * If resource errors reported from HBA, reduce 3507 * queuedepths of the SCSI device. 
3508 */ 3509 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3510 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3511 IOERR_NO_RESOURCES)) { 3512 spin_unlock_irqrestore(&phba->hbalock, iflag); 3513 phba->lpfc_rampdown_queue_depth(phba); 3514 spin_lock_irqsave(&phba->hbalock, iflag); 3515 } 3516 3517 if (irsp->ulpStatus) { 3518 /* Rsp ring <ringno> error: IOCB */ 3519 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3520 "0328 Rsp Ring %d error: " 3521 "IOCB Data: " 3522 "x%x x%x x%x x%x " 3523 "x%x x%x x%x x%x " 3524 "x%x x%x x%x x%x " 3525 "x%x x%x x%x x%x\n", 3526 pring->ringno, 3527 irsp->un.ulpWord[0], 3528 irsp->un.ulpWord[1], 3529 irsp->un.ulpWord[2], 3530 irsp->un.ulpWord[3], 3531 irsp->un.ulpWord[4], 3532 irsp->un.ulpWord[5], 3533 *(((uint32_t *) irsp) + 6), 3534 *(((uint32_t *) irsp) + 7), 3535 *(((uint32_t *) irsp) + 8), 3536 *(((uint32_t *) irsp) + 9), 3537 *(((uint32_t *) irsp) + 10), 3538 *(((uint32_t *) irsp) + 11), 3539 *(((uint32_t *) irsp) + 12), 3540 *(((uint32_t *) irsp) + 13), 3541 *(((uint32_t *) irsp) + 14), 3542 *(((uint32_t *) irsp) + 15)); 3543 } 3544 3545 /* 3546 * Fetch the IOCB command type and call the correct completion 3547 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3548 * get freed back to the lpfc_iocb_list by the discovery 3549 * kernel thread. 3550 */ 3551 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3552 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3553 switch (type) { 3554 case LPFC_SOL_IOCB: 3555 spin_unlock_irqrestore(&phba->hbalock, iflag); 3556 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3557 spin_lock_irqsave(&phba->hbalock, iflag); 3558 break; 3559 3560 case LPFC_UNSOL_IOCB: 3561 spin_unlock_irqrestore(&phba->hbalock, iflag); 3562 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3563 spin_lock_irqsave(&phba->hbalock, iflag); 3564 if (!rc) 3565 free_saveq = 0; 3566 break; 3567 3568 case LPFC_ABORT_IOCB: 3569 cmdiocbp = NULL; 3570 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3571 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3572 saveq); 3573 if (cmdiocbp) { 3574 /* Call the specified completion routine */ 3575 if (cmdiocbp->iocb_cmpl) { 3576 spin_unlock_irqrestore(&phba->hbalock, 3577 iflag); 3578 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3579 saveq); 3580 spin_lock_irqsave(&phba->hbalock, 3581 iflag); 3582 } else 3583 __lpfc_sli_release_iocbq(phba, 3584 cmdiocbp); 3585 } 3586 break; 3587 3588 case LPFC_UNKNOWN_IOCB: 3589 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3590 char adaptermsg[LPFC_MAX_ADPTMSG]; 3591 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3592 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3593 MAX_MSG_DATA); 3594 dev_warn(&((phba->pcidev)->dev), 3595 "lpfc%d: %s\n", 3596 phba->brd_no, adaptermsg); 3597 } else { 3598 /* Unknown IOCB command */ 3599 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3600 "0335 Unknown IOCB " 3601 "command Data: x%x " 3602 "x%x x%x x%x\n", 3603 irsp->ulpCommand, 3604 irsp->ulpStatus, 3605 irsp->ulpIoTag, 3606 irsp->ulpContext); 3607 } 3608 break; 3609 } 3610 3611 if (free_saveq) { 3612 list_for_each_entry_safe(rspiocbp, next_iocb, 3613 &saveq->list, list) { 3614 list_del_init(&rspiocbp->list); 3615 __lpfc_sli_release_iocbq(phba, rspiocbp); 3616 } 3617 __lpfc_sli_release_iocbq(phba, saveq); 3618 } 3619 rspiocbp = NULL; 3620 } 3621 spin_unlock_irqrestore(&phba->hbalock, iflag); 3622 return rspiocbp; 3623 } 3624 3625 /** 3626 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3627 * @phba: Pointer to HBA context object. 3628 * @pring: Pointer to driver SLI ring object. 
3629 * @mask: Host attention register mask for this ring. 3630 * 3631 * This routine wraps the actual slow_ring event process routine from the 3632 * API jump table function pointer from the lpfc_hba struct. 3633 **/ 3634 void 3635 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3636 struct lpfc_sli_ring *pring, uint32_t mask) 3637 { 3638 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3639 } 3640 3641 /** 3642 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3643 * @phba: Pointer to HBA context object. 3644 * @pring: Pointer to driver SLI ring object. 3645 * @mask: Host attention register mask for this ring. 3646 * 3647 * This function is called from the worker thread when there is a ring event 3648 * for non-fcp rings. The caller does not hold any lock. The function will 3649 * remove each response iocb in the response ring and calls the handle 3650 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3651 **/ 3652 static void 3653 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3654 struct lpfc_sli_ring *pring, uint32_t mask) 3655 { 3656 struct lpfc_pgp *pgp; 3657 IOCB_t *entry; 3658 IOCB_t *irsp = NULL; 3659 struct lpfc_iocbq *rspiocbp = NULL; 3660 uint32_t portRspPut, portRspMax; 3661 unsigned long iflag; 3662 uint32_t status; 3663 3664 pgp = &phba->port_gp[pring->ringno]; 3665 spin_lock_irqsave(&phba->hbalock, iflag); 3666 pring->stats.iocb_event++; 3667 3668 /* 3669 * The next available response entry should never exceed the maximum 3670 * entries. If it does, treat it as an adapter hardware error. 3671 */ 3672 portRspMax = pring->sli.sli3.numRiocb; 3673 portRspPut = le32_to_cpu(pgp->rspPutInx); 3674 if (portRspPut >= portRspMax) { 3675 /* 3676 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3677 * rsp ring <portRspMax> 3678 */ 3679 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3680 "0303 Ring %d handler: portRspPut %d " 3681 "is bigger than rsp ring %d\n", 3682 pring->ringno, portRspPut, portRspMax); 3683 3684 phba->link_state = LPFC_HBA_ERROR; 3685 spin_unlock_irqrestore(&phba->hbalock, iflag); 3686 3687 phba->work_hs = HS_FFER3; 3688 lpfc_handle_eratt(phba); 3689 3690 return; 3691 } 3692 3693 rmb(); 3694 while (pring->sli.sli3.rspidx != portRspPut) { 3695 /* 3696 * Build a completion list and call the appropriate handler. 3697 * The process is to get the next available response iocb, get 3698 * a free iocb from the list, copy the response data into the 3699 * free iocb, insert to the continuation list, and update the 3700 * next response index to slim. This process makes response 3701 * iocb's in the ring available to DMA as fast as possible but 3702 * pays a penalty for a copy operation. Since the iocb is 3703 * only 32 bytes, this penalty is considered small relative to 3704 * the PCI reads for register values and a slim write. When 3705 * the ulpLe field is set, the entire Command has been 3706 * received. 3707 */ 3708 entry = lpfc_resp_iocb(phba, pring); 3709 3710 phba->last_completion_time = jiffies; 3711 rspiocbp = __lpfc_sli_get_iocbq(phba); 3712 if (rspiocbp == NULL) { 3713 printk(KERN_ERR "%s: out of buffers! 
Failing " 3714 "completion.\n", __func__); 3715 break; 3716 } 3717 3718 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3719 phba->iocb_rsp_size); 3720 irsp = &rspiocbp->iocb; 3721 3722 if (++pring->sli.sli3.rspidx >= portRspMax) 3723 pring->sli.sli3.rspidx = 0; 3724 3725 if (pring->ringno == LPFC_ELS_RING) { 3726 lpfc_debugfs_slow_ring_trc(phba, 3727 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3728 *(((uint32_t *) irsp) + 4), 3729 *(((uint32_t *) irsp) + 6), 3730 *(((uint32_t *) irsp) + 7)); 3731 } 3732 3733 writel(pring->sli.sli3.rspidx, 3734 &phba->host_gp[pring->ringno].rspGetInx); 3735 3736 spin_unlock_irqrestore(&phba->hbalock, iflag); 3737 /* Handle the response IOCB */ 3738 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3739 spin_lock_irqsave(&phba->hbalock, iflag); 3740 3741 /* 3742 * If the port response put pointer has not been updated, sync 3743 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3744 * response put pointer. 3745 */ 3746 if (pring->sli.sli3.rspidx == portRspPut) { 3747 portRspPut = le32_to_cpu(pgp->rspPutInx); 3748 } 3749 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3750 3751 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3752 /* At least one response entry has been freed */ 3753 pring->stats.iocb_rsp_full++; 3754 /* SET RxRE_RSP in Chip Att register */ 3755 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3756 writel(status, phba->CAregaddr); 3757 readl(phba->CAregaddr); /* flush */ 3758 } 3759 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3760 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3761 pring->stats.iocb_cmd_empty++; 3762 3763 /* Force update of the local copy of cmdGetInx */ 3764 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3765 lpfc_sli_resume_iocb(phba, pring); 3766 3767 if ((pring->lpfc_sli_cmd_available)) 3768 (pring->lpfc_sli_cmd_available) (phba, pring); 3769 3770 } 3771 3772 spin_unlock_irqrestore(&phba->hbalock, iflag); 3773 return; 3774 } 3775 3776 /** 3777 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3778 * @phba: Pointer to HBA context object. 3779 * @pring: Pointer to driver SLI ring object. 3780 * @mask: Host attention register mask for this ring. 3781 * 3782 * This function is called from the worker thread when there is a pending 3783 * ELS response iocb on the driver internal slow-path response iocb worker 3784 * queue. The caller does not hold any lock. The function will remove each 3785 * response iocb from the response worker queue and calls the handle 3786 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
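 *
 * Which of the _s3/_s4 handlers runs is decided once when the SLI API
 * jump table is filled in; conceptually (simplified sketch, the real
 * assignment lives in the API table setup code):
 *
 *     phba->lpfc_sli_handle_slow_ring_event =
 *             (phba->sli_rev < LPFC_SLI_REV4) ?
 *             lpfc_sli_handle_slow_ring_event_s3 :
 *             lpfc_sli_handle_slow_ring_event_s4;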
3787 **/ 3788 static void 3789 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3790 struct lpfc_sli_ring *pring, uint32_t mask) 3791 { 3792 struct lpfc_iocbq *irspiocbq; 3793 struct hbq_dmabuf *dmabuf; 3794 struct lpfc_cq_event *cq_event; 3795 unsigned long iflag; 3796 int count = 0; 3797 3798 spin_lock_irqsave(&phba->hbalock, iflag); 3799 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3800 spin_unlock_irqrestore(&phba->hbalock, iflag); 3801 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3802 /* Get the response iocb from the head of work queue */ 3803 spin_lock_irqsave(&phba->hbalock, iflag); 3804 list_remove_head(&phba->sli4_hba.sp_queue_event, 3805 cq_event, struct lpfc_cq_event, list); 3806 spin_unlock_irqrestore(&phba->hbalock, iflag); 3807 3808 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3809 case CQE_CODE_COMPL_WQE: 3810 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3811 cq_event); 3812 /* Translate ELS WCQE to response IOCBQ */ 3813 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3814 irspiocbq); 3815 if (irspiocbq) 3816 lpfc_sli_sp_handle_rspiocb(phba, pring, 3817 irspiocbq); 3818 count++; 3819 break; 3820 case CQE_CODE_RECEIVE: 3821 case CQE_CODE_RECEIVE_V1: 3822 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3823 cq_event); 3824 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3825 count++; 3826 break; 3827 default: 3828 break; 3829 } 3830 3831 /* Limit the number of events to 64 to avoid soft lockups */ 3832 if (count == 64) 3833 break; 3834 } 3835 } 3836 3837 /** 3838 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3839 * @phba: Pointer to HBA context object. 3840 * @pring: Pointer to driver SLI ring object. 3841 * 3842 * This function aborts all iocbs in the given ring and frees all the iocb 3843 * objects in txq. This function issues an abort iocb for all the iocb commands 3844 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3845 * the return of this function. The caller is not required to hold any locks. 3846 **/ 3847 void 3848 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3849 { 3850 LIST_HEAD(completions); 3851 struct lpfc_iocbq *iocb, *next_iocb; 3852 3853 if (pring->ringno == LPFC_ELS_RING) { 3854 lpfc_fabric_abort_hba(phba); 3855 } 3856 3857 /* Error everything on txq and txcmplq 3858 * First do the txq. 3859 */ 3860 if (phba->sli_rev >= LPFC_SLI_REV4) { 3861 spin_lock_irq(&pring->ring_lock); 3862 list_splice_init(&pring->txq, &completions); 3863 pring->txq_cnt = 0; 3864 spin_unlock_irq(&pring->ring_lock); 3865 3866 spin_lock_irq(&phba->hbalock); 3867 /* Next issue ABTS for everything on the txcmplq */ 3868 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3869 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3870 spin_unlock_irq(&phba->hbalock); 3871 } else { 3872 spin_lock_irq(&phba->hbalock); 3873 list_splice_init(&pring->txq, &completions); 3874 pring->txq_cnt = 0; 3875 3876 /* Next issue ABTS for everything on the txcmplq */ 3877 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3878 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3879 spin_unlock_irq(&phba->hbalock); 3880 } 3881 3882 /* Cancel all the IOCBs from the completions list */ 3883 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3884 IOERR_SLI_ABORTED); 3885 } 3886 3887 /** 3888 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring 3889 * @phba: Pointer to HBA context object. 3890 * @pring: Pointer to driver SLI ring object. 
3891 * 3892 * This function aborts all iocbs in the given ring and frees all the iocb 3893 * objects in txq. This function issues an abort iocb for all the iocb commands 3894 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3895 * the return of this function. The caller is not required to hold any locks. 3896 **/ 3897 void 3898 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3899 { 3900 LIST_HEAD(completions); 3901 struct lpfc_iocbq *iocb, *next_iocb; 3902 3903 if (pring->ringno == LPFC_ELS_RING) 3904 lpfc_fabric_abort_hba(phba); 3905 3906 spin_lock_irq(&phba->hbalock); 3907 /* Next issue ABTS for everything on the txcmplq */ 3908 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3909 lpfc_sli4_abort_nvme_io(phba, pring, iocb); 3910 spin_unlock_irq(&phba->hbalock); 3911 } 3912 3913 3914 /** 3915 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3916 * @phba: Pointer to HBA context object. 3917 * @pring: Pointer to driver SLI ring object. 3918 * 3919 * This function aborts all iocbs in FCP rings and frees all the iocb 3920 * objects in txq. This function issues an abort iocb for all the iocb commands 3921 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3922 * the return of this function. The caller is not required to hold any locks. 3923 **/ 3924 void 3925 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3926 { 3927 struct lpfc_sli *psli = &phba->sli; 3928 struct lpfc_sli_ring *pring; 3929 uint32_t i; 3930 3931 /* Look on all the FCP Rings for the iotag */ 3932 if (phba->sli_rev >= LPFC_SLI_REV4) { 3933 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3934 pring = phba->sli4_hba.fcp_wq[i]->pring; 3935 lpfc_sli_abort_iocb_ring(phba, pring); 3936 } 3937 } else { 3938 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3939 lpfc_sli_abort_iocb_ring(phba, pring); 3940 } 3941 } 3942 3943 /** 3944 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings 3945 * @phba: Pointer to HBA context object. 3946 * 3947 * This function aborts all wqes in NVME rings. This function issues an 3948 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in 3949 * the txcmplq is not guaranteed to complete before the return of this 3950 * function. The caller is not required to hold any locks. 3951 **/ 3952 void 3953 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) 3954 { 3955 struct lpfc_sli_ring *pring; 3956 uint32_t i; 3957 3958 if (phba->sli_rev < LPFC_SLI_REV4) 3959 return; 3960 3961 /* Abort all IO on each NVME ring. */ 3962 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 3963 pring = phba->sli4_hba.nvme_wq[i]->pring; 3964 lpfc_sli_abort_wqe_ring(phba, pring); 3965 } 3966 } 3967 3968 3969 /** 3970 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3971 * @phba: Pointer to HBA context object. 3972 * 3973 * This function flushes all iocbs in the fcp ring and frees all the iocb 3974 * objects in txq and txcmplq. This function will not issue abort iocbs 3975 * for all the iocb commands in txcmplq, they will just be returned with 3976 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3977 * slot has been permanently disabled. 
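 *
 * Unlike the abort routines above, no ABTS is sent to the port; each
 * queued command is simply completed back to its owner with a local
 * reject, roughly:
 *
 *     lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
 *                           IOERR_SLI_DOWN);
 *
 * which is appropriate here because the PCI slot is already dead.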
3978 **/ 3979 void 3980 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3981 { 3982 LIST_HEAD(txq); 3983 LIST_HEAD(txcmplq); 3984 struct lpfc_sli *psli = &phba->sli; 3985 struct lpfc_sli_ring *pring; 3986 uint32_t i; 3987 struct lpfc_iocbq *piocb, *next_iocb; 3988 3989 spin_lock_irq(&phba->hbalock); 3990 /* Indicate the I/O queues are flushed */ 3991 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3992 spin_unlock_irq(&phba->hbalock); 3993 3994 /* Look on all the FCP Rings for the iotag */ 3995 if (phba->sli_rev >= LPFC_SLI_REV4) { 3996 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3997 pring = phba->sli4_hba.fcp_wq[i]->pring; 3998 3999 spin_lock_irq(&pring->ring_lock); 4000 /* Retrieve everything on txq */ 4001 list_splice_init(&pring->txq, &txq); 4002 list_for_each_entry_safe(piocb, next_iocb, 4003 &pring->txcmplq, list) 4004 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4005 /* Retrieve everything on the txcmplq */ 4006 list_splice_init(&pring->txcmplq, &txcmplq); 4007 pring->txq_cnt = 0; 4008 pring->txcmplq_cnt = 0; 4009 spin_unlock_irq(&pring->ring_lock); 4010 4011 /* Flush the txq */ 4012 lpfc_sli_cancel_iocbs(phba, &txq, 4013 IOSTAT_LOCAL_REJECT, 4014 IOERR_SLI_DOWN); 4015 /* Flush the txcmpq */ 4016 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4017 IOSTAT_LOCAL_REJECT, 4018 IOERR_SLI_DOWN); 4019 } 4020 } else { 4021 pring = &psli->sli3_ring[LPFC_FCP_RING]; 4022 4023 spin_lock_irq(&phba->hbalock); 4024 /* Retrieve everything on txq */ 4025 list_splice_init(&pring->txq, &txq); 4026 list_for_each_entry_safe(piocb, next_iocb, 4027 &pring->txcmplq, list) 4028 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4029 /* Retrieve everything on the txcmplq */ 4030 list_splice_init(&pring->txcmplq, &txcmplq); 4031 pring->txq_cnt = 0; 4032 pring->txcmplq_cnt = 0; 4033 spin_unlock_irq(&phba->hbalock); 4034 4035 /* Flush the txq */ 4036 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 4037 IOERR_SLI_DOWN); 4038 /* Flush the txcmpq */ 4039 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 4040 IOERR_SLI_DOWN); 4041 } 4042 } 4043 4044 /** 4045 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 4046 * @phba: Pointer to HBA context object. 4047 * 4048 * This function flushes all wqes in the nvme rings and frees all resources 4049 * in the txcmplq. This function does not issue abort wqes for the IO 4050 * commands in txcmplq, they will just be returned with 4051 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 4052 * slot has been permanently disabled. 4053 **/ 4054 void 4055 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) 4056 { 4057 LIST_HEAD(txcmplq); 4058 struct lpfc_sli_ring *pring; 4059 uint32_t i; 4060 struct lpfc_iocbq *piocb, *next_iocb; 4061 4062 if (phba->sli_rev < LPFC_SLI_REV4) 4063 return; 4064 4065 /* Hint to other driver operations that a flush is in progress. */ 4066 spin_lock_irq(&phba->hbalock); 4067 phba->hba_flag |= HBA_NVME_IOQ_FLUSH; 4068 spin_unlock_irq(&phba->hbalock); 4069 4070 /* Cycle through all NVME rings and complete each IO with 4071 * a local driver reason code. This is a flush so no 4072 * abort exchange to FW. 
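 *
 * (Contrast with lpfc_sli_abort_wqe_ring() above, which does issue an
 * abort per outstanding WQE via lpfc_sli4_abort_nvme_io(); here the
 * commands are only cancelled locally with IOERR_SLI_DOWN.)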
4073 */ 4074 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 4075 pring = phba->sli4_hba.nvme_wq[i]->pring; 4076 4077 spin_lock_irq(&pring->ring_lock); 4078 list_for_each_entry_safe(piocb, next_iocb, 4079 &pring->txcmplq, list) 4080 piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 4081 /* Retrieve everything on the txcmplq */ 4082 list_splice_init(&pring->txcmplq, &txcmplq); 4083 pring->txcmplq_cnt = 0; 4084 spin_unlock_irq(&pring->ring_lock); 4085 4086 /* Flush the txcmpq &&&PAE */ 4087 lpfc_sli_cancel_iocbs(phba, &txcmplq, 4088 IOSTAT_LOCAL_REJECT, 4089 IOERR_SLI_DOWN); 4090 } 4091 } 4092 4093 /** 4094 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 4095 * @phba: Pointer to HBA context object. 4096 * @mask: Bit mask to be checked. 4097 * 4098 * This function reads the host status register and compares 4099 * with the provided bit mask to check if HBA completed 4100 * the restart. This function will wait in a loop for the 4101 * HBA to complete restart. If the HBA does not restart within 4102 * 15 iterations, the function will reset the HBA again. The 4103 * function returns 1 when HBA fail to restart otherwise returns 4104 * zero. 4105 **/ 4106 static int 4107 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 4108 { 4109 uint32_t status; 4110 int i = 0; 4111 int retval = 0; 4112 4113 /* Read the HBA Host Status Register */ 4114 if (lpfc_readl(phba->HSregaddr, &status)) 4115 return 1; 4116 4117 /* 4118 * Check status register every 100ms for 5 retries, then every 4119 * 500ms for 5, then every 2.5 sec for 5, then reset board and 4120 * every 2.5 sec for 4. 4121 * Break our of the loop if errors occurred during init. 4122 */ 4123 while (((status & mask) != mask) && 4124 !(status & HS_FFERM) && 4125 i++ < 20) { 4126 4127 if (i <= 5) 4128 msleep(10); 4129 else if (i <= 10) 4130 msleep(500); 4131 else 4132 msleep(2500); 4133 4134 if (i == 15) { 4135 /* Do post */ 4136 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4137 lpfc_sli_brdrestart(phba); 4138 } 4139 /* Read the HBA Host Status Register */ 4140 if (lpfc_readl(phba->HSregaddr, &status)) { 4141 retval = 1; 4142 break; 4143 } 4144 } 4145 4146 /* Check to see if any errors occurred during init */ 4147 if ((status & HS_FFERM) || (i >= 20)) { 4148 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4149 "2751 Adapter failed to restart, " 4150 "status reg x%x, FW Data: A8 x%x AC x%x\n", 4151 status, 4152 readl(phba->MBslimaddr + 0xa8), 4153 readl(phba->MBslimaddr + 0xac)); 4154 phba->link_state = LPFC_HBA_ERROR; 4155 retval = 1; 4156 } 4157 4158 return retval; 4159 } 4160 4161 /** 4162 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 4163 * @phba: Pointer to HBA context object. 4164 * @mask: Bit mask to be checked. 4165 * 4166 * This function checks the host status register to check if HBA is 4167 * ready. This function will wait in a loop for the HBA to be ready 4168 * If the HBA is not ready , the function will will reset the HBA PCI 4169 * function again. The function returns 1 when HBA fail to be ready 4170 * otherwise returns zero. 
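 *
 * A caller typically only consumes the boolean result; a minimal
 * sketch (the mask is matched against the host status register on the
 * SLI-3 path and is not used by the SLI-4 POST status check):
 *
 *     if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *             return -EIO;    /* board failed to come ready */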
4171 **/ 4172 static int 4173 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 4174 { 4175 uint32_t status; 4176 int retval = 0; 4177 4178 /* Read the HBA Host Status Register */ 4179 status = lpfc_sli4_post_status_check(phba); 4180 4181 if (status) { 4182 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4183 lpfc_sli_brdrestart(phba); 4184 status = lpfc_sli4_post_status_check(phba); 4185 } 4186 4187 /* Check to see if any errors occurred during init */ 4188 if (status) { 4189 phba->link_state = LPFC_HBA_ERROR; 4190 retval = 1; 4191 } else 4192 phba->sli4_hba.intr_enable = 0; 4193 4194 return retval; 4195 } 4196 4197 /** 4198 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 4199 * @phba: Pointer to HBA context object. 4200 * @mask: Bit mask to be checked. 4201 * 4202 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 4203 * from the API jump table function pointer from the lpfc_hba struct. 4204 **/ 4205 int 4206 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 4207 { 4208 return phba->lpfc_sli_brdready(phba, mask); 4209 } 4210 4211 #define BARRIER_TEST_PATTERN (0xdeadbeef) 4212 4213 /** 4214 * lpfc_reset_barrier - Make HBA ready for HBA reset 4215 * @phba: Pointer to HBA context object. 4216 * 4217 * This function is called before resetting an HBA. This function is called 4218 * with hbalock held and requests HBA to quiesce DMAs before a reset. 4219 **/ 4220 void lpfc_reset_barrier(struct lpfc_hba *phba) 4221 { 4222 uint32_t __iomem *resp_buf; 4223 uint32_t __iomem *mbox_buf; 4224 volatile uint32_t mbox; 4225 uint32_t hc_copy, ha_copy, resp_data; 4226 int i; 4227 uint8_t hdrtype; 4228 4229 lockdep_assert_held(&phba->hbalock); 4230 4231 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4232 if (hdrtype != 0x80 || 4233 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4234 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4235 return; 4236 4237 /* 4238 * Tell the other part of the chip to suspend temporarily all 4239 * its DMA activity. 
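 *
 * The handshake below is, in outline: write BARRIER_TEST_PATTERN into
 * SLIM, post a MBX_KILL_BOARD word owned by the chip, then poll about
 * 1 ms at a time (up to ~50 ms) until the chip acknowledges by storing
 * the one's complement of the test pattern back in SLIM.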
4240 */ 4241 resp_buf = phba->MBslimaddr; 4242 4243 /* Disable the error attention */ 4244 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4245 return; 4246 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4247 readl(phba->HCregaddr); /* flush */ 4248 phba->link_flag |= LS_IGNORE_ERATT; 4249 4250 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4251 return; 4252 if (ha_copy & HA_ERATT) { 4253 /* Clear Chip error bit */ 4254 writel(HA_ERATT, phba->HAregaddr); 4255 phba->pport->stopped = 1; 4256 } 4257 4258 mbox = 0; 4259 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4260 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4261 4262 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4263 mbox_buf = phba->MBslimaddr; 4264 writel(mbox, mbox_buf); 4265 4266 for (i = 0; i < 50; i++) { 4267 if (lpfc_readl((resp_buf + 1), &resp_data)) 4268 return; 4269 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4270 mdelay(1); 4271 else 4272 break; 4273 } 4274 resp_data = 0; 4275 if (lpfc_readl((resp_buf + 1), &resp_data)) 4276 return; 4277 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4278 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4279 phba->pport->stopped) 4280 goto restore_hc; 4281 else 4282 goto clear_errat; 4283 } 4284 4285 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4286 resp_data = 0; 4287 for (i = 0; i < 500; i++) { 4288 if (lpfc_readl(resp_buf, &resp_data)) 4289 return; 4290 if (resp_data != mbox) 4291 mdelay(1); 4292 else 4293 break; 4294 } 4295 4296 clear_errat: 4297 4298 while (++i < 500) { 4299 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4300 return; 4301 if (!(ha_copy & HA_ERATT)) 4302 mdelay(1); 4303 else 4304 break; 4305 } 4306 4307 if (readl(phba->HAregaddr) & HA_ERATT) { 4308 writel(HA_ERATT, phba->HAregaddr); 4309 phba->pport->stopped = 1; 4310 } 4311 4312 restore_hc: 4313 phba->link_flag &= ~LS_IGNORE_ERATT; 4314 writel(hc_copy, phba->HCregaddr); 4315 readl(phba->HCregaddr); /* flush */ 4316 } 4317 4318 /** 4319 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4320 * @phba: Pointer to HBA context object. 4321 * 4322 * This function issues a kill_board mailbox command and waits for 4323 * the error attention interrupt. This function is called for stopping 4324 * the firmware processing. The caller is not required to hold any 4325 * locks. This function calls lpfc_hba_down_post function to free 4326 * any pending commands after the kill. The function will return 1 when it 4327 * fails to kill the board else will return 0. 
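 *
 * Since KILL_BOARD never generates a mailbox completion, success is
 * inferred from the error-attention bit instead; the code below polls
 * HA_ERATT roughly every 100 ms for up to 3 seconds:
 *
 *     while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
 *             mdelay(100);
 *             ...
 *     }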
4328 **/ 4329 int 4330 lpfc_sli_brdkill(struct lpfc_hba *phba) 4331 { 4332 struct lpfc_sli *psli; 4333 LPFC_MBOXQ_t *pmb; 4334 uint32_t status; 4335 uint32_t ha_copy; 4336 int retval; 4337 int i = 0; 4338 4339 psli = &phba->sli; 4340 4341 /* Kill HBA */ 4342 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4343 "0329 Kill HBA Data: x%x x%x\n", 4344 phba->pport->port_state, psli->sli_flag); 4345 4346 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4347 if (!pmb) 4348 return 1; 4349 4350 /* Disable the error attention */ 4351 spin_lock_irq(&phba->hbalock); 4352 if (lpfc_readl(phba->HCregaddr, &status)) { 4353 spin_unlock_irq(&phba->hbalock); 4354 mempool_free(pmb, phba->mbox_mem_pool); 4355 return 1; 4356 } 4357 status &= ~HC_ERINT_ENA; 4358 writel(status, phba->HCregaddr); 4359 readl(phba->HCregaddr); /* flush */ 4360 phba->link_flag |= LS_IGNORE_ERATT; 4361 spin_unlock_irq(&phba->hbalock); 4362 4363 lpfc_kill_board(phba, pmb); 4364 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4365 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4366 4367 if (retval != MBX_SUCCESS) { 4368 if (retval != MBX_BUSY) 4369 mempool_free(pmb, phba->mbox_mem_pool); 4370 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4371 "2752 KILL_BOARD command failed retval %d\n", 4372 retval); 4373 spin_lock_irq(&phba->hbalock); 4374 phba->link_flag &= ~LS_IGNORE_ERATT; 4375 spin_unlock_irq(&phba->hbalock); 4376 return 1; 4377 } 4378 4379 spin_lock_irq(&phba->hbalock); 4380 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4381 spin_unlock_irq(&phba->hbalock); 4382 4383 mempool_free(pmb, phba->mbox_mem_pool); 4384 4385 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4386 * attention every 100ms for 3 seconds. If we don't get ERATT after 4387 * 3 seconds we still set HBA_ERROR state because the status of the 4388 * board is now undefined. 4389 */ 4390 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4391 return 1; 4392 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4393 mdelay(100); 4394 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4395 return 1; 4396 } 4397 4398 del_timer_sync(&psli->mbox_tmo); 4399 if (ha_copy & HA_ERATT) { 4400 writel(HA_ERATT, phba->HAregaddr); 4401 phba->pport->stopped = 1; 4402 } 4403 spin_lock_irq(&phba->hbalock); 4404 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4405 psli->mbox_active = NULL; 4406 phba->link_flag &= ~LS_IGNORE_ERATT; 4407 spin_unlock_irq(&phba->hbalock); 4408 4409 lpfc_hba_down_post(phba); 4410 phba->link_state = LPFC_HBA_ERROR; 4411 4412 return ha_copy & HA_ERATT ? 0 : 1; 4413 } 4414 4415 /** 4416 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4417 * @phba: Pointer to HBA context object. 4418 * 4419 * This function resets the HBA by writing HC_INITFF to the control 4420 * register. After the HBA resets, this function resets all the iocb ring 4421 * indices. This function disables PCI layer parity checking during 4422 * the reset. 4423 * This function returns 0 always. 4424 * The caller is not required to hold any locks. 4425 **/ 4426 int 4427 lpfc_sli_brdreset(struct lpfc_hba *phba) 4428 { 4429 struct lpfc_sli *psli; 4430 struct lpfc_sli_ring *pring; 4431 uint16_t cfg_value; 4432 int i; 4433 4434 psli = &phba->sli; 4435 4436 /* Reset HBA */ 4437 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4438 "0325 Reset HBA Data: x%x x%x\n", 4439 (phba->pport) ? 
phba->pport->port_state : 0, 4440 psli->sli_flag); 4441 4442 /* perform board reset */ 4443 phba->fc_eventTag = 0; 4444 phba->link_events = 0; 4445 if (phba->pport) { 4446 phba->pport->fc_myDID = 0; 4447 phba->pport->fc_prevDID = 0; 4448 } 4449 4450 /* Turn off parity checking and serr during the physical reset */ 4451 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4452 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4453 (cfg_value & 4454 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4455 4456 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4457 4458 /* Now toggle INITFF bit in the Host Control Register */ 4459 writel(HC_INITFF, phba->HCregaddr); 4460 mdelay(1); 4461 readl(phba->HCregaddr); /* flush */ 4462 writel(0, phba->HCregaddr); 4463 readl(phba->HCregaddr); /* flush */ 4464 4465 /* Restore PCI cmd register */ 4466 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4467 4468 /* Initialize relevant SLI info */ 4469 for (i = 0; i < psli->num_rings; i++) { 4470 pring = &psli->sli3_ring[i]; 4471 pring->flag = 0; 4472 pring->sli.sli3.rspidx = 0; 4473 pring->sli.sli3.next_cmdidx = 0; 4474 pring->sli.sli3.local_getidx = 0; 4475 pring->sli.sli3.cmdidx = 0; 4476 pring->missbufcnt = 0; 4477 } 4478 4479 phba->link_state = LPFC_WARM_START; 4480 return 0; 4481 } 4482 4483 /** 4484 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4485 * @phba: Pointer to HBA context object. 4486 * 4487 * This function resets a SLI4 HBA. This function disables PCI layer parity 4488 * checking during resets the device. The caller is not required to hold 4489 * any locks. 4490 * 4491 * This function returns 0 always. 4492 **/ 4493 int 4494 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4495 { 4496 struct lpfc_sli *psli = &phba->sli; 4497 uint16_t cfg_value; 4498 int rc = 0; 4499 4500 /* Reset HBA */ 4501 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4502 "0295 Reset HBA Data: x%x x%x x%x\n", 4503 phba->pport->port_state, psli->sli_flag, 4504 phba->hba_flag); 4505 4506 /* perform board reset */ 4507 phba->fc_eventTag = 0; 4508 phba->link_events = 0; 4509 phba->pport->fc_myDID = 0; 4510 phba->pport->fc_prevDID = 0; 4511 4512 spin_lock_irq(&phba->hbalock); 4513 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4514 phba->fcf.fcf_flag = 0; 4515 spin_unlock_irq(&phba->hbalock); 4516 4517 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4518 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4519 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4520 return rc; 4521 } 4522 4523 /* Now physically reset the device */ 4524 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4525 "0389 Performing PCI function reset!\n"); 4526 4527 /* Turn off parity checking and serr during the physical reset */ 4528 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4529 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4530 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4531 4532 /* Perform FCoE PCI function reset before freeing queue memory */ 4533 rc = lpfc_pci_function_reset(phba); 4534 4535 /* Restore PCI cmd register */ 4536 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4537 4538 return rc; 4539 } 4540 4541 /** 4542 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4543 * @phba: Pointer to HBA context object. 4544 * 4545 * This function is called in the SLI initialization code path to 4546 * restart the HBA. The caller is not required to hold any lock. 4547 * This function writes MBX_RESTART mailbox command to the SLIM and 4548 * resets the HBA. 
At the end of the function, it calls lpfc_hba_down_post 4549 * function to free any pending commands. The function enables 4550 * POST only during the first initialization. The function returns zero. 4551 * The function does not guarantee completion of MBX_RESTART mailbox 4552 * command before the return of this function. 4553 **/ 4554 static int 4555 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4556 { 4557 MAILBOX_t *mb; 4558 struct lpfc_sli *psli; 4559 volatile uint32_t word0; 4560 void __iomem *to_slim; 4561 uint32_t hba_aer_enabled; 4562 4563 spin_lock_irq(&phba->hbalock); 4564 4565 /* Take PCIe device Advanced Error Reporting (AER) state */ 4566 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4567 4568 psli = &phba->sli; 4569 4570 /* Restart HBA */ 4571 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4572 "0337 Restart HBA Data: x%x x%x\n", 4573 (phba->pport) ? phba->pport->port_state : 0, 4574 psli->sli_flag); 4575 4576 word0 = 0; 4577 mb = (MAILBOX_t *) &word0; 4578 mb->mbxCommand = MBX_RESTART; 4579 mb->mbxHc = 1; 4580 4581 lpfc_reset_barrier(phba); 4582 4583 to_slim = phba->MBslimaddr; 4584 writel(*(uint32_t *) mb, to_slim); 4585 readl(to_slim); /* flush */ 4586 4587 /* Only skip post after fc_ffinit is completed */ 4588 if (phba->pport && phba->pport->port_state) 4589 word0 = 1; /* This is really setting up word1 */ 4590 else 4591 word0 = 0; /* This is really setting up word1 */ 4592 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4593 writel(*(uint32_t *) mb, to_slim); 4594 readl(to_slim); /* flush */ 4595 4596 lpfc_sli_brdreset(phba); 4597 if (phba->pport) 4598 phba->pport->stopped = 0; 4599 phba->link_state = LPFC_INIT_START; 4600 phba->hba_flag = 0; 4601 spin_unlock_irq(&phba->hbalock); 4602 4603 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4604 psli->stats_start = ktime_get_seconds(); 4605 4606 /* Give the INITFF and Post time to settle. */ 4607 mdelay(100); 4608 4609 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4610 if (hba_aer_enabled) 4611 pci_disable_pcie_error_reporting(phba->pcidev); 4612 4613 lpfc_hba_down_post(phba); 4614 4615 return 0; 4616 } 4617 4618 /** 4619 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4620 * @phba: Pointer to HBA context object. 4621 * 4622 * This function is called in the SLI initialization code path to restart 4623 * a SLI4 HBA. The caller is not required to hold any lock. 4624 * At the end of the function, it calls lpfc_hba_down_post function to 4625 * free any pending commands. 
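 *
 * Unlike the SLI-3 restart above, nothing is written to SLIM here; the
 * sequence is essentially:
 *
 *     rc = lpfc_sli4_brdreset(phba);     /* PCI function reset      */
 *     ...
 *     lpfc_hba_down_post(phba);          /* fail back pending cmds  */
 *     lpfc_sli4_queue_destroy(phba);     /* release SLI4 queues     */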
4626 **/ 4627 static int 4628 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4629 { 4630 struct lpfc_sli *psli = &phba->sli; 4631 uint32_t hba_aer_enabled; 4632 int rc; 4633 4634 /* Restart HBA */ 4635 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4636 "0296 Restart HBA Data: x%x x%x\n", 4637 phba->pport->port_state, psli->sli_flag); 4638 4639 /* Take PCIe device Advanced Error Reporting (AER) state */ 4640 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4641 4642 rc = lpfc_sli4_brdreset(phba); 4643 4644 spin_lock_irq(&phba->hbalock); 4645 phba->pport->stopped = 0; 4646 phba->link_state = LPFC_INIT_START; 4647 phba->hba_flag = 0; 4648 spin_unlock_irq(&phba->hbalock); 4649 4650 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4651 psli->stats_start = ktime_get_seconds(); 4652 4653 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4654 if (hba_aer_enabled) 4655 pci_disable_pcie_error_reporting(phba->pcidev); 4656 4657 lpfc_hba_down_post(phba); 4658 lpfc_sli4_queue_destroy(phba); 4659 4660 return rc; 4661 } 4662 4663 /** 4664 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4665 * @phba: Pointer to HBA context object. 4666 * 4667 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4668 * API jump table function pointer from the lpfc_hba struct. 4669 **/ 4670 int 4671 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4672 { 4673 return phba->lpfc_sli_brdrestart(phba); 4674 } 4675 4676 /** 4677 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4678 * @phba: Pointer to HBA context object. 4679 * 4680 * This function is called after a HBA restart to wait for successful 4681 * restart of the HBA. Successful restart of the HBA is indicated by 4682 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4683 * iteration, the function will restart the HBA again. The function returns 4684 * zero if HBA successfully restarted else returns negative error code. 4685 **/ 4686 int 4687 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4688 { 4689 uint32_t status, i = 0; 4690 4691 /* Read the HBA Host Status Register */ 4692 if (lpfc_readl(phba->HSregaddr, &status)) 4693 return -EIO; 4694 4695 /* Check status register to see what current state is */ 4696 i = 0; 4697 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4698 4699 /* Check every 10ms for 10 retries, then every 100ms for 90 4700 * retries, then every 1 sec for 50 retires for a total of 4701 * ~60 seconds before reset the board again and check every 4702 * 1 sec for 50 retries. The up to 60 seconds before the 4703 * board ready is required by the Falcon FIPS zeroization 4704 * complete, and any reset the board in between shall cause 4705 * restart of zeroization, further delay the board ready. 
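 *
 * Worked out against the sleeps below, that cadence is roughly
 * 10 x 10 ms + 90 x 100 ms + 50 x 1 s (about 59 seconds) before the
 * re-POST at iteration 150, and up to 50 more seconds of 1 s polls
 * before giving up with -ETIMEDOUT at iteration 200.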
4706 */ 4707 if (i++ >= 200) { 4708 /* Adapter failed to init, timeout, status reg 4709 <status> */ 4710 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4711 "0436 Adapter failed to init, " 4712 "timeout, status reg x%x, " 4713 "FW Data: A8 x%x AC x%x\n", status, 4714 readl(phba->MBslimaddr + 0xa8), 4715 readl(phba->MBslimaddr + 0xac)); 4716 phba->link_state = LPFC_HBA_ERROR; 4717 return -ETIMEDOUT; 4718 } 4719 4720 /* Check to see if any errors occurred during init */ 4721 if (status & HS_FFERM) { 4722 /* ERROR: During chipset initialization */ 4723 /* Adapter failed to init, chipset, status reg 4724 <status> */ 4725 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4726 "0437 Adapter failed to init, " 4727 "chipset, status reg x%x, " 4728 "FW Data: A8 x%x AC x%x\n", status, 4729 readl(phba->MBslimaddr + 0xa8), 4730 readl(phba->MBslimaddr + 0xac)); 4731 phba->link_state = LPFC_HBA_ERROR; 4732 return -EIO; 4733 } 4734 4735 if (i <= 10) 4736 msleep(10); 4737 else if (i <= 100) 4738 msleep(100); 4739 else 4740 msleep(1000); 4741 4742 if (i == 150) { 4743 /* Do post */ 4744 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4745 lpfc_sli_brdrestart(phba); 4746 } 4747 /* Read the HBA Host Status Register */ 4748 if (lpfc_readl(phba->HSregaddr, &status)) 4749 return -EIO; 4750 } 4751 4752 /* Check to see if any errors occurred during init */ 4753 if (status & HS_FFERM) { 4754 /* ERROR: During chipset initialization */ 4755 /* Adapter failed to init, chipset, status reg <status> */ 4756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4757 "0438 Adapter failed to init, chipset, " 4758 "status reg x%x, " 4759 "FW Data: A8 x%x AC x%x\n", status, 4760 readl(phba->MBslimaddr + 0xa8), 4761 readl(phba->MBslimaddr + 0xac)); 4762 phba->link_state = LPFC_HBA_ERROR; 4763 return -EIO; 4764 } 4765 4766 /* Clear all interrupt enable conditions */ 4767 writel(0, phba->HCregaddr); 4768 readl(phba->HCregaddr); /* flush */ 4769 4770 /* setup host attn register */ 4771 writel(0xffffffff, phba->HAregaddr); 4772 readl(phba->HAregaddr); /* flush */ 4773 return 0; 4774 } 4775 4776 /** 4777 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4778 * 4779 * This function calculates and returns the number of HBQs required to be 4780 * configured. 4781 **/ 4782 int 4783 lpfc_sli_hbq_count(void) 4784 { 4785 return ARRAY_SIZE(lpfc_hbq_defs); 4786 } 4787 4788 /** 4789 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4790 * 4791 * This function adds the number of hbq entries in every HBQ to get 4792 * the total number of hbq entries required for the HBA and returns 4793 * the total count. 4794 **/ 4795 static int 4796 lpfc_sli_hbq_entry_count(void) 4797 { 4798 int hbq_count = lpfc_sli_hbq_count(); 4799 int count = 0; 4800 int i; 4801 4802 for (i = 0; i < hbq_count; ++i) 4803 count += lpfc_hbq_defs[i]->entry_count; 4804 return count; 4805 } 4806 4807 /** 4808 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4809 * 4810 * This function calculates amount of memory required for all hbq entries 4811 * to be configured and returns the total memory required. 4812 **/ 4813 int 4814 lpfc_sli_hbq_size(void) 4815 { 4816 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4817 } 4818 4819 /** 4820 * lpfc_sli_hbq_setup - configure and initialize HBQs 4821 * @phba: Pointer to HBA context object. 4822 * 4823 * This function is called during the SLI initialization to configure 4824 * all the HBQs and post buffers to the HBQ. The caller is not 4825 * required to hold any locks. 
This function will return zero if successful 4826 * else it will return negative error code. 4827 **/ 4828 static int 4829 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4830 { 4831 int hbq_count = lpfc_sli_hbq_count(); 4832 LPFC_MBOXQ_t *pmb; 4833 MAILBOX_t *pmbox; 4834 uint32_t hbqno; 4835 uint32_t hbq_entry_index; 4836 4837 /* Get a Mailbox buffer to setup mailbox 4838 * commands for HBA initialization 4839 */ 4840 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4841 4842 if (!pmb) 4843 return -ENOMEM; 4844 4845 pmbox = &pmb->u.mb; 4846 4847 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4848 phba->link_state = LPFC_INIT_MBX_CMDS; 4849 phba->hbq_in_use = 1; 4850 4851 hbq_entry_index = 0; 4852 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4853 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4854 phba->hbqs[hbqno].hbqPutIdx = 0; 4855 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4856 phba->hbqs[hbqno].entry_count = 4857 lpfc_hbq_defs[hbqno]->entry_count; 4858 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4859 hbq_entry_index, pmb); 4860 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4861 4862 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4863 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4864 mbxStatus <status>, ring <num> */ 4865 4866 lpfc_printf_log(phba, KERN_ERR, 4867 LOG_SLI | LOG_VPORT, 4868 "1805 Adapter failed to init. " 4869 "Data: x%x x%x x%x\n", 4870 pmbox->mbxCommand, 4871 pmbox->mbxStatus, hbqno); 4872 4873 phba->link_state = LPFC_HBA_ERROR; 4874 mempool_free(pmb, phba->mbox_mem_pool); 4875 return -ENXIO; 4876 } 4877 } 4878 phba->hbq_count = hbq_count; 4879 4880 mempool_free(pmb, phba->mbox_mem_pool); 4881 4882 /* Initially populate or replenish the HBQs */ 4883 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4884 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4885 return 0; 4886 } 4887 4888 /** 4889 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4890 * @phba: Pointer to HBA context object. 4891 * 4892 * This function is called during the SLI initialization to configure 4893 * all the HBQs and post buffers to the HBQ. The caller is not 4894 * required to hold any locks. This function will return zero if successful 4895 * else it will return negative error code. 4896 **/ 4897 static int 4898 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4899 { 4900 phba->hbq_in_use = 1; 4901 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4902 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4903 phba->hbq_count = 1; 4904 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4905 /* Initially populate or replenish the HBQs */ 4906 return 0; 4907 } 4908 4909 /** 4910 * lpfc_sli_config_port - Issue config port mailbox command 4911 * @phba: Pointer to HBA context object. 4912 * @sli_mode: sli mode - 2/3 4913 * 4914 * This function is called by the sli initialization code path 4915 * to issue config_port mailbox command. This function restarts the 4916 * HBA firmware and issues a config_port mailbox command to configure 4917 * the SLI interface in the sli mode specified by sli_mode 4918 * variable. The caller is not required to hold any locks. 4919 * The function returns 0 if successful, else returns negative error 4920 * code. 
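 *
 * On a successful SLI-3 CONFIG_PORT the port reports which optional
 * features it granted in varCfgPort, and the driver latches them into
 * sli3_options; for example (taken from the handling below):
 *
 *     if (pmb->u.mb.un.varCfgPort.gerbm)
 *             phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
 *     if (pmb->u.mb.un.varCfgPort.gcrp)
 *             phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;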
4921 **/ 4922 int 4923 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4924 { 4925 LPFC_MBOXQ_t *pmb; 4926 uint32_t resetcount = 0, rc = 0, done = 0; 4927 4928 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4929 if (!pmb) { 4930 phba->link_state = LPFC_HBA_ERROR; 4931 return -ENOMEM; 4932 } 4933 4934 phba->sli_rev = sli_mode; 4935 while (resetcount < 2 && !done) { 4936 spin_lock_irq(&phba->hbalock); 4937 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4938 spin_unlock_irq(&phba->hbalock); 4939 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4940 lpfc_sli_brdrestart(phba); 4941 rc = lpfc_sli_chipset_init(phba); 4942 if (rc) 4943 break; 4944 4945 spin_lock_irq(&phba->hbalock); 4946 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4947 spin_unlock_irq(&phba->hbalock); 4948 resetcount++; 4949 4950 /* Call pre CONFIG_PORT mailbox command initialization. A 4951 * value of 0 means the call was successful. Any other 4952 * nonzero value is a failure, but if ERESTART is returned, 4953 * the driver may reset the HBA and try again. 4954 */ 4955 rc = lpfc_config_port_prep(phba); 4956 if (rc == -ERESTART) { 4957 phba->link_state = LPFC_LINK_UNKNOWN; 4958 continue; 4959 } else if (rc) 4960 break; 4961 4962 phba->link_state = LPFC_INIT_MBX_CMDS; 4963 lpfc_config_port(phba, pmb); 4964 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4965 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4966 LPFC_SLI3_HBQ_ENABLED | 4967 LPFC_SLI3_CRP_ENABLED | 4968 LPFC_SLI3_DSS_ENABLED); 4969 if (rc != MBX_SUCCESS) { 4970 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4971 "0442 Adapter failed to init, mbxCmd x%x " 4972 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4973 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4974 spin_lock_irq(&phba->hbalock); 4975 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4976 spin_unlock_irq(&phba->hbalock); 4977 rc = -ENXIO; 4978 } else { 4979 /* Allow asynchronous mailbox command to go through */ 4980 spin_lock_irq(&phba->hbalock); 4981 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4982 spin_unlock_irq(&phba->hbalock); 4983 done = 1; 4984 4985 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4986 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4987 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4988 "3110 Port did not grant ASABT\n"); 4989 } 4990 } 4991 if (!done) { 4992 rc = -EINVAL; 4993 goto do_prep_failed; 4994 } 4995 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4996 if (!pmb->u.mb.un.varCfgPort.cMA) { 4997 rc = -ENXIO; 4998 goto do_prep_failed; 4999 } 5000 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 5001 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 5002 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 5003 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 5004 phba->max_vpi : phba->max_vports; 5005 5006 } else 5007 phba->max_vpi = 0; 5008 phba->fips_level = 0; 5009 phba->fips_spec_rev = 0; 5010 if (pmb->u.mb.un.varCfgPort.gdss) { 5011 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 5012 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 5013 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 5014 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5015 "2850 Security Crypto Active. 
FIPS x%d " 5016 "(Spec Rev: x%d)", 5017 phba->fips_level, phba->fips_spec_rev); 5018 } 5019 if (pmb->u.mb.un.varCfgPort.sec_err) { 5020 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5021 "2856 Config Port Security Crypto " 5022 "Error: x%x ", 5023 pmb->u.mb.un.varCfgPort.sec_err); 5024 } 5025 if (pmb->u.mb.un.varCfgPort.gerbm) 5026 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 5027 if (pmb->u.mb.un.varCfgPort.gcrp) 5028 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 5029 5030 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 5031 phba->port_gp = phba->mbox->us.s3_pgp.port; 5032 5033 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 5034 if (pmb->u.mb.un.varCfgPort.gbg == 0) { 5035 phba->cfg_enable_bg = 0; 5036 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 5037 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5038 "0443 Adapter did not grant " 5039 "BlockGuard\n"); 5040 } 5041 } 5042 } else { 5043 phba->hbq_get = NULL; 5044 phba->port_gp = phba->mbox->us.s2.port; 5045 phba->max_vpi = 0; 5046 } 5047 do_prep_failed: 5048 mempool_free(pmb, phba->mbox_mem_pool); 5049 return rc; 5050 } 5051 5052 5053 /** 5054 * lpfc_sli_hba_setup - SLI initialization function 5055 * @phba: Pointer to HBA context object. 5056 * 5057 * This function is the main SLI initialization function. This function 5058 * is called by the HBA initialization code, HBA reset code and HBA 5059 * error attention handler code. Caller is not required to hold any 5060 * locks. This function issues config_port mailbox command to configure 5061 * the SLI, setup iocb rings and HBQ rings. In the end the function 5062 * calls the config_port_post function to issue init_link mailbox 5063 * command and to start the discovery. The function will return zero 5064 * if successful, else it will return negative error code. 5065 **/ 5066 int 5067 lpfc_sli_hba_setup(struct lpfc_hba *phba) 5068 { 5069 uint32_t rc; 5070 int mode = 3, i; 5071 int longs; 5072 5073 switch (phba->cfg_sli_mode) { 5074 case 2: 5075 if (phba->cfg_enable_npiv) { 5076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5077 "1824 NPIV enabled: Override sli_mode " 5078 "parameter (%d) to auto (0).\n", 5079 phba->cfg_sli_mode); 5080 break; 5081 } 5082 mode = 2; 5083 break; 5084 case 0: 5085 case 3: 5086 break; 5087 default: 5088 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5089 "1819 Unrecognized sli_mode parameter: %d.\n", 5090 phba->cfg_sli_mode); 5091 5092 break; 5093 } 5094 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 5095 5096 rc = lpfc_sli_config_port(phba, mode); 5097 5098 if (rc && phba->cfg_sli_mode == 3) 5099 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 5100 "1820 Unable to select SLI-3. 
" 5101 "Not supported by adapter.\n"); 5102 if (rc && mode != 2) 5103 rc = lpfc_sli_config_port(phba, 2); 5104 else if (rc && mode == 2) 5105 rc = lpfc_sli_config_port(phba, 3); 5106 if (rc) 5107 goto lpfc_sli_hba_setup_error; 5108 5109 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 5110 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 5111 rc = pci_enable_pcie_error_reporting(phba->pcidev); 5112 if (!rc) { 5113 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5114 "2709 This device supports " 5115 "Advanced Error Reporting (AER)\n"); 5116 spin_lock_irq(&phba->hbalock); 5117 phba->hba_flag |= HBA_AER_ENABLED; 5118 spin_unlock_irq(&phba->hbalock); 5119 } else { 5120 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5121 "2708 This device does not support " 5122 "Advanced Error Reporting (AER): %d\n", 5123 rc); 5124 phba->cfg_aer_support = 0; 5125 } 5126 } 5127 5128 if (phba->sli_rev == 3) { 5129 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 5130 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 5131 } else { 5132 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 5133 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 5134 phba->sli3_options = 0; 5135 } 5136 5137 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 5138 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 5139 phba->sli_rev, phba->max_vpi); 5140 rc = lpfc_sli_ring_map(phba); 5141 5142 if (rc) 5143 goto lpfc_sli_hba_setup_error; 5144 5145 /* Initialize VPIs. */ 5146 if (phba->sli_rev == LPFC_SLI_REV3) { 5147 /* 5148 * The VPI bitmask and physical ID array are allocated 5149 * and initialized once only - at driver load. A port 5150 * reset doesn't need to reinitialize this memory. 5151 */ 5152 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 5153 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 5154 phba->vpi_bmask = kcalloc(longs, 5155 sizeof(unsigned long), 5156 GFP_KERNEL); 5157 if (!phba->vpi_bmask) { 5158 rc = -ENOMEM; 5159 goto lpfc_sli_hba_setup_error; 5160 } 5161 5162 phba->vpi_ids = kcalloc(phba->max_vpi + 1, 5163 sizeof(uint16_t), 5164 GFP_KERNEL); 5165 if (!phba->vpi_ids) { 5166 kfree(phba->vpi_bmask); 5167 rc = -ENOMEM; 5168 goto lpfc_sli_hba_setup_error; 5169 } 5170 for (i = 0; i < phba->max_vpi; i++) 5171 phba->vpi_ids[i] = i; 5172 } 5173 } 5174 5175 /* Init HBQs */ 5176 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 5177 rc = lpfc_sli_hbq_setup(phba); 5178 if (rc) 5179 goto lpfc_sli_hba_setup_error; 5180 } 5181 spin_lock_irq(&phba->hbalock); 5182 phba->sli.sli_flag |= LPFC_PROCESS_LA; 5183 spin_unlock_irq(&phba->hbalock); 5184 5185 rc = lpfc_config_port_post(phba); 5186 if (rc) 5187 goto lpfc_sli_hba_setup_error; 5188 5189 return rc; 5190 5191 lpfc_sli_hba_setup_error: 5192 phba->link_state = LPFC_HBA_ERROR; 5193 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5194 "0445 Firmware initialization failed\n"); 5195 return rc; 5196 } 5197 5198 /** 5199 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 5200 * @phba: Pointer to HBA context object. 5201 * @mboxq: mailbox pointer. 5202 * This function issue a dump mailbox command to read config region 5203 * 23 and parse the records in the region and populate driver 5204 * data structure. 
5205 **/ 5206 static int 5207 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 5208 { 5209 LPFC_MBOXQ_t *mboxq; 5210 struct lpfc_dmabuf *mp; 5211 struct lpfc_mqe *mqe; 5212 uint32_t data_length; 5213 int rc; 5214 5215 /* Program the default value of vlan_id and fc_map */ 5216 phba->valid_vlan = 0; 5217 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 5218 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 5219 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5220 5221 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5222 if (!mboxq) 5223 return -ENOMEM; 5224 5225 mqe = &mboxq->u.mqe; 5226 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5227 rc = -ENOMEM; 5228 goto out_free_mboxq; 5229 } 5230 5231 mp = (struct lpfc_dmabuf *) mboxq->context1; 5232 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5233 5234 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5235 "(%d):2571 Mailbox cmd x%x Status x%x " 5236 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5237 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5238 "CQ: x%x x%x x%x x%x\n", 5239 mboxq->vport ? mboxq->vport->vpi : 0, 5240 bf_get(lpfc_mqe_command, mqe), 5241 bf_get(lpfc_mqe_status, mqe), 5242 mqe->un.mb_words[0], mqe->un.mb_words[1], 5243 mqe->un.mb_words[2], mqe->un.mb_words[3], 5244 mqe->un.mb_words[4], mqe->un.mb_words[5], 5245 mqe->un.mb_words[6], mqe->un.mb_words[7], 5246 mqe->un.mb_words[8], mqe->un.mb_words[9], 5247 mqe->un.mb_words[10], mqe->un.mb_words[11], 5248 mqe->un.mb_words[12], mqe->un.mb_words[13], 5249 mqe->un.mb_words[14], mqe->un.mb_words[15], 5250 mqe->un.mb_words[16], mqe->un.mb_words[50], 5251 mboxq->mcqe.word0, 5252 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5253 mboxq->mcqe.trailer); 5254 5255 if (rc) { 5256 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5257 kfree(mp); 5258 rc = -EIO; 5259 goto out_free_mboxq; 5260 } 5261 data_length = mqe->un.mb_words[5]; 5262 if (data_length > DMP_RGN23_SIZE) { 5263 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5264 kfree(mp); 5265 rc = -EIO; 5266 goto out_free_mboxq; 5267 } 5268 5269 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5270 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5271 kfree(mp); 5272 rc = 0; 5273 5274 out_free_mboxq: 5275 mempool_free(mboxq, phba->mbox_mem_pool); 5276 return rc; 5277 } 5278 5279 /** 5280 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5281 * @phba: pointer to lpfc hba data structure. 5282 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5283 * @vpd: pointer to the memory to hold resulting port vpd data. 5284 * @vpd_size: On input, the number of bytes allocated to @vpd. 5285 * On output, the number of data bytes in @vpd. 5286 * 5287 * This routine executes a READ_REV SLI4 mailbox command. In 5288 * addition, this routine gets the port vpd data. 5289 * 5290 * Return codes 5291 * 0 - successful 5292 * -ENOMEM - could not allocated memory. 5293 **/ 5294 static int 5295 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5296 uint8_t *vpd, uint32_t *vpd_size) 5297 { 5298 int rc = 0; 5299 uint32_t dma_size; 5300 struct lpfc_dmabuf *dmabuf; 5301 struct lpfc_mqe *mqe; 5302 5303 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5304 if (!dmabuf) 5305 return -ENOMEM; 5306 5307 /* 5308 * Get a DMA buffer for the vpd data resulting from the READ_REV 5309 * mailbox command. 
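 *
 * The buffer is sized to the caller's *vpd_size and is freed again on
 * every exit path below; only the (possibly smaller) number of valid
 * VPD bytes reported by the port is copied back to @vpd, and *vpd_size
 * is updated to reflect that count.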
5310 */ 5311 dma_size = *vpd_size; 5312 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5313 &dmabuf->phys, GFP_KERNEL); 5314 if (!dmabuf->virt) { 5315 kfree(dmabuf); 5316 return -ENOMEM; 5317 } 5318 5319 /* 5320 * The SLI4 implementation of READ_REV conflicts at word1, 5321 * bits 31:16 and SLI4 adds vpd functionality not present 5322 * in SLI3. This code corrects the conflicts. 5323 */ 5324 lpfc_read_rev(phba, mboxq); 5325 mqe = &mboxq->u.mqe; 5326 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5327 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5328 mqe->un.read_rev.word1 &= 0x0000FFFF; 5329 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5330 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5331 5332 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5333 if (rc) { 5334 dma_free_coherent(&phba->pcidev->dev, dma_size, 5335 dmabuf->virt, dmabuf->phys); 5336 kfree(dmabuf); 5337 return -EIO; 5338 } 5339 5340 /* 5341 * The available vpd length cannot be bigger than the 5342 * DMA buffer passed to the port. Catch the less than 5343 * case and update the caller's size. 5344 */ 5345 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5346 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5347 5348 memcpy(vpd, dmabuf->virt, *vpd_size); 5349 5350 dma_free_coherent(&phba->pcidev->dev, dma_size, 5351 dmabuf->virt, dmabuf->phys); 5352 kfree(dmabuf); 5353 return 0; 5354 } 5355 5356 /** 5357 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5358 * @phba: pointer to lpfc hba data structure. 5359 * 5360 * This routine retrieves SLI4 device physical port name this PCI function 5361 * is attached to. 5362 * 5363 * Return codes 5364 * 0 - successful 5365 * otherwise - failed to retrieve physical port name 5366 **/ 5367 static int 5368 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5369 { 5370 LPFC_MBOXQ_t *mboxq; 5371 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5372 struct lpfc_controller_attribute *cntl_attr; 5373 struct lpfc_mbx_get_port_name *get_port_name; 5374 void *virtaddr = NULL; 5375 uint32_t alloclen, reqlen; 5376 uint32_t shdr_status, shdr_add_status; 5377 union lpfc_sli4_cfg_shdr *shdr; 5378 char cport_name = 0; 5379 int rc; 5380 5381 /* We assume nothing at this point */ 5382 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5383 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5384 5385 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5386 if (!mboxq) 5387 return -ENOMEM; 5388 /* obtain link type and link number via READ_CONFIG */ 5389 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5390 lpfc_sli4_read_config(phba); 5391 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5392 goto retrieve_ppname; 5393 5394 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5395 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5396 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5397 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5398 LPFC_SLI4_MBX_NEMBED); 5399 if (alloclen < reqlen) { 5400 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5401 "3084 Allocated DMA memory size (%d) is " 5402 "less than the requested DMA memory size " 5403 "(%d)\n", alloclen, reqlen); 5404 rc = -ENOMEM; 5405 goto out_free_mboxq; 5406 } 5407 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5408 virtaddr = mboxq->sge_array->addr[0]; 5409 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5410 shdr = &mbx_cntl_attr->cfg_shdr; 5411 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 5412 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5413 if (shdr_status || shdr_add_status || rc) { 5414 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5415 "3085 Mailbox x%x (x%x/x%x) failed, " 5416 "rc:x%x, status:x%x, add_status:x%x\n", 5417 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5418 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5419 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5420 rc, shdr_status, shdr_add_status); 5421 rc = -ENXIO; 5422 goto out_free_mboxq; 5423 } 5424 cntl_attr = &mbx_cntl_attr->cntl_attr; 5425 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5426 phba->sli4_hba.lnk_info.lnk_tp = 5427 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5428 phba->sli4_hba.lnk_info.lnk_no = 5429 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5430 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5431 "3086 lnk_type:%d, lnk_numb:%d\n", 5432 phba->sli4_hba.lnk_info.lnk_tp, 5433 phba->sli4_hba.lnk_info.lnk_no); 5434 5435 retrieve_ppname: 5436 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5437 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5438 sizeof(struct lpfc_mbx_get_port_name) - 5439 sizeof(struct lpfc_sli4_cfg_mhdr), 5440 LPFC_SLI4_MBX_EMBED); 5441 get_port_name = &mboxq->u.mqe.un.get_port_name; 5442 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5443 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5444 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5445 phba->sli4_hba.lnk_info.lnk_tp); 5446 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5447 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5448 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5449 if (shdr_status || shdr_add_status || rc) { 5450 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5451 "3087 Mailbox x%x (x%x/x%x) failed: " 5452 "rc:x%x, status:x%x, add_status:x%x\n", 5453 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5454 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5455 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5456 rc, shdr_status, shdr_add_status); 5457 rc = -ENXIO; 5458 goto out_free_mboxq; 5459 } 5460 switch (phba->sli4_hba.lnk_info.lnk_no) { 5461 case LPFC_LINK_NUMBER_0: 5462 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 5463 &get_port_name->u.response); 5464 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5465 break; 5466 case LPFC_LINK_NUMBER_1: 5467 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5468 &get_port_name->u.response); 5469 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5470 break; 5471 case LPFC_LINK_NUMBER_2: 5472 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5473 &get_port_name->u.response); 5474 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5475 break; 5476 case LPFC_LINK_NUMBER_3: 5477 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5478 &get_port_name->u.response); 5479 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5480 break; 5481 default: 5482 break; 5483 } 5484 5485 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5486 phba->Port[0] = cport_name; 5487 phba->Port[1] = '\0'; 5488 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5489 "3091 SLI get port name: %s\n", phba->Port); 5490 } 5491 5492 out_free_mboxq: 5493 if (rc != MBX_TIMEOUT) { 5494 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5495 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5496 else 5497 mempool_free(mboxq, phba->mbox_mem_pool); 5498 } 5499 return rc; 5500 } 5501 5502 /** 5503 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device 
completion and event queues 5504 * @phba: pointer to lpfc hba data structure. 5505 * 5506 * This routine is called to explicitly arm the SLI4 device's completion and 5507 * event queues 5508 **/ 5509 static void 5510 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5511 { 5512 int qidx; 5513 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 5514 5515 sli4_hba->sli4_cq_release(sli4_hba->mbx_cq, LPFC_QUEUE_REARM); 5516 sli4_hba->sli4_cq_release(sli4_hba->els_cq, LPFC_QUEUE_REARM); 5517 if (sli4_hba->nvmels_cq) 5518 sli4_hba->sli4_cq_release(sli4_hba->nvmels_cq, 5519 LPFC_QUEUE_REARM); 5520 5521 if (sli4_hba->fcp_cq) 5522 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 5523 sli4_hba->sli4_cq_release(sli4_hba->fcp_cq[qidx], 5524 LPFC_QUEUE_REARM); 5525 5526 if (sli4_hba->nvme_cq) 5527 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 5528 sli4_hba->sli4_cq_release(sli4_hba->nvme_cq[qidx], 5529 LPFC_QUEUE_REARM); 5530 5531 if (phba->cfg_fof) 5532 sli4_hba->sli4_cq_release(sli4_hba->oas_cq, LPFC_QUEUE_REARM); 5533 5534 if (sli4_hba->hba_eq) 5535 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 5536 sli4_hba->sli4_eq_release(sli4_hba->hba_eq[qidx], 5537 LPFC_QUEUE_REARM); 5538 5539 if (phba->nvmet_support) { 5540 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5541 sli4_hba->sli4_cq_release( 5542 sli4_hba->nvmet_cqset[qidx], 5543 LPFC_QUEUE_REARM); 5544 } 5545 } 5546 5547 if (phba->cfg_fof) 5548 sli4_hba->sli4_eq_release(sli4_hba->fof_eq, LPFC_QUEUE_REARM); 5549 } 5550 5551 /** 5552 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5553 * @phba: Pointer to HBA context object. 5554 * @type: The resource extent type. 5555 * @extnt_count: buffer to hold port available extent count. 5556 * @extnt_size: buffer to hold element count per extent. 5557 * 5558 * This function calls the port and retrievs the number of available 5559 * extents and their size for a particular extent type. 5560 * 5561 * Returns: 0 if successful. Nonzero otherwise. 5562 **/ 5563 int 5564 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5565 uint16_t *extnt_count, uint16_t *extnt_size) 5566 { 5567 int rc = 0; 5568 uint32_t length; 5569 uint32_t mbox_tmo; 5570 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5571 LPFC_MBOXQ_t *mbox; 5572 5573 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5574 if (!mbox) 5575 return -ENOMEM; 5576 5577 /* Find out how many extents are available for this resource type */ 5578 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5579 sizeof(struct lpfc_sli4_cfg_mhdr)); 5580 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5581 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5582 length, LPFC_SLI4_MBX_EMBED); 5583 5584 /* Send an extents count of 0 - the GET doesn't use it. 
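 * The request is sent as an embedded SLI4_CONFIG command (polled when
 * interrupts are not yet enabled); the response carries the available
 * extent count and the elements-per-extent size, which are parsed from
 * the rsrc_extent_info response below.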
*/ 5585 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5586 LPFC_SLI4_MBX_EMBED); 5587 if (unlikely(rc)) { 5588 rc = -EIO; 5589 goto err_exit; 5590 } 5591 5592 if (!phba->sli4_hba.intr_enable) 5593 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5594 else { 5595 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5596 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5597 } 5598 if (unlikely(rc)) { 5599 rc = -EIO; 5600 goto err_exit; 5601 } 5602 5603 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5604 if (bf_get(lpfc_mbox_hdr_status, 5605 &rsrc_info->header.cfg_shdr.response)) { 5606 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5607 "2930 Failed to get resource extents " 5608 "Status 0x%x Add'l Status 0x%x\n", 5609 bf_get(lpfc_mbox_hdr_status, 5610 &rsrc_info->header.cfg_shdr.response), 5611 bf_get(lpfc_mbox_hdr_add_status, 5612 &rsrc_info->header.cfg_shdr.response)); 5613 rc = -EIO; 5614 goto err_exit; 5615 } 5616 5617 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5618 &rsrc_info->u.rsp); 5619 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5620 &rsrc_info->u.rsp); 5621 5622 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5623 "3162 Retrieved extents type-%d from port: count:%d, " 5624 "size:%d\n", type, *extnt_count, *extnt_size); 5625 5626 err_exit: 5627 mempool_free(mbox, phba->mbox_mem_pool); 5628 return rc; 5629 } 5630 5631 /** 5632 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5633 * @phba: Pointer to HBA context object. 5634 * @type: The extent type to check. 5635 * 5636 * This function reads the current available extents from the port and checks 5637 * if the extent count or extent size has changed since the last access. 5638 * Callers use this routine post port reset to understand if there is a 5639 * extent reprovisioning requirement. 5640 * 5641 * Returns: 5642 * -Error: error indicates problem. 5643 * 1: Extent count or size has changed. 5644 * 0: No changes. 5645 **/ 5646 static int 5647 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5648 { 5649 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5650 uint16_t size_diff, rsrc_ext_size; 5651 int rc = 0; 5652 struct lpfc_rsrc_blks *rsrc_entry; 5653 struct list_head *rsrc_blk_list = NULL; 5654 5655 size_diff = 0; 5656 curr_ext_cnt = 0; 5657 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5658 &rsrc_ext_cnt, 5659 &rsrc_ext_size); 5660 if (unlikely(rc)) 5661 return -EIO; 5662 5663 switch (type) { 5664 case LPFC_RSC_TYPE_FCOE_RPI: 5665 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5666 break; 5667 case LPFC_RSC_TYPE_FCOE_VPI: 5668 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5669 break; 5670 case LPFC_RSC_TYPE_FCOE_XRI: 5671 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5672 break; 5673 case LPFC_RSC_TYPE_FCOE_VFI: 5674 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5675 break; 5676 default: 5677 break; 5678 } 5679 5680 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5681 curr_ext_cnt++; 5682 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5683 size_diff++; 5684 } 5685 5686 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5687 rc = 1; 5688 5689 return rc; 5690 } 5691 5692 /** 5693 * lpfc_sli4_cfg_post_extnts - 5694 * @phba: Pointer to HBA context object. 5695 * @extnt_cnt - number of available extents. 5696 * @type - the extent type (rpi, xri, vfi, vpi). 5697 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5698 * @mbox - pointer to the caller's allocated mailbox structure. 
5699 * 5700 * This function executes the extents allocation request. It also 5701 * takes care of the amount of memory needed to allocate or get the 5702 * allocated extents. It is the caller's responsibility to evaluate 5703 * the response. 5704 * 5705 * Returns: 5706 * -Error: Error value describes the condition found. 5707 * 0: if successful 5708 **/ 5709 static int 5710 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5711 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5712 { 5713 int rc = 0; 5714 uint32_t req_len; 5715 uint32_t emb_len; 5716 uint32_t alloc_len, mbox_tmo; 5717 5718 /* Calculate the total requested length of the dma memory */ 5719 req_len = extnt_cnt * sizeof(uint16_t); 5720 5721 /* 5722 * Calculate the size of an embedded mailbox. The uint32_t 5723 * accounts for extents-specific word. 5724 */ 5725 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5726 sizeof(uint32_t); 5727 5728 /* 5729 * Presume the allocation and response will fit into an embedded 5730 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5731 */ 5732 *emb = LPFC_SLI4_MBX_EMBED; 5733 if (req_len > emb_len) { 5734 req_len = extnt_cnt * sizeof(uint16_t) + 5735 sizeof(union lpfc_sli4_cfg_shdr) + 5736 sizeof(uint32_t); 5737 *emb = LPFC_SLI4_MBX_NEMBED; 5738 } 5739 5740 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5741 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5742 req_len, *emb); 5743 if (alloc_len < req_len) { 5744 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5745 "2982 Allocated DMA memory size (x%x) is " 5746 "less than the requested DMA memory " 5747 "size (x%x)\n", alloc_len, req_len); 5748 return -ENOMEM; 5749 } 5750 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5751 if (unlikely(rc)) 5752 return -EIO; 5753 5754 if (!phba->sli4_hba.intr_enable) 5755 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5756 else { 5757 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5758 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5759 } 5760 5761 if (unlikely(rc)) 5762 rc = -EIO; 5763 return rc; 5764 } 5765 5766 /** 5767 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5768 * @phba: Pointer to HBA context object. 5769 * @type: The resource extent type to allocate. 5770 * 5771 * This function allocates the number of elements for the specified 5772 * resource type. 
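 *
 * On success the routine posts an ALLOC_RSRC_EXTENT mailbox command,
 * sizes the id bitmask and id array for the type, and records each
 * returned extent on the corresponding block list.
 *
 * Return codes: 0 on success, -ENOMEM when no extents are available or a
 * host allocation fails, -EIO on mailbox errors.
 *
 * Illustrative call (mirrors the use in
 * lpfc_sli4_alloc_resource_identifiers(), example only):
 *
 *   rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 *   if (unlikely(rc))
 *           goto err_exit;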
5773 **/ 5774 static int 5775 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5776 { 5777 bool emb = false; 5778 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5779 uint16_t rsrc_id, rsrc_start, j, k; 5780 uint16_t *ids; 5781 int i, rc; 5782 unsigned long longs; 5783 unsigned long *bmask; 5784 struct lpfc_rsrc_blks *rsrc_blks; 5785 LPFC_MBOXQ_t *mbox; 5786 uint32_t length; 5787 struct lpfc_id_range *id_array = NULL; 5788 void *virtaddr = NULL; 5789 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5790 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5791 struct list_head *ext_blk_list; 5792 5793 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5794 &rsrc_cnt, 5795 &rsrc_size); 5796 if (unlikely(rc)) 5797 return -EIO; 5798 5799 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5800 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5801 "3009 No available Resource Extents " 5802 "for resource type 0x%x: Count: 0x%x, " 5803 "Size 0x%x\n", type, rsrc_cnt, 5804 rsrc_size); 5805 return -ENOMEM; 5806 } 5807 5808 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5809 "2903 Post resource extents type-0x%x: " 5810 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5811 5812 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5813 if (!mbox) 5814 return -ENOMEM; 5815 5816 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5817 if (unlikely(rc)) { 5818 rc = -EIO; 5819 goto err_exit; 5820 } 5821 5822 /* 5823 * Figure out where the response is located. Then get local pointers 5824 * to the response data. The port does not guarantee to respond to 5825 * all extents counts request so update the local variable with the 5826 * allocated count from the port. 5827 */ 5828 if (emb == LPFC_SLI4_MBX_EMBED) { 5829 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5830 id_array = &rsrc_ext->u.rsp.id[0]; 5831 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5832 } else { 5833 virtaddr = mbox->sge_array->addr[0]; 5834 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5835 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5836 id_array = &n_rsrc->id; 5837 } 5838 5839 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5840 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5841 5842 /* 5843 * Based on the resource size and count, correct the base and max 5844 * resource values. 5845 */ 5846 length = sizeof(struct lpfc_rsrc_blks); 5847 switch (type) { 5848 case LPFC_RSC_TYPE_FCOE_RPI: 5849 phba->sli4_hba.rpi_bmask = kcalloc(longs, 5850 sizeof(unsigned long), 5851 GFP_KERNEL); 5852 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5853 rc = -ENOMEM; 5854 goto err_exit; 5855 } 5856 phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt, 5857 sizeof(uint16_t), 5858 GFP_KERNEL); 5859 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5860 kfree(phba->sli4_hba.rpi_bmask); 5861 rc = -ENOMEM; 5862 goto err_exit; 5863 } 5864 5865 /* 5866 * The next_rpi was initialized with the maximum available 5867 * count but the port may allocate a smaller number. Catch 5868 * that case and update the next_rpi. 5869 */ 5870 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5871 5872 /* Initialize local ptrs for common extent processing later. 
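 * bmask tracks which ids are currently in use, ids[] maps a logical
 * index to the port-assigned resource id, and ext_blk_list collects
 * one lpfc_rsrc_blks entry per allocated extent.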
*/ 5873 bmask = phba->sli4_hba.rpi_bmask; 5874 ids = phba->sli4_hba.rpi_ids; 5875 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5876 break; 5877 case LPFC_RSC_TYPE_FCOE_VPI: 5878 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 5879 GFP_KERNEL); 5880 if (unlikely(!phba->vpi_bmask)) { 5881 rc = -ENOMEM; 5882 goto err_exit; 5883 } 5884 phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t), 5885 GFP_KERNEL); 5886 if (unlikely(!phba->vpi_ids)) { 5887 kfree(phba->vpi_bmask); 5888 rc = -ENOMEM; 5889 goto err_exit; 5890 } 5891 5892 /* Initialize local ptrs for common extent processing later. */ 5893 bmask = phba->vpi_bmask; 5894 ids = phba->vpi_ids; 5895 ext_blk_list = &phba->lpfc_vpi_blk_list; 5896 break; 5897 case LPFC_RSC_TYPE_FCOE_XRI: 5898 phba->sli4_hba.xri_bmask = kcalloc(longs, 5899 sizeof(unsigned long), 5900 GFP_KERNEL); 5901 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5902 rc = -ENOMEM; 5903 goto err_exit; 5904 } 5905 phba->sli4_hba.max_cfg_param.xri_used = 0; 5906 phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt, 5907 sizeof(uint16_t), 5908 GFP_KERNEL); 5909 if (unlikely(!phba->sli4_hba.xri_ids)) { 5910 kfree(phba->sli4_hba.xri_bmask); 5911 rc = -ENOMEM; 5912 goto err_exit; 5913 } 5914 5915 /* Initialize local ptrs for common extent processing later. */ 5916 bmask = phba->sli4_hba.xri_bmask; 5917 ids = phba->sli4_hba.xri_ids; 5918 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5919 break; 5920 case LPFC_RSC_TYPE_FCOE_VFI: 5921 phba->sli4_hba.vfi_bmask = kcalloc(longs, 5922 sizeof(unsigned long), 5923 GFP_KERNEL); 5924 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5925 rc = -ENOMEM; 5926 goto err_exit; 5927 } 5928 phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt, 5929 sizeof(uint16_t), 5930 GFP_KERNEL); 5931 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5932 kfree(phba->sli4_hba.vfi_bmask); 5933 rc = -ENOMEM; 5934 goto err_exit; 5935 } 5936 5937 /* Initialize local ptrs for common extent processing later. */ 5938 bmask = phba->sli4_hba.vfi_bmask; 5939 ids = phba->sli4_hba.vfi_ids; 5940 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5941 break; 5942 default: 5943 /* Unsupported Opcode. Fail call. */ 5944 id_array = NULL; 5945 bmask = NULL; 5946 ids = NULL; 5947 ext_blk_list = NULL; 5948 goto err_exit; 5949 } 5950 5951 /* 5952 * Complete initializing the extent configuration with the 5953 * allocated ids assigned to this function. The bitmask serves 5954 * as an index into the array and manages the available ids. The 5955 * array just stores the ids communicated to the port via the wqes. 5956 */ 5957 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5958 if ((i % 2) == 0) 5959 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5960 &id_array[k]); 5961 else 5962 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5963 &id_array[k]); 5964 5965 rsrc_blks = kzalloc(length, GFP_KERNEL); 5966 if (unlikely(!rsrc_blks)) { 5967 rc = -ENOMEM; 5968 kfree(bmask); 5969 kfree(ids); 5970 goto err_exit; 5971 } 5972 rsrc_blks->rsrc_start = rsrc_id; 5973 rsrc_blks->rsrc_size = rsrc_size; 5974 list_add_tail(&rsrc_blks->list, ext_blk_list); 5975 rsrc_start = rsrc_id; 5976 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 5977 phba->sli4_hba.scsi_xri_start = rsrc_start + 5978 lpfc_sli4_get_iocb_cnt(phba); 5979 phba->sli4_hba.nvme_xri_start = 5980 phba->sli4_hba.scsi_xri_start + 5981 phba->sli4_hba.scsi_xri_max; 5982 } 5983 5984 while (rsrc_id < (rsrc_start + rsrc_size)) { 5985 ids[j] = rsrc_id; 5986 rsrc_id++; 5987 j++; 5988 } 5989 /* Entire word processed. 
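 * Each id_array word packs two 16-bit resource ids (word4_0 and
 * word4_1), so the word index k is advanced only after both halves
 * have been consumed.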
Get next word.*/ 5990 if ((i % 2) == 1) 5991 k++; 5992 } 5993 err_exit: 5994 lpfc_sli4_mbox_cmd_free(phba, mbox); 5995 return rc; 5996 } 5997 5998 5999 6000 /** 6001 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 6002 * @phba: Pointer to HBA context object. 6003 * @type: the extent's type. 6004 * 6005 * This function deallocates all extents of a particular resource type. 6006 * SLI4 does not allow for deallocating a particular extent range. It 6007 * is the caller's responsibility to release all kernel memory resources. 6008 **/ 6009 static int 6010 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 6011 { 6012 int rc; 6013 uint32_t length, mbox_tmo = 0; 6014 LPFC_MBOXQ_t *mbox; 6015 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 6016 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 6017 6018 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6019 if (!mbox) 6020 return -ENOMEM; 6021 6022 /* 6023 * This function sends an embedded mailbox because it only sends the 6024 * the resource type. All extents of this type are released by the 6025 * port. 6026 */ 6027 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 6028 sizeof(struct lpfc_sli4_cfg_mhdr)); 6029 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6030 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 6031 length, LPFC_SLI4_MBX_EMBED); 6032 6033 /* Send an extents count of 0 - the dealloc doesn't use it. */ 6034 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 6035 LPFC_SLI4_MBX_EMBED); 6036 if (unlikely(rc)) { 6037 rc = -EIO; 6038 goto out_free_mbox; 6039 } 6040 if (!phba->sli4_hba.intr_enable) 6041 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6042 else { 6043 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6044 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6045 } 6046 if (unlikely(rc)) { 6047 rc = -EIO; 6048 goto out_free_mbox; 6049 } 6050 6051 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 6052 if (bf_get(lpfc_mbox_hdr_status, 6053 &dealloc_rsrc->header.cfg_shdr.response)) { 6054 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6055 "2919 Failed to release resource extents " 6056 "for type %d - Status 0x%x Add'l Status 0x%x. " 6057 "Resource memory not released.\n", 6058 type, 6059 bf_get(lpfc_mbox_hdr_status, 6060 &dealloc_rsrc->header.cfg_shdr.response), 6061 bf_get(lpfc_mbox_hdr_add_status, 6062 &dealloc_rsrc->header.cfg_shdr.response)); 6063 rc = -EIO; 6064 goto out_free_mbox; 6065 } 6066 6067 /* Release kernel memory resources for the specific type. 
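 * The DEALLOC_RSRC_EXTENT mailbox above has already returned the
 * extents to the port; what remains is to free the remaining host-side
 * bitmasks, id arrays and per-extent block lists for the given type.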
*/ 6068 switch (type) { 6069 case LPFC_RSC_TYPE_FCOE_VPI: 6070 kfree(phba->vpi_bmask); 6071 kfree(phba->vpi_ids); 6072 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6073 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6074 &phba->lpfc_vpi_blk_list, list) { 6075 list_del_init(&rsrc_blk->list); 6076 kfree(rsrc_blk); 6077 } 6078 phba->sli4_hba.max_cfg_param.vpi_used = 0; 6079 break; 6080 case LPFC_RSC_TYPE_FCOE_XRI: 6081 kfree(phba->sli4_hba.xri_bmask); 6082 kfree(phba->sli4_hba.xri_ids); 6083 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6084 &phba->sli4_hba.lpfc_xri_blk_list, list) { 6085 list_del_init(&rsrc_blk->list); 6086 kfree(rsrc_blk); 6087 } 6088 break; 6089 case LPFC_RSC_TYPE_FCOE_VFI: 6090 kfree(phba->sli4_hba.vfi_bmask); 6091 kfree(phba->sli4_hba.vfi_ids); 6092 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6093 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6094 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 6095 list_del_init(&rsrc_blk->list); 6096 kfree(rsrc_blk); 6097 } 6098 break; 6099 case LPFC_RSC_TYPE_FCOE_RPI: 6100 /* RPI bitmask and physical id array are cleaned up earlier. */ 6101 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 6102 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 6103 list_del_init(&rsrc_blk->list); 6104 kfree(rsrc_blk); 6105 } 6106 break; 6107 default: 6108 break; 6109 } 6110 6111 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 6112 6113 out_free_mbox: 6114 mempool_free(mbox, phba->mbox_mem_pool); 6115 return rc; 6116 } 6117 6118 static void 6119 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 6120 uint32_t feature) 6121 { 6122 uint32_t len; 6123 6124 len = sizeof(struct lpfc_mbx_set_feature) - 6125 sizeof(struct lpfc_sli4_cfg_mhdr); 6126 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6127 LPFC_MBOX_OPCODE_SET_FEATURES, len, 6128 LPFC_SLI4_MBX_EMBED); 6129 6130 switch (feature) { 6131 case LPFC_SET_UE_RECOVERY: 6132 bf_set(lpfc_mbx_set_feature_UER, 6133 &mbox->u.mqe.un.set_feature, 1); 6134 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 6135 mbox->u.mqe.un.set_feature.param_len = 8; 6136 break; 6137 case LPFC_SET_MDS_DIAGS: 6138 bf_set(lpfc_mbx_set_feature_mds, 6139 &mbox->u.mqe.un.set_feature, 1); 6140 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 6141 &mbox->u.mqe.un.set_feature, 1); 6142 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 6143 mbox->u.mqe.un.set_feature.param_len = 8; 6144 break; 6145 } 6146 6147 return; 6148 } 6149 6150 /** 6151 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging. 6152 * @phba: Pointer to HBA context object. 6153 * 6154 * This function is called to free memory allocated for RAS FW logging 6155 * support in the driver. 
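 * Both the firmware log buffers queued on fwlog_buff_list and the LWPD
 * buffer are returned to the coherent DMA pool, and ras_active is
 * cleared so logging is treated as disabled until re-initialized.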
6156 **/
6157 void
6158 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6159 {
6160 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6161 struct lpfc_dmabuf *dmabuf, *next;
6162
6163 if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6164 list_for_each_entry_safe(dmabuf, next,
6165 &ras_fwlog->fwlog_buff_list,
6166 list) {
6167 list_del(&dmabuf->list);
6168 dma_free_coherent(&phba->pcidev->dev,
6169 LPFC_RAS_MAX_ENTRY_SIZE,
6170 dmabuf->virt, dmabuf->phys);
6171 kfree(dmabuf);
6172 }
6173 }
6174
6175 if (ras_fwlog->lwpd.virt) {
6176 dma_free_coherent(&phba->pcidev->dev,
6177 sizeof(uint32_t) * 2,
6178 ras_fwlog->lwpd.virt,
6179 ras_fwlog->lwpd.phys);
6180 ras_fwlog->lwpd.virt = NULL;
6181 }
6182
6183 ras_fwlog->ras_active = false;
6184 }
6185
6186 /**
6187 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6188 * @phba: Pointer to HBA context object.
6189 * @fwlog_buff_count: Count of buffers to be created.
6190 *
6191 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6192 * and for the buffers that are posted to the adapter to receive the FW log.
6193 * The buffer count is derived from the ras_fwlog_buffsize module parameter;
6194 * each buffer posted to the FW is 64K (LPFC_RAS_MAX_ENTRY_SIZE).
6195 **/
6196
6197 static int
6198 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6199 uint32_t fwlog_buff_count)
6200 {
6201 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6202 struct lpfc_dmabuf *dmabuf;
6203 int rc = 0, i = 0;
6204
6205 /* Initialize List */
6206 INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6207
6208 /* Allocate memory for the LWPD */
6209 ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6210 sizeof(uint32_t) * 2,
6211 &ras_fwlog->lwpd.phys,
6212 GFP_KERNEL);
6213 if (!ras_fwlog->lwpd.virt) {
6214 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6215 "6185 LWPD Memory Alloc Failed\n");
6216
6217 return -ENOMEM;
6218 }
6219
6220 ras_fwlog->fw_buffcount = fwlog_buff_count;
6221 for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6222 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6223 GFP_KERNEL);
6224 if (!dmabuf) {
6225 rc = -ENOMEM;
6226 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6227 "6186 Memory Alloc failed FW logging");
6228 goto free_mem;
6229 }
6230
6231 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6232 LPFC_RAS_MAX_ENTRY_SIZE,
6233 &dmabuf->phys,
6234 GFP_KERNEL);
6235 if (!dmabuf->virt) {
6236 kfree(dmabuf);
6237 rc = -ENOMEM;
6238 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6239 "6187 DMA Alloc Failed FW logging");
6240 goto free_mem;
6241 }
6242 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
6243 dmabuf->buffer_tag = i;
6244 list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6245 }
6246
6247 free_mem:
6248 if (rc)
6249 lpfc_sli4_ras_dma_free(phba);
6250
6251 return rc;
6252 }
6253
6254 /**
6255 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6256 * @phba: pointer to lpfc hba data structure.
6257 * @pmb: pointer to the driver internal queue element for mailbox command.
6258 *
6259 * Completion handler for driver's RAS MBX command to the device.
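 * On success ras_active is set and the mailbox is returned to the
 * mailbox pool; on any failure the RAS DMA buffers are freed and
 * firmware logging remains disabled.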
6260 **/ 6261 static void 6262 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 6263 { 6264 MAILBOX_t *mb; 6265 union lpfc_sli4_cfg_shdr *shdr; 6266 uint32_t shdr_status, shdr_add_status; 6267 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6268 6269 mb = &pmb->u.mb; 6270 6271 shdr = (union lpfc_sli4_cfg_shdr *) 6272 &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr; 6273 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 6274 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 6275 6276 if (mb->mbxStatus != MBX_SUCCESS || shdr_status) { 6277 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 6278 "6188 FW LOG mailbox " 6279 "completed with status x%x add_status x%x," 6280 " mbx status x%x\n", 6281 shdr_status, shdr_add_status, mb->mbxStatus); 6282 goto disable_ras; 6283 } 6284 6285 ras_fwlog->ras_active = true; 6286 mempool_free(pmb, phba->mbox_mem_pool); 6287 6288 return; 6289 6290 disable_ras: 6291 /* Free RAS DMA memory */ 6292 lpfc_sli4_ras_dma_free(phba); 6293 mempool_free(pmb, phba->mbox_mem_pool); 6294 } 6295 6296 /** 6297 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command 6298 * @phba: pointer to lpfc hba data structure. 6299 * @fwlog_level: Logging verbosity level. 6300 * @fwlog_enable: Enable/Disable logging. 6301 * 6302 * Initialize memory and post mailbox command to enable FW logging in host 6303 * memory. 6304 **/ 6305 int 6306 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, 6307 uint32_t fwlog_level, 6308 uint32_t fwlog_enable) 6309 { 6310 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; 6311 struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL; 6312 struct lpfc_dmabuf *dmabuf; 6313 LPFC_MBOXQ_t *mbox; 6314 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; 6315 int rc = 0; 6316 6317 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * 6318 phba->cfg_ras_fwlog_buffsize); 6319 fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); 6320 6321 /* 6322 * If re-enabling FW logging support use earlier allocated 6323 * DMA buffers while posting MBX command. 
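 * fwlog_entry_count above is (cfg_ras_fwlog_buffsize *
 * LPFC_RAS_MIN_BUFF_POST_SIZE) / LPFC_RAS_MAX_ENTRY_SIZE, i.e. the
 * number of 64K buffers needed for the requested logging size.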
6324 **/ 6325 if (!ras_fwlog->lwpd.virt) { 6326 rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count); 6327 if (rc) { 6328 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6329 "6189 RAS FW Log Support Not Enabled"); 6330 return rc; 6331 } 6332 } 6333 6334 /* Setup Mailbox command */ 6335 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6336 if (!mbox) { 6337 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6338 "6190 RAS MBX Alloc Failed"); 6339 rc = -ENOMEM; 6340 goto mem_free; 6341 } 6342 6343 ras_fwlog->fw_loglevel = fwlog_level; 6344 len = (sizeof(struct lpfc_mbx_set_ras_fwlog) - 6345 sizeof(struct lpfc_sli4_cfg_mhdr)); 6346 6347 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL, 6348 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION, 6349 len, LPFC_SLI4_MBX_EMBED); 6350 6351 mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog; 6352 bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request, 6353 fwlog_enable); 6354 bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request, 6355 ras_fwlog->fw_loglevel); 6356 bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request, 6357 ras_fwlog->fw_buffcount); 6358 bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request, 6359 LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE); 6360 6361 /* Update DMA buffer address */ 6362 list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) { 6363 memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE); 6364 6365 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo = 6366 putPaddrLow(dmabuf->phys); 6367 6368 mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi = 6369 putPaddrHigh(dmabuf->phys); 6370 } 6371 6372 /* Update LPWD address */ 6373 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); 6374 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); 6375 6376 mbox->vport = phba->pport; 6377 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; 6378 6379 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 6380 6381 if (rc == MBX_NOT_FINISHED) { 6382 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6383 "6191 RAS Mailbox failed. " 6384 "status %d mbxStatus : x%x", rc, 6385 bf_get(lpfc_mqe_status, &mbox->u.mqe)); 6386 mempool_free(mbox, phba->mbox_mem_pool); 6387 rc = -EIO; 6388 goto mem_free; 6389 } else 6390 rc = 0; 6391 mem_free: 6392 if (rc) 6393 lpfc_sli4_ras_dma_free(phba); 6394 6395 return rc; 6396 } 6397 6398 /** 6399 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter 6400 * @phba: Pointer to HBA context object. 6401 * 6402 * Check if RAS is supported on the adapter and initialize it. 6403 **/ 6404 void 6405 lpfc_sli4_ras_setup(struct lpfc_hba *phba) 6406 { 6407 /* Check RAS FW Log needs to be enabled or not */ 6408 if (lpfc_check_fwlog_support(phba)) 6409 return; 6410 6411 lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, 6412 LPFC_RAS_ENABLE_LOGGING); 6413 } 6414 6415 /** 6416 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 6417 * @phba: Pointer to HBA context object. 6418 * 6419 * This function allocates all SLI4 resource identifiers. 6420 **/ 6421 int 6422 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 6423 { 6424 int i, rc, error = 0; 6425 uint16_t count, base; 6426 unsigned long longs; 6427 6428 if (!phba->sli4_hba.rpi_hdrs_in_use) 6429 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 6430 if (phba->sli4_hba.extents_in_use) { 6431 /* 6432 * The port supports resource extents. The XRI, VPI, VFI, RPI 6433 * resource extent count must be read and allocated before 6434 * provisioning the resource id arrays. 
6435 */ 6436 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6437 LPFC_IDX_RSRC_RDY) { 6438 /* 6439 * Extent-based resources are set - the driver could 6440 * be in a port reset. Figure out if any corrective 6441 * actions need to be taken. 6442 */ 6443 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6444 LPFC_RSC_TYPE_FCOE_VFI); 6445 if (rc != 0) 6446 error++; 6447 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6448 LPFC_RSC_TYPE_FCOE_VPI); 6449 if (rc != 0) 6450 error++; 6451 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6452 LPFC_RSC_TYPE_FCOE_XRI); 6453 if (rc != 0) 6454 error++; 6455 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 6456 LPFC_RSC_TYPE_FCOE_RPI); 6457 if (rc != 0) 6458 error++; 6459 6460 /* 6461 * It's possible that the number of resources 6462 * provided to this port instance changed between 6463 * resets. Detect this condition and reallocate 6464 * resources. Otherwise, there is no action. 6465 */ 6466 if (error) { 6467 lpfc_printf_log(phba, KERN_INFO, 6468 LOG_MBOX | LOG_INIT, 6469 "2931 Detected extent resource " 6470 "change. Reallocating all " 6471 "extents.\n"); 6472 rc = lpfc_sli4_dealloc_extent(phba, 6473 LPFC_RSC_TYPE_FCOE_VFI); 6474 rc = lpfc_sli4_dealloc_extent(phba, 6475 LPFC_RSC_TYPE_FCOE_VPI); 6476 rc = lpfc_sli4_dealloc_extent(phba, 6477 LPFC_RSC_TYPE_FCOE_XRI); 6478 rc = lpfc_sli4_dealloc_extent(phba, 6479 LPFC_RSC_TYPE_FCOE_RPI); 6480 } else 6481 return 0; 6482 } 6483 6484 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6485 if (unlikely(rc)) 6486 goto err_exit; 6487 6488 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6489 if (unlikely(rc)) 6490 goto err_exit; 6491 6492 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6493 if (unlikely(rc)) 6494 goto err_exit; 6495 6496 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6497 if (unlikely(rc)) 6498 goto err_exit; 6499 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6500 LPFC_IDX_RSRC_RDY); 6501 return rc; 6502 } else { 6503 /* 6504 * The port does not support resource extents. The XRI, VPI, 6505 * VFI, RPI resource ids were determined from READ_CONFIG. 6506 * Just allocate the bitmasks and provision the resource id 6507 * arrays. If a port reset is active, the resources don't 6508 * need any action - just exit. 6509 */ 6510 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6511 LPFC_IDX_RSRC_RDY) { 6512 lpfc_sli4_dealloc_resource_identifiers(phba); 6513 lpfc_sli4_remove_rpis(phba); 6514 } 6515 /* RPIs. */ 6516 count = phba->sli4_hba.max_cfg_param.max_rpi; 6517 if (count <= 0) { 6518 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6519 "3279 Invalid provisioning of " 6520 "rpi:%d\n", count); 6521 rc = -EINVAL; 6522 goto err_exit; 6523 } 6524 base = phba->sli4_hba.max_cfg_param.rpi_base; 6525 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6526 phba->sli4_hba.rpi_bmask = kcalloc(longs, 6527 sizeof(unsigned long), 6528 GFP_KERNEL); 6529 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6530 rc = -ENOMEM; 6531 goto err_exit; 6532 } 6533 phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t), 6534 GFP_KERNEL); 6535 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6536 rc = -ENOMEM; 6537 goto free_rpi_bmask; 6538 } 6539 6540 for (i = 0; i < count; i++) 6541 phba->sli4_hba.rpi_ids[i] = base + i; 6542 6543 /* VPIs. 
*/ 6544 count = phba->sli4_hba.max_cfg_param.max_vpi; 6545 if (count <= 0) { 6546 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6547 "3280 Invalid provisioning of " 6548 "vpi:%d\n", count); 6549 rc = -EINVAL; 6550 goto free_rpi_ids; 6551 } 6552 base = phba->sli4_hba.max_cfg_param.vpi_base; 6553 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6554 phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long), 6555 GFP_KERNEL); 6556 if (unlikely(!phba->vpi_bmask)) { 6557 rc = -ENOMEM; 6558 goto free_rpi_ids; 6559 } 6560 phba->vpi_ids = kcalloc(count, sizeof(uint16_t), 6561 GFP_KERNEL); 6562 if (unlikely(!phba->vpi_ids)) { 6563 rc = -ENOMEM; 6564 goto free_vpi_bmask; 6565 } 6566 6567 for (i = 0; i < count; i++) 6568 phba->vpi_ids[i] = base + i; 6569 6570 /* XRIs. */ 6571 count = phba->sli4_hba.max_cfg_param.max_xri; 6572 if (count <= 0) { 6573 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6574 "3281 Invalid provisioning of " 6575 "xri:%d\n", count); 6576 rc = -EINVAL; 6577 goto free_vpi_ids; 6578 } 6579 base = phba->sli4_hba.max_cfg_param.xri_base; 6580 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6581 phba->sli4_hba.xri_bmask = kcalloc(longs, 6582 sizeof(unsigned long), 6583 GFP_KERNEL); 6584 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6585 rc = -ENOMEM; 6586 goto free_vpi_ids; 6587 } 6588 phba->sli4_hba.max_cfg_param.xri_used = 0; 6589 phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t), 6590 GFP_KERNEL); 6591 if (unlikely(!phba->sli4_hba.xri_ids)) { 6592 rc = -ENOMEM; 6593 goto free_xri_bmask; 6594 } 6595 6596 for (i = 0; i < count; i++) 6597 phba->sli4_hba.xri_ids[i] = base + i; 6598 6599 /* VFIs. */ 6600 count = phba->sli4_hba.max_cfg_param.max_vfi; 6601 if (count <= 0) { 6602 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6603 "3282 Invalid provisioning of " 6604 "vfi:%d\n", count); 6605 rc = -EINVAL; 6606 goto free_xri_ids; 6607 } 6608 base = phba->sli4_hba.max_cfg_param.vfi_base; 6609 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6610 phba->sli4_hba.vfi_bmask = kcalloc(longs, 6611 sizeof(unsigned long), 6612 GFP_KERNEL); 6613 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6614 rc = -ENOMEM; 6615 goto free_xri_ids; 6616 } 6617 phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t), 6618 GFP_KERNEL); 6619 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6620 rc = -ENOMEM; 6621 goto free_vfi_bmask; 6622 } 6623 6624 for (i = 0; i < count; i++) 6625 phba->sli4_hba.vfi_ids[i] = base + i; 6626 6627 /* 6628 * Mark all resources ready. An HBA reset doesn't need 6629 * to reset the initialization. 6630 */ 6631 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6632 LPFC_IDX_RSRC_RDY); 6633 return 0; 6634 } 6635 6636 free_vfi_bmask: 6637 kfree(phba->sli4_hba.vfi_bmask); 6638 phba->sli4_hba.vfi_bmask = NULL; 6639 free_xri_ids: 6640 kfree(phba->sli4_hba.xri_ids); 6641 phba->sli4_hba.xri_ids = NULL; 6642 free_xri_bmask: 6643 kfree(phba->sli4_hba.xri_bmask); 6644 phba->sli4_hba.xri_bmask = NULL; 6645 free_vpi_ids: 6646 kfree(phba->vpi_ids); 6647 phba->vpi_ids = NULL; 6648 free_vpi_bmask: 6649 kfree(phba->vpi_bmask); 6650 phba->vpi_bmask = NULL; 6651 free_rpi_ids: 6652 kfree(phba->sli4_hba.rpi_ids); 6653 phba->sli4_hba.rpi_ids = NULL; 6654 free_rpi_bmask: 6655 kfree(phba->sli4_hba.rpi_bmask); 6656 phba->sli4_hba.rpi_bmask = NULL; 6657 err_exit: 6658 return rc; 6659 } 6660 6661 /** 6662 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6663 * @phba: Pointer to HBA context object. 
6664 *
6665 * This function releases all SLI4 resource identifiers held by the driver,
6666 * either deallocating the port's resource extents or freeing the host-side id bitmasks and arrays.
6667 **/
6668 int
6669 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6670 {
6671 if (phba->sli4_hba.extents_in_use) {
6672 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6673 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6674 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6675 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6676 } else {
6677 kfree(phba->vpi_bmask);
6678 phba->sli4_hba.max_cfg_param.vpi_used = 0;
6679 kfree(phba->vpi_ids);
6680 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6681 kfree(phba->sli4_hba.xri_bmask);
6682 kfree(phba->sli4_hba.xri_ids);
6683 kfree(phba->sli4_hba.vfi_bmask);
6684 kfree(phba->sli4_hba.vfi_ids);
6685 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6686 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6687 }
6688
6689 return 0;
6690 }
6691
6692 /**
6693 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6694 * @phba: Pointer to HBA context object.
6695 * @type: The resource extent type.
6696 * @extnt_cnt: buffer to hold port extent count response
6697 * @extnt_size: buffer to hold port extent size response.
6698 *
6699 * This function calls the port to read the host allocated extents
6700 * for a particular type.
6701 **/
6702 int
6703 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6704 uint16_t *extnt_cnt, uint16_t *extnt_size)
6705 {
6706 bool emb;
6707 int rc = 0;
6708 uint16_t curr_blks = 0;
6709 uint32_t req_len, emb_len;
6710 uint32_t alloc_len, mbox_tmo;
6711 struct list_head *blk_list_head;
6712 struct lpfc_rsrc_blks *rsrc_blk;
6713 LPFC_MBOXQ_t *mbox;
6714 void *virtaddr = NULL;
6715 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6716 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6717 union lpfc_sli4_cfg_shdr *shdr;
6718
6719 switch (type) {
6720 case LPFC_RSC_TYPE_FCOE_VPI:
6721 blk_list_head = &phba->lpfc_vpi_blk_list;
6722 break;
6723 case LPFC_RSC_TYPE_FCOE_XRI:
6724 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6725 break;
6726 case LPFC_RSC_TYPE_FCOE_VFI:
6727 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6728 break;
6729 case LPFC_RSC_TYPE_FCOE_RPI:
6730 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6731 break;
6732 default:
6733 return -EIO;
6734 }
6735
6736 /* Count the number of extents currently allocated for this type. */
6737 list_for_each_entry(rsrc_blk, blk_list_head, list) {
6738 if (curr_blks == 0) {
6739 /*
6740 * The GET_ALLOCATED mailbox does not return the size,
6741 * just the count. The size should be just the size
6742 * stored in the current allocated block and all sizes
6743 * for an extent type are the same so set the return
6744 * value now.
6745 */
6746 *extnt_size = rsrc_blk->rsrc_size;
6747 }
6748 curr_blks++;
6749 }
6750
6751 /*
6752 * Calculate the size of an embedded mailbox. The uint32_t
6753 * accounts for extents-specific word.
6754 */
6755 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6756 sizeof(uint32_t);
6757
6758 /*
6759 * Presume the allocation and response will fit into an embedded
6760 * mailbox. If not true, reconfigure to a non-embedded mailbox.
6761 */
6762 emb = LPFC_SLI4_MBX_EMBED;
6763 req_len = emb_len;
6764 if (req_len > emb_len) {
6765 req_len = curr_blks * sizeof(uint16_t) +
6766 sizeof(union lpfc_sli4_cfg_shdr) +
6767 sizeof(uint32_t);
6768 emb = LPFC_SLI4_MBX_NEMBED;
6769 }
6770
6771 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6772 if (!mbox)
6773 return -ENOMEM;
6774 memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6775
6776 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6777 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6778 req_len, emb);
6779 if (alloc_len < req_len) {
6780 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6781 "2983 Allocated DMA memory size (x%x) is "
6782 "less than the requested DMA memory "
6783 "size (x%x)\n", alloc_len, req_len);
6784 rc = -ENOMEM;
6785 goto err_exit;
6786 }
6787 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6788 if (unlikely(rc)) {
6789 rc = -EIO;
6790 goto err_exit;
6791 }
6792
6793 if (!phba->sli4_hba.intr_enable)
6794 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6795 else {
6796 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6797 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6798 }
6799
6800 if (unlikely(rc)) {
6801 rc = -EIO;
6802 goto err_exit;
6803 }
6804
6805 /*
6806 * Figure out where the response is located. Then get local pointers
6807 * to the response data. The port does not guarantee to respond to
6808 * all extent count requests so update the local variable with the
6809 * allocated count from the port.
6810 */
6811 if (emb == LPFC_SLI4_MBX_EMBED) {
6812 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6813 shdr = &rsrc_ext->header.cfg_shdr;
6814 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6815 } else {
6816 virtaddr = mbox->sge_array->addr[0];
6817 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6818 shdr = &n_rsrc->cfg_shdr;
6819 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6820 }
6821
6822 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6823 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6824 "2984 Failed to read allocated resources "
6825 "for type %d - Status 0x%x Add'l Status 0x%x.\n",
6826 type,
6827 bf_get(lpfc_mbox_hdr_status, &shdr->response),
6828 bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6829 rc = -EIO;
6830 goto err_exit;
6831 }
6832 err_exit:
6833 lpfc_sli4_mbox_cmd_free(phba, mbox);
6834 return rc;
6835 }
6836
6837 /**
6838 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
6839 * @phba: pointer to lpfc hba data structure.
6841 * @sgl_list: linked list of sgl buffers to post
6842 * @cnt: number of linked list buffers
6843 *
6844 * This routine walks the list of buffers that have been allocated and
6845 * reposts them to the port by using SGL block post. This is needed after a
6846 * pci_function_reset/warm_start or start. It attempts to construct blocks
6847 * of buffer sgls that contain contiguous xris and uses the non-embedded
6848 * SGL block post mailbox commands to post them to the port. For a single
6849 * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL post
6850 * mailbox command for posting.
6851 *
6852 * Returns: the number of sgl entries actually posted on success, -EIO on failure.
6853 **/ 6854 static int 6855 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6856 struct list_head *sgl_list, int cnt) 6857 { 6858 struct lpfc_sglq *sglq_entry = NULL; 6859 struct lpfc_sglq *sglq_entry_next = NULL; 6860 struct lpfc_sglq *sglq_entry_first = NULL; 6861 int status, total_cnt; 6862 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6863 int last_xritag = NO_XRI; 6864 LIST_HEAD(prep_sgl_list); 6865 LIST_HEAD(blck_sgl_list); 6866 LIST_HEAD(allc_sgl_list); 6867 LIST_HEAD(post_sgl_list); 6868 LIST_HEAD(free_sgl_list); 6869 6870 spin_lock_irq(&phba->hbalock); 6871 spin_lock(&phba->sli4_hba.sgl_list_lock); 6872 list_splice_init(sgl_list, &allc_sgl_list); 6873 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6874 spin_unlock_irq(&phba->hbalock); 6875 6876 total_cnt = cnt; 6877 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6878 &allc_sgl_list, list) { 6879 list_del_init(&sglq_entry->list); 6880 block_cnt++; 6881 if ((last_xritag != NO_XRI) && 6882 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6883 /* a hole in xri block, form a sgl posting block */ 6884 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6885 post_cnt = block_cnt - 1; 6886 /* prepare list for next posting block */ 6887 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6888 block_cnt = 1; 6889 } else { 6890 /* prepare list for next posting block */ 6891 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6892 /* enough sgls for non-embed sgl mbox command */ 6893 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6894 list_splice_init(&prep_sgl_list, 6895 &blck_sgl_list); 6896 post_cnt = block_cnt; 6897 block_cnt = 0; 6898 } 6899 } 6900 num_posted++; 6901 6902 /* keep track of last sgl's xritag */ 6903 last_xritag = sglq_entry->sli4_xritag; 6904 6905 /* end of repost sgl list condition for buffers */ 6906 if (num_posted == total_cnt) { 6907 if (post_cnt == 0) { 6908 list_splice_init(&prep_sgl_list, 6909 &blck_sgl_list); 6910 post_cnt = block_cnt; 6911 } else if (block_cnt == 1) { 6912 status = lpfc_sli4_post_sgl(phba, 6913 sglq_entry->phys, 0, 6914 sglq_entry->sli4_xritag); 6915 if (!status) { 6916 /* successful, put sgl to posted list */ 6917 list_add_tail(&sglq_entry->list, 6918 &post_sgl_list); 6919 } else { 6920 /* Failure, put sgl to free list */ 6921 lpfc_printf_log(phba, KERN_WARNING, 6922 LOG_SLI, 6923 "3159 Failed to post " 6924 "sgl, xritag:x%x\n", 6925 sglq_entry->sli4_xritag); 6926 list_add_tail(&sglq_entry->list, 6927 &free_sgl_list); 6928 total_cnt--; 6929 } 6930 } 6931 } 6932 6933 /* continue until a nembed page worth of sgls */ 6934 if (post_cnt == 0) 6935 continue; 6936 6937 /* post the buffer list sgls as a block */ 6938 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6939 post_cnt); 6940 6941 if (!status) { 6942 /* success, put sgl list to posted sgl list */ 6943 list_splice_init(&blck_sgl_list, &post_sgl_list); 6944 } else { 6945 /* Failure, put sgl list to free sgl list */ 6946 sglq_entry_first = list_first_entry(&blck_sgl_list, 6947 struct lpfc_sglq, 6948 list); 6949 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6950 "3160 Failed to post sgl-list, " 6951 "xritag:x%x-x%x\n", 6952 sglq_entry_first->sli4_xritag, 6953 (sglq_entry_first->sli4_xritag + 6954 post_cnt - 1)); 6955 list_splice_init(&blck_sgl_list, &free_sgl_list); 6956 total_cnt -= post_cnt; 6957 } 6958 6959 /* don't reset xirtag due to hole in xri block */ 6960 if (block_cnt == 0) 6961 last_xritag = NO_XRI; 6962 6963 /* reset sgl post count for next round of posting */ 6964 post_cnt = 0; 6965 } 6966 6967 /* free the sgls failed to post */ 
6968 lpfc_free_sgl_list(phba, &free_sgl_list); 6969 6970 /* push sgls posted to the available list */ 6971 if (!list_empty(&post_sgl_list)) { 6972 spin_lock_irq(&phba->hbalock); 6973 spin_lock(&phba->sli4_hba.sgl_list_lock); 6974 list_splice_init(&post_sgl_list, sgl_list); 6975 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6976 spin_unlock_irq(&phba->hbalock); 6977 } else { 6978 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6979 "3161 Failure to post sgl to port.\n"); 6980 return -EIO; 6981 } 6982 6983 /* return the number of XRIs actually posted */ 6984 return total_cnt; 6985 } 6986 6987 void 6988 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 6989 { 6990 uint32_t len; 6991 6992 len = sizeof(struct lpfc_mbx_set_host_data) - 6993 sizeof(struct lpfc_sli4_cfg_mhdr); 6994 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6995 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 6996 LPFC_SLI4_MBX_EMBED); 6997 6998 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 6999 mbox->u.mqe.un.set_host_data.param_len = 7000 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 7001 snprintf(mbox->u.mqe.un.set_host_data.data, 7002 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 7003 "Linux %s v"LPFC_DRIVER_VERSION, 7004 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 7005 } 7006 7007 int 7008 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 7009 struct lpfc_queue *drq, int count, int idx) 7010 { 7011 int rc, i; 7012 struct lpfc_rqe hrqe; 7013 struct lpfc_rqe drqe; 7014 struct lpfc_rqb *rqbp; 7015 unsigned long flags; 7016 struct rqb_dmabuf *rqb_buffer; 7017 LIST_HEAD(rqb_buf_list); 7018 7019 spin_lock_irqsave(&phba->hbalock, flags); 7020 rqbp = hrq->rqbp; 7021 for (i = 0; i < count; i++) { 7022 /* IF RQ is already full, don't bother */ 7023 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 7024 break; 7025 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 7026 if (!rqb_buffer) 7027 break; 7028 rqb_buffer->hrq = hrq; 7029 rqb_buffer->drq = drq; 7030 rqb_buffer->idx = idx; 7031 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 7032 } 7033 while (!list_empty(&rqb_buf_list)) { 7034 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 7035 hbuf.list); 7036 7037 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 7038 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 7039 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 7040 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 7041 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 7042 if (rc < 0) { 7043 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7044 "6421 Cannot post to HRQ %d: %x %x %x " 7045 "DRQ %x %x\n", 7046 hrq->queue_id, 7047 hrq->host_index, 7048 hrq->hba_index, 7049 hrq->entry_count, 7050 drq->host_index, 7051 drq->hba_index); 7052 rqbp->rqb_free_buffer(phba, rqb_buffer); 7053 } else { 7054 list_add_tail(&rqb_buffer->hbuf.list, 7055 &rqbp->rqb_buffer_list); 7056 rqbp->buffer_count++; 7057 } 7058 } 7059 spin_unlock_irqrestore(&phba->hbalock, flags); 7060 return 1; 7061 } 7062 7063 /** 7064 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 7065 * @phba: Pointer to HBA context object. 7066 * 7067 * This function is the main SLI4 device initialization PCI function. This 7068 * function is called by the HBA initialization code, HBA reset code and 7069 * HBA error attention handler code. Caller is not required to hold any 7070 * locks. 
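 *
 * Returns 0 when the port has been brought up successfully; otherwise a
 * negative errno (for example -ENODEV, -ENOMEM or -EIO) is returned.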
7071 **/ 7072 int 7073 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 7074 { 7075 int rc, i, cnt; 7076 LPFC_MBOXQ_t *mboxq; 7077 struct lpfc_mqe *mqe; 7078 uint8_t *vpd; 7079 uint32_t vpd_size; 7080 uint32_t ftr_rsp = 0; 7081 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 7082 struct lpfc_vport *vport = phba->pport; 7083 struct lpfc_dmabuf *mp; 7084 struct lpfc_rqb *rqbp; 7085 7086 /* Perform a PCI function reset to start from clean */ 7087 rc = lpfc_pci_function_reset(phba); 7088 if (unlikely(rc)) 7089 return -ENODEV; 7090 7091 /* Check the HBA Host Status Register for readyness */ 7092 rc = lpfc_sli4_post_status_check(phba); 7093 if (unlikely(rc)) 7094 return -ENODEV; 7095 else { 7096 spin_lock_irq(&phba->hbalock); 7097 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 7098 spin_unlock_irq(&phba->hbalock); 7099 } 7100 7101 /* 7102 * Allocate a single mailbox container for initializing the 7103 * port. 7104 */ 7105 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 7106 if (!mboxq) 7107 return -ENOMEM; 7108 7109 /* Issue READ_REV to collect vpd and FW information. */ 7110 vpd_size = SLI4_PAGE_SIZE; 7111 vpd = kzalloc(vpd_size, GFP_KERNEL); 7112 if (!vpd) { 7113 rc = -ENOMEM; 7114 goto out_free_mbox; 7115 } 7116 7117 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 7118 if (unlikely(rc)) { 7119 kfree(vpd); 7120 goto out_free_mbox; 7121 } 7122 7123 mqe = &mboxq->u.mqe; 7124 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 7125 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 7126 phba->hba_flag |= HBA_FCOE_MODE; 7127 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 7128 } else { 7129 phba->hba_flag &= ~HBA_FCOE_MODE; 7130 } 7131 7132 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 7133 LPFC_DCBX_CEE_MODE) 7134 phba->hba_flag |= HBA_FIP_SUPPORT; 7135 else 7136 phba->hba_flag &= ~HBA_FIP_SUPPORT; 7137 7138 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 7139 7140 if (phba->sli_rev != LPFC_SLI_REV4) { 7141 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7142 "0376 READ_REV Error. SLI Level %d " 7143 "FCoE enabled %d\n", 7144 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 7145 rc = -EIO; 7146 kfree(vpd); 7147 goto out_free_mbox; 7148 } 7149 7150 /* 7151 * Continue initialization with default values even if driver failed 7152 * to read FCoE param config regions, only read parameters if the 7153 * board is FCoE 7154 */ 7155 if (phba->hba_flag & HBA_FCOE_MODE && 7156 lpfc_sli4_read_fcoe_params(phba)) 7157 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 7158 "2570 Failed to read FCoE parameters\n"); 7159 7160 /* 7161 * Retrieve sli4 device physical port name, failure of doing it 7162 * is considered as non-fatal. 7163 */ 7164 rc = lpfc_sli4_retrieve_pport_name(phba); 7165 if (!rc) 7166 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7167 "3080 Successful retrieving SLI4 device " 7168 "physical port name: %s.\n", phba->Port); 7169 7170 /* 7171 * Evaluate the read rev and vpd data. Populate the driver 7172 * state with the results. If this routine fails, the failure 7173 * is not fatal as the driver will use generic values. 7174 */ 7175 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 7176 if (unlikely(!rc)) { 7177 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7178 "0377 Error %d parsing vpd. 
" 7179 "Using defaults.\n", rc); 7180 rc = 0; 7181 } 7182 kfree(vpd); 7183 7184 /* Save information as VPD data */ 7185 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 7186 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 7187 7188 /* 7189 * This is because first G7 ASIC doesn't support the standard 7190 * 0x5a NVME cmd descriptor type/subtype 7191 */ 7192 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7193 LPFC_SLI_INTF_IF_TYPE_6) && 7194 (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) && 7195 (phba->vpd.rev.smRev == 0) && 7196 (phba->cfg_nvme_embed_cmd == 1)) 7197 phba->cfg_nvme_embed_cmd = 0; 7198 7199 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 7200 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 7201 &mqe->un.read_rev); 7202 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 7203 &mqe->un.read_rev); 7204 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 7205 &mqe->un.read_rev); 7206 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 7207 &mqe->un.read_rev); 7208 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 7209 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 7210 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 7211 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 7212 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 7213 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 7214 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7215 "(%d):0380 READ_REV Status x%x " 7216 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 7217 mboxq->vport ? mboxq->vport->vpi : 0, 7218 bf_get(lpfc_mqe_status, mqe), 7219 phba->vpd.rev.opFwName, 7220 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 7221 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 7222 7223 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 7224 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 7225 if (phba->pport->cfg_lun_queue_depth > rc) { 7226 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 7227 "3362 LUN queue depth changed from %d to %d\n", 7228 phba->pport->cfg_lun_queue_depth, rc); 7229 phba->pport->cfg_lun_queue_depth = rc; 7230 } 7231 7232 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7233 LPFC_SLI_INTF_IF_TYPE_0) { 7234 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 7235 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7236 if (rc == MBX_SUCCESS) { 7237 phba->hba_flag |= HBA_RECOVERABLE_UE; 7238 /* Set 1Sec interval to detect UE */ 7239 phba->eratt_poll_interval = 1; 7240 phba->sli4_hba.ue_to_sr = bf_get( 7241 lpfc_mbx_set_feature_UESR, 7242 &mboxq->u.mqe.un.set_feature); 7243 phba->sli4_hba.ue_to_rp = bf_get( 7244 lpfc_mbx_set_feature_UERP, 7245 &mboxq->u.mqe.un.set_feature); 7246 } 7247 } 7248 7249 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 7250 /* Enable MDS Diagnostics only if the SLI Port supports it */ 7251 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 7252 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7253 if (rc != MBX_SUCCESS) 7254 phba->mds_diags_support = 0; 7255 } 7256 7257 /* 7258 * Discover the port's supported feature set and match it against the 7259 * hosts requests. 7260 */ 7261 lpfc_request_features(phba, mboxq); 7262 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7263 if (unlikely(rc)) { 7264 rc = -EIO; 7265 goto out_free_mbox; 7266 } 7267 7268 /* 7269 * The port must support FCP initiator mode as this is the 7270 * only mode running in the host. 
7271 */ 7272 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 7273 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7274 "0378 No support for fcpi mode.\n"); 7275 ftr_rsp++; 7276 } 7277 7278 /* Performance Hints are ONLY for FCoE */ 7279 if (phba->hba_flag & HBA_FCOE_MODE) { 7280 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 7281 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 7282 else 7283 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 7284 } 7285 7286 /* 7287 * If the port cannot support the host's requested features 7288 * then turn off the global config parameters to disable the 7289 * feature in the driver. This is not a fatal error. 7290 */ 7291 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { 7292 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) { 7293 phba->cfg_enable_bg = 0; 7294 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 7295 ftr_rsp++; 7296 } 7297 } 7298 7299 if (phba->max_vpi && phba->cfg_enable_npiv && 7300 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7301 ftr_rsp++; 7302 7303 if (ftr_rsp) { 7304 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7305 "0379 Feature Mismatch Data: x%08x %08x " 7306 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 7307 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 7308 phba->cfg_enable_npiv, phba->max_vpi); 7309 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 7310 phba->cfg_enable_bg = 0; 7311 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 7312 phba->cfg_enable_npiv = 0; 7313 } 7314 7315 /* These SLI3 features are assumed in SLI4 */ 7316 spin_lock_irq(&phba->hbalock); 7317 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 7318 spin_unlock_irq(&phba->hbalock); 7319 7320 /* 7321 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 7322 * calls depends on these resources to complete port setup. 7323 */ 7324 rc = lpfc_sli4_alloc_resource_identifiers(phba); 7325 if (rc) { 7326 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7327 "2920 Failed to alloc Resource IDs " 7328 "rc = x%x\n", rc); 7329 goto out_free_mbox; 7330 } 7331 7332 lpfc_set_host_data(phba, mboxq); 7333 7334 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7335 if (rc) { 7336 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7337 "2134 Failed to set host os driver version %x", 7338 rc); 7339 } 7340 7341 /* Read the port's service parameters. */ 7342 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 7343 if (rc) { 7344 phba->link_state = LPFC_HBA_ERROR; 7345 rc = -ENOMEM; 7346 goto out_free_mbox; 7347 } 7348 7349 mboxq->vport = vport; 7350 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7351 mp = (struct lpfc_dmabuf *) mboxq->context1; 7352 if (rc == MBX_SUCCESS) { 7353 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 7354 rc = 0; 7355 } 7356 7357 /* 7358 * This memory was allocated by the lpfc_read_sparam routine. Release 7359 * it to the mbuf pool. 7360 */ 7361 lpfc_mbuf_free(phba, mp->virt, mp->phys); 7362 kfree(mp); 7363 mboxq->context1 = NULL; 7364 if (unlikely(rc)) { 7365 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7366 "0382 READ_SPARAM command failed " 7367 "status %d, mbxStatus x%x\n", 7368 rc, bf_get(lpfc_mqe_status, mqe)); 7369 phba->link_state = LPFC_HBA_ERROR; 7370 rc = -EIO; 7371 goto out_free_mbox; 7372 } 7373 7374 lpfc_update_vport_wwn(vport); 7375 7376 /* Update the fc_host data structures with new wwn. 
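 * These WWNs back the node_name and port_name attributes that the FC
 * transport class exports for this host.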
*/ 7377 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 7378 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 7379 7380 /* Create all the SLI4 queues */ 7381 rc = lpfc_sli4_queue_create(phba); 7382 if (rc) { 7383 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7384 "3089 Failed to allocate queues\n"); 7385 rc = -ENODEV; 7386 goto out_free_mbox; 7387 } 7388 /* Set up all the queues to the device */ 7389 rc = lpfc_sli4_queue_setup(phba); 7390 if (unlikely(rc)) { 7391 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7392 "0381 Error %d during queue setup.\n ", rc); 7393 goto out_stop_timers; 7394 } 7395 /* Initialize the driver internal SLI layer lists. */ 7396 lpfc_sli4_setup(phba); 7397 lpfc_sli4_queue_init(phba); 7398 7399 /* update host els xri-sgl sizes and mappings */ 7400 rc = lpfc_sli4_els_sgl_update(phba); 7401 if (unlikely(rc)) { 7402 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7403 "1400 Failed to update xri-sgl size and " 7404 "mapping: %d\n", rc); 7405 goto out_destroy_queue; 7406 } 7407 7408 /* register the els sgl pool to the port */ 7409 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 7410 phba->sli4_hba.els_xri_cnt); 7411 if (unlikely(rc < 0)) { 7412 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7413 "0582 Error %d during els sgl post " 7414 "operation\n", rc); 7415 rc = -ENODEV; 7416 goto out_destroy_queue; 7417 } 7418 phba->sli4_hba.els_xri_cnt = rc; 7419 7420 if (phba->nvmet_support) { 7421 /* update host nvmet xri-sgl sizes and mappings */ 7422 rc = lpfc_sli4_nvmet_sgl_update(phba); 7423 if (unlikely(rc)) { 7424 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7425 "6308 Failed to update nvmet-sgl size " 7426 "and mapping: %d\n", rc); 7427 goto out_destroy_queue; 7428 } 7429 7430 /* register the nvmet sgl pool to the port */ 7431 rc = lpfc_sli4_repost_sgl_list( 7432 phba, 7433 &phba->sli4_hba.lpfc_nvmet_sgl_list, 7434 phba->sli4_hba.nvmet_xri_cnt); 7435 if (unlikely(rc < 0)) { 7436 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7437 "3117 Error %d during nvmet " 7438 "sgl post\n", rc); 7439 rc = -ENODEV; 7440 goto out_destroy_queue; 7441 } 7442 phba->sli4_hba.nvmet_xri_cnt = rc; 7443 7444 cnt = phba->cfg_iocb_cnt * 1024; 7445 /* We need 1 iocbq for every SGL, for IO processing */ 7446 cnt += phba->sli4_hba.nvmet_xri_cnt; 7447 } else { 7448 /* update host scsi xri-sgl sizes and mappings */ 7449 rc = lpfc_sli4_scsi_sgl_update(phba); 7450 if (unlikely(rc)) { 7451 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7452 "6309 Failed to update scsi-sgl size " 7453 "and mapping: %d\n", rc); 7454 goto out_destroy_queue; 7455 } 7456 7457 /* update host nvme xri-sgl sizes and mappings */ 7458 rc = lpfc_sli4_nvme_sgl_update(phba); 7459 if (unlikely(rc)) { 7460 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7461 "6082 Failed to update nvme-sgl size " 7462 "and mapping: %d\n", rc); 7463 goto out_destroy_queue; 7464 } 7465 7466 cnt = phba->cfg_iocb_cnt * 1024; 7467 } 7468 7469 if (!phba->sli.iocbq_lookup) { 7470 /* Initialize and populate the iocb list per host */ 7471 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7472 "2821 initialize iocb list %d total %d\n", 7473 phba->cfg_iocb_cnt, cnt); 7474 rc = lpfc_init_iocb_list(phba, cnt); 7475 if (rc) { 7476 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7477 "1413 Failed to init iocb list.\n"); 7478 goto out_destroy_queue; 7479 } 7480 } 7481 7482 if (phba->nvmet_support) 7483 lpfc_nvmet_create_targetport(phba); 7484 7485 if (phba->nvmet_support && phba->cfg_nvmet_mrq) 
{ 7486 /* Post initial buffers to all RQs created */ 7487 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 7488 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 7489 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 7490 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 7491 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 7492 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 7493 rqbp->buffer_count = 0; 7494 7495 lpfc_post_rq_buffer( 7496 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 7497 phba->sli4_hba.nvmet_mrq_data[i], 7498 phba->cfg_nvmet_mrq_post, i); 7499 } 7500 } 7501 7502 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 7503 /* register the allocated scsi sgl pool to the port */ 7504 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 7505 if (unlikely(rc)) { 7506 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7507 "0383 Error %d during scsi sgl post " 7508 "operation\n", rc); 7509 /* Some Scsi buffers were moved to abort scsi list */ 7510 /* A pci function reset will repost them */ 7511 rc = -ENODEV; 7512 goto out_destroy_queue; 7513 } 7514 } 7515 7516 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 7517 (phba->nvmet_support == 0)) { 7518 7519 /* register the allocated nvme sgl pool to the port */ 7520 rc = lpfc_repost_nvme_sgl_list(phba); 7521 if (unlikely(rc)) { 7522 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7523 "6116 Error %d during nvme sgl post " 7524 "operation\n", rc); 7525 /* Some NVME buffers were moved to abort nvme list */ 7526 /* A pci function reset will repost them */ 7527 rc = -ENODEV; 7528 goto out_destroy_queue; 7529 } 7530 } 7531 7532 /* Post the rpi header region to the device. */ 7533 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7534 if (unlikely(rc)) { 7535 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7536 "0393 Error %d during rpi post operation\n", 7537 rc); 7538 rc = -ENODEV; 7539 goto out_destroy_queue; 7540 } 7541 lpfc_sli4_node_prep(phba); 7542 7543 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7544 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7545 /* 7546 * The FC Port needs to register FCFI (index 0) 7547 */ 7548 lpfc_reg_fcfi(phba, mboxq); 7549 mboxq->vport = phba->pport; 7550 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7551 if (rc != MBX_SUCCESS) 7552 goto out_unset_queue; 7553 rc = 0; 7554 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7555 &mboxq->u.mqe.un.reg_fcfi); 7556 } else { 7557 /* We are a NVME Target mode with MRQ > 1 */ 7558 7559 /* First register the FCFI */ 7560 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7561 mboxq->vport = phba->pport; 7562 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7563 if (rc != MBX_SUCCESS) 7564 goto out_unset_queue; 7565 rc = 0; 7566 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7567 &mboxq->u.mqe.un.reg_fcfi_mrq); 7568 7569 /* Next register the MRQs */ 7570 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7571 mboxq->vport = phba->pport; 7572 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7573 if (rc != MBX_SUCCESS) 7574 goto out_unset_queue; 7575 rc = 0; 7576 } 7577 /* Check if the port is configured to be disabled */ 7578 lpfc_sli_read_link_ste(phba); 7579 } 7580 7581 /* Arm the CQs and then EQs on device */ 7582 lpfc_sli4_arm_cqeq_intr(phba); 7583 7584 /* Indicate device interrupt mode */ 7585 phba->sli4_hba.intr_enable = 1; 7586 7587 /* Allow asynchronous mailbox command to go through */ 7588 spin_lock_irq(&phba->hbalock); 7589 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7590 spin_unlock_irq(&phba->hbalock); 7591 7592 /* Post receive buffers to the device */ 7593 lpfc_sli4_rb_setup(phba); 7594 7595 /* Reset HBA FCF 
states after HBA reset */ 7596 phba->fcf.fcf_flag = 0; 7597 phba->fcf.current_rec.flag = 0; 7598 7599 /* Start the ELS watchdog timer */ 7600 mod_timer(&vport->els_tmofunc, 7601 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7602 7603 /* Start heart beat timer */ 7604 mod_timer(&phba->hb_tmofunc, 7605 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7606 phba->hb_outstanding = 0; 7607 phba->last_completion_time = jiffies; 7608 7609 /* Start error attention (ERATT) polling timer */ 7610 mod_timer(&phba->eratt_poll, 7611 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7612 7613 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7614 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7615 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7616 if (!rc) { 7617 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7618 "2829 This device supports " 7619 "Advanced Error Reporting (AER)\n"); 7620 spin_lock_irq(&phba->hbalock); 7621 phba->hba_flag |= HBA_AER_ENABLED; 7622 spin_unlock_irq(&phba->hbalock); 7623 } else { 7624 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7625 "2830 This device does not support " 7626 "Advanced Error Reporting (AER)\n"); 7627 phba->cfg_aer_support = 0; 7628 } 7629 rc = 0; 7630 } 7631 7632 /* 7633 * The port is ready, set the host's link state to LINK_DOWN 7634 * in preparation for link interrupts. 7635 */ 7636 spin_lock_irq(&phba->hbalock); 7637 phba->link_state = LPFC_LINK_DOWN; 7638 spin_unlock_irq(&phba->hbalock); 7639 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7640 (phba->hba_flag & LINK_DISABLED)) { 7641 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7642 "3103 Adapter Link is disabled.\n"); 7643 lpfc_down_link(phba, mboxq); 7644 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7645 if (rc != MBX_SUCCESS) { 7646 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7647 "3104 Adapter failed to issue " 7648 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7649 goto out_unset_queue; 7650 } 7651 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7652 /* don't perform init_link on SLI4 FC port loopback test */ 7653 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7654 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7655 if (rc) 7656 goto out_unset_queue; 7657 } 7658 } 7659 mempool_free(mboxq, phba->mbox_mem_pool); 7660 return rc; 7661 out_unset_queue: 7662 /* Unset all the queues set up in this routine when error out */ 7663 lpfc_sli4_queue_unset(phba); 7664 out_destroy_queue: 7665 lpfc_free_iocb_list(phba); 7666 lpfc_sli4_queue_destroy(phba); 7667 out_stop_timers: 7668 lpfc_stop_hba_timers(phba); 7669 out_free_mbox: 7670 mempool_free(mboxq, phba->mbox_mem_pool); 7671 return rc; 7672 } 7673 7674 /** 7675 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7676 * @ptr: context object - pointer to hba structure. 7677 * 7678 * This is the callback function for mailbox timer. The mailbox 7679 * timer is armed when a new mailbox command is issued and the timer 7680 * is deleted when the mailbox complete. The function is called by 7681 * the kernel timer code when a mailbox does not complete within 7682 * expected time. This function wakes up the worker thread to 7683 * process the mailbox timeout and returns. All the processing is 7684 * done by the worker thread function lpfc_mbox_timeout_handler. 
7685 **/ 7686 void 7687 lpfc_mbox_timeout(struct timer_list *t) 7688 { 7689 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7690 unsigned long iflag; 7691 uint32_t tmo_posted; 7692 7693 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7694 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7695 if (!tmo_posted) 7696 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7697 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7698 7699 if (!tmo_posted) 7700 lpfc_worker_wake_up(phba); 7701 return; 7702 } 7703 7704 /** 7705 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7706 * are pending 7707 * @phba: Pointer to HBA context object. 7708 * 7709 * This function checks if any mailbox completions are present on the mailbox 7710 * completion queue. 7711 **/ 7712 static bool 7713 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7714 { 7715 7716 uint32_t idx; 7717 struct lpfc_queue *mcq; 7718 struct lpfc_mcqe *mcqe; 7719 bool pending_completions = false; 7720 uint8_t qe_valid; 7721 7722 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7723 return false; 7724 7725 /* Check for completions on mailbox completion queue */ 7726 7727 mcq = phba->sli4_hba.mbx_cq; 7728 idx = mcq->hba_index; 7729 qe_valid = mcq->qe_valid; 7730 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe) == qe_valid) { 7731 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 7732 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7733 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7734 pending_completions = true; 7735 break; 7736 } 7737 idx = (idx + 1) % mcq->entry_count; 7738 if (mcq->hba_index == idx) 7739 break; 7740 7741 /* if the index wrapped around, toggle the valid bit */ 7742 if (phba->sli4_hba.pc_sli4_params.cqav && !idx) 7743 qe_valid = (qe_valid) ? 0 : 1; 7744 } 7745 return pending_completions; 7746 7747 } 7748 7749 /** 7750 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7751 * that were missed. 7752 * @phba: Pointer to HBA context object. 7753 * 7754 * For sli4, it is possible to miss an interrupt. As such mbox completions 7755 * maybe missed causing erroneous mailbox timeouts to occur. This function 7756 * checks to see if mbox completions are on the mailbox completion queue 7757 * and will process all the completions associated with the eq for the 7758 * mailbox completion queue. 
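 *
 * Returns true if a pending (non-async) mailbox completion was found and
 * the EQ associated with the mailbox completion queue was processed,
 * false otherwise.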
7759 **/ 7760 bool 7761 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7762 { 7763 struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba; 7764 uint32_t eqidx; 7765 struct lpfc_queue *fpeq = NULL; 7766 struct lpfc_eqe *eqe; 7767 bool mbox_pending; 7768 7769 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7770 return false; 7771 7772 /* Find the eq associated with the mcq */ 7773 7774 if (sli4_hba->hba_eq) 7775 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) 7776 if (sli4_hba->hba_eq[eqidx]->queue_id == 7777 sli4_hba->mbx_cq->assoc_qid) { 7778 fpeq = sli4_hba->hba_eq[eqidx]; 7779 break; 7780 } 7781 if (!fpeq) 7782 return false; 7783 7784 /* Turn off interrupts from this EQ */ 7785 7786 sli4_hba->sli4_eq_clr_intr(fpeq); 7787 7788 /* Check to see if a mbox completion is pending */ 7789 7790 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7791 7792 /* 7793 * If a mbox completion is pending, process all the events on EQ 7794 * associated with the mbox completion queue (this could include 7795 * mailbox commands, async events, els commands, receive queue data 7796 * and fcp commands) 7797 */ 7798 7799 if (mbox_pending) 7800 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 7801 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 7802 fpeq->EQ_processed++; 7803 } 7804 7805 /* Always clear and re-arm the EQ */ 7806 7807 sli4_hba->sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 7808 7809 return mbox_pending; 7810 7811 } 7812 7813 /** 7814 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7815 * @phba: Pointer to HBA context object. 7816 * 7817 * This function is called from worker thread when a mailbox command times out. 7818 * The caller is not required to hold any locks. This function will reset the 7819 * HBA and recover all the pending commands. 7820 **/ 7821 void 7822 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7823 { 7824 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7825 MAILBOX_t *mb = NULL; 7826 7827 struct lpfc_sli *psli = &phba->sli; 7828 7829 /* If the mailbox completed, process the completion and return */ 7830 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7831 return; 7832 7833 if (pmbox != NULL) 7834 mb = &pmbox->u.mb; 7835 /* Check the pmbox pointer first. There is a race condition 7836 * between the mbox timeout handler getting executed in the 7837 * worklist and the mailbox actually completing. When this 7838 * race condition occurs, the mbox_active will be NULL. 7839 */ 7840 spin_lock_irq(&phba->hbalock); 7841 if (pmbox == NULL) { 7842 lpfc_printf_log(phba, KERN_WARNING, 7843 LOG_MBOX | LOG_SLI, 7844 "0353 Active Mailbox cleared - mailbox timeout " 7845 "exiting\n"); 7846 spin_unlock_irq(&phba->hbalock); 7847 return; 7848 } 7849 7850 /* Mbox cmd <mbxCommand> timeout */ 7851 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7852 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7853 mb->mbxCommand, 7854 phba->pport->port_state, 7855 phba->sli.sli_flag, 7856 phba->sli.mbox_active); 7857 spin_unlock_irq(&phba->hbalock); 7858 7859 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7860 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7861 * it to fail all outstanding SCSI IO. 
7862 */ 7863 spin_lock_irq(&phba->pport->work_port_lock); 7864 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7865 spin_unlock_irq(&phba->pport->work_port_lock); 7866 spin_lock_irq(&phba->hbalock); 7867 phba->link_state = LPFC_LINK_UNKNOWN; 7868 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7869 spin_unlock_irq(&phba->hbalock); 7870 7871 lpfc_sli_abort_fcp_rings(phba); 7872 7873 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7874 "0345 Resetting board due to mailbox timeout\n"); 7875 7876 /* Reset the HBA device */ 7877 lpfc_reset_hba(phba); 7878 } 7879 7880 /** 7881 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7882 * @phba: Pointer to HBA context object. 7883 * @pmbox: Pointer to mailbox object. 7884 * @flag: Flag indicating how the mailbox need to be processed. 7885 * 7886 * This function is called by discovery code and HBA management code 7887 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7888 * function gets the hbalock to protect the data structures. 7889 * The mailbox command can be submitted in polling mode, in which case 7890 * this function will wait in a polling loop for the completion of the 7891 * mailbox. 7892 * If the mailbox is submitted in no_wait mode (not polling) the 7893 * function will submit the command and returns immediately without waiting 7894 * for the mailbox completion. The no_wait is supported only when HBA 7895 * is in SLI2/SLI3 mode - interrupts are enabled. 7896 * The SLI interface allows only one mailbox pending at a time. If the 7897 * mailbox is issued in polling mode and there is already a mailbox 7898 * pending, then the function will return an error. If the mailbox is issued 7899 * in NO_WAIT mode and there is a mailbox pending already, the function 7900 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7901 * The sli layer owns the mailbox object until the completion of mailbox 7902 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7903 * return codes the caller owns the mailbox command after the return of 7904 * the function. 7905 **/ 7906 static int 7907 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7908 uint32_t flag) 7909 { 7910 MAILBOX_t *mbx; 7911 struct lpfc_sli *psli = &phba->sli; 7912 uint32_t status, evtctr; 7913 uint32_t ha_copy, hc_copy; 7914 int i; 7915 unsigned long timeout; 7916 unsigned long drvr_flag = 0; 7917 uint32_t word0, ldata; 7918 void __iomem *to_slim; 7919 int processing_queue = 0; 7920 7921 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7922 if (!pmbox) { 7923 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7924 /* processing mbox queue from intr_handler */ 7925 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7926 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7927 return MBX_SUCCESS; 7928 } 7929 processing_queue = 1; 7930 pmbox = lpfc_mbox_get(phba); 7931 if (!pmbox) { 7932 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7933 return MBX_SUCCESS; 7934 } 7935 } 7936 7937 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 7938 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 7939 if(!pmbox->vport) { 7940 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7941 lpfc_printf_log(phba, KERN_ERR, 7942 LOG_MBOX | LOG_VPORT, 7943 "1806 Mbox x%x failed. No vport\n", 7944 pmbox->u.mb.mbxCommand); 7945 dump_stack(); 7946 goto out_not_finished; 7947 } 7948 } 7949 7950 /* If the PCI channel is in offline state, do not post mbox. 
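 * (pci_channel_offline() reports true while PCI error recovery, e.g.
 * EEH/AER, is in progress.)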
*/ 7951 if (unlikely(pci_channel_offline(phba->pcidev))) { 7952 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7953 goto out_not_finished; 7954 } 7955 7956 /* If HBA has a deferred error attention, fail the iocb. */ 7957 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7958 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7959 goto out_not_finished; 7960 } 7961 7962 psli = &phba->sli; 7963 7964 mbx = &pmbox->u.mb; 7965 status = MBX_SUCCESS; 7966 7967 if (phba->link_state == LPFC_HBA_ERROR) { 7968 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7969 7970 /* Mbox command <mbxCommand> cannot issue */ 7971 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7972 "(%d):0311 Mailbox command x%x cannot " 7973 "issue Data: x%x x%x\n", 7974 pmbox->vport ? pmbox->vport->vpi : 0, 7975 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7976 goto out_not_finished; 7977 } 7978 7979 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 7980 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 7981 !(hc_copy & HC_MBINT_ENA)) { 7982 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7983 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7984 "(%d):2528 Mailbox command x%x cannot " 7985 "issue Data: x%x x%x\n", 7986 pmbox->vport ? pmbox->vport->vpi : 0, 7987 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7988 goto out_not_finished; 7989 } 7990 } 7991 7992 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7993 /* Polling for a mbox command when another one is already active 7994 * is not allowed in SLI. Also, the driver must have established 7995 * SLI2 mode to queue and process multiple mbox commands. 7996 */ 7997 7998 if (flag & MBX_POLL) { 7999 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8000 8001 /* Mbox command <mbxCommand> cannot issue */ 8002 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8003 "(%d):2529 Mailbox command x%x " 8004 "cannot issue Data: x%x x%x\n", 8005 pmbox->vport ? pmbox->vport->vpi : 0, 8006 pmbox->u.mb.mbxCommand, 8007 psli->sli_flag, flag); 8008 goto out_not_finished; 8009 } 8010 8011 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 8012 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8013 /* Mbox command <mbxCommand> cannot issue */ 8014 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8015 "(%d):2530 Mailbox command x%x " 8016 "cannot issue Data: x%x x%x\n", 8017 pmbox->vport ? pmbox->vport->vpi : 0, 8018 pmbox->u.mb.mbxCommand, 8019 psli->sli_flag, flag); 8020 goto out_not_finished; 8021 } 8022 8023 /* Another mailbox command is still being processed, queue this 8024 * command to be processed later. 8025 */ 8026 lpfc_mbox_put(phba, pmbox); 8027 8028 /* Mbox cmd issue - BUSY */ 8029 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8030 "(%d):0308 Mbox cmd issue - BUSY Data: " 8031 "x%x x%x x%x x%x\n", 8032 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 8033 mbx->mbxCommand, 8034 phba->pport ? 
phba->pport->port_state : 0xff, 8035 psli->sli_flag, flag); 8036 8037 psli->slistat.mbox_busy++; 8038 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8039 8040 if (pmbox->vport) { 8041 lpfc_debugfs_disc_trc(pmbox->vport, 8042 LPFC_DISC_TRC_MBOX_VPORT, 8043 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 8044 (uint32_t)mbx->mbxCommand, 8045 mbx->un.varWords[0], mbx->un.varWords[1]); 8046 } 8047 else { 8048 lpfc_debugfs_disc_trc(phba->pport, 8049 LPFC_DISC_TRC_MBOX, 8050 "MBOX Bsy: cmd:x%x mb:x%x x%x", 8051 (uint32_t)mbx->mbxCommand, 8052 mbx->un.varWords[0], mbx->un.varWords[1]); 8053 } 8054 8055 return MBX_BUSY; 8056 } 8057 8058 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8059 8060 /* If we are not polling, we MUST be in SLI2 mode */ 8061 if (flag != MBX_POLL) { 8062 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 8063 (mbx->mbxCommand != MBX_KILL_BOARD)) { 8064 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8065 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8066 /* Mbox command <mbxCommand> cannot issue */ 8067 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8068 "(%d):2531 Mailbox command x%x " 8069 "cannot issue Data: x%x x%x\n", 8070 pmbox->vport ? pmbox->vport->vpi : 0, 8071 pmbox->u.mb.mbxCommand, 8072 psli->sli_flag, flag); 8073 goto out_not_finished; 8074 } 8075 /* timeout active mbox command */ 8076 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8077 1000); 8078 mod_timer(&psli->mbox_tmo, jiffies + timeout); 8079 } 8080 8081 /* Mailbox cmd <cmd> issue */ 8082 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8083 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 8084 "x%x\n", 8085 pmbox->vport ? pmbox->vport->vpi : 0, 8086 mbx->mbxCommand, 8087 phba->pport ? phba->pport->port_state : 0xff, 8088 psli->sli_flag, flag); 8089 8090 if (mbx->mbxCommand != MBX_HEARTBEAT) { 8091 if (pmbox->vport) { 8092 lpfc_debugfs_disc_trc(pmbox->vport, 8093 LPFC_DISC_TRC_MBOX_VPORT, 8094 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8095 (uint32_t)mbx->mbxCommand, 8096 mbx->un.varWords[0], mbx->un.varWords[1]); 8097 } 8098 else { 8099 lpfc_debugfs_disc_trc(phba->pport, 8100 LPFC_DISC_TRC_MBOX, 8101 "MBOX Send: cmd:x%x mb:x%x x%x", 8102 (uint32_t)mbx->mbxCommand, 8103 mbx->un.varWords[0], mbx->un.varWords[1]); 8104 } 8105 } 8106 8107 psli->slistat.mbox_cmd++; 8108 evtctr = psli->slistat.mbox_event; 8109 8110 /* next set own bit for the adapter and copy over command word */ 8111 mbx->mbxOwner = OWN_CHIP; 8112 8113 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8114 /* Populate mbox extension offset word. */ 8115 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 8116 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8117 = (uint8_t *)phba->mbox_ext 8118 - (uint8_t *)phba->mbox; 8119 } 8120 8121 /* Copy the mailbox extension data */ 8122 if (pmbox->in_ext_byte_len && pmbox->context2) { 8123 lpfc_sli_pcimem_bcopy(pmbox->context2, 8124 (uint8_t *)phba->mbox_ext, 8125 pmbox->in_ext_byte_len); 8126 } 8127 /* Copy command data to host SLIM area */ 8128 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 8129 } else { 8130 /* Populate mbox extension offset word. 
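 * The port is not in SLI2 host-mailbox mode here, so the extension data
 * is placed at MAILBOX_HBA_EXT_OFFSET within HBA SLIM rather than in the
 * host mailbox.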
*/ 8131 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 8132 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 8133 = MAILBOX_HBA_EXT_OFFSET; 8134 8135 /* Copy the mailbox extension data */ 8136 if (pmbox->in_ext_byte_len && pmbox->context2) 8137 lpfc_memcpy_to_slim(phba->MBslimaddr + 8138 MAILBOX_HBA_EXT_OFFSET, 8139 pmbox->context2, pmbox->in_ext_byte_len); 8140 8141 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8142 /* copy command data into host mbox for cmpl */ 8143 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 8144 MAILBOX_CMD_SIZE); 8145 8146 /* First copy mbox command data to HBA SLIM, skip past first 8147 word */ 8148 to_slim = phba->MBslimaddr + sizeof (uint32_t); 8149 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 8150 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 8151 8152 /* Next copy over first word, with mbxOwner set */ 8153 ldata = *((uint32_t *)mbx); 8154 to_slim = phba->MBslimaddr; 8155 writel(ldata, to_slim); 8156 readl(to_slim); /* flush */ 8157 8158 if (mbx->mbxCommand == MBX_CONFIG_PORT) 8159 /* switch over to host mailbox */ 8160 psli->sli_flag |= LPFC_SLI_ACTIVE; 8161 } 8162 8163 wmb(); 8164 8165 switch (flag) { 8166 case MBX_NOWAIT: 8167 /* Set up reference to mailbox command */ 8168 psli->mbox_active = pmbox; 8169 /* Interrupt board to do it */ 8170 writel(CA_MBATT, phba->CAregaddr); 8171 readl(phba->CAregaddr); /* flush */ 8172 /* Don't wait for it to finish, just return */ 8173 break; 8174 8175 case MBX_POLL: 8176 /* Set up null reference to mailbox command */ 8177 psli->mbox_active = NULL; 8178 /* Interrupt board to do it */ 8179 writel(CA_MBATT, phba->CAregaddr); 8180 readl(phba->CAregaddr); /* flush */ 8181 8182 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8183 /* First read mbox status word */ 8184 word0 = *((uint32_t *)phba->mbox); 8185 word0 = le32_to_cpu(word0); 8186 } else { 8187 /* First read mbox status word */ 8188 if (lpfc_readl(phba->MBslimaddr, &word0)) { 8189 spin_unlock_irqrestore(&phba->hbalock, 8190 drvr_flag); 8191 goto out_not_finished; 8192 } 8193 } 8194 8195 /* Read the HBA Host Attention Register */ 8196 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8197 spin_unlock_irqrestore(&phba->hbalock, 8198 drvr_flag); 8199 goto out_not_finished; 8200 } 8201 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 8202 1000) + jiffies; 8203 i = 0; 8204 /* Wait for command to complete */ 8205 while (((word0 & OWN_CHIP) == OWN_CHIP) || 8206 (!(ha_copy & HA_MBATT) && 8207 (phba->link_state > LPFC_WARM_START))) { 8208 if (time_after(jiffies, timeout)) { 8209 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8210 spin_unlock_irqrestore(&phba->hbalock, 8211 drvr_flag); 8212 goto out_not_finished; 8213 } 8214 8215 /* Check if we took a mbox interrupt while we were 8216 polling */ 8217 if (((word0 & OWN_CHIP) != OWN_CHIP) 8218 && (evtctr != psli->slistat.mbox_event)) 8219 break; 8220 8221 if (i++ > 10) { 8222 spin_unlock_irqrestore(&phba->hbalock, 8223 drvr_flag); 8224 msleep(1); 8225 spin_lock_irqsave(&phba->hbalock, drvr_flag); 8226 } 8227 8228 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8229 /* First copy command data */ 8230 word0 = *((uint32_t *)phba->mbox); 8231 word0 = le32_to_cpu(word0); 8232 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 8233 MAILBOX_t *slimmb; 8234 uint32_t slimword0; 8235 /* Check real SLIM for any errors */ 8236 slimword0 = readl(phba->MBslimaddr); 8237 slimmb = (MAILBOX_t *) & slimword0; 8238 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 8239 && slimmb->mbxStatus) { 8240 psli->sli_flag &= 8241 ~LPFC_SLI_ACTIVE; 8242 word0 = slimword0; 8243 } 8244 } 8245 } else { 
8246 /* First copy command data */ 8247 word0 = readl(phba->MBslimaddr); 8248 } 8249 /* Read the HBA Host Attention Register */ 8250 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 8251 spin_unlock_irqrestore(&phba->hbalock, 8252 drvr_flag); 8253 goto out_not_finished; 8254 } 8255 } 8256 8257 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 8258 /* copy results back to user */ 8259 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 8260 MAILBOX_CMD_SIZE); 8261 /* Copy the mailbox extension data */ 8262 if (pmbox->out_ext_byte_len && pmbox->context2) { 8263 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 8264 pmbox->context2, 8265 pmbox->out_ext_byte_len); 8266 } 8267 } else { 8268 /* First copy command data */ 8269 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 8270 MAILBOX_CMD_SIZE); 8271 /* Copy the mailbox extension data */ 8272 if (pmbox->out_ext_byte_len && pmbox->context2) { 8273 lpfc_memcpy_from_slim(pmbox->context2, 8274 phba->MBslimaddr + 8275 MAILBOX_HBA_EXT_OFFSET, 8276 pmbox->out_ext_byte_len); 8277 } 8278 } 8279 8280 writel(HA_MBATT, phba->HAregaddr); 8281 readl(phba->HAregaddr); /* flush */ 8282 8283 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8284 status = mbx->mbxStatus; 8285 } 8286 8287 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 8288 return status; 8289 8290 out_not_finished: 8291 if (processing_queue) { 8292 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 8293 lpfc_mbox_cmpl_put(phba, pmbox); 8294 } 8295 return MBX_NOT_FINISHED; 8296 } 8297 8298 /** 8299 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 8300 * @phba: Pointer to HBA context object. 8301 * 8302 * The function blocks the posting of SLI4 asynchronous mailbox commands from 8303 * the driver internal pending mailbox queue. It will then try to wait out the 8304 * possible outstanding mailbox command before returning. 8305 * 8306 * Returns: 8307 * 0 - the outstanding mailbox command completed; otherwise, the wait for 8308 * the outstanding mailbox command timed out. 8309 **/ 8310 static int 8311 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 8312 { 8313 struct lpfc_sli *psli = &phba->sli; 8314 int rc = 0; 8315 unsigned long timeout = 0; 8316 8317 /* Mark the asynchronous mailbox command posting as blocked */ 8318 spin_lock_irq(&phba->hbalock); 8319 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 8320 /* Determine how long we might wait for the active mailbox 8321 * command to be gracefully completed by firmware. 8322 */ 8323 if (phba->sli.mbox_active) 8324 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 8325 phba->sli.mbox_active) * 8326 1000) + jiffies; 8327 spin_unlock_irq(&phba->hbalock); 8328 8329 /* Make sure the mailbox is really active */ 8330 if (timeout) 8331 lpfc_sli4_process_missed_mbox_completions(phba); 8332 8333 /* Wait for the outstanding mailbox command to complete */ 8334 while (phba->sli.mbox_active) { 8335 /* Check active mailbox complete status every 2ms */ 8336 msleep(2); 8337 if (time_after(jiffies, timeout)) { 8338 /* Timeout, mark the outstanding cmd as not complete */ 8339 rc = 1; 8340 break; 8341 } 8342 } 8343 8344 /* Cannot cleanly block async mailbox command, fail it */ 8345 if (rc) { 8346 spin_lock_irq(&phba->hbalock); 8347 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8348 spin_unlock_irq(&phba->hbalock); 8349 } 8350 return rc; 8351 } 8352 8353 /** 8354 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command 8355 * @phba: Pointer to HBA context object.
8356 * 8357 * The function unblocks and resumes posting of SLI4 asynchronous mailbox 8358 * commands from the driver internal pending mailbox queue. It makes sure 8359 * that there is no outstanding mailbox command before resuming posting 8360 * asynchronous mailbox commands. If, for any reason, there is an outstanding 8361 * mailbox command, it will try to wait it out before resuming asynchronous 8362 * mailbox command posting. 8363 **/ 8364 static void 8365 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 8366 { 8367 struct lpfc_sli *psli = &phba->sli; 8368 8369 spin_lock_irq(&phba->hbalock); 8370 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8371 /* Asynchronous mailbox posting is not blocked, do nothing */ 8372 spin_unlock_irq(&phba->hbalock); 8373 return; 8374 } 8375 8376 /* Outstanding synchronous mailbox command is guaranteed to be done, 8377 * successful or timed out; after timing out, the outstanding mailbox 8378 * command shall always be removed, so just unblock posting async 8379 * mailbox commands and resume 8380 */ 8381 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 8382 spin_unlock_irq(&phba->hbalock); 8383 8384 /* wake up worker thread to post asynchronous mailbox command */ 8385 lpfc_worker_wake_up(phba); 8386 } 8387 8388 /** 8389 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 8390 * @phba: Pointer to HBA context object. 8391 * @mboxq: Pointer to mailbox object. 8392 * 8393 * The function waits for the bootstrap mailbox register ready bit from 8394 * the port for twice the regular mailbox command timeout value. 8395 * 8396 * 0 - no timeout on waiting for bootstrap mailbox register ready. 8397 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 8398 **/ 8399 static int 8400 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8401 { 8402 uint32_t db_ready; 8403 unsigned long timeout; 8404 struct lpfc_register bmbx_reg; 8405 8406 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 8407 * 1000) + jiffies; 8408 8409 do { 8410 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 8411 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 8412 if (!db_ready) 8413 msleep(2); 8414 8415 if (time_after(jiffies, timeout)) 8416 return MBXERR_ERROR; 8417 } while (!db_ready); 8418 8419 return 0; 8420 } 8421 8422 /** 8423 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 8424 * @phba: Pointer to HBA context object. 8425 * @mboxq: Pointer to mailbox object. 8426 * 8427 * The function posts a mailbox to the port. The mailbox is expected 8428 * to be completely filled in and ready for the port to operate on it. 8429 * This routine executes a synchronous completion operation on the 8430 * mailbox by polling for its completion. 8431 * 8432 * The caller must not be holding any locks when calling this routine. 8433 * 8434 * Returns: 8435 * MBX_SUCCESS - mailbox posted successfully 8436 * Any of the MBX error values. 8437 **/ 8438 static int 8439 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 8440 { 8441 int rc = MBX_SUCCESS; 8442 unsigned long iflag; 8443 uint32_t mcqe_status; 8444 uint32_t mbx_cmnd; 8445 struct lpfc_sli *psli = &phba->sli; 8446 struct lpfc_mqe *mb = &mboxq->u.mqe; 8447 struct lpfc_bmbx_create *mbox_rgn; 8448 struct dma_address *dma_address; 8449 8450 /* 8451 * Only one mailbox can be active to the bootstrap mailbox region 8452 * at a time and there is no queueing provided.
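 * The routine therefore takes the MBOX_ACTIVE token, copies the MQE into
 * the bootstrap region, and posts the high then the low half of the
 * bootstrap DMA address, waiting for the ready bit after each doorbell
 * write.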
8453 */ 8454 spin_lock_irqsave(&phba->hbalock, iflag); 8455 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8456 spin_unlock_irqrestore(&phba->hbalock, iflag); 8457 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8458 "(%d):2532 Mailbox command x%x (x%x/x%x) " 8459 "cannot issue Data: x%x x%x\n", 8460 mboxq->vport ? mboxq->vport->vpi : 0, 8461 mboxq->u.mb.mbxCommand, 8462 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8463 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8464 psli->sli_flag, MBX_POLL); 8465 return MBXERR_ERROR; 8466 } 8467 /* The server grabs the token and owns it until release */ 8468 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8469 phba->sli.mbox_active = mboxq; 8470 spin_unlock_irqrestore(&phba->hbalock, iflag); 8471 8472 /* wait for bootstrap mbox register for readyness */ 8473 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8474 if (rc) 8475 goto exit; 8476 8477 /* 8478 * Initialize the bootstrap memory region to avoid stale data areas 8479 * in the mailbox post. Then copy the caller's mailbox contents to 8480 * the bmbx mailbox region. 8481 */ 8482 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 8483 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 8484 lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 8485 sizeof(struct lpfc_mqe)); 8486 8487 /* Post the high mailbox dma address to the port and wait for ready. */ 8488 dma_address = &phba->sli4_hba.bmbx.dma_address; 8489 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 8490 8491 /* wait for bootstrap mbox register for hi-address write done */ 8492 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8493 if (rc) 8494 goto exit; 8495 8496 /* Post the low mailbox dma address to the port. */ 8497 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 8498 8499 /* wait for bootstrap mbox register for low address write done */ 8500 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 8501 if (rc) 8502 goto exit; 8503 8504 /* 8505 * Read the CQ to ensure the mailbox has completed. 8506 * If so, update the mailbox status so that the upper layers 8507 * can complete the request normally. 8508 */ 8509 lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 8510 sizeof(struct lpfc_mqe)); 8511 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 8512 lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 8513 sizeof(struct lpfc_mcqe)); 8514 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8515 /* 8516 * When the CQE status indicates a failure and the mailbox status 8517 * indicates success then copy the CQE status into the mailbox status 8518 * (and prefix it with x4000). 8519 */ 8520 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8521 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8522 bf_set(lpfc_mqe_status, mb, 8523 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8524 rc = MBXERR_ERROR; 8525 } else 8526 lpfc_sli4_swap_str(phba, mboxq); 8527 8528 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8529 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8530 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8531 " x%x x%x CQ: x%x x%x x%x x%x\n", 8532 mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, 8533 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8534 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8535 bf_get(lpfc_mqe_status, mb), 8536 mb->un.mb_words[0], mb->un.mb_words[1], 8537 mb->un.mb_words[2], mb->un.mb_words[3], 8538 mb->un.mb_words[4], mb->un.mb_words[5], 8539 mb->un.mb_words[6], mb->un.mb_words[7], 8540 mb->un.mb_words[8], mb->un.mb_words[9], 8541 mb->un.mb_words[10], mb->un.mb_words[11], 8542 mb->un.mb_words[12], mboxq->mcqe.word0, 8543 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8544 mboxq->mcqe.trailer); 8545 exit: 8546 /* We are holding the token, no needed for lock when release */ 8547 spin_lock_irqsave(&phba->hbalock, iflag); 8548 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8549 phba->sli.mbox_active = NULL; 8550 spin_unlock_irqrestore(&phba->hbalock, iflag); 8551 return rc; 8552 } 8553 8554 /** 8555 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8556 * @phba: Pointer to HBA context object. 8557 * @pmbox: Pointer to mailbox object. 8558 * @flag: Flag indicating how the mailbox need to be processed. 8559 * 8560 * This function is called by discovery code and HBA management code to submit 8561 * a mailbox command to firmware with SLI-4 interface spec. 8562 * 8563 * Return codes the caller owns the mailbox command after the return of the 8564 * function. 8565 **/ 8566 static int 8567 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8568 uint32_t flag) 8569 { 8570 struct lpfc_sli *psli = &phba->sli; 8571 unsigned long iflags; 8572 int rc; 8573 8574 /* dump from issue mailbox command if setup */ 8575 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8576 8577 rc = lpfc_mbox_dev_check(phba); 8578 if (unlikely(rc)) { 8579 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8580 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8581 "cannot issue Data: x%x x%x\n", 8582 mboxq->vport ? mboxq->vport->vpi : 0, 8583 mboxq->u.mb.mbxCommand, 8584 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8585 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8586 psli->sli_flag, flag); 8587 goto out_not_finished; 8588 } 8589 8590 /* Detect polling mode and jump to a handler */ 8591 if (!phba->sli4_hba.intr_enable) { 8592 if (flag == MBX_POLL) 8593 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8594 else 8595 rc = -EIO; 8596 if (rc != MBX_SUCCESS) 8597 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8598 "(%d):2541 Mailbox command x%x " 8599 "(x%x/x%x) failure: " 8600 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8601 "Data: x%x x%x\n,", 8602 mboxq->vport ? mboxq->vport->vpi : 0, 8603 mboxq->u.mb.mbxCommand, 8604 lpfc_sli_config_mbox_subsys_get(phba, 8605 mboxq), 8606 lpfc_sli_config_mbox_opcode_get(phba, 8607 mboxq), 8608 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8609 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8610 bf_get(lpfc_mcqe_ext_status, 8611 &mboxq->mcqe), 8612 psli->sli_flag, flag); 8613 return rc; 8614 } else if (flag == MBX_POLL) { 8615 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8616 "(%d):2542 Try to issue mailbox command " 8617 "x%x (x%x/x%x) synchronously ahead of async " 8618 "mailbox command queue: x%x x%x\n", 8619 mboxq->vport ? 
mboxq->vport->vpi : 0, 8620 mboxq->u.mb.mbxCommand, 8621 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8622 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8623 psli->sli_flag, flag); 8624 /* Try to block the asynchronous mailbox posting */ 8625 rc = lpfc_sli4_async_mbox_block(phba); 8626 if (!rc) { 8627 /* Successfully blocked, now issue sync mbox cmd */ 8628 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8629 if (rc != MBX_SUCCESS) 8630 lpfc_printf_log(phba, KERN_WARNING, 8631 LOG_MBOX | LOG_SLI, 8632 "(%d):2597 Sync Mailbox command " 8633 "x%x (x%x/x%x) failure: " 8634 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8635 "Data: x%x x%x\n,", 8636 mboxq->vport ? mboxq->vport->vpi : 0, 8637 mboxq->u.mb.mbxCommand, 8638 lpfc_sli_config_mbox_subsys_get(phba, 8639 mboxq), 8640 lpfc_sli_config_mbox_opcode_get(phba, 8641 mboxq), 8642 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8643 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8644 bf_get(lpfc_mcqe_ext_status, 8645 &mboxq->mcqe), 8646 psli->sli_flag, flag); 8647 /* Unblock the async mailbox posting afterward */ 8648 lpfc_sli4_async_mbox_unblock(phba); 8649 } 8650 return rc; 8651 } 8652 8653 /* Now, interrupt mode asynchrous mailbox command */ 8654 rc = lpfc_mbox_cmd_check(phba, mboxq); 8655 if (rc) { 8656 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8657 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8658 "cannot issue Data: x%x x%x\n", 8659 mboxq->vport ? mboxq->vport->vpi : 0, 8660 mboxq->u.mb.mbxCommand, 8661 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8662 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8663 psli->sli_flag, flag); 8664 goto out_not_finished; 8665 } 8666 8667 /* Put the mailbox command to the driver internal FIFO */ 8668 psli->slistat.mbox_busy++; 8669 spin_lock_irqsave(&phba->hbalock, iflags); 8670 lpfc_mbox_put(phba, mboxq); 8671 spin_unlock_irqrestore(&phba->hbalock, iflags); 8672 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8673 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8674 "x%x (x%x/x%x) x%x x%x x%x\n", 8675 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8676 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8677 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8678 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8679 phba->pport->port_state, 8680 psli->sli_flag, MBX_NOWAIT); 8681 /* Wake up worker thread to transport mailbox command from head */ 8682 lpfc_worker_wake_up(phba); 8683 8684 return MBX_BUSY; 8685 8686 out_not_finished: 8687 return MBX_NOT_FINISHED; 8688 } 8689 8690 /** 8691 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8692 * @phba: Pointer to HBA context object. 8693 * 8694 * This function is called by worker thread to send a mailbox command to 8695 * SLI4 HBA firmware. 
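 * It returns MBX_SUCCESS when the next queued command has been posted
 * (or no command is waiting) and MBX_NOT_FINISHED when posting is
 * blocked or fails.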
8696 * 8697 **/ 8698 int 8699 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8700 { 8701 struct lpfc_sli *psli = &phba->sli; 8702 LPFC_MBOXQ_t *mboxq; 8703 int rc = MBX_SUCCESS; 8704 unsigned long iflags; 8705 struct lpfc_mqe *mqe; 8706 uint32_t mbx_cmnd; 8707 8708 /* Check interrupt mode before post async mailbox command */ 8709 if (unlikely(!phba->sli4_hba.intr_enable)) 8710 return MBX_NOT_FINISHED; 8711 8712 /* Check for mailbox command service token */ 8713 spin_lock_irqsave(&phba->hbalock, iflags); 8714 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8715 spin_unlock_irqrestore(&phba->hbalock, iflags); 8716 return MBX_NOT_FINISHED; 8717 } 8718 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8719 spin_unlock_irqrestore(&phba->hbalock, iflags); 8720 return MBX_NOT_FINISHED; 8721 } 8722 if (unlikely(phba->sli.mbox_active)) { 8723 spin_unlock_irqrestore(&phba->hbalock, iflags); 8724 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8725 "0384 There is pending active mailbox cmd\n"); 8726 return MBX_NOT_FINISHED; 8727 } 8728 /* Take the mailbox command service token */ 8729 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8730 8731 /* Get the next mailbox command from head of queue */ 8732 mboxq = lpfc_mbox_get(phba); 8733 8734 /* If no more mailbox command waiting for post, we're done */ 8735 if (!mboxq) { 8736 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8737 spin_unlock_irqrestore(&phba->hbalock, iflags); 8738 return MBX_SUCCESS; 8739 } 8740 phba->sli.mbox_active = mboxq; 8741 spin_unlock_irqrestore(&phba->hbalock, iflags); 8742 8743 /* Check device readiness for posting mailbox command */ 8744 rc = lpfc_mbox_dev_check(phba); 8745 if (unlikely(rc)) 8746 /* Driver clean routine will clean up pending mailbox */ 8747 goto out_not_finished; 8748 8749 /* Prepare the mbox command to be posted */ 8750 mqe = &mboxq->u.mqe; 8751 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8752 8753 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8754 mod_timer(&psli->mbox_tmo, (jiffies + 8755 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8756 8757 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8758 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8759 "x%x x%x\n", 8760 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8761 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8762 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8763 phba->pport->port_state, psli->sli_flag); 8764 8765 if (mbx_cmnd != MBX_HEARTBEAT) { 8766 if (mboxq->vport) { 8767 lpfc_debugfs_disc_trc(mboxq->vport, 8768 LPFC_DISC_TRC_MBOX_VPORT, 8769 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8770 mbx_cmnd, mqe->un.mb_words[0], 8771 mqe->un.mb_words[1]); 8772 } else { 8773 lpfc_debugfs_disc_trc(phba->pport, 8774 LPFC_DISC_TRC_MBOX, 8775 "MBOX Send: cmd:x%x mb:x%x x%x", 8776 mbx_cmnd, mqe->un.mb_words[0], 8777 mqe->un.mb_words[1]); 8778 } 8779 } 8780 psli->slistat.mbox_cmd++; 8781 8782 /* Post the mailbox command to the port */ 8783 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8784 if (rc != MBX_SUCCESS) { 8785 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8786 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8787 "cannot issue Data: x%x x%x\n", 8788 mboxq->vport ? 
mboxq->vport->vpi : 0, 8789 mboxq->u.mb.mbxCommand, 8790 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8791 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8792 psli->sli_flag, MBX_NOWAIT); 8793 goto out_not_finished; 8794 } 8795 8796 return rc; 8797 8798 out_not_finished: 8799 spin_lock_irqsave(&phba->hbalock, iflags); 8800 if (phba->sli.mbox_active) { 8801 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8802 __lpfc_mbox_cmpl_put(phba, mboxq); 8803 /* Release the token */ 8804 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8805 phba->sli.mbox_active = NULL; 8806 } 8807 spin_unlock_irqrestore(&phba->hbalock, iflags); 8808 8809 return MBX_NOT_FINISHED; 8810 } 8811 8812 /** 8813 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8814 * @phba: Pointer to HBA context object. 8815 * @pmbox: Pointer to mailbox object. 8816 * @flag: Flag indicating how the mailbox need to be processed. 8817 * 8818 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8819 * the API jump table function pointer from the lpfc_hba struct. 8820 * 8821 * Return codes the caller owns the mailbox command after the return of the 8822 * function. 8823 **/ 8824 int 8825 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8826 { 8827 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8828 } 8829 8830 /** 8831 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8832 * @phba: The hba struct for which this call is being executed. 8833 * @dev_grp: The HBA PCI-Device group number. 8834 * 8835 * This routine sets up the mbox interface API function jump table in @phba 8836 * struct. 8837 * Returns: 0 - success, -ENODEV - failure. 8838 **/ 8839 int 8840 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8841 { 8842 8843 switch (dev_grp) { 8844 case LPFC_PCI_DEV_LP: 8845 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8846 phba->lpfc_sli_handle_slow_ring_event = 8847 lpfc_sli_handle_slow_ring_event_s3; 8848 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8849 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8850 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8851 break; 8852 case LPFC_PCI_DEV_OC: 8853 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8854 phba->lpfc_sli_handle_slow_ring_event = 8855 lpfc_sli_handle_slow_ring_event_s4; 8856 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8857 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8858 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8859 break; 8860 default: 8861 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8862 "1420 Invalid HBA PCI-device group: 0x%x\n", 8863 dev_grp); 8864 return -ENODEV; 8865 break; 8866 } 8867 return 0; 8868 } 8869 8870 /** 8871 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8872 * @phba: Pointer to HBA context object. 8873 * @pring: Pointer to driver SLI ring object. 8874 * @piocb: Pointer to address of newly added command iocb. 8875 * 8876 * This function is called with hbalock held to add a command 8877 * iocb to the txq when SLI layer cannot submit the command iocb 8878 * to the ring. 8879 **/ 8880 void 8881 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8882 struct lpfc_iocbq *piocb) 8883 { 8884 lockdep_assert_held(&phba->hbalock); 8885 /* Insert the caller's iocb in the txq tail for later processing. */ 8886 list_add_tail(&piocb->list, &pring->txq); 8887 } 8888 8889 /** 8890 * lpfc_sli_next_iocb - Get the next iocb in the txq 8891 * @phba: Pointer to HBA context object. 
8892 * @pring: Pointer to driver SLI ring object. 8893 * @piocb: Pointer to address of newly added command iocb. 8894 * 8895 * This function is called with hbalock held before a new 8896 * iocb is submitted to the firmware. This function checks 8897 * txq to flush the iocbs in txq to Firmware before 8898 * submitting new iocbs to the Firmware. 8899 * If there are iocbs in the txq which need to be submitted 8900 * to firmware, lpfc_sli_next_iocb returns the first element 8901 * of the txq after dequeuing it from txq. 8902 * If there is no iocb in the txq then the function will return 8903 * *piocb and *piocb is set to NULL. Caller needs to check 8904 * *piocb to find if there are more commands in the txq. 8905 **/ 8906 static struct lpfc_iocbq * 8907 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8908 struct lpfc_iocbq **piocb) 8909 { 8910 struct lpfc_iocbq * nextiocb; 8911 8912 lockdep_assert_held(&phba->hbalock); 8913 8914 nextiocb = lpfc_sli_ringtx_get(phba, pring); 8915 if (!nextiocb) { 8916 nextiocb = *piocb; 8917 *piocb = NULL; 8918 } 8919 8920 return nextiocb; 8921 } 8922 8923 /** 8924 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 8925 * @phba: Pointer to HBA context object. 8926 * @ring_number: SLI ring number to issue iocb on. 8927 * @piocb: Pointer to command iocb. 8928 * @flag: Flag indicating if this command can be put into txq. 8929 * 8930 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 8931 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 8932 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 8933 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 8934 * this function allows only iocbs for posting buffers. This function finds 8935 * next available slot in the command ring and posts the command to the 8936 * available slot and writes the port attention register to request HBA start 8937 * processing new iocb. If there is no slot available in the ring and 8938 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 8939 * the function returns IOCB_BUSY. 8940 * 8941 * This function is called with hbalock held. The function will return success 8942 * after it successfully submit the iocb to firmware or after adding to the 8943 * txq. 8944 **/ 8945 static int 8946 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 8947 struct lpfc_iocbq *piocb, uint32_t flag) 8948 { 8949 struct lpfc_iocbq *nextiocb; 8950 IOCB_t *iocb; 8951 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 8952 8953 lockdep_assert_held(&phba->hbalock); 8954 8955 if (piocb->iocb_cmpl && (!piocb->vport) && 8956 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 8957 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 8958 lpfc_printf_log(phba, KERN_ERR, 8959 LOG_SLI | LOG_VPORT, 8960 "1807 IOCB x%x failed. No vport\n", 8961 piocb->iocb.ulpCommand); 8962 dump_stack(); 8963 return IOCB_ERROR; 8964 } 8965 8966 8967 /* If the PCI channel is in offline state, do not post iocbs. */ 8968 if (unlikely(pci_channel_offline(phba->pcidev))) 8969 return IOCB_ERROR; 8970 8971 /* If HBA has a deferred error attention, fail the iocb. 
*/ 8972 if (unlikely(phba->hba_flag & DEFER_ERATT)) 8973 return IOCB_ERROR; 8974 8975 /* 8976 * We should never get an IOCB if we are in a < LINK_DOWN state 8977 */ 8978 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8979 return IOCB_ERROR; 8980 8981 /* 8982 * Check to see if we are blocking IOCB processing because of a 8983 * outstanding event. 8984 */ 8985 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 8986 goto iocb_busy; 8987 8988 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 8989 /* 8990 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 8991 * can be issued if the link is not up. 8992 */ 8993 switch (piocb->iocb.ulpCommand) { 8994 case CMD_GEN_REQUEST64_CR: 8995 case CMD_GEN_REQUEST64_CX: 8996 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 8997 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 8998 FC_RCTL_DD_UNSOL_CMD) || 8999 (piocb->iocb.un.genreq64.w5.hcsw.Type != 9000 MENLO_TRANSPORT_TYPE)) 9001 9002 goto iocb_busy; 9003 break; 9004 case CMD_QUE_RING_BUF_CN: 9005 case CMD_QUE_RING_BUF64_CN: 9006 /* 9007 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 9008 * completion, iocb_cmpl MUST be 0. 9009 */ 9010 if (piocb->iocb_cmpl) 9011 piocb->iocb_cmpl = NULL; 9012 /*FALLTHROUGH*/ 9013 case CMD_CREATE_XRI_CR: 9014 case CMD_CLOSE_XRI_CN: 9015 case CMD_CLOSE_XRI_CX: 9016 break; 9017 default: 9018 goto iocb_busy; 9019 } 9020 9021 /* 9022 * For FCP commands, we must be in a state where we can process link 9023 * attention events. 9024 */ 9025 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 9026 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 9027 goto iocb_busy; 9028 } 9029 9030 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 9031 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 9032 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 9033 9034 if (iocb) 9035 lpfc_sli_update_ring(phba, pring); 9036 else 9037 lpfc_sli_update_full_ring(phba, pring); 9038 9039 if (!piocb) 9040 return IOCB_SUCCESS; 9041 9042 goto out_busy; 9043 9044 iocb_busy: 9045 pring->stats.iocb_cmd_delay++; 9046 9047 out_busy: 9048 9049 if (!(flag & SLI_IOCB_RET_IOCB)) { 9050 __lpfc_sli_ringtx_put(phba, pring, piocb); 9051 return IOCB_SUCCESS; 9052 } 9053 9054 return IOCB_BUSY; 9055 } 9056 9057 /** 9058 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 9059 * @phba: Pointer to HBA context object. 9060 * @piocb: Pointer to command iocb. 9061 * @sglq: Pointer to the scatter gather queue object. 9062 * 9063 * This routine converts the bpl or bde that is in the IOCB 9064 * to a sgl list for the sli4 hardware. The physical address 9065 * of the bpl/bde is converted back to a virtual address. 9066 * If the IOCB contains a BPL then the list of BDE's is 9067 * converted to sli4_sge's. If the IOCB contains a single 9068 * BDE then it is converted to a single sli_sge. 9069 * The IOCB is still in cpu endianess so the contents of 9070 * the bpl can be used without byte swapping. 9071 * 9072 * Returns valid XRI = Success, NO_XRI = Failure. 
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo = cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}

/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
9178 **/ 9179 static int 9180 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 9181 union lpfc_wqe128 *wqe) 9182 { 9183 uint32_t xmit_len = 0, total_len = 0; 9184 uint8_t ct = 0; 9185 uint32_t fip; 9186 uint32_t abort_tag; 9187 uint8_t command_type = ELS_COMMAND_NON_FIP; 9188 uint8_t cmnd; 9189 uint16_t xritag; 9190 uint16_t abrt_iotag; 9191 struct lpfc_iocbq *abrtiocbq; 9192 struct ulp_bde64 *bpl = NULL; 9193 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 9194 int numBdes, i; 9195 struct ulp_bde64 bde; 9196 struct lpfc_nodelist *ndlp; 9197 uint32_t *pcmd; 9198 uint32_t if_type; 9199 9200 fip = phba->hba_flag & HBA_FIP_SUPPORT; 9201 /* The fcp commands will set command type */ 9202 if (iocbq->iocb_flag & LPFC_IO_FCP) 9203 command_type = FCP_COMMAND; 9204 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 9205 command_type = ELS_COMMAND_FIP; 9206 else 9207 command_type = ELS_COMMAND_NON_FIP; 9208 9209 if (phba->fcp_embed_io) 9210 memset(wqe, 0, sizeof(union lpfc_wqe128)); 9211 /* Some of the fields are in the right position already */ 9212 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 9213 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 9214 /* The ct field has moved so reset */ 9215 wqe->generic.wqe_com.word7 = 0; 9216 wqe->generic.wqe_com.word10 = 0; 9217 } 9218 9219 abort_tag = (uint32_t) iocbq->iotag; 9220 xritag = iocbq->sli4_xritag; 9221 /* words0-2 bpl convert bde */ 9222 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 9223 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9224 sizeof(struct ulp_bde64); 9225 bpl = (struct ulp_bde64 *) 9226 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 9227 if (!bpl) 9228 return IOCB_ERROR; 9229 9230 /* Should already be byte swapped. */ 9231 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 9232 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 9233 /* swap the size field back to the cpu so we 9234 * can assign it to the sgl. 
9235 */ 9236 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 9237 xmit_len = wqe->generic.bde.tus.f.bdeSize; 9238 total_len = 0; 9239 for (i = 0; i < numBdes; i++) { 9240 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9241 total_len += bde.tus.f.bdeSize; 9242 } 9243 } else 9244 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 9245 9246 iocbq->iocb.ulpIoTag = iocbq->iotag; 9247 cmnd = iocbq->iocb.ulpCommand; 9248 9249 switch (iocbq->iocb.ulpCommand) { 9250 case CMD_ELS_REQUEST64_CR: 9251 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 9252 ndlp = iocbq->context_un.ndlp; 9253 else 9254 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9255 if (!iocbq->iocb.ulpLe) { 9256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9257 "2007 Only Limited Edition cmd Format" 9258 " supported 0x%x\n", 9259 iocbq->iocb.ulpCommand); 9260 return IOCB_ERROR; 9261 } 9262 9263 wqe->els_req.payload_len = xmit_len; 9264 /* Els_reguest64 has a TMO */ 9265 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 9266 iocbq->iocb.ulpTimeout); 9267 /* Need a VF for word 4 set the vf bit*/ 9268 bf_set(els_req64_vf, &wqe->els_req, 0); 9269 /* And a VFID for word 12 */ 9270 bf_set(els_req64_vfid, &wqe->els_req, 0); 9271 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9272 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9273 iocbq->iocb.ulpContext); 9274 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 9275 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 9276 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 9277 if (command_type == ELS_COMMAND_FIP) 9278 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 9279 >> LPFC_FIP_ELS_ID_SHIFT); 9280 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9281 iocbq->context2)->virt); 9282 if_type = bf_get(lpfc_sli_intf_if_type, 9283 &phba->sli4_hba.sli_intf); 9284 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9285 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 9286 *pcmd == ELS_CMD_SCR || 9287 *pcmd == ELS_CMD_FDISC || 9288 *pcmd == ELS_CMD_LOGO || 9289 *pcmd == ELS_CMD_PLOGI)) { 9290 bf_set(els_req64_sp, &wqe->els_req, 1); 9291 bf_set(els_req64_sid, &wqe->els_req, 9292 iocbq->vport->fc_myDID); 9293 if ((*pcmd == ELS_CMD_FLOGI) && 9294 !(phba->fc_topology == 9295 LPFC_TOPOLOGY_LOOP)) 9296 bf_set(els_req64_sid, &wqe->els_req, 0); 9297 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 9298 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9299 phba->vpi_ids[iocbq->vport->vpi]); 9300 } else if (pcmd && iocbq->context1) { 9301 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 9302 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 9303 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9304 } 9305 } 9306 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 9307 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9308 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 9309 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 9310 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 9311 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 9312 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9313 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 9314 wqe->els_req.max_response_payload_len = total_len - xmit_len; 9315 break; 9316 case CMD_XMIT_SEQUENCE64_CX: 9317 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 9318 iocbq->iocb.un.ulpWord[3]); 9319 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 9320 iocbq->iocb.unsli3.rcvsli3.ox_id); 9321 /* The entire sequence is transmitted for this IOCB */ 9322 xmit_len = total_len; 9323 cmnd = CMD_XMIT_SEQUENCE64_CR; 9324 if (phba->link_flag & LS_LOOPBACK_MODE) 9325 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 9326 case CMD_XMIT_SEQUENCE64_CR: 9327 /* word3 iocb=io_tag32 
wqe=reserved */ 9328 wqe->xmit_sequence.rsvd3 = 0; 9329 /* word4 relative_offset memcpy */ 9330 /* word5 r_ctl/df_ctl memcpy */ 9331 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 9332 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 9333 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 9334 LPFC_WQE_IOD_WRITE); 9335 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 9336 LPFC_WQE_LENLOC_WORD12); 9337 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 9338 wqe->xmit_sequence.xmit_len = xmit_len; 9339 command_type = OTHER_COMMAND; 9340 break; 9341 case CMD_XMIT_BCAST64_CN: 9342 /* word3 iocb=iotag32 wqe=seq_payload_len */ 9343 wqe->xmit_bcast64.seq_payload_len = xmit_len; 9344 /* word4 iocb=rsvd wqe=rsvd */ 9345 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 9346 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 9347 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 9348 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9349 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 9350 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 9351 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 9352 LPFC_WQE_LENLOC_WORD3); 9353 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 9354 break; 9355 case CMD_FCP_IWRITE64_CR: 9356 command_type = FCP_COMMAND_DATA_OUT; 9357 /* word3 iocb=iotag wqe=payload_offset_len */ 9358 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9359 bf_set(payload_offset_len, &wqe->fcp_iwrite, 9360 xmit_len + sizeof(struct fcp_rsp)); 9361 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 9362 0); 9363 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9364 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9365 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 9366 iocbq->iocb.ulpFCP2Rcvy); 9367 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 9368 /* Always open the exchange */ 9369 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 9370 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 9371 LPFC_WQE_LENLOC_WORD4); 9372 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 9373 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 9374 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9375 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 9376 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 9377 if (iocbq->priority) { 9378 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9379 (iocbq->priority << 1)); 9380 } else { 9381 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 9382 (phba->cfg_XLanePriority << 1)); 9383 } 9384 } 9385 /* Note, word 10 is already initialized to 0 */ 9386 9387 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9388 if (phba->cfg_enable_pbde) 9389 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); 9390 else 9391 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); 9392 9393 if (phba->fcp_embed_io) { 9394 struct lpfc_scsi_buf *lpfc_cmd; 9395 struct sli4_sge *sgl; 9396 struct fcp_cmnd *fcp_cmnd; 9397 uint32_t *ptr; 9398 9399 /* 128 byte wqe support here */ 9400 9401 lpfc_cmd = iocbq->context1; 9402 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9403 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9404 9405 /* Word 0-2 - FCP_CMND */ 9406 wqe->generic.bde.tus.f.bdeFlags = 9407 BUFF_TYPE_BDE_IMMED; 9408 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9409 wqe->generic.bde.addrHigh = 0; 9410 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9411 9412 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1); 9413 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0); 9414 9415 /* Word 22-29 FCP CMND Payload */ 9416 ptr = &wqe->words[22]; 9417 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9418 } 
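		/*
		 * When fcp_embed_io is set, the FCP_CMND payload has just been
		 * copied into WQE words 22-29 as an immediate BDE (wqes set,
		 * dbde cleared), so the port reads the command directly from
		 * the WQE instead of fetching it through a separate SGL entry.
		 */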
9419 break; 9420 case CMD_FCP_IREAD64_CR: 9421 /* word3 iocb=iotag wqe=payload_offset_len */ 9422 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9423 bf_set(payload_offset_len, &wqe->fcp_iread, 9424 xmit_len + sizeof(struct fcp_rsp)); 9425 bf_set(cmd_buff_len, &wqe->fcp_iread, 9426 0); 9427 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 9428 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 9429 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 9430 iocbq->iocb.ulpFCP2Rcvy); 9431 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 9432 /* Always open the exchange */ 9433 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 9434 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 9435 LPFC_WQE_LENLOC_WORD4); 9436 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 9437 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 9438 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9439 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 9440 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 9441 if (iocbq->priority) { 9442 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9443 (iocbq->priority << 1)); 9444 } else { 9445 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 9446 (phba->cfg_XLanePriority << 1)); 9447 } 9448 } 9449 /* Note, word 10 is already initialized to 0 */ 9450 9451 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ 9452 if (phba->cfg_enable_pbde) 9453 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); 9454 else 9455 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); 9456 9457 if (phba->fcp_embed_io) { 9458 struct lpfc_scsi_buf *lpfc_cmd; 9459 struct sli4_sge *sgl; 9460 struct fcp_cmnd *fcp_cmnd; 9461 uint32_t *ptr; 9462 9463 /* 128 byte wqe support here */ 9464 9465 lpfc_cmd = iocbq->context1; 9466 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9467 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9468 9469 /* Word 0-2 - FCP_CMND */ 9470 wqe->generic.bde.tus.f.bdeFlags = 9471 BUFF_TYPE_BDE_IMMED; 9472 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9473 wqe->generic.bde.addrHigh = 0; 9474 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9475 9476 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1); 9477 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0); 9478 9479 /* Word 22-29 FCP CMND Payload */ 9480 ptr = &wqe->words[22]; 9481 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9482 } 9483 break; 9484 case CMD_FCP_ICMND64_CR: 9485 /* word3 iocb=iotag wqe=payload_offset_len */ 9486 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 9487 bf_set(payload_offset_len, &wqe->fcp_icmd, 9488 xmit_len + sizeof(struct fcp_rsp)); 9489 bf_set(cmd_buff_len, &wqe->fcp_icmd, 9490 0); 9491 /* word3 iocb=IO_TAG wqe=reserved */ 9492 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 9493 /* Always open the exchange */ 9494 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 9495 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 9496 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 9497 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 9498 LPFC_WQE_LENLOC_NONE); 9499 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 9500 iocbq->iocb.ulpFCP2Rcvy); 9501 if (iocbq->iocb_flag & LPFC_IO_OAS) { 9502 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 9503 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 9504 if (iocbq->priority) { 9505 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9506 (iocbq->priority << 1)); 9507 } else { 9508 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 9509 (phba->cfg_XLanePriority << 1)); 9510 } 9511 } 9512 /* Note, word 10 is already initialized to 0 */ 9513 9514 if (phba->fcp_embed_io) { 9515 struct lpfc_scsi_buf *lpfc_cmd; 9516 struct sli4_sge *sgl; 9517 struct 
fcp_cmnd *fcp_cmnd; 9518 uint32_t *ptr; 9519 9520 /* 128 byte wqe support here */ 9521 9522 lpfc_cmd = iocbq->context1; 9523 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9524 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9525 9526 /* Word 0-2 - FCP_CMND */ 9527 wqe->generic.bde.tus.f.bdeFlags = 9528 BUFF_TYPE_BDE_IMMED; 9529 wqe->generic.bde.tus.f.bdeSize = sgl->sge_len; 9530 wqe->generic.bde.addrHigh = 0; 9531 wqe->generic.bde.addrLow = 88; /* Word 22 */ 9532 9533 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1); 9534 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0); 9535 9536 /* Word 22-29 FCP CMND Payload */ 9537 ptr = &wqe->words[22]; 9538 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9539 } 9540 break; 9541 case CMD_GEN_REQUEST64_CR: 9542 /* For this command calculate the xmit length of the 9543 * request bde. 9544 */ 9545 xmit_len = 0; 9546 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9547 sizeof(struct ulp_bde64); 9548 for (i = 0; i < numBdes; i++) { 9549 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9550 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9551 break; 9552 xmit_len += bde.tus.f.bdeSize; 9553 } 9554 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9555 wqe->gen_req.request_payload_len = xmit_len; 9556 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9557 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9558 /* word6 context tag copied in memcpy */ 9559 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9560 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9561 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9562 "2015 Invalid CT %x command 0x%x\n", 9563 ct, iocbq->iocb.ulpCommand); 9564 return IOCB_ERROR; 9565 } 9566 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9567 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9568 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9569 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9570 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9571 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9572 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9573 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9574 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9575 command_type = OTHER_COMMAND; 9576 break; 9577 case CMD_XMIT_ELS_RSP64_CX: 9578 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9579 /* words0-2 BDE memcpy */ 9580 /* word3 iocb=iotag32 wqe=response_payload_len */ 9581 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9582 /* word4 */ 9583 wqe->xmit_els_rsp.word4 = 0; 9584 /* word5 iocb=rsvd wge=did */ 9585 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9586 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9587 9588 if_type = bf_get(lpfc_sli_intf_if_type, 9589 &phba->sli4_hba.sli_intf); 9590 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 9591 if (iocbq->vport->fc_flag & FC_PT2PT) { 9592 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9593 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9594 iocbq->vport->fc_myDID); 9595 if (iocbq->vport->fc_myDID == Fabric_DID) { 9596 bf_set(wqe_els_did, 9597 &wqe->xmit_els_rsp.wqe_dest, 0); 9598 } 9599 } 9600 } 9601 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9602 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9603 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9604 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9605 iocbq->iocb.unsli3.rcvsli3.ox_id); 9606 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9607 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9608 phba->vpi_ids[iocbq->vport->vpi]); 9609 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 9610 bf_set(wqe_iod, 
&wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9611 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9612 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9613 LPFC_WQE_LENLOC_WORD3); 9614 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9615 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 9616 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9617 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9618 iocbq->context2)->virt); 9619 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9620 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9621 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9622 iocbq->vport->fc_myDID); 9623 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9624 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9625 phba->vpi_ids[phba->pport->vpi]); 9626 } 9627 command_type = OTHER_COMMAND; 9628 break; 9629 case CMD_CLOSE_XRI_CN: 9630 case CMD_ABORT_XRI_CN: 9631 case CMD_ABORT_XRI_CX: 9632 /* words 0-2 memcpy should be 0 rserved */ 9633 /* port will send abts */ 9634 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9635 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9636 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9637 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9638 } else 9639 fip = 0; 9640 9641 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9642 /* 9643 * The link is down, or the command was ELS_FIP 9644 * so the fw does not need to send abts 9645 * on the wire. 9646 */ 9647 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9648 else 9649 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9650 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9651 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9652 wqe->abort_cmd.rsrvd5 = 0; 9653 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9654 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9655 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9656 /* 9657 * The abort handler will send us CMD_ABORT_XRI_CN or 9658 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9659 */ 9660 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9661 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9662 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9663 LPFC_WQE_LENLOC_NONE); 9664 cmnd = CMD_ABORT_XRI_CX; 9665 command_type = OTHER_COMMAND; 9666 xritag = 0; 9667 break; 9668 case CMD_XMIT_BLS_RSP64_CX: 9669 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9670 /* As BLS ABTS RSP WQE is very different from other WQEs, 9671 * we re-construct this WQE here based on information in 9672 * iocbq from scratch. 9673 */ 9674 memset(wqe, 0, sizeof(union lpfc_wqe)); 9675 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9676 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9677 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9678 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9679 LPFC_ABTS_UNSOL_INT) { 9680 /* ABTS sent by initiator to CT exchange, the 9681 * RX_ID field will be filled with the newly 9682 * allocated responder XRI. 9683 */ 9684 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9685 iocbq->sli4_xritag); 9686 } else { 9687 /* ABTS sent by responder to CT exchange, the 9688 * RX_ID field will be filled with the responder 9689 * RX_ID from ABTS. 
9690 */ 9691 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9692 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9693 } 9694 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9695 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9696 9697 /* Use CT=VPI */ 9698 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9699 ndlp->nlp_DID); 9700 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9701 iocbq->iocb.ulpContext); 9702 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9703 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9704 phba->vpi_ids[phba->pport->vpi]); 9705 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9706 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9707 LPFC_WQE_LENLOC_NONE); 9708 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9709 command_type = OTHER_COMMAND; 9710 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9711 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9712 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9713 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9714 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9715 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9716 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9717 } 9718 9719 break; 9720 case CMD_SEND_FRAME: 9721 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9722 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9723 return 0; 9724 case CMD_XRI_ABORTED_CX: 9725 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9726 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9727 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9728 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9729 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9730 default: 9731 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9732 "2014 Invalid command 0x%x\n", 9733 iocbq->iocb.ulpCommand); 9734 return IOCB_ERROR; 9735 break; 9736 } 9737 9738 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9739 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9740 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9741 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9742 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9743 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9744 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9745 LPFC_IO_DIF_INSERT); 9746 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9747 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9748 wqe->generic.wqe_com.abort_tag = abort_tag; 9749 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9750 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9751 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9752 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9753 return 0; 9754 } 9755 9756 /** 9757 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9758 * @phba: Pointer to HBA context object. 9759 * @ring_number: SLI ring number to issue iocb on. 9760 * @piocb: Pointer to command iocb. 9761 * @flag: Flag indicating if this command can be put into txq. 9762 * 9763 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9764 * an iocb command to an HBA with SLI-4 interface spec. 9765 * 9766 * This function is called with hbalock held. The function will return success 9767 * after it successfully submit the iocb to firmware or after adding to the 9768 * txq. 
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
		else
			wq = phba->sli4_hba.oas_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes.
	 */

	lockdep_assert_held(&phba->hbalock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * This routine wraps the actual lockless version for issuing an IOCB, using
 * the function pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}

/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ of the command they are
 * aborting, we use command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
			if (unlikely(!phba->sli4_hba.fcp_wq))
				return NULL;
			/*
			 * for abort iocb hba_wqidx should already
			 * be setup based on what work queue we used.
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
				piocb->hba_wqidx =
					lpfc_sli4_scmd_to_wqidx_distr(phba,
							      piocb->context1);
				piocb->hba_wqidx = piocb->hba_wqidx %
					phba->cfg_fcp_io_channel;
			}
			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
		} else {
			if (unlikely(!phba->sli4_hba.oas_wq))
				return NULL;
			piocb->hba_wqidx = 0;
			return phba->sli4_hba.oas_wq->pring;
		}
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}

/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the hbalock and calls
 * __lpfc_sli_issue_iocb function and will return the error returned
 * by __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold hbalock.
9962 **/ 9963 int 9964 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9965 struct lpfc_iocbq *piocb, uint32_t flag) 9966 { 9967 struct lpfc_hba_eq_hdl *hba_eq_hdl; 9968 struct lpfc_sli_ring *pring; 9969 struct lpfc_queue *fpeq; 9970 struct lpfc_eqe *eqe; 9971 unsigned long iflags; 9972 int rc, idx; 9973 9974 if (phba->sli_rev == LPFC_SLI_REV4) { 9975 pring = lpfc_sli4_calc_ring(phba, piocb); 9976 if (unlikely(pring == NULL)) 9977 return IOCB_ERROR; 9978 9979 spin_lock_irqsave(&pring->ring_lock, iflags); 9980 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9981 spin_unlock_irqrestore(&pring->ring_lock, iflags); 9982 9983 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { 9984 idx = piocb->hba_wqidx; 9985 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; 9986 9987 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { 9988 9989 /* Get associated EQ with this index */ 9990 fpeq = phba->sli4_hba.hba_eq[idx]; 9991 9992 /* Turn off interrupts from this EQ */ 9993 phba->sli4_hba.sli4_eq_clr_intr(fpeq); 9994 9995 /* 9996 * Process all the events on FCP EQ 9997 */ 9998 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 9999 lpfc_sli4_hba_handle_eqe(phba, 10000 eqe, idx); 10001 fpeq->EQ_processed++; 10002 } 10003 10004 /* Always clear and re-arm the EQ */ 10005 phba->sli4_hba.sli4_eq_release(fpeq, 10006 LPFC_QUEUE_REARM); 10007 } 10008 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 10009 } 10010 } else { 10011 /* For now, SLI2/3 will still use hbalock */ 10012 spin_lock_irqsave(&phba->hbalock, iflags); 10013 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 10014 spin_unlock_irqrestore(&phba->hbalock, iflags); 10015 } 10016 return rc; 10017 } 10018 10019 /** 10020 * lpfc_extra_ring_setup - Extra ring setup function 10021 * @phba: Pointer to HBA context object. 10022 * 10023 * This function is called while driver attaches with the 10024 * HBA to setup the extra ring. The extra ring is used 10025 * only when driver needs to support target mode functionality 10026 * or IP over FC functionalities. 10027 * 10028 * This function is called with no lock held. SLI3 only. 10029 **/ 10030 static int 10031 lpfc_extra_ring_setup( struct lpfc_hba *phba) 10032 { 10033 struct lpfc_sli *psli; 10034 struct lpfc_sli_ring *pring; 10035 10036 psli = &phba->sli; 10037 10038 /* Adjust cmd/rsp ring iocb entries more evenly */ 10039 10040 /* Take some away from the FCP ring */ 10041 pring = &psli->sli3_ring[LPFC_FCP_RING]; 10042 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10043 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10044 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10045 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10046 10047 /* and give them to the extra ring */ 10048 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 10049 10050 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10051 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10052 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10053 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10054 10055 /* Setup default profile for this ring */ 10056 pring->iotag_max = 4096; 10057 pring->num_mask = 1; 10058 pring->prt[0].profile = 0; /* Mask 0 */ 10059 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 10060 pring->prt[0].type = phba->cfg_multi_ring_type; 10061 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 10062 return 0; 10063 } 10064 10065 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 
10066 * @phba: Pointer to HBA context object. 10067 * @iocbq: Pointer to iocb object. 10068 * 10069 * The async_event handler calls this routine when it receives 10070 * an ASYNC_STATUS_CN event from the port. The port generates 10071 * this event when an Abort Sequence request to an rport fails 10072 * twice in succession. The abort could be originated by the 10073 * driver or by the port. The ABTS could have been for an ELS 10074 * or FCP IO. The port only generates this event when an ABTS 10075 * fails to complete after one retry. 10076 */ 10077 static void 10078 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 10079 struct lpfc_iocbq *iocbq) 10080 { 10081 struct lpfc_nodelist *ndlp = NULL; 10082 uint16_t rpi = 0, vpi = 0; 10083 struct lpfc_vport *vport = NULL; 10084 10085 /* The rpi in the ulpContext is vport-sensitive. */ 10086 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 10087 rpi = iocbq->iocb.ulpContext; 10088 10089 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10090 "3092 Port generated ABTS async event " 10091 "on vpi %d rpi %d status 0x%x\n", 10092 vpi, rpi, iocbq->iocb.ulpStatus); 10093 10094 vport = lpfc_find_vport_by_vpid(phba, vpi); 10095 if (!vport) 10096 goto err_exit; 10097 ndlp = lpfc_findnode_rpi(vport, rpi); 10098 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 10099 goto err_exit; 10100 10101 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 10102 lpfc_sli_abts_recover_port(vport, ndlp); 10103 return; 10104 10105 err_exit: 10106 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10107 "3095 Event Context not found, no " 10108 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 10109 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 10110 vpi, rpi); 10111 } 10112 10113 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 10114 * @phba: pointer to HBA context object. 10115 * @ndlp: nodelist pointer for the impacted rport. 10116 * @axri: pointer to the wcqe containing the failed exchange. 10117 * 10118 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 10119 * port. The port generates this event when an abort exchange request to an 10120 * rport fails twice in succession with no reply. The abort could be originated 10121 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 10122 */ 10123 void 10124 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 10125 struct lpfc_nodelist *ndlp, 10126 struct sli4_wcqe_xri_aborted *axri) 10127 { 10128 struct lpfc_vport *vport; 10129 uint32_t ext_status = 0; 10130 10131 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 10132 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10133 "3115 Node Context not found, driver " 10134 "ignoring abts err event\n"); 10135 return; 10136 } 10137 10138 vport = ndlp->vport; 10139 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 10140 "3116 Port generated FCP XRI ABORT event on " 10141 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 10142 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 10143 bf_get(lpfc_wcqe_xa_xri, axri), 10144 bf_get(lpfc_wcqe_xa_status, axri), 10145 axri->parameter); 10146 10147 /* 10148 * Catch the ABTS protocol failure case. Older OCe FW releases returned 10149 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 10150 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 
10151 */ 10152 ext_status = axri->parameter & IOERR_PARAM_MASK; 10153 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 10154 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 10155 lpfc_sli_abts_recover_port(vport, ndlp); 10156 } 10157 10158 /** 10159 * lpfc_sli_async_event_handler - ASYNC iocb handler function 10160 * @phba: Pointer to HBA context object. 10161 * @pring: Pointer to driver SLI ring object. 10162 * @iocbq: Pointer to iocb object. 10163 * 10164 * This function is called by the slow ring event handler 10165 * function when there is an ASYNC event iocb in the ring. 10166 * This function is called with no lock held. 10167 * Currently this function handles only temperature related 10168 * ASYNC events. The function decodes the temperature sensor 10169 * event message and posts events for the management applications. 10170 **/ 10171 static void 10172 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 10173 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 10174 { 10175 IOCB_t *icmd; 10176 uint16_t evt_code; 10177 struct temp_event temp_event_data; 10178 struct Scsi_Host *shost; 10179 uint32_t *iocb_w; 10180 10181 icmd = &iocbq->iocb; 10182 evt_code = icmd->un.asyncstat.evt_code; 10183 10184 switch (evt_code) { 10185 case ASYNC_TEMP_WARN: 10186 case ASYNC_TEMP_SAFE: 10187 temp_event_data.data = (uint32_t) icmd->ulpContext; 10188 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 10189 if (evt_code == ASYNC_TEMP_WARN) { 10190 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 10191 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10192 "0347 Adapter is very hot, please take " 10193 "corrective action. temperature : %d Celsius\n", 10194 (uint32_t) icmd->ulpContext); 10195 } else { 10196 temp_event_data.event_code = LPFC_NORMAL_TEMP; 10197 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 10198 "0340 Adapter temperature is OK now. " 10199 "temperature : %d Celsius\n", 10200 (uint32_t) icmd->ulpContext); 10201 } 10202 10203 /* Send temperature change event to applications */ 10204 shost = lpfc_shost_from_vport(phba->pport); 10205 fc_host_post_vendor_event(shost, fc_get_event_number(), 10206 sizeof(temp_event_data), (char *) &temp_event_data, 10207 LPFC_NL_VENDOR_ID); 10208 break; 10209 case ASYNC_STATUS_CN: 10210 lpfc_sli_abts_err_handler(phba, iocbq); 10211 break; 10212 default: 10213 iocb_w = (uint32_t *) icmd; 10214 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10215 "0346 Ring %d handler: unexpected ASYNC_STATUS" 10216 " evt_code 0x%x\n" 10217 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 10218 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 10219 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 10220 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 10221 pring->ringno, icmd->un.asyncstat.evt_code, 10222 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 10223 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 10224 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 10225 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 10226 10227 break; 10228 } 10229 } 10230 10231 10232 /** 10233 * lpfc_sli4_setup - SLI ring setup function 10234 * @phba: Pointer to HBA context object. 10235 * 10236 * lpfc_sli_setup sets up rings of the SLI interface with 10237 * number of iocbs per ring and iotags. This function is 10238 * called while driver attach to the HBA and before the 10239 * interrupts are enabled. So there is no need for locking. 10240 * 10241 * This function always returns 0. 
10242 **/ 10243 int 10244 lpfc_sli4_setup(struct lpfc_hba *phba) 10245 { 10246 struct lpfc_sli_ring *pring; 10247 10248 pring = phba->sli4_hba.els_wq->pring; 10249 pring->num_mask = LPFC_MAX_RING_MASK; 10250 pring->prt[0].profile = 0; /* Mask 0 */ 10251 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 10252 pring->prt[0].type = FC_TYPE_ELS; 10253 pring->prt[0].lpfc_sli_rcv_unsol_event = 10254 lpfc_els_unsol_event; 10255 pring->prt[1].profile = 0; /* Mask 1 */ 10256 pring->prt[1].rctl = FC_RCTL_ELS_REP; 10257 pring->prt[1].type = FC_TYPE_ELS; 10258 pring->prt[1].lpfc_sli_rcv_unsol_event = 10259 lpfc_els_unsol_event; 10260 pring->prt[2].profile = 0; /* Mask 2 */ 10261 /* NameServer Inquiry */ 10262 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 10263 /* NameServer */ 10264 pring->prt[2].type = FC_TYPE_CT; 10265 pring->prt[2].lpfc_sli_rcv_unsol_event = 10266 lpfc_ct_unsol_event; 10267 pring->prt[3].profile = 0; /* Mask 3 */ 10268 /* NameServer response */ 10269 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 10270 /* NameServer */ 10271 pring->prt[3].type = FC_TYPE_CT; 10272 pring->prt[3].lpfc_sli_rcv_unsol_event = 10273 lpfc_ct_unsol_event; 10274 return 0; 10275 } 10276 10277 /** 10278 * lpfc_sli_setup - SLI ring setup function 10279 * @phba: Pointer to HBA context object. 10280 * 10281 * lpfc_sli_setup sets up rings of the SLI interface with 10282 * number of iocbs per ring and iotags. This function is 10283 * called while driver attach to the HBA and before the 10284 * interrupts are enabled. So there is no need for locking. 10285 * 10286 * This function always returns 0. SLI3 only. 10287 **/ 10288 int 10289 lpfc_sli_setup(struct lpfc_hba *phba) 10290 { 10291 int i, totiocbsize = 0; 10292 struct lpfc_sli *psli = &phba->sli; 10293 struct lpfc_sli_ring *pring; 10294 10295 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 10296 psli->sli_flag = 0; 10297 10298 psli->iocbq_lookup = NULL; 10299 psli->iocbq_lookup_len = 0; 10300 psli->last_iotag = 0; 10301 10302 for (i = 0; i < psli->num_rings; i++) { 10303 pring = &psli->sli3_ring[i]; 10304 switch (i) { 10305 case LPFC_FCP_RING: /* ring 0 - FCP */ 10306 /* numCiocb and numRiocb are used in config_port */ 10307 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 10308 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 10309 pring->sli.sli3.numCiocb += 10310 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 10311 pring->sli.sli3.numRiocb += 10312 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 10313 pring->sli.sli3.numCiocb += 10314 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 10315 pring->sli.sli3.numRiocb += 10316 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 10317 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10318 SLI3_IOCB_CMD_SIZE : 10319 SLI2_IOCB_CMD_SIZE; 10320 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 10321 SLI3_IOCB_RSP_SIZE : 10322 SLI2_IOCB_RSP_SIZE; 10323 pring->iotag_ctr = 0; 10324 pring->iotag_max = 10325 (phba->cfg_hba_queue_depth * 2); 10326 pring->fast_iotag = pring->iotag_max; 10327 pring->num_mask = 0; 10328 break; 10329 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 10330 /* numCiocb and numRiocb are used in config_port */ 10331 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 10332 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 10333 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 10334 SLI3_IOCB_CMD_SIZE : 10335 SLI2_IOCB_CMD_SIZE; 10336 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
						SLI3_IOCB_RSP_SIZE :
						SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
10411 **/ 10412 void 10413 lpfc_sli4_queue_init(struct lpfc_hba *phba) 10414 { 10415 struct lpfc_sli *psli; 10416 struct lpfc_sli_ring *pring; 10417 int i; 10418 10419 psli = &phba->sli; 10420 spin_lock_irq(&phba->hbalock); 10421 INIT_LIST_HEAD(&psli->mboxq); 10422 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10423 /* Initialize list headers for txq and txcmplq as double linked lists */ 10424 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 10425 pring = phba->sli4_hba.fcp_wq[i]->pring; 10426 pring->flag = 0; 10427 pring->ringno = LPFC_FCP_RING; 10428 INIT_LIST_HEAD(&pring->txq); 10429 INIT_LIST_HEAD(&pring->txcmplq); 10430 INIT_LIST_HEAD(&pring->iocb_continueq); 10431 spin_lock_init(&pring->ring_lock); 10432 } 10433 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 10434 pring = phba->sli4_hba.nvme_wq[i]->pring; 10435 pring->flag = 0; 10436 pring->ringno = LPFC_FCP_RING; 10437 INIT_LIST_HEAD(&pring->txq); 10438 INIT_LIST_HEAD(&pring->txcmplq); 10439 INIT_LIST_HEAD(&pring->iocb_continueq); 10440 spin_lock_init(&pring->ring_lock); 10441 } 10442 pring = phba->sli4_hba.els_wq->pring; 10443 pring->flag = 0; 10444 pring->ringno = LPFC_ELS_RING; 10445 INIT_LIST_HEAD(&pring->txq); 10446 INIT_LIST_HEAD(&pring->txcmplq); 10447 INIT_LIST_HEAD(&pring->iocb_continueq); 10448 spin_lock_init(&pring->ring_lock); 10449 10450 if (phba->cfg_nvme_io_channel) { 10451 pring = phba->sli4_hba.nvmels_wq->pring; 10452 pring->flag = 0; 10453 pring->ringno = LPFC_ELS_RING; 10454 INIT_LIST_HEAD(&pring->txq); 10455 INIT_LIST_HEAD(&pring->txcmplq); 10456 INIT_LIST_HEAD(&pring->iocb_continueq); 10457 spin_lock_init(&pring->ring_lock); 10458 } 10459 10460 if (phba->cfg_fof) { 10461 pring = phba->sli4_hba.oas_wq->pring; 10462 pring->flag = 0; 10463 pring->ringno = LPFC_FCP_RING; 10464 INIT_LIST_HEAD(&pring->txq); 10465 INIT_LIST_HEAD(&pring->txcmplq); 10466 INIT_LIST_HEAD(&pring->iocb_continueq); 10467 spin_lock_init(&pring->ring_lock); 10468 } 10469 10470 spin_unlock_irq(&phba->hbalock); 10471 } 10472 10473 /** 10474 * lpfc_sli_queue_init - Queue initialization function 10475 * @phba: Pointer to HBA context object. 10476 * 10477 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 10478 * ring. This function also initializes ring indices of each ring. 10479 * This function is called during the initialization of the SLI 10480 * interface of an HBA. 10481 * This function is called with no lock held and always returns 10482 * 1. 10483 **/ 10484 void 10485 lpfc_sli_queue_init(struct lpfc_hba *phba) 10486 { 10487 struct lpfc_sli *psli; 10488 struct lpfc_sli_ring *pring; 10489 int i; 10490 10491 psli = &phba->sli; 10492 spin_lock_irq(&phba->hbalock); 10493 INIT_LIST_HEAD(&psli->mboxq); 10494 INIT_LIST_HEAD(&psli->mboxq_cmpl); 10495 /* Initialize list headers for txq and txcmplq as double linked lists */ 10496 for (i = 0; i < psli->num_rings; i++) { 10497 pring = &psli->sli3_ring[i]; 10498 pring->ringno = i; 10499 pring->sli.sli3.next_cmdidx = 0; 10500 pring->sli.sli3.local_getidx = 0; 10501 pring->sli.sli3.cmdidx = 0; 10502 INIT_LIST_HEAD(&pring->iocb_continueq); 10503 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 10504 INIT_LIST_HEAD(&pring->postbufq); 10505 pring->flag = 0; 10506 INIT_LIST_HEAD(&pring->txq); 10507 INIT_LIST_HEAD(&pring->txcmplq); 10508 spin_lock_init(&pring->ring_lock); 10509 } 10510 spin_unlock_irq(&phba->hbalock); 10511 } 10512 10513 /** 10514 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 10515 * @phba: Pointer to HBA context object. 
10516 * 10517 * This routine flushes the mailbox command subsystem. It will unconditionally 10518 * flush all the mailbox commands in the three possible stages in the mailbox 10519 * command sub-system: pending mailbox command queue; the outstanding mailbox 10520 * command; and completed mailbox command queue. It is caller's responsibility 10521 * to make sure that the driver is in the proper state to flush the mailbox 10522 * command sub-system. Namely, the posting of mailbox commands into the 10523 * pending mailbox command queue from the various clients must be stopped; 10524 * either the HBA is in a state that it will never works on the outstanding 10525 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10526 * mailbox command has been completed. 10527 **/ 10528 static void 10529 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10530 { 10531 LIST_HEAD(completions); 10532 struct lpfc_sli *psli = &phba->sli; 10533 LPFC_MBOXQ_t *pmb; 10534 unsigned long iflag; 10535 10536 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10537 local_bh_disable(); 10538 10539 /* Flush all the mailbox commands in the mbox system */ 10540 spin_lock_irqsave(&phba->hbalock, iflag); 10541 10542 /* The pending mailbox command queue */ 10543 list_splice_init(&phba->sli.mboxq, &completions); 10544 /* The outstanding active mailbox command */ 10545 if (psli->mbox_active) { 10546 list_add_tail(&psli->mbox_active->list, &completions); 10547 psli->mbox_active = NULL; 10548 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10549 } 10550 /* The completed mailbox command queue */ 10551 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10552 spin_unlock_irqrestore(&phba->hbalock, iflag); 10553 10554 /* Enable softirqs again, done with phba->hbalock */ 10555 local_bh_enable(); 10556 10557 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10558 while (!list_empty(&completions)) { 10559 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10560 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10561 if (pmb->mbox_cmpl) 10562 pmb->mbox_cmpl(phba, pmb); 10563 } 10564 } 10565 10566 /** 10567 * lpfc_sli_host_down - Vport cleanup function 10568 * @vport: Pointer to virtual port object. 10569 * 10570 * lpfc_sli_host_down is called to clean up the resources 10571 * associated with a vport before destroying virtual 10572 * port data structures. 10573 * This function does following operations: 10574 * - Free discovery resources associated with this virtual 10575 * port. 10576 * - Free iocbs associated with this virtual port in 10577 * the txq. 10578 * - Send abort for all iocb commands associated with this 10579 * vport in txcmplq. 10580 * 10581 * This function is called with no lock held and always returns 1. 10582 **/ 10583 int 10584 lpfc_sli_host_down(struct lpfc_vport *vport) 10585 { 10586 LIST_HEAD(completions); 10587 struct lpfc_hba *phba = vport->phba; 10588 struct lpfc_sli *psli = &phba->sli; 10589 struct lpfc_queue *qp = NULL; 10590 struct lpfc_sli_ring *pring; 10591 struct lpfc_iocbq *iocb, *next_iocb; 10592 int i; 10593 unsigned long flags = 0; 10594 uint16_t prev_pring_flag; 10595 10596 lpfc_cleanup_discovery_resources(vport); 10597 10598 spin_lock_irqsave(&phba->hbalock, flags); 10599 10600 /* 10601 * Error everything on the txq since these iocbs 10602 * have not been given to the FW yet. 
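 * Those can therefore simply be completed locally with an error status.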
10603 * Also issue ABTS for everything on the txcmplq 10604 */ 10605 if (phba->sli_rev != LPFC_SLI_REV4) { 10606 for (i = 0; i < psli->num_rings; i++) { 10607 pring = &psli->sli3_ring[i]; 10608 prev_pring_flag = pring->flag; 10609 /* Only slow rings */ 10610 if (pring->ringno == LPFC_ELS_RING) { 10611 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10612 /* Set the lpfc data pending flag */ 10613 set_bit(LPFC_DATA_READY, &phba->data_flags); 10614 } 10615 list_for_each_entry_safe(iocb, next_iocb, 10616 &pring->txq, list) { 10617 if (iocb->vport != vport) 10618 continue; 10619 list_move_tail(&iocb->list, &completions); 10620 } 10621 list_for_each_entry_safe(iocb, next_iocb, 10622 &pring->txcmplq, list) { 10623 if (iocb->vport != vport) 10624 continue; 10625 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10626 } 10627 pring->flag = prev_pring_flag; 10628 } 10629 } else { 10630 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10631 pring = qp->pring; 10632 if (!pring) 10633 continue; 10634 if (pring == phba->sli4_hba.els_wq->pring) { 10635 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10636 /* Set the lpfc data pending flag */ 10637 set_bit(LPFC_DATA_READY, &phba->data_flags); 10638 } 10639 prev_pring_flag = pring->flag; 10640 spin_lock_irq(&pring->ring_lock); 10641 list_for_each_entry_safe(iocb, next_iocb, 10642 &pring->txq, list) { 10643 if (iocb->vport != vport) 10644 continue; 10645 list_move_tail(&iocb->list, &completions); 10646 } 10647 spin_unlock_irq(&pring->ring_lock); 10648 list_for_each_entry_safe(iocb, next_iocb, 10649 &pring->txcmplq, list) { 10650 if (iocb->vport != vport) 10651 continue; 10652 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10653 } 10654 pring->flag = prev_pring_flag; 10655 } 10656 } 10657 spin_unlock_irqrestore(&phba->hbalock, flags); 10658 10659 /* Cancel all the IOCBs from the completions list */ 10660 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10661 IOERR_SLI_DOWN); 10662 return 1; 10663 } 10664 10665 /** 10666 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10667 * @phba: Pointer to HBA context object. 10668 * 10669 * This function cleans up all iocb, buffers, mailbox commands 10670 * while shutting down the HBA. This function is called with no 10671 * lock held and always returns 1. 10672 * This function does the following to cleanup driver resources: 10673 * - Free discovery resources for each virtual port 10674 * - Cleanup any pending fabric iocbs 10675 * - Iterate through the iocb txq and free each entry 10676 * in the list. 10677 * - Free up any buffer posted to the HBA 10678 * - Free mailbox commands in the mailbox queue. 10679 **/ 10680 int 10681 lpfc_sli_hba_down(struct lpfc_hba *phba) 10682 { 10683 LIST_HEAD(completions); 10684 struct lpfc_sli *psli = &phba->sli; 10685 struct lpfc_queue *qp = NULL; 10686 struct lpfc_sli_ring *pring; 10687 struct lpfc_dmabuf *buf_ptr; 10688 unsigned long flags = 0; 10689 int i; 10690 10691 /* Shutdown the mailbox command sub-system */ 10692 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10693 10694 lpfc_hba_down_prep(phba); 10695 10696 /* Disable softirqs, including timers from obtaining phba->hbalock */ 10697 local_bh_disable(); 10698 10699 lpfc_fabric_abort_hba(phba); 10700 10701 spin_lock_irqsave(&phba->hbalock, flags); 10702 10703 /* 10704 * Error everything on the txq since these iocbs 10705 * have not been given to the FW yet. 
10706 */ 10707 if (phba->sli_rev != LPFC_SLI_REV4) { 10708 for (i = 0; i < psli->num_rings; i++) { 10709 pring = &psli->sli3_ring[i]; 10710 /* Only slow rings */ 10711 if (pring->ringno == LPFC_ELS_RING) { 10712 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10713 /* Set the lpfc data pending flag */ 10714 set_bit(LPFC_DATA_READY, &phba->data_flags); 10715 } 10716 list_splice_init(&pring->txq, &completions); 10717 } 10718 } else { 10719 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10720 pring = qp->pring; 10721 if (!pring) 10722 continue; 10723 spin_lock_irq(&pring->ring_lock); 10724 list_splice_init(&pring->txq, &completions); 10725 spin_unlock_irq(&pring->ring_lock); 10726 if (pring == phba->sli4_hba.els_wq->pring) { 10727 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10728 /* Set the lpfc data pending flag */ 10729 set_bit(LPFC_DATA_READY, &phba->data_flags); 10730 } 10731 } 10732 } 10733 spin_unlock_irqrestore(&phba->hbalock, flags); 10734 10735 /* Cancel all the IOCBs from the completions list */ 10736 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10737 IOERR_SLI_DOWN); 10738 10739 spin_lock_irqsave(&phba->hbalock, flags); 10740 list_splice_init(&phba->elsbuf, &completions); 10741 phba->elsbuf_cnt = 0; 10742 phba->elsbuf_prev_cnt = 0; 10743 spin_unlock_irqrestore(&phba->hbalock, flags); 10744 10745 while (!list_empty(&completions)) { 10746 list_remove_head(&completions, buf_ptr, 10747 struct lpfc_dmabuf, list); 10748 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10749 kfree(buf_ptr); 10750 } 10751 10752 /* Enable softirqs again, done with phba->hbalock */ 10753 local_bh_enable(); 10754 10755 /* Return any active mbox cmds */ 10756 del_timer_sync(&psli->mbox_tmo); 10757 10758 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10759 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10760 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10761 10762 return 1; 10763 } 10764 10765 /** 10766 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10767 * @srcp: Source memory pointer. 10768 * @destp: Destination memory pointer. 10769 * @cnt: Number of words required to be copied. 10770 * 10771 * This function is used for copying data between driver memory 10772 * and the SLI memory. This function also changes the endianness 10773 * of each word if native endianness is different from SLI 10774 * endianness. This function can be called with or without 10775 * lock. 10776 **/ 10777 void 10778 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10779 { 10780 uint32_t *src = srcp; 10781 uint32_t *dest = destp; 10782 uint32_t ldata; 10783 int i; 10784 10785 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10786 ldata = *src; 10787 ldata = le32_to_cpu(ldata); 10788 *dest = ldata; 10789 src++; 10790 dest++; 10791 } 10792 } 10793 10794 10795 /** 10796 * lpfc_sli_bemem_bcopy - SLI memory copy function 10797 * @srcp: Source memory pointer. 10798 * @destp: Destination memory pointer. 10799 * @cnt: Number of words required to be copied. 10800 * 10801 * This function is used for copying data between a data structure 10802 * with big endian representation to local endianness. 10803 * This function can be called with or without lock. 
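 *
 * Hypothetical sketch (identifiers below are illustrative, not taken from
 * the driver): converting a big-endian object read from the port into
 * host byte order. The loop below walks @cnt bytes one uint32_t at a
 * time, so @cnt should be a multiple of sizeof(uint32_t):
 *
 *	uint32_t host_copy[64];
 *
 *	lpfc_sli_bemem_bcopy(be_region, host_copy, sizeof(host_copy));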
10804 **/
10805 void
10806 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10807 {
10808 uint32_t *src = srcp;
10809 uint32_t *dest = destp;
10810 uint32_t ldata;
10811 int i;
10812
10813 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10814 ldata = *src;
10815 ldata = be32_to_cpu(ldata);
10816 *dest = ldata;
10817 src++;
10818 dest++;
10819 }
10820 }
10821
10822 /**
10823 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10824 * @phba: Pointer to HBA context object.
10825 * @pring: Pointer to driver SLI ring object.
10826 * @mp: Pointer to driver buffer object.
10827 *
10828 * This function is called with no lock held.
10829 * It always returns zero after adding the buffer to the postbufq
10830 * buffer list.
10831 **/
10832 int
10833 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10834 struct lpfc_dmabuf *mp)
10835 {
10836 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10837 later */
10838 spin_lock_irq(&phba->hbalock);
10839 list_add_tail(&mp->list, &pring->postbufq);
10840 pring->postbufq_cnt++;
10841 spin_unlock_irq(&phba->hbalock);
10842 return 0;
10843 }
10844
10845 /**
10846 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10847 * @phba: Pointer to HBA context object.
10848 *
10849 * When HBQ is enabled, buffers are searched based on tags. This function
10850 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
10851 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10852 * does not conflict with tags of buffers posted for unsolicited events.
10853 * The function returns the allocated tag. The function is called with
10854 * no locks held.
10855 **/
10856 uint32_t
10857 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10858 {
10859 spin_lock_irq(&phba->hbalock);
10860 phba->buffer_tag_count++;
10861 /*
10862 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
10863 * a tag assigned by HBQ.
10864 */
10865 phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10866 spin_unlock_irq(&phba->hbalock);
10867 return phba->buffer_tag_count;
10868 }
10869
10870 /**
10871 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10872 * @phba: Pointer to HBA context object.
10873 * @pring: Pointer to driver SLI ring object.
10874 * @tag: Buffer tag.
10875 *
10876 * Buffers posted using the CMD_QUE_XRI64_CX iocb are kept on the
10877 * pring->postbufq list. After the HBA DMAs data to these buffers, a
10878 * CMD_IOCB_RET_XRI64_CX iocb is posted to the response ring with the tag
10879 * of the buffer. This function searches the pring->postbufq list using
10880 * the tag to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
10881 * iocb. If the buffer is found, the lpfc_dmabuf object of the
10882 * buffer is returned to the caller; otherwise NULL is returned.
10883 * This function is called with no lock held.
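 *
 * Illustrative round trip (a sketch of an assumed caller, built only from
 * routines defined above): the buffer is posted with a driver-generated
 * tag and reclaimed when the CMD_IOCB_RET_XRI64_CX completion returns
 * that tag:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_rsp_iocb);
 *
 * where tag_from_rsp_iocb is the tag carried back by the response iocb.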
10884 **/ 10885 struct lpfc_dmabuf * 10886 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10887 uint32_t tag) 10888 { 10889 struct lpfc_dmabuf *mp, *next_mp; 10890 struct list_head *slp = &pring->postbufq; 10891 10892 /* Search postbufq, from the beginning, looking for a match on tag */ 10893 spin_lock_irq(&phba->hbalock); 10894 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10895 if (mp->buffer_tag == tag) { 10896 list_del_init(&mp->list); 10897 pring->postbufq_cnt--; 10898 spin_unlock_irq(&phba->hbalock); 10899 return mp; 10900 } 10901 } 10902 10903 spin_unlock_irq(&phba->hbalock); 10904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10905 "0402 Cannot find virtual addr for buffer tag on " 10906 "ring %d Data x%lx x%p x%p x%x\n", 10907 pring->ringno, (unsigned long) tag, 10908 slp->next, slp->prev, pring->postbufq_cnt); 10909 10910 return NULL; 10911 } 10912 10913 /** 10914 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10915 * @phba: Pointer to HBA context object. 10916 * @pring: Pointer to driver SLI ring object. 10917 * @phys: DMA address of the buffer. 10918 * 10919 * This function searches the buffer list using the dma_address 10920 * of unsolicited event to find the driver's lpfc_dmabuf object 10921 * corresponding to the dma_address. The function returns the 10922 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10923 * This function is called by the ct and els unsolicited event 10924 * handlers to get the buffer associated with the unsolicited 10925 * event. 10926 * 10927 * This function is called with no lock held. 10928 **/ 10929 struct lpfc_dmabuf * 10930 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10931 dma_addr_t phys) 10932 { 10933 struct lpfc_dmabuf *mp, *next_mp; 10934 struct list_head *slp = &pring->postbufq; 10935 10936 /* Search postbufq, from the beginning, looking for a match on phys */ 10937 spin_lock_irq(&phba->hbalock); 10938 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10939 if (mp->phys == phys) { 10940 list_del_init(&mp->list); 10941 pring->postbufq_cnt--; 10942 spin_unlock_irq(&phba->hbalock); 10943 return mp; 10944 } 10945 } 10946 10947 spin_unlock_irq(&phba->hbalock); 10948 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10949 "0410 Cannot find virtual addr for mapped buf on " 10950 "ring %d Data x%llx x%p x%p x%x\n", 10951 pring->ringno, (unsigned long long)phys, 10952 slp->next, slp->prev, pring->postbufq_cnt); 10953 return NULL; 10954 } 10955 10956 /** 10957 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 10958 * @phba: Pointer to HBA context object. 10959 * @cmdiocb: Pointer to driver command iocb object. 10960 * @rspiocb: Pointer to driver response iocb object. 10961 * 10962 * This function is the completion handler for the abort iocbs for 10963 * ELS commands. This function is called from the ELS ring event 10964 * handler with no lock held. This function frees memory resources 10965 * associated with the abort iocb. 10966 **/ 10967 static void 10968 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10969 struct lpfc_iocbq *rspiocb) 10970 { 10971 IOCB_t *irsp = &rspiocb->iocb; 10972 uint16_t abort_iotag, abort_context; 10973 struct lpfc_iocbq *abort_iocb = NULL; 10974 10975 if (irsp->ulpStatus) { 10976 10977 /* 10978 * Assume that the port already completed and returned, or 10979 * will return the iocb. Just Log the message. 
10980 */ 10981 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 10982 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 10983 10984 spin_lock_irq(&phba->hbalock); 10985 if (phba->sli_rev < LPFC_SLI_REV4) { 10986 if (irsp->ulpCommand == CMD_ABORT_XRI_CX && 10987 irsp->ulpStatus == IOSTAT_LOCAL_REJECT && 10988 irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { 10989 spin_unlock_irq(&phba->hbalock); 10990 goto release_iocb; 10991 } 10992 if (abort_iotag != 0 && 10993 abort_iotag <= phba->sli.last_iotag) 10994 abort_iocb = 10995 phba->sli.iocbq_lookup[abort_iotag]; 10996 } else 10997 /* For sli4 the abort_tag is the XRI, 10998 * so the abort routine puts the iotag of the iocb 10999 * being aborted in the context field of the abort 11000 * IOCB. 11001 */ 11002 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 11003 11004 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 11005 "0327 Cannot abort els iocb %p " 11006 "with tag %x context %x, abort status %x, " 11007 "abort code %x\n", 11008 abort_iocb, abort_iotag, abort_context, 11009 irsp->ulpStatus, irsp->un.ulpWord[4]); 11010 11011 spin_unlock_irq(&phba->hbalock); 11012 } 11013 release_iocb: 11014 lpfc_sli_release_iocbq(phba, cmdiocb); 11015 return; 11016 } 11017 11018 /** 11019 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 11020 * @phba: Pointer to HBA context object. 11021 * @cmdiocb: Pointer to driver command iocb object. 11022 * @rspiocb: Pointer to driver response iocb object. 11023 * 11024 * The function is called from SLI ring event handler with no 11025 * lock held. This function is the completion handler for ELS commands 11026 * which are aborted. The function frees memory resources used for 11027 * the aborted ELS commands. 11028 **/ 11029 static void 11030 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11031 struct lpfc_iocbq *rspiocb) 11032 { 11033 IOCB_t *irsp = &rspiocb->iocb; 11034 11035 /* ELS cmd tag <ulpIoTag> completes */ 11036 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 11037 "0139 Ignoring ELS cmd tag x%x completion Data: " 11038 "x%x x%x x%x\n", 11039 irsp->ulpIoTag, irsp->ulpStatus, 11040 irsp->un.ulpWord[4], irsp->ulpTimeout); 11041 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 11042 lpfc_ct_free_iocb(phba, cmdiocb); 11043 else 11044 lpfc_els_free_iocb(phba, cmdiocb); 11045 return; 11046 } 11047 11048 /** 11049 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 11050 * @phba: Pointer to HBA context object. 11051 * @pring: Pointer to driver SLI ring object. 11052 * @cmdiocb: Pointer to driver command iocb object. 11053 * 11054 * This function issues an abort iocb for the provided command iocb down to 11055 * the port. Other than the case the outstanding command iocb is an abort 11056 * request, this function issues abort out unconditionally. This function is 11057 * called with hbalock held. The function returns 0 when it fails due to 11058 * memory allocation failure or when the command iocb is an abort request. 11059 **/ 11060 static int 11061 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11062 struct lpfc_iocbq *cmdiocb) 11063 { 11064 struct lpfc_vport *vport = cmdiocb->vport; 11065 struct lpfc_iocbq *abtsiocbp; 11066 IOCB_t *icmd = NULL; 11067 IOCB_t *iabt = NULL; 11068 int retval; 11069 unsigned long iflags; 11070 struct lpfc_nodelist *ndlp; 11071 11072 lockdep_assert_held(&phba->hbalock); 11073 11074 /* 11075 * There are certain command types we don't want to abort. 
And we 11076 * don't want to abort commands that are already in the process of 11077 * being aborted. 11078 */ 11079 icmd = &cmdiocb->iocb; 11080 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11081 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11082 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11083 return 0; 11084 11085 /* issue ABTS for this IOCB based on iotag */ 11086 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11087 if (abtsiocbp == NULL) 11088 return 0; 11089 11090 /* This signals the response to set the correct status 11091 * before calling the completion handler 11092 */ 11093 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11094 11095 iabt = &abtsiocbp->iocb; 11096 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 11097 iabt->un.acxri.abortContextTag = icmd->ulpContext; 11098 if (phba->sli_rev == LPFC_SLI_REV4) { 11099 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 11100 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 11101 } else { 11102 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 11103 if (pring->ringno == LPFC_ELS_RING) { 11104 ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); 11105 iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; 11106 } 11107 } 11108 iabt->ulpLe = 1; 11109 iabt->ulpClass = icmd->ulpClass; 11110 11111 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11112 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx; 11113 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 11114 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 11115 if (cmdiocb->iocb_flag & LPFC_IO_FOF) 11116 abtsiocbp->iocb_flag |= LPFC_IO_FOF; 11117 11118 if (phba->link_state >= LPFC_LINK_UP) 11119 iabt->ulpCommand = CMD_ABORT_XRI_CN; 11120 else 11121 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 11122 11123 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 11124 abtsiocbp->vport = vport; 11125 11126 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 11127 "0339 Abort xri x%x, original iotag x%x, " 11128 "abort cmd iotag x%x\n", 11129 iabt->un.acxri.abortIoTag, 11130 iabt->un.acxri.abortContextTag, 11131 abtsiocbp->iotag); 11132 11133 if (phba->sli_rev == LPFC_SLI_REV4) { 11134 pring = lpfc_sli4_calc_ring(phba, abtsiocbp); 11135 if (unlikely(pring == NULL)) 11136 return 0; 11137 /* Note: both hbalock and ring_lock need to be set here */ 11138 spin_lock_irqsave(&pring->ring_lock, iflags); 11139 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11140 abtsiocbp, 0); 11141 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11142 } else { 11143 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 11144 abtsiocbp, 0); 11145 } 11146 11147 if (retval) 11148 __lpfc_sli_release_iocbq(phba, abtsiocbp); 11149 11150 /* 11151 * Caller to this routine should check for IOCB_ERROR 11152 * and handle it properly. This routine no longer removes 11153 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11154 */ 11155 return retval; 11156 } 11157 11158 /** 11159 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 11160 * @phba: Pointer to HBA context object. 11161 * @pring: Pointer to driver SLI ring object. 11162 * @cmdiocb: Pointer to driver command iocb object. 11163 * 11164 * This function issues an abort iocb for the provided command iocb. In case 11165 * of unloading, the abort iocb will not be issued to commands on the ELS 11166 * ring. Instead, the callback function shall be changed to those commands 11167 * so that nothing happens when them finishes. This function is called with 11168 * hbalock held. The function returns 0 when the command iocb is an abort 11169 * request. 
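 *
 * Minimal usage sketch, mirroring the txcmplq walkers earlier in this
 * file; the routine must be entered with the hbalock held:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irq(&phba->hbalock);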
11170 **/ 11171 int 11172 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11173 struct lpfc_iocbq *cmdiocb) 11174 { 11175 struct lpfc_vport *vport = cmdiocb->vport; 11176 int retval = IOCB_ERROR; 11177 IOCB_t *icmd = NULL; 11178 11179 lockdep_assert_held(&phba->hbalock); 11180 11181 /* 11182 * There are certain command types we don't want to abort. And we 11183 * don't want to abort commands that are already in the process of 11184 * being aborted. 11185 */ 11186 icmd = &cmdiocb->iocb; 11187 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 11188 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 11189 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11190 return 0; 11191 11192 if (!pring) { 11193 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11194 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11195 else 11196 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11197 goto abort_iotag_exit; 11198 } 11199 11200 /* 11201 * If we're unloading, don't abort iocb on the ELS ring, but change 11202 * the callback so that nothing happens when it finishes. 11203 */ 11204 if ((vport->load_flag & FC_UNLOADING) && 11205 (pring->ringno == LPFC_ELS_RING)) { 11206 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 11207 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 11208 else 11209 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 11210 goto abort_iotag_exit; 11211 } 11212 11213 /* Now, we try to issue the abort to the cmdiocb out */ 11214 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 11215 11216 abort_iotag_exit: 11217 /* 11218 * Caller to this routine should check for IOCB_ERROR 11219 * and handle it properly. This routine no longer removes 11220 * iocb off txcmplq and call compl in case of IOCB_ERROR. 11221 */ 11222 return retval; 11223 } 11224 11225 /** 11226 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb 11227 * @phba: Pointer to HBA context object. 11228 * @pring: Pointer to driver SLI ring object. 11229 * @cmdiocb: Pointer to driver command iocb object. 11230 * 11231 * This function issues an abort iocb for the provided command iocb down to 11232 * the port. Other than the case the outstanding command iocb is an abort 11233 * request, this function issues abort out unconditionally. This function is 11234 * called with hbalock held. The function returns 0 when it fails due to 11235 * memory allocation failure or when the command iocb is an abort request. 11236 **/ 11237 static int 11238 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 11239 struct lpfc_iocbq *cmdiocb) 11240 { 11241 struct lpfc_vport *vport = cmdiocb->vport; 11242 struct lpfc_iocbq *abtsiocbp; 11243 union lpfc_wqe128 *abts_wqe; 11244 int retval; 11245 11246 /* 11247 * There are certain command types we don't want to abort. And we 11248 * don't want to abort commands that are already in the process of 11249 * being aborted. 11250 */ 11251 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 11252 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 11253 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 11254 return 0; 11255 11256 /* issue ABTS for this io based on iotag */ 11257 abtsiocbp = __lpfc_sli_get_iocbq(phba); 11258 if (abtsiocbp == NULL) 11259 return 0; 11260 11261 /* This signals the response to set the correct status 11262 * before calling the completion handler 11263 */ 11264 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 11265 11266 /* Complete prepping the abort wqe and issue to the FW. 
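 * The WQE fields are filled in word by word below; word 8 carries the
 * XRI of the exchange being aborted and word 9 the iotag used for the
 * abort completion.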
*/ 11267 abts_wqe = &abtsiocbp->wqe; 11268 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); 11269 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 11270 11271 /* Explicitly set reserved fields to zero.*/ 11272 abts_wqe->abort_cmd.rsrvd4 = 0; 11273 abts_wqe->abort_cmd.rsrvd5 = 0; 11274 11275 /* WQE Common - word 6. Context is XRI tag. Set 0. */ 11276 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); 11277 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); 11278 11279 /* word 7 */ 11280 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 11281 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 11282 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, 11283 cmdiocb->iocb.ulpClass); 11284 11285 /* word 8 - tell the FW to abort the IO associated with this 11286 * outstanding exchange ID. 11287 */ 11288 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; 11289 11290 /* word 9 - this is the iotag for the abts_wqe completion. */ 11291 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 11292 abtsiocbp->iotag); 11293 11294 /* word 10 */ 11295 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); 11296 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 11297 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 11298 11299 /* word 11 */ 11300 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 11301 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 11302 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 11303 11304 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11305 abtsiocbp->iocb_flag |= LPFC_IO_NVME; 11306 abtsiocbp->vport = vport; 11307 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; 11308 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); 11309 if (retval) { 11310 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 11311 "6147 Failed abts issue_wqe with status x%x " 11312 "for oxid x%x\n", 11313 retval, cmdiocb->sli4_xritag); 11314 lpfc_sli_release_iocbq(phba, abtsiocbp); 11315 return retval; 11316 } 11317 11318 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 11319 "6148 Drv Abort NVME Request Issued for " 11320 "ox_id x%x on reqtag x%x\n", 11321 cmdiocb->sli4_xritag, 11322 abtsiocbp->iotag); 11323 11324 return retval; 11325 } 11326 11327 /** 11328 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 11329 * @phba: pointer to lpfc HBA data structure. 11330 * 11331 * This routine will abort all pending and outstanding iocbs to an HBA. 11332 **/ 11333 void 11334 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 11335 { 11336 struct lpfc_sli *psli = &phba->sli; 11337 struct lpfc_sli_ring *pring; 11338 struct lpfc_queue *qp = NULL; 11339 int i; 11340 11341 if (phba->sli_rev != LPFC_SLI_REV4) { 11342 for (i = 0; i < psli->num_rings; i++) { 11343 pring = &psli->sli3_ring[i]; 11344 lpfc_sli_abort_iocb_ring(phba, pring); 11345 } 11346 return; 11347 } 11348 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 11349 pring = qp->pring; 11350 if (!pring) 11351 continue; 11352 lpfc_sli_abort_iocb_ring(phba, pring); 11353 } 11354 } 11355 11356 /** 11357 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 11358 * @iocbq: Pointer to driver iocb object. 11359 * @vport: Pointer to driver virtual port object. 11360 * @tgt_id: SCSI ID of the target. 11361 * @lun_id: LUN ID of the scsi device. 
11362 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 11363 * 11364 * This function acts as an iocb filter for functions which abort or count 11365 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 11366 * 0 if the filtering criteria is met for the given iocb and will return 11367 * 1 if the filtering criteria is not met. 11368 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 11369 * given iocb is for the SCSI device specified by vport, tgt_id and 11370 * lun_id parameter. 11371 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 11372 * given iocb is for the SCSI target specified by vport and tgt_id 11373 * parameters. 11374 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 11375 * given iocb is for the SCSI host associated with the given vport. 11376 * This function is called with no locks held. 11377 **/ 11378 static int 11379 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 11380 uint16_t tgt_id, uint64_t lun_id, 11381 lpfc_ctx_cmd ctx_cmd) 11382 { 11383 struct lpfc_scsi_buf *lpfc_cmd; 11384 int rc = 1; 11385 11386 if (iocbq->vport != vport) 11387 return rc; 11388 11389 if (!(iocbq->iocb_flag & LPFC_IO_FCP) || 11390 !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) 11391 return rc; 11392 11393 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 11394 11395 if (lpfc_cmd->pCmd == NULL) 11396 return rc; 11397 11398 switch (ctx_cmd) { 11399 case LPFC_CTX_LUN: 11400 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11401 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 11402 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 11403 rc = 0; 11404 break; 11405 case LPFC_CTX_TGT: 11406 if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && 11407 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 11408 rc = 0; 11409 break; 11410 case LPFC_CTX_HOST: 11411 rc = 0; 11412 break; 11413 default: 11414 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 11415 __func__, ctx_cmd); 11416 break; 11417 } 11418 11419 return rc; 11420 } 11421 11422 /** 11423 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 11424 * @vport: Pointer to virtual port. 11425 * @tgt_id: SCSI ID of the target. 11426 * @lun_id: LUN ID of the scsi device. 11427 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11428 * 11429 * This function returns number of FCP commands pending for the vport. 11430 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 11431 * commands pending on the vport associated with SCSI device specified 11432 * by tgt_id and lun_id parameters. 11433 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 11434 * commands pending on the vport associated with SCSI target specified 11435 * by tgt_id parameter. 11436 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 11437 * commands pending on the vport. 11438 * This function returns the number of iocbs which satisfy the filter. 11439 * This function is called without any lock held. 
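 *
 * Illustrative call (an assumed caller, e.g. checking whether a LUN has
 * drained after a task management command):
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);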
11440 **/ 11441 int 11442 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 11443 lpfc_ctx_cmd ctx_cmd) 11444 { 11445 struct lpfc_hba *phba = vport->phba; 11446 struct lpfc_iocbq *iocbq; 11447 int sum, i; 11448 11449 spin_lock_irq(&phba->hbalock); 11450 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 11451 iocbq = phba->sli.iocbq_lookup[i]; 11452 11453 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 11454 ctx_cmd) == 0) 11455 sum++; 11456 } 11457 spin_unlock_irq(&phba->hbalock); 11458 11459 return sum; 11460 } 11461 11462 /** 11463 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 11464 * @phba: Pointer to HBA context object 11465 * @cmdiocb: Pointer to command iocb object. 11466 * @rspiocb: Pointer to response iocb object. 11467 * 11468 * This function is called when an aborted FCP iocb completes. This 11469 * function is called by the ring event handler with no lock held. 11470 * This function frees the iocb. 11471 **/ 11472 void 11473 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 11474 struct lpfc_iocbq *rspiocb) 11475 { 11476 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11477 "3096 ABORT_XRI_CN completing on rpi x%x " 11478 "original iotag x%x, abort cmd iotag x%x " 11479 "status 0x%x, reason 0x%x\n", 11480 cmdiocb->iocb.un.acxri.abortContextTag, 11481 cmdiocb->iocb.un.acxri.abortIoTag, 11482 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 11483 rspiocb->iocb.un.ulpWord[4]); 11484 lpfc_sli_release_iocbq(phba, cmdiocb); 11485 return; 11486 } 11487 11488 /** 11489 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 11490 * @vport: Pointer to virtual port. 11491 * @pring: Pointer to driver SLI ring object. 11492 * @tgt_id: SCSI ID of the target. 11493 * @lun_id: LUN ID of the scsi device. 11494 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11495 * 11496 * This function sends an abort command for every SCSI command 11497 * associated with the given virtual port pending on the ring 11498 * filtered by lpfc_sli_validate_fcp_iocb function. 11499 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 11500 * FCP iocbs associated with lun specified by tgt_id and lun_id 11501 * parameters 11502 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 11503 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11504 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 11505 * FCP iocbs associated with virtual port. 11506 * This function returns number of iocbs it failed to abort. 11507 * This function is called with no locks held. 11508 **/ 11509 int 11510 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11511 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 11512 { 11513 struct lpfc_hba *phba = vport->phba; 11514 struct lpfc_iocbq *iocbq; 11515 struct lpfc_iocbq *abtsiocb; 11516 struct lpfc_sli_ring *pring_s4; 11517 IOCB_t *cmd = NULL; 11518 int errcnt = 0, ret_val = 0; 11519 int i; 11520 11521 /* all I/Os are in process of being flushed */ 11522 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) 11523 return errcnt; 11524 11525 for (i = 1; i <= phba->sli.last_iotag; i++) { 11526 iocbq = phba->sli.iocbq_lookup[i]; 11527 11528 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11529 abort_cmd) != 0) 11530 continue; 11531 11532 /* 11533 * If the iocbq is already being aborted, don't take a second 11534 * action, but do count it. 
11535 */ 11536 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11537 continue; 11538 11539 /* issue ABTS for this IOCB based on iotag */ 11540 abtsiocb = lpfc_sli_get_iocbq(phba); 11541 if (abtsiocb == NULL) { 11542 errcnt++; 11543 continue; 11544 } 11545 11546 /* indicate the IO is being aborted by the driver. */ 11547 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11548 11549 cmd = &iocbq->iocb; 11550 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11551 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11552 if (phba->sli_rev == LPFC_SLI_REV4) 11553 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11554 else 11555 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11556 abtsiocb->iocb.ulpLe = 1; 11557 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11558 abtsiocb->vport = vport; 11559 11560 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11561 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11562 if (iocbq->iocb_flag & LPFC_IO_FCP) 11563 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11564 if (iocbq->iocb_flag & LPFC_IO_FOF) 11565 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11566 11567 if (lpfc_is_link_up(phba)) 11568 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11569 else 11570 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11571 11572 /* Setup callback routine and issue the command. */ 11573 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11574 if (phba->sli_rev == LPFC_SLI_REV4) { 11575 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11576 if (!pring_s4) 11577 continue; 11578 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11579 abtsiocb, 0); 11580 } else 11581 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11582 abtsiocb, 0); 11583 if (ret_val == IOCB_ERROR) { 11584 lpfc_sli_release_iocbq(phba, abtsiocb); 11585 errcnt++; 11586 continue; 11587 } 11588 } 11589 11590 return errcnt; 11591 } 11592 11593 /** 11594 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11595 * @vport: Pointer to virtual port. 11596 * @pring: Pointer to driver SLI ring object. 11597 * @tgt_id: SCSI ID of the target. 11598 * @lun_id: LUN ID of the scsi device. 11599 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11600 * 11601 * This function sends an abort command for every SCSI command 11602 * associated with the given virtual port pending on the ring 11603 * filtered by lpfc_sli_validate_fcp_iocb function. 11604 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11605 * FCP iocbs associated with lun specified by tgt_id and lun_id 11606 * parameters 11607 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11608 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11609 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11610 * FCP iocbs associated with virtual port. 11611 * This function returns number of iocbs it aborted . 11612 * This function is called with no locks held right after a taskmgmt 11613 * command is sent. 
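 *
 * Illustrative call (an assumed caller): after a target reset task
 * management command has been sent, abort whatever is still outstanding
 * to that target:
 *
 *	aborted = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, 0,
 *					  LPFC_CTX_TGT);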
11614 **/ 11615 int 11616 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11617 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11618 { 11619 struct lpfc_hba *phba = vport->phba; 11620 struct lpfc_scsi_buf *lpfc_cmd; 11621 struct lpfc_iocbq *abtsiocbq; 11622 struct lpfc_nodelist *ndlp; 11623 struct lpfc_iocbq *iocbq; 11624 IOCB_t *icmd; 11625 int sum, i, ret_val; 11626 unsigned long iflags; 11627 struct lpfc_sli_ring *pring_s4; 11628 11629 spin_lock_irqsave(&phba->hbalock, iflags); 11630 11631 /* all I/Os are in process of being flushed */ 11632 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11633 spin_unlock_irqrestore(&phba->hbalock, iflags); 11634 return 0; 11635 } 11636 sum = 0; 11637 11638 for (i = 1; i <= phba->sli.last_iotag; i++) { 11639 iocbq = phba->sli.iocbq_lookup[i]; 11640 11641 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11642 cmd) != 0) 11643 continue; 11644 11645 /* 11646 * If the iocbq is already being aborted, don't take a second 11647 * action, but do count it. 11648 */ 11649 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11650 continue; 11651 11652 /* issue ABTS for this IOCB based on iotag */ 11653 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11654 if (abtsiocbq == NULL) 11655 continue; 11656 11657 icmd = &iocbq->iocb; 11658 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11659 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11660 if (phba->sli_rev == LPFC_SLI_REV4) 11661 abtsiocbq->iocb.un.acxri.abortIoTag = 11662 iocbq->sli4_xritag; 11663 else 11664 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11665 abtsiocbq->iocb.ulpLe = 1; 11666 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11667 abtsiocbq->vport = vport; 11668 11669 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11670 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11671 if (iocbq->iocb_flag & LPFC_IO_FCP) 11672 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11673 if (iocbq->iocb_flag & LPFC_IO_FOF) 11674 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11675 11676 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 11677 ndlp = lpfc_cmd->rdata->pnode; 11678 11679 if (lpfc_is_link_up(phba) && 11680 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11681 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11682 else 11683 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11684 11685 /* Setup callback routine and issue the command. */ 11686 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11687 11688 /* 11689 * Indicate the IO is being aborted by the driver and set 11690 * the caller's flag into the aborted IO. 11691 */ 11692 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11693 11694 if (phba->sli_rev == LPFC_SLI_REV4) { 11695 pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocbq); 11696 if (!pring_s4) 11697 continue; 11698 /* Note: both hbalock and ring_lock must be set here */ 11699 spin_lock(&pring_s4->ring_lock); 11700 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11701 abtsiocbq, 0); 11702 spin_unlock(&pring_s4->ring_lock); 11703 } else { 11704 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11705 abtsiocbq, 0); 11706 } 11707 11708 11709 if (ret_val == IOCB_ERROR) 11710 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11711 else 11712 sum++; 11713 } 11714 spin_unlock_irqrestore(&phba->hbalock, iflags); 11715 return sum; 11716 } 11717 11718 /** 11719 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11720 * @phba: Pointer to HBA context object. 11721 * @cmdiocbq: Pointer to command iocb. 11722 * @rspiocbq: Pointer to response iocb. 
11723 * 11724 * This function is the completion handler for iocbs issued using 11725 * lpfc_sli_issue_iocb_wait function. This function is called by the 11726 * ring event handler function without any lock held. This function 11727 * can be called from both worker thread context and interrupt 11728 * context. This function also can be called from other thread which 11729 * cleans up the SLI layer objects. 11730 * This function copy the contents of the response iocb to the 11731 * response iocb memory object provided by the caller of 11732 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 11733 * sleeps for the iocb completion. 11734 **/ 11735 static void 11736 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 11737 struct lpfc_iocbq *cmdiocbq, 11738 struct lpfc_iocbq *rspiocbq) 11739 { 11740 wait_queue_head_t *pdone_q; 11741 unsigned long iflags; 11742 struct lpfc_scsi_buf *lpfc_cmd; 11743 11744 spin_lock_irqsave(&phba->hbalock, iflags); 11745 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 11746 11747 /* 11748 * A time out has occurred for the iocb. If a time out 11749 * completion handler has been supplied, call it. Otherwise, 11750 * just free the iocbq. 11751 */ 11752 11753 spin_unlock_irqrestore(&phba->hbalock, iflags); 11754 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 11755 cmdiocbq->wait_iocb_cmpl = NULL; 11756 if (cmdiocbq->iocb_cmpl) 11757 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 11758 else 11759 lpfc_sli_release_iocbq(phba, cmdiocbq); 11760 return; 11761 } 11762 11763 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 11764 if (cmdiocbq->context2 && rspiocbq) 11765 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 11766 &rspiocbq->iocb, sizeof(IOCB_t)); 11767 11768 /* Set the exchange busy flag for task management commands */ 11769 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 11770 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 11771 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 11772 cur_iocbq); 11773 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 11774 } 11775 11776 pdone_q = cmdiocbq->context_un.wait_queue; 11777 if (pdone_q) 11778 wake_up(pdone_q); 11779 spin_unlock_irqrestore(&phba->hbalock, iflags); 11780 return; 11781 } 11782 11783 /** 11784 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 11785 * @phba: Pointer to HBA context object.. 11786 * @piocbq: Pointer to command iocb. 11787 * @flag: Flag to test. 11788 * 11789 * This routine grabs the hbalock and then test the iocb_flag to 11790 * see if the passed in flag is set. 11791 * Returns: 11792 * 1 if flag is set. 11793 * 0 if flag is not set. 11794 **/ 11795 static int 11796 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 11797 struct lpfc_iocbq *piocbq, uint32_t flag) 11798 { 11799 unsigned long iflags; 11800 int ret; 11801 11802 spin_lock_irqsave(&phba->hbalock, iflags); 11803 ret = piocbq->iocb_flag & flag; 11804 spin_unlock_irqrestore(&phba->hbalock, iflags); 11805 return ret; 11806 11807 } 11808 11809 /** 11810 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 11811 * @phba: Pointer to HBA context object.. 11812 * @pring: Pointer to sli ring. 11813 * @piocb: Pointer to command iocb. 11814 * @prspiocbq: Pointer to response iocb. 11815 * @timeout: Timeout in number of seconds. 11816 * 11817 * This function issues the iocb to firmware and waits for the 11818 * iocb to complete. The iocb_cmpl field of the shall be used 11819 * to handle iocbs which time out. If the field is NULL, the 11820 * function shall free the iocbq structure. 
If more clean up is 11821 * needed, the caller is expected to provide a completion function 11822 * that will provide the needed clean up. If the iocb command is 11823 * not completed within timeout seconds, the function will either 11824 * free the iocbq structure (if iocb_cmpl == NULL) or execute the 11825 * completion function set in the iocb_cmpl field and then return 11826 * a status of IOCB_TIMEDOUT. The caller should not free the iocb 11827 * resources if this function returns IOCB_TIMEDOUT. 11828 * The function waits for the iocb completion using an 11829 * non-interruptible wait. 11830 * This function will sleep while waiting for iocb completion. 11831 * So, this function should not be called from any context which 11832 * does not allow sleeping. Due to the same reason, this function 11833 * cannot be called with interrupt disabled. 11834 * This function assumes that the iocb completions occur while 11835 * this function sleep. So, this function cannot be called from 11836 * the thread which process iocb completion for this ring. 11837 * This function clears the iocb_flag of the iocb object before 11838 * issuing the iocb and the iocb completion handler sets this 11839 * flag and wakes this thread when the iocb completes. 11840 * The contents of the response iocb will be copied to prspiocbq 11841 * by the completion handler when the command completes. 11842 * This function returns IOCB_SUCCESS when success. 11843 * This function is called with no lock held. 11844 **/ 11845 int 11846 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 11847 uint32_t ring_number, 11848 struct lpfc_iocbq *piocb, 11849 struct lpfc_iocbq *prspiocbq, 11850 uint32_t timeout) 11851 { 11852 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 11853 long timeleft, timeout_req = 0; 11854 int retval = IOCB_SUCCESS; 11855 uint32_t creg_val; 11856 struct lpfc_iocbq *iocb; 11857 int txq_cnt = 0; 11858 int txcmplq_cnt = 0; 11859 struct lpfc_sli_ring *pring; 11860 unsigned long iflags; 11861 bool iocb_completed = true; 11862 11863 if (phba->sli_rev >= LPFC_SLI_REV4) 11864 pring = lpfc_sli4_calc_ring(phba, piocb); 11865 else 11866 pring = &phba->sli.sli3_ring[ring_number]; 11867 /* 11868 * If the caller has provided a response iocbq buffer, then context2 11869 * is NULL or its an error. 11870 */ 11871 if (prspiocbq) { 11872 if (piocb->context2) 11873 return IOCB_ERROR; 11874 piocb->context2 = prspiocbq; 11875 } 11876 11877 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 11878 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 11879 piocb->context_un.wait_queue = &done_q; 11880 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 11881 11882 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11883 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11884 return IOCB_ERROR; 11885 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 11886 writel(creg_val, phba->HCregaddr); 11887 readl(phba->HCregaddr); /* flush */ 11888 } 11889 11890 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 11891 SLI_IOCB_RET_IOCB); 11892 if (retval == IOCB_SUCCESS) { 11893 timeout_req = msecs_to_jiffies(timeout * 1000); 11894 timeleft = wait_event_timeout(done_q, 11895 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 11896 timeout_req); 11897 spin_lock_irqsave(&phba->hbalock, iflags); 11898 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 11899 11900 /* 11901 * IOCB timed out. 
Inform the wake iocb wait 11902 * completion function and set local status 11903 */ 11904 11905 iocb_completed = false; 11906 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11907 } 11908 spin_unlock_irqrestore(&phba->hbalock, iflags); 11909 if (iocb_completed) { 11910 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11911 "0331 IOCB wake signaled\n"); 11912 /* Note: we are not indicating if the IOCB has a success 11913 * status or not - that's for the caller to check. 11914 * IOCB_SUCCESS means just that the command was sent and 11915 * completed. Not that it completed successfully. 11916 * */ 11917 } else if (timeleft == 0) { 11918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11919 "0338 IOCB wait timeout error - no " 11920 "wake response Data x%x\n", timeout); 11921 retval = IOCB_TIMEDOUT; 11922 } else { 11923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11924 "0330 IOCB wake NOT set, " 11925 "Data x%x x%lx\n", 11926 timeout, (timeleft / jiffies)); 11927 retval = IOCB_TIMEDOUT; 11928 } 11929 } else if (retval == IOCB_BUSY) { 11930 if (phba->cfg_log_verbose & LOG_SLI) { 11931 list_for_each_entry(iocb, &pring->txq, list) { 11932 txq_cnt++; 11933 } 11934 list_for_each_entry(iocb, &pring->txcmplq, list) { 11935 txcmplq_cnt++; 11936 } 11937 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11938 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11939 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11940 } 11941 return retval; 11942 } else { 11943 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11944 "0332 IOCB wait issue failed, Data x%x\n", 11945 retval); 11946 retval = IOCB_ERROR; 11947 } 11948 11949 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11950 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11951 return IOCB_ERROR; 11952 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11953 writel(creg_val, phba->HCregaddr); 11954 readl(phba->HCregaddr); /* flush */ 11955 } 11956 11957 if (prspiocbq) 11958 piocb->context2 = NULL; 11959 11960 piocb->context_un.wait_queue = NULL; 11961 piocb->iocb_cmpl = NULL; 11962 return retval; 11963 } 11964 11965 /** 11966 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11967 * @phba: Pointer to HBA context object. 11968 * @pmboxq: Pointer to driver mailbox object. 11969 * @timeout: Timeout in number of seconds. 11970 * 11971 * This function issues the mailbox to firmware and waits for the 11972 * mailbox command to complete. If the mailbox command is not 11973 * completed within timeout seconds, it returns MBX_TIMEOUT. 11974 * The function waits for the mailbox completion using an 11975 * interruptible wait. If the thread is woken up due to a 11976 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11977 * should not free the mailbox resources, if this function returns 11978 * MBX_TIMEOUT. 11979 * This function will sleep while waiting for mailbox completion. 11980 * So, this function should not be called from any context which 11981 * does not allow sleeping. Due to the same reason, this function 11982 * cannot be called with interrupt disabled. 11983 * This function assumes that the mailbox completion occurs while 11984 * this function sleep. So, this function cannot be called from 11985 * the worker thread which processes mailbox completion. 11986 * This function is called in the context of HBA management 11987 * applications. 11988 * This function returns MBX_SUCCESS when successful. 11989 * This function is called with no lock held. 
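 *
 * Minimal usage sketch (an assumed caller; the mailbox memory pool name
 * is the driver's usual one and is not defined in this file), honouring
 * the MBX_TIMEOUT ownership rule described above:
 *
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);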
11990 **/
11991 int
11992 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11993 uint32_t timeout)
11994 {
11995 struct completion mbox_done;
11996 int retval;
11997 unsigned long flag;
11998
11999 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
12000 /* setup wake call as mailbox callback */
12001 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
12002
12003 /* setup context3 field to pass completion pointer to wake function */
12004 init_completion(&mbox_done);
12005 pmboxq->context3 = &mbox_done;
12006 /* now issue the command */
12007 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
12008 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
12009 wait_for_completion_timeout(&mbox_done,
12010 msecs_to_jiffies(timeout * 1000));
12011
12012 spin_lock_irqsave(&phba->hbalock, flag);
12013 pmboxq->context3 = NULL;
12014 /*
12015 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
12016 * otherwise do not free the resources.
12017 */
12018 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
12019 retval = MBX_SUCCESS;
12020 } else {
12021 retval = MBX_TIMEOUT;
12022 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
12023 }
12024 spin_unlock_irqrestore(&phba->hbalock, flag);
12025 }
12026 return retval;
12027 }
12028
12029 /**
12030 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
12031 * @phba: Pointer to HBA context.
12032 *
12033 * This function is called to shutdown the driver's mailbox sub-system.
12034 * It first marks the mailbox sub-system as blocked to prevent further
12035 * asynchronous mailbox commands from being issued off the pending mailbox
12036 * command queue. If the mailbox command sub-system shutdown is due to
12037 * HBA error conditions such as EEH or ERATT, this routine shall invoke
12038 * the mailbox sub-system flush routine to forcefully bring down the
12039 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
12040 * as with offline or HBA function reset), this routine will wait for the
12041 * outstanding mailbox command to complete before invoking the mailbox
12042 * sub-system flush routine to gracefully bring down the mailbox sub-system.
12043 **/
12044 void
12045 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
12046 {
12047 struct lpfc_sli *psli = &phba->sli;
12048 unsigned long timeout;
12049
12050 if (mbx_action == LPFC_MBX_NO_WAIT) {
12051 /* delay 100ms for port state */
12052 msleep(100);
12053 lpfc_sli_mbox_sys_flush(phba);
12054 return;
12055 }
12056 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
12057
12058 /* Disable softirqs, including timers from obtaining phba->hbalock */
12059 local_bh_disable();
12060
12061 spin_lock_irq(&phba->hbalock);
12062 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
12063
12064 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
12065 /* Determine how long we might wait for the active mailbox
12066 * command to be gracefully completed by firmware.
12067 */ 12068 if (phba->sli.mbox_active) 12069 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 12070 phba->sli.mbox_active) * 12071 1000) + jiffies; 12072 spin_unlock_irq(&phba->hbalock); 12073 12074 /* Enable softirqs again, done with phba->hbalock */ 12075 local_bh_enable(); 12076 12077 while (phba->sli.mbox_active) { 12078 /* Check active mailbox complete status every 2ms */ 12079 msleep(2); 12080 if (time_after(jiffies, timeout)) 12081 /* Timeout, let the mailbox flush routine to 12082 * forcefully release active mailbox command 12083 */ 12084 break; 12085 } 12086 } else { 12087 spin_unlock_irq(&phba->hbalock); 12088 12089 /* Enable softirqs again, done with phba->hbalock */ 12090 local_bh_enable(); 12091 } 12092 12093 lpfc_sli_mbox_sys_flush(phba); 12094 } 12095 12096 /** 12097 * lpfc_sli_eratt_read - read sli-3 error attention events 12098 * @phba: Pointer to HBA context. 12099 * 12100 * This function is called to read the SLI3 device error attention registers 12101 * for possible error attention events. The caller must hold the hostlock 12102 * with spin_lock_irq(). 12103 * 12104 * This function returns 1 when there is Error Attention in the Host Attention 12105 * Register and returns 0 otherwise. 12106 **/ 12107 static int 12108 lpfc_sli_eratt_read(struct lpfc_hba *phba) 12109 { 12110 uint32_t ha_copy; 12111 12112 /* Read chip Host Attention (HA) register */ 12113 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12114 goto unplug_err; 12115 12116 if (ha_copy & HA_ERATT) { 12117 /* Read host status register to retrieve error event */ 12118 if (lpfc_sli_read_hs(phba)) 12119 goto unplug_err; 12120 12121 /* Check if there is a deferred error condition is active */ 12122 if ((HS_FFER1 & phba->work_hs) && 12123 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12124 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 12125 phba->hba_flag |= DEFER_ERATT; 12126 /* Clear all interrupt enable conditions */ 12127 writel(0, phba->HCregaddr); 12128 readl(phba->HCregaddr); 12129 } 12130 12131 /* Set the driver HA work bitmap */ 12132 phba->work_ha |= HA_ERATT; 12133 /* Indicate polling handles this ERATT */ 12134 phba->hba_flag |= HBA_ERATT_HANDLED; 12135 return 1; 12136 } 12137 return 0; 12138 12139 unplug_err: 12140 /* Set the driver HS work bitmap */ 12141 phba->work_hs |= UNPLUG_ERR; 12142 /* Set the driver HA work bitmap */ 12143 phba->work_ha |= HA_ERATT; 12144 /* Indicate polling handles this ERATT */ 12145 phba->hba_flag |= HBA_ERATT_HANDLED; 12146 return 1; 12147 } 12148 12149 /** 12150 * lpfc_sli4_eratt_read - read sli-4 error attention events 12151 * @phba: Pointer to HBA context. 12152 * 12153 * This function is called to read the SLI4 device error attention registers 12154 * for possible error attention events. The caller must hold the hostlock 12155 * with spin_lock_irq(). 12156 * 12157 * This function returns 1 when there is Error Attention in the Host Attention 12158 * Register and returns 0 otherwise. 12159 **/ 12160 static int 12161 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 12162 { 12163 uint32_t uerr_sta_hi, uerr_sta_lo; 12164 uint32_t if_type, portsmphr; 12165 struct lpfc_register portstat_reg; 12166 12167 /* 12168 * For now, use the SLI4 device internal unrecoverable error 12169 * registers for error attention. This can be changed later. 
12170 */ 12171 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 12172 switch (if_type) { 12173 case LPFC_SLI_INTF_IF_TYPE_0: 12174 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 12175 &uerr_sta_lo) || 12176 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 12177 &uerr_sta_hi)) { 12178 phba->work_hs |= UNPLUG_ERR; 12179 phba->work_ha |= HA_ERATT; 12180 phba->hba_flag |= HBA_ERATT_HANDLED; 12181 return 1; 12182 } 12183 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 12184 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 12185 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12186 "1423 HBA Unrecoverable error: " 12187 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 12188 "ue_mask_lo_reg=0x%x, " 12189 "ue_mask_hi_reg=0x%x\n", 12190 uerr_sta_lo, uerr_sta_hi, 12191 phba->sli4_hba.ue_mask_lo, 12192 phba->sli4_hba.ue_mask_hi); 12193 phba->work_status[0] = uerr_sta_lo; 12194 phba->work_status[1] = uerr_sta_hi; 12195 phba->work_ha |= HA_ERATT; 12196 phba->hba_flag |= HBA_ERATT_HANDLED; 12197 return 1; 12198 } 12199 break; 12200 case LPFC_SLI_INTF_IF_TYPE_2: 12201 case LPFC_SLI_INTF_IF_TYPE_6: 12202 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 12203 &portstat_reg.word0) || 12204 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 12205 &portsmphr)){ 12206 phba->work_hs |= UNPLUG_ERR; 12207 phba->work_ha |= HA_ERATT; 12208 phba->hba_flag |= HBA_ERATT_HANDLED; 12209 return 1; 12210 } 12211 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 12212 phba->work_status[0] = 12213 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 12214 phba->work_status[1] = 12215 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 12216 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12217 "2885 Port Status Event: " 12218 "port status reg 0x%x, " 12219 "port smphr reg 0x%x, " 12220 "error 1=0x%x, error 2=0x%x\n", 12221 portstat_reg.word0, 12222 portsmphr, 12223 phba->work_status[0], 12224 phba->work_status[1]); 12225 phba->work_ha |= HA_ERATT; 12226 phba->hba_flag |= HBA_ERATT_HANDLED; 12227 return 1; 12228 } 12229 break; 12230 case LPFC_SLI_INTF_IF_TYPE_1: 12231 default: 12232 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12233 "2886 HBA Error Attention on unsupported " 12234 "if type %d.", if_type); 12235 return 1; 12236 } 12237 12238 return 0; 12239 } 12240 12241 /** 12242 * lpfc_sli_check_eratt - check error attention events 12243 * @phba: Pointer to HBA context. 12244 * 12245 * This function is called from timer soft interrupt context to check HBA's 12246 * error attention register bit for error attention events. 12247 * 12248 * This function returns 1 when there is Error Attention in the Host Attention 12249 * Register and returns 0 otherwise. 12250 **/ 12251 int 12252 lpfc_sli_check_eratt(struct lpfc_hba *phba) 12253 { 12254 uint32_t ha_copy; 12255 12256 /* If somebody is waiting to handle an eratt, don't process it 12257 * here. The brdkill function will do this. 
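 * (The board-kill path sets LS_IGNORE_ERATT in link_flag while it owns
 * error recovery, which is what is tested below.)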
12258 */
12259 if (phba->link_flag & LS_IGNORE_ERATT)
12260 return 0;
12261
12262 /* Check if interrupt handler handles this ERATT */
12263 spin_lock_irq(&phba->hbalock);
12264 if (phba->hba_flag & HBA_ERATT_HANDLED) {
12265 /* Interrupt handler has handled ERATT */
12266 spin_unlock_irq(&phba->hbalock);
12267 return 0;
12268 }
12269
12270 /*
12271 * If there is a deferred error attention, do not check for error
12272 * attention
12273 */
12274 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12275 spin_unlock_irq(&phba->hbalock);
12276 return 0;
12277 }
12278
12279 /* If PCI channel is offline, don't process it */
12280 if (unlikely(pci_channel_offline(phba->pcidev))) {
12281 spin_unlock_irq(&phba->hbalock);
12282 return 0;
12283 }
12284
12285 switch (phba->sli_rev) {
12286 case LPFC_SLI_REV2:
12287 case LPFC_SLI_REV3:
12288 /* Read chip Host Attention (HA) register */
12289 ha_copy = lpfc_sli_eratt_read(phba);
12290 break;
12291 case LPFC_SLI_REV4:
12292 /* Read device Unrecoverable Error (UERR) registers */
12293 ha_copy = lpfc_sli4_eratt_read(phba);
12294 break;
12295 default:
12296 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12297 "0299 Invalid SLI revision (%d)\n",
12298 phba->sli_rev);
12299 ha_copy = 0;
12300 break;
12301 }
12302 spin_unlock_irq(&phba->hbalock);
12303
12304 return ha_copy;
12305 }
12306
12307 /**
12308 * lpfc_intr_state_check - Check device state for interrupt handling
12309 * @phba: Pointer to HBA context.
12310 *
12311 * This inline routine checks whether a device or its PCI slot is in a state
12312 * in which the interrupt should be handled.
12313 *
12314 * This function returns 0 if the device or the PCI slot is in a state in
12315 * which the interrupt should be handled, otherwise -EIO.
12316 */
12317 static inline int
12318 lpfc_intr_state_check(struct lpfc_hba *phba)
12319 {
12320 /* If the pci channel is offline, ignore all the interrupts */
12321 if (unlikely(pci_channel_offline(phba->pcidev)))
12322 return -EIO;
12323
12324 /* Update device level interrupt statistics */
12325 phba->sli.slistat.sli_intr++;
12326
12327 /* Ignore all interrupts during initialization. */
12328 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
12329 return -EIO;
12330
12331 return 0;
12332 }
12333
12334 /**
12335 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
12336 * @irq: Interrupt number.
12337 * @dev_id: The device context pointer.
12338 *
12339 * This function is directly called from the PCI layer as an interrupt
12340 * service routine when a device with the SLI-3 interface spec is enabled with
12341 * MSI-X multi-message interrupt mode and there are slow-path events in
12342 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
12343 * interrupt mode, this function is called as part of the device-level
12344 * interrupt handler. When the PCI slot is in error recovery or the HBA
12345 * is undergoing initialization, the interrupt handler will not process
12346 * the interrupt. The link attention and ELS ring attention events are
12347 * handled by the worker thread. The interrupt handler signals the worker
12348 * thread and returns for these events. This function is called without
12349 * any lock held. It gets the hbalock to access and update SLI data
12350 * structures.
12351 *
12352 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12353 * returns IRQ_NONE.
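 *
 * A rough sketch of how the driver is expected to hook this handler to the
 * slow-path MSI-X vector (illustrative only; the actual vector setup lives
 * in lpfc_init.c):
 *
 *   rc = request_irq(pci_irq_vector(phba->pcidev, 0),
 *                    &lpfc_sli_sp_intr_handler, 0,
 *                    LPFC_SP_DRIVER_HANDLER_NAME, phba);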
12354 **/ 12355 irqreturn_t 12356 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 12357 { 12358 struct lpfc_hba *phba; 12359 uint32_t ha_copy, hc_copy; 12360 uint32_t work_ha_copy; 12361 unsigned long status; 12362 unsigned long iflag; 12363 uint32_t control; 12364 12365 MAILBOX_t *mbox, *pmbox; 12366 struct lpfc_vport *vport; 12367 struct lpfc_nodelist *ndlp; 12368 struct lpfc_dmabuf *mp; 12369 LPFC_MBOXQ_t *pmb; 12370 int rc; 12371 12372 /* 12373 * Get the driver's phba structure from the dev_id and 12374 * assume the HBA is not interrupting. 12375 */ 12376 phba = (struct lpfc_hba *)dev_id; 12377 12378 if (unlikely(!phba)) 12379 return IRQ_NONE; 12380 12381 /* 12382 * Stuff needs to be attented to when this function is invoked as an 12383 * individual interrupt handler in MSI-X multi-message interrupt mode 12384 */ 12385 if (phba->intr_type == MSIX) { 12386 /* Check device state for handling interrupt */ 12387 if (lpfc_intr_state_check(phba)) 12388 return IRQ_NONE; 12389 /* Need to read HA REG for slow-path events */ 12390 spin_lock_irqsave(&phba->hbalock, iflag); 12391 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12392 goto unplug_error; 12393 /* If somebody is waiting to handle an eratt don't process it 12394 * here. The brdkill function will do this. 12395 */ 12396 if (phba->link_flag & LS_IGNORE_ERATT) 12397 ha_copy &= ~HA_ERATT; 12398 /* Check the need for handling ERATT in interrupt handler */ 12399 if (ha_copy & HA_ERATT) { 12400 if (phba->hba_flag & HBA_ERATT_HANDLED) 12401 /* ERATT polling has handled ERATT */ 12402 ha_copy &= ~HA_ERATT; 12403 else 12404 /* Indicate interrupt handler handles ERATT */ 12405 phba->hba_flag |= HBA_ERATT_HANDLED; 12406 } 12407 12408 /* 12409 * If there is deferred error attention, do not check for any 12410 * interrupt. 12411 */ 12412 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12413 spin_unlock_irqrestore(&phba->hbalock, iflag); 12414 return IRQ_NONE; 12415 } 12416 12417 /* Clear up only attention source related to slow-path */ 12418 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 12419 goto unplug_error; 12420 12421 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 12422 HC_LAINT_ENA | HC_ERINT_ENA), 12423 phba->HCregaddr); 12424 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 12425 phba->HAregaddr); 12426 writel(hc_copy, phba->HCregaddr); 12427 readl(phba->HAregaddr); /* flush */ 12428 spin_unlock_irqrestore(&phba->hbalock, iflag); 12429 } else 12430 ha_copy = phba->ha_copy; 12431 12432 work_ha_copy = ha_copy & phba->work_ha_mask; 12433 12434 if (work_ha_copy) { 12435 if (work_ha_copy & HA_LATT) { 12436 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 12437 /* 12438 * Turn off Link Attention interrupts 12439 * until CLEAR_LA done 12440 */ 12441 spin_lock_irqsave(&phba->hbalock, iflag); 12442 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 12443 if (lpfc_readl(phba->HCregaddr, &control)) 12444 goto unplug_error; 12445 control &= ~HC_LAINT_ENA; 12446 writel(control, phba->HCregaddr); 12447 readl(phba->HCregaddr); /* flush */ 12448 spin_unlock_irqrestore(&phba->hbalock, iflag); 12449 } 12450 else 12451 work_ha_copy &= ~HA_LATT; 12452 } 12453 12454 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 12455 /* 12456 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 12457 * the only slow ring. 
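			 * Each ring owns a 4-bit attention nibble in the HA
			 * register, which is why the mask below is shifted
			 * by (4 * LPFC_ELS_RING).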
12458 */ 12459 status = (work_ha_copy & 12460 (HA_RXMASK << (4*LPFC_ELS_RING))); 12461 status >>= (4*LPFC_ELS_RING); 12462 if (status & HA_RXMASK) { 12463 spin_lock_irqsave(&phba->hbalock, iflag); 12464 if (lpfc_readl(phba->HCregaddr, &control)) 12465 goto unplug_error; 12466 12467 lpfc_debugfs_slow_ring_trc(phba, 12468 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 12469 control, status, 12470 (uint32_t)phba->sli.slistat.sli_intr); 12471 12472 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 12473 lpfc_debugfs_slow_ring_trc(phba, 12474 "ISR Disable ring:" 12475 "pwork:x%x hawork:x%x wait:x%x", 12476 phba->work_ha, work_ha_copy, 12477 (uint32_t)((unsigned long) 12478 &phba->work_waitq)); 12479 12480 control &= 12481 ~(HC_R0INT_ENA << LPFC_ELS_RING); 12482 writel(control, phba->HCregaddr); 12483 readl(phba->HCregaddr); /* flush */ 12484 } 12485 else { 12486 lpfc_debugfs_slow_ring_trc(phba, 12487 "ISR slow ring: pwork:" 12488 "x%x hawork:x%x wait:x%x", 12489 phba->work_ha, work_ha_copy, 12490 (uint32_t)((unsigned long) 12491 &phba->work_waitq)); 12492 } 12493 spin_unlock_irqrestore(&phba->hbalock, iflag); 12494 } 12495 } 12496 spin_lock_irqsave(&phba->hbalock, iflag); 12497 if (work_ha_copy & HA_ERATT) { 12498 if (lpfc_sli_read_hs(phba)) 12499 goto unplug_error; 12500 /* 12501 * Check if there is a deferred error condition 12502 * is active 12503 */ 12504 if ((HS_FFER1 & phba->work_hs) && 12505 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 12506 HS_FFER6 | HS_FFER7 | HS_FFER8) & 12507 phba->work_hs)) { 12508 phba->hba_flag |= DEFER_ERATT; 12509 /* Clear all interrupt enable conditions */ 12510 writel(0, phba->HCregaddr); 12511 readl(phba->HCregaddr); 12512 } 12513 } 12514 12515 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 12516 pmb = phba->sli.mbox_active; 12517 pmbox = &pmb->u.mb; 12518 mbox = phba->mbox; 12519 vport = pmb->vport; 12520 12521 /* First check out the status word */ 12522 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 12523 if (pmbox->mbxOwner != OWN_HOST) { 12524 spin_unlock_irqrestore(&phba->hbalock, iflag); 12525 /* 12526 * Stray Mailbox Interrupt, mbxCommand <cmd> 12527 * mbxStatus <status> 12528 */ 12529 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12530 LOG_SLI, 12531 "(%d):0304 Stray Mailbox " 12532 "Interrupt mbxCommand x%x " 12533 "mbxStatus x%x\n", 12534 (vport ? vport->vpi : 0), 12535 pmbox->mbxCommand, 12536 pmbox->mbxStatus); 12537 /* clear mailbox attention bit */ 12538 work_ha_copy &= ~HA_MBATT; 12539 } else { 12540 phba->sli.mbox_active = NULL; 12541 spin_unlock_irqrestore(&phba->hbalock, iflag); 12542 phba->last_completion_time = jiffies; 12543 del_timer(&phba->sli.mbox_tmo); 12544 if (pmb->mbox_cmpl) { 12545 lpfc_sli_pcimem_bcopy(mbox, pmbox, 12546 MAILBOX_CMD_SIZE); 12547 if (pmb->out_ext_byte_len && 12548 pmb->context2) 12549 lpfc_sli_pcimem_bcopy( 12550 phba->mbox_ext, 12551 pmb->context2, 12552 pmb->out_ext_byte_len); 12553 } 12554 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12555 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12556 12557 lpfc_debugfs_disc_trc(vport, 12558 LPFC_DISC_TRC_MBOX_VPORT, 12559 "MBOX dflt rpi: : " 12560 "status:x%x rpi:x%x", 12561 (uint32_t)pmbox->mbxStatus, 12562 pmbox->un.varWords[0], 0); 12563 12564 if (!pmbox->mbxStatus) { 12565 mp = (struct lpfc_dmabuf *) 12566 (pmb->context1); 12567 ndlp = (struct lpfc_nodelist *) 12568 pmb->context2; 12569 12570 /* Reg_LOGIN of dflt RPI was 12571 * successful. new lets get 12572 * rid of the RPI using the 12573 * same mbox buffer. 
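					 * Reusing the just-completed mailbox
					 * buffer avoids allocating a new one
					 * in interrupt context.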
12574 */
12575 lpfc_unreg_login(phba,
12576 vport->vpi,
12577 pmbox->un.varWords[0],
12578 pmb);
12579 pmb->mbox_cmpl =
12580 lpfc_mbx_cmpl_dflt_rpi;
12581 pmb->context1 = mp;
12582 pmb->context2 = ndlp;
12583 pmb->vport = vport;
12584 rc = lpfc_sli_issue_mbox(phba,
12585 pmb,
12586 MBX_NOWAIT);
12587 if (rc != MBX_BUSY)
12588 lpfc_printf_log(phba,
12589 KERN_ERR,
12590 LOG_MBOX | LOG_SLI,
12591 "0350 rc should have "
12592 "been MBX_BUSY\n");
12593 if (rc != MBX_NOT_FINISHED)
12594 goto send_current_mbox;
12595 }
12596 }
12597 spin_lock_irqsave(
12598 &phba->pport->work_port_lock,
12599 iflag);
12600 phba->pport->work_port_events &=
12601 ~WORKER_MBOX_TMO;
12602 spin_unlock_irqrestore(
12603 &phba->pport->work_port_lock,
12604 iflag);
12605 lpfc_mbox_cmpl_put(phba, pmb);
12606 }
12607 } else
12608 spin_unlock_irqrestore(&phba->hbalock, iflag);
12609
12610 if ((work_ha_copy & HA_MBATT) &&
12611 (phba->sli.mbox_active == NULL)) {
12612 send_current_mbox:
12613 /* Process next mailbox command if there is one */
12614 do {
12615 rc = lpfc_sli_issue_mbox(phba, NULL,
12616 MBX_NOWAIT);
12617 } while (rc == MBX_NOT_FINISHED);
12618 if (rc != MBX_SUCCESS)
12619 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12620 LOG_SLI, "0349 rc should be "
12621 "MBX_SUCCESS\n");
12622 }
12623
12624 spin_lock_irqsave(&phba->hbalock, iflag);
12625 phba->work_ha |= work_ha_copy;
12626 spin_unlock_irqrestore(&phba->hbalock, iflag);
12627 lpfc_worker_wake_up(phba);
12628 }
12629 return IRQ_HANDLED;
12630 unplug_error:
12631 spin_unlock_irqrestore(&phba->hbalock, iflag);
12632 return IRQ_HANDLED;
12633
12634 } /* lpfc_sli_sp_intr_handler */
12635
12636 /**
12637 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12638 * @irq: Interrupt number.
12639 * @dev_id: The device context pointer.
12640 *
12641 * This function is directly called from the PCI layer as an interrupt
12642 * service routine when device with SLI-3 interface spec is enabled with
12643 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12644 * ring event in the HBA. However, when the device is enabled with either
12645 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12646 * device-level interrupt handler. When the PCI slot is in error recovery
12647 * or the HBA is undergoing initialization, the interrupt handler will not
12648 * process the interrupt. The SCSI FCP fast-path ring events are handled in
12649 * the interrupt context. This function is called without any lock held.
12650 * It gets the hbalock to access and update SLI data structures.
12651 *
12652 * This function returns IRQ_HANDLED when the interrupt is handled, else it
12653 * returns IRQ_NONE.
12654 **/
12655 irqreturn_t
12656 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12657 {
12658 struct lpfc_hba *phba;
12659 uint32_t ha_copy;
12660 unsigned long status;
12661 unsigned long iflag;
12662 struct lpfc_sli_ring *pring;
12663
12664 /* Get the driver's phba structure from the dev_id and
12665 * assume the HBA is not interrupting.
12666 */ 12667 phba = (struct lpfc_hba *) dev_id; 12668 12669 if (unlikely(!phba)) 12670 return IRQ_NONE; 12671 12672 /* 12673 * Stuff needs to be attented to when this function is invoked as an 12674 * individual interrupt handler in MSI-X multi-message interrupt mode 12675 */ 12676 if (phba->intr_type == MSIX) { 12677 /* Check device state for handling interrupt */ 12678 if (lpfc_intr_state_check(phba)) 12679 return IRQ_NONE; 12680 /* Need to read HA REG for FCP ring and other ring events */ 12681 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12682 return IRQ_HANDLED; 12683 /* Clear up only attention source related to fast-path */ 12684 spin_lock_irqsave(&phba->hbalock, iflag); 12685 /* 12686 * If there is deferred error attention, do not check for 12687 * any interrupt. 12688 */ 12689 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12690 spin_unlock_irqrestore(&phba->hbalock, iflag); 12691 return IRQ_NONE; 12692 } 12693 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12694 phba->HAregaddr); 12695 readl(phba->HAregaddr); /* flush */ 12696 spin_unlock_irqrestore(&phba->hbalock, iflag); 12697 } else 12698 ha_copy = phba->ha_copy; 12699 12700 /* 12701 * Process all events on FCP ring. Take the optimized path for FCP IO. 12702 */ 12703 ha_copy &= ~(phba->work_ha_mask); 12704 12705 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12706 status >>= (4*LPFC_FCP_RING); 12707 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12708 if (status & HA_RXMASK) 12709 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12710 12711 if (phba->cfg_multi_ring_support == 2) { 12712 /* 12713 * Process all events on extra ring. Take the optimized path 12714 * for extra ring IO. 12715 */ 12716 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12717 status >>= (4*LPFC_EXTRA_RING); 12718 if (status & HA_RXMASK) { 12719 lpfc_sli_handle_fast_ring_event(phba, 12720 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12721 status); 12722 } 12723 } 12724 return IRQ_HANDLED; 12725 } /* lpfc_sli_fp_intr_handler */ 12726 12727 /** 12728 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12729 * @irq: Interrupt number. 12730 * @dev_id: The device context pointer. 12731 * 12732 * This function is the HBA device-level interrupt handler to device with 12733 * SLI-3 interface spec, called from the PCI layer when either MSI or 12734 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12735 * requires driver attention. This function invokes the slow-path interrupt 12736 * attention handling function and fast-path interrupt attention handling 12737 * function in turn to process the relevant HBA attention events. This 12738 * function is called without any lock held. It gets the hbalock to access 12739 * and update SLI data structures. 12740 * 12741 * This function returns IRQ_HANDLED when interrupt is handled, else it 12742 * returns IRQ_NONE. 12743 **/ 12744 irqreturn_t 12745 lpfc_sli_intr_handler(int irq, void *dev_id) 12746 { 12747 struct lpfc_hba *phba; 12748 irqreturn_t sp_irq_rc, fp_irq_rc; 12749 unsigned long status1, status2; 12750 uint32_t hc_copy; 12751 12752 /* 12753 * Get the driver's phba structure from the dev_id and 12754 * assume the HBA is not interrupting. 
12755 */ 12756 phba = (struct lpfc_hba *) dev_id; 12757 12758 if (unlikely(!phba)) 12759 return IRQ_NONE; 12760 12761 /* Check device state for handling interrupt */ 12762 if (lpfc_intr_state_check(phba)) 12763 return IRQ_NONE; 12764 12765 spin_lock(&phba->hbalock); 12766 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12767 spin_unlock(&phba->hbalock); 12768 return IRQ_HANDLED; 12769 } 12770 12771 if (unlikely(!phba->ha_copy)) { 12772 spin_unlock(&phba->hbalock); 12773 return IRQ_NONE; 12774 } else if (phba->ha_copy & HA_ERATT) { 12775 if (phba->hba_flag & HBA_ERATT_HANDLED) 12776 /* ERATT polling has handled ERATT */ 12777 phba->ha_copy &= ~HA_ERATT; 12778 else 12779 /* Indicate interrupt handler handles ERATT */ 12780 phba->hba_flag |= HBA_ERATT_HANDLED; 12781 } 12782 12783 /* 12784 * If there is deferred error attention, do not check for any interrupt. 12785 */ 12786 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12787 spin_unlock(&phba->hbalock); 12788 return IRQ_NONE; 12789 } 12790 12791 /* Clear attention sources except link and error attentions */ 12792 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12793 spin_unlock(&phba->hbalock); 12794 return IRQ_HANDLED; 12795 } 12796 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12797 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12798 phba->HCregaddr); 12799 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12800 writel(hc_copy, phba->HCregaddr); 12801 readl(phba->HAregaddr); /* flush */ 12802 spin_unlock(&phba->hbalock); 12803 12804 /* 12805 * Invokes slow-path host attention interrupt handling as appropriate. 12806 */ 12807 12808 /* status of events with mailbox and link attention */ 12809 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12810 12811 /* status of events with ELS ring */ 12812 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12813 status2 >>= (4*LPFC_ELS_RING); 12814 12815 if (status1 || (status2 & HA_RXMASK)) 12816 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12817 else 12818 sp_irq_rc = IRQ_NONE; 12819 12820 /* 12821 * Invoke fast-path host attention interrupt handling as appropriate. 12822 */ 12823 12824 /* status of events with FCP ring */ 12825 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12826 status1 >>= (4*LPFC_FCP_RING); 12827 12828 /* status of events with extra ring */ 12829 if (phba->cfg_multi_ring_support == 2) { 12830 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12831 status2 >>= (4*LPFC_EXTRA_RING); 12832 } else 12833 status2 = 0; 12834 12835 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12836 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12837 else 12838 fp_irq_rc = IRQ_NONE; 12839 12840 /* Return device-level interrupt handling status */ 12841 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12842 } /* lpfc_sli_intr_handler */ 12843 12844 /** 12845 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 12846 * @phba: pointer to lpfc hba data structure. 12847 * 12848 * This routine is invoked by the worker thread to process all the pending 12849 * SLI4 FCP abort XRI events. 
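 *
 * The hbalock is taken only long enough to unlink each event from the
 * work queue; it is dropped before the aborted-XRI handler is invoked for
 * that event.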
12850 **/ 12851 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 12852 { 12853 struct lpfc_cq_event *cq_event; 12854 12855 /* First, declare the fcp xri abort event has been handled */ 12856 spin_lock_irq(&phba->hbalock); 12857 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 12858 spin_unlock_irq(&phba->hbalock); 12859 /* Now, handle all the fcp xri abort events */ 12860 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 12861 /* Get the first event from the head of the event queue */ 12862 spin_lock_irq(&phba->hbalock); 12863 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 12864 cq_event, struct lpfc_cq_event, list); 12865 spin_unlock_irq(&phba->hbalock); 12866 /* Notify aborted XRI for FCP work queue */ 12867 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12868 /* Free the event processed back to the free pool */ 12869 lpfc_sli4_cq_event_release(phba, cq_event); 12870 } 12871 } 12872 12873 /** 12874 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12875 * @phba: pointer to lpfc hba data structure. 12876 * 12877 * This routine is invoked by the worker thread to process all the pending 12878 * SLI4 els abort xri events. 12879 **/ 12880 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12881 { 12882 struct lpfc_cq_event *cq_event; 12883 12884 /* First, declare the els xri abort event has been handled */ 12885 spin_lock_irq(&phba->hbalock); 12886 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12887 spin_unlock_irq(&phba->hbalock); 12888 /* Now, handle all the els xri abort events */ 12889 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12890 /* Get the first event from the head of the event queue */ 12891 spin_lock_irq(&phba->hbalock); 12892 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12893 cq_event, struct lpfc_cq_event, list); 12894 spin_unlock_irq(&phba->hbalock); 12895 /* Notify aborted XRI for ELS work queue */ 12896 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12897 /* Free the event processed back to the free pool */ 12898 lpfc_sli4_cq_event_release(phba, cq_event); 12899 } 12900 } 12901 12902 /** 12903 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12904 * @phba: pointer to lpfc hba data structure 12905 * @pIocbIn: pointer to the rspiocbq 12906 * @pIocbOut: pointer to the cmdiocbq 12907 * @wcqe: pointer to the complete wcqe 12908 * 12909 * This routine transfers the fields of a command iocbq to a response iocbq 12910 * by copying all the IOCB fields from command iocbq and transferring the 12911 * completion status information from the complete wcqe. 
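 *
 * Note that only the fields from the embedded IOCB onward are copied, so
 * the list and bookkeeping members at the head of @pIocbIn are preserved.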
12912 **/ 12913 static void 12914 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12915 struct lpfc_iocbq *pIocbIn, 12916 struct lpfc_iocbq *pIocbOut, 12917 struct lpfc_wcqe_complete *wcqe) 12918 { 12919 int numBdes, i; 12920 unsigned long iflags; 12921 uint32_t status, max_response; 12922 struct lpfc_dmabuf *dmabuf; 12923 struct ulp_bde64 *bpl, bde; 12924 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12925 12926 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12927 sizeof(struct lpfc_iocbq) - offset); 12928 /* Map WCQE parameters into irspiocb parameters */ 12929 status = bf_get(lpfc_wcqe_c_status, wcqe); 12930 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12931 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12932 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12933 pIocbIn->iocb.un.fcpi.fcpi_parm = 12934 pIocbOut->iocb.un.fcpi.fcpi_parm - 12935 wcqe->total_data_placed; 12936 else 12937 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12938 else { 12939 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12940 switch (pIocbOut->iocb.ulpCommand) { 12941 case CMD_ELS_REQUEST64_CR: 12942 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12943 bpl = (struct ulp_bde64 *)dmabuf->virt; 12944 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12945 max_response = bde.tus.f.bdeSize; 12946 break; 12947 case CMD_GEN_REQUEST64_CR: 12948 max_response = 0; 12949 if (!pIocbOut->context3) 12950 break; 12951 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12952 sizeof(struct ulp_bde64); 12953 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12954 bpl = (struct ulp_bde64 *)dmabuf->virt; 12955 for (i = 0; i < numBdes; i++) { 12956 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12957 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12958 max_response += bde.tus.f.bdeSize; 12959 } 12960 break; 12961 default: 12962 max_response = wcqe->total_data_placed; 12963 break; 12964 } 12965 if (max_response < wcqe->total_data_placed) 12966 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12967 else 12968 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12969 wcqe->total_data_placed; 12970 } 12971 12972 /* Convert BG errors for completion status */ 12973 if (status == CQE_STATUS_DI_ERROR) { 12974 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 12975 12976 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 12977 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 12978 else 12979 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 12980 12981 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 12982 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 12983 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12984 BGS_GUARD_ERR_MASK; 12985 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 12986 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12987 BGS_APPTAG_ERR_MASK; 12988 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 12989 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12990 BGS_REFTAG_ERR_MASK; 12991 12992 /* Check to see if there was any good data before the error */ 12993 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 12994 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12995 BGS_HI_WATER_MARK_PRESENT_MASK; 12996 pIocbIn->iocb.unsli3.sli3_bg.bghm = 12997 wcqe->total_data_placed; 12998 } 12999 13000 /* 13001 * Set ALL the error bits to indicate we don't know what 13002 * type of error it is. 
13003 */
13004 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
13005 pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
13006 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
13007 BGS_GUARD_ERR_MASK);
13008 }
13009
13010 /* Pick up HBA exchange busy condition */
13011 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
13012 spin_lock_irqsave(&phba->hbalock, iflags);
13013 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
13014 spin_unlock_irqrestore(&phba->hbalock, iflags);
13015 }
13016 }
13017
13018 /**
13019 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
13020 * @phba: Pointer to HBA context object.
13021 * @irspiocbq: Pointer to the response iocbq carrying the ELS work-queue
13022 *             completion queue entry.
13023 * This routine handles an ELS work-queue completion event and constructs
13024 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
13025 * discovery engine to handle.
13026 *
13027 * Return: Pointer to the response IOCBQ, NULL otherwise.
13028 **/
13029 static struct lpfc_iocbq *
13030 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
13031 struct lpfc_iocbq *irspiocbq)
13032 {
13033 struct lpfc_sli_ring *pring;
13034 struct lpfc_iocbq *cmdiocbq;
13035 struct lpfc_wcqe_complete *wcqe;
13036 unsigned long iflags;
13037
13038 pring = lpfc_phba_elsring(phba);
13039 if (unlikely(!pring))
13040 return NULL;
13041
13042 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
13043 spin_lock_irqsave(&pring->ring_lock, iflags);
13044 pring->stats.iocb_event++;
13045 /* Look up the ELS command IOCB and create pseudo response IOCB */
13046 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13047 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13048 if (unlikely(!cmdiocbq)) {
13049 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13050 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13051 "0386 ELS complete with no corresponding "
13052 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
13053 wcqe->word0, wcqe->total_data_placed,
13054 wcqe->parameter, wcqe->word3);
13055 lpfc_sli_release_iocbq(phba, irspiocbq);
13056 return NULL;
13057 }
13058
13059 /* Put the iocb back on the txcmplq */
13060 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
13061 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13062
13063 /* Fake the irspiocbq and copy necessary response information */
13064 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
13065
13066 return irspiocbq;
13067 }
13068
13069 inline struct lpfc_cq_event *
13070 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
13071 {
13072 struct lpfc_cq_event *cq_event;
13073
13074 /* Allocate a new internal CQ_EVENT entry */
13075 cq_event = lpfc_sli4_cq_event_alloc(phba);
13076 if (!cq_event) {
13077 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13078 "0602 Failed to alloc CQ_EVENT entry\n");
13079 return NULL;
13080 }
13081
13082 /* Move the CQE into the event */
13083 memcpy(&cq_event->cqe, entry, size);
13084 return cq_event;
13085 }
13086
13087 /**
13088 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
13089 * @phba: Pointer to HBA context object.
13090 * @mcqe: Pointer to mailbox completion queue entry.
13091 *
13092 * This routine processes a mailbox completion queue entry with an
13093 * asynchronous event.
13094 *
13095 * Return: true if work posted to worker thread, otherwise false.
13096 **/
13097 static bool
13098 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13099 {
13100 struct lpfc_cq_event *cq_event;
13101 unsigned long iflags;
13102
13103 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13104 "0392 Async Event: word0:x%x, word1:x%x, "
13105 "word2:x%x, word3:x%x\n", mcqe->word0,
13106 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
13107
13108 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
13109 if (!cq_event)
13110 return false;
13111 spin_lock_irqsave(&phba->hbalock, iflags);
13112 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
13113 /* Set the async event flag */
13114 phba->hba_flag |= ASYNC_EVENT;
13115 spin_unlock_irqrestore(&phba->hbalock, iflags);
13116
13117 return true;
13118 }
13119
13120 /**
13121 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
13122 * @phba: Pointer to HBA context object.
13123 * @mcqe: Pointer to mailbox completion queue entry.
13124 *
13125 * This routine processes a mailbox completion queue entry with a mailbox
13126 * completion event.
13127 *
13128 * Return: true if work posted to worker thread, otherwise false.
13129 **/
13130 static bool
13131 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
13132 {
13133 uint32_t mcqe_status;
13134 MAILBOX_t *mbox, *pmbox;
13135 struct lpfc_mqe *mqe;
13136 struct lpfc_vport *vport;
13137 struct lpfc_nodelist *ndlp;
13138 struct lpfc_dmabuf *mp;
13139 unsigned long iflags;
13140 LPFC_MBOXQ_t *pmb;
13141 bool workposted = false;
13142 int rc;
13143
13144 /* If not a mailbox complete MCQE, bail out after checking mailbox consume */
13145 if (!bf_get(lpfc_trailer_completed, mcqe))
13146 goto out_no_mqe_complete;
13147
13148 /* Get the reference to the active mbox command */
13149 spin_lock_irqsave(&phba->hbalock, iflags);
13150 pmb = phba->sli.mbox_active;
13151 if (unlikely(!pmb)) {
13152 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13153 "1832 No pending MBOX command to handle\n");
13154 spin_unlock_irqrestore(&phba->hbalock, iflags);
13155 goto out_no_mqe_complete;
13156 }
13157 spin_unlock_irqrestore(&phba->hbalock, iflags);
13158 mqe = &pmb->u.mqe;
13159 pmbox = (MAILBOX_t *)&pmb->u.mqe;
13160 mbox = phba->mbox;
13161 vport = pmb->vport;
13162
13163 /* Reset heartbeat timer */
13164 phba->last_completion_time = jiffies;
13165 del_timer(&phba->sli.mbox_tmo);
13166
13167 /* Move mbox data to caller's mailbox region, do endian swapping */
13168 if (pmb->mbox_cmpl && mbox)
13169 lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
13170
13171 /*
13172 * For mcqe errors, conditionally move a modified error code to
13173 * the mbox so that the error will not be missed.
13174 */
13175 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
13176 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
13177 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
13178 bf_set(lpfc_mqe_status, mqe,
13179 (LPFC_MBX_ERROR_RANGE | mcqe_status));
13180 }
13181 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13182 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13183 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
13184 "MBOX dflt rpi: status:x%x rpi:x%x",
13185 mcqe_status,
13186 pmbox->un.varWords[0], 0);
13187 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
13188 mp = (struct lpfc_dmabuf *)(pmb->context1);
13189 ndlp = (struct lpfc_nodelist *)pmb->context2;
13190 /* Reg_LOGIN of dflt RPI was successful. Now let's get
13191 * rid of the RPI using the same mbox buffer.
13192 */ 13193 lpfc_unreg_login(phba, vport->vpi, 13194 pmbox->un.varWords[0], pmb); 13195 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 13196 pmb->context1 = mp; 13197 pmb->context2 = ndlp; 13198 pmb->vport = vport; 13199 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 13200 if (rc != MBX_BUSY) 13201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 13202 LOG_SLI, "0385 rc should " 13203 "have been MBX_BUSY\n"); 13204 if (rc != MBX_NOT_FINISHED) 13205 goto send_current_mbox; 13206 } 13207 } 13208 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 13209 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 13210 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 13211 13212 /* There is mailbox completion work to do */ 13213 spin_lock_irqsave(&phba->hbalock, iflags); 13214 __lpfc_mbox_cmpl_put(phba, pmb); 13215 phba->work_ha |= HA_MBATT; 13216 spin_unlock_irqrestore(&phba->hbalock, iflags); 13217 workposted = true; 13218 13219 send_current_mbox: 13220 spin_lock_irqsave(&phba->hbalock, iflags); 13221 /* Release the mailbox command posting token */ 13222 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 13223 /* Setting active mailbox pointer need to be in sync to flag clear */ 13224 phba->sli.mbox_active = NULL; 13225 spin_unlock_irqrestore(&phba->hbalock, iflags); 13226 /* Wake up worker thread to post the next pending mailbox command */ 13227 lpfc_worker_wake_up(phba); 13228 out_no_mqe_complete: 13229 if (bf_get(lpfc_trailer_consumed, mcqe)) 13230 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 13231 return workposted; 13232 } 13233 13234 /** 13235 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 13236 * @phba: Pointer to HBA context object. 13237 * @cqe: Pointer to mailbox completion queue entry. 13238 * 13239 * This routine process a mailbox completion queue entry, it invokes the 13240 * proper mailbox complete handling or asynchrous event handling routine 13241 * according to the MCQE's async bit. 13242 * 13243 * Return: true if work posted to worker thread, otherwise false. 13244 **/ 13245 static bool 13246 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 13247 { 13248 struct lpfc_mcqe mcqe; 13249 bool workposted; 13250 13251 /* Copy the mailbox MCQE and convert endian order as needed */ 13252 lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 13253 13254 /* Invoke the proper event handling routine */ 13255 if (!bf_get(lpfc_trailer_async, &mcqe)) 13256 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 13257 else 13258 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 13259 return workposted; 13260 } 13261 13262 /** 13263 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 13264 * @phba: Pointer to HBA context object. 13265 * @cq: Pointer to associated CQ 13266 * @wcqe: Pointer to work-queue completion queue entry. 13267 * 13268 * This routine handles an ELS work-queue completion event. 13269 * 13270 * Return: true if work posted to worker thread, otherwise false. 
13271 **/ 13272 static bool 13273 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13274 struct lpfc_wcqe_complete *wcqe) 13275 { 13276 struct lpfc_iocbq *irspiocbq; 13277 unsigned long iflags; 13278 struct lpfc_sli_ring *pring = cq->pring; 13279 int txq_cnt = 0; 13280 int txcmplq_cnt = 0; 13281 int fcp_txcmplq_cnt = 0; 13282 13283 /* Check for response status */ 13284 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13285 /* Log the error status */ 13286 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 13287 "0357 ELS CQE error: status=x%x: " 13288 "CQE: %08x %08x %08x %08x\n", 13289 bf_get(lpfc_wcqe_c_status, wcqe), 13290 wcqe->word0, wcqe->total_data_placed, 13291 wcqe->parameter, wcqe->word3); 13292 } 13293 13294 /* Get an irspiocbq for later ELS response processing use */ 13295 irspiocbq = lpfc_sli_get_iocbq(phba); 13296 if (!irspiocbq) { 13297 if (!list_empty(&pring->txq)) 13298 txq_cnt++; 13299 if (!list_empty(&pring->txcmplq)) 13300 txcmplq_cnt++; 13301 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13302 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 13303 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 13304 txq_cnt, phba->iocb_cnt, 13305 fcp_txcmplq_cnt, 13306 txcmplq_cnt); 13307 return false; 13308 } 13309 13310 /* Save off the slow-path queue event for work thread to process */ 13311 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 13312 spin_lock_irqsave(&phba->hbalock, iflags); 13313 list_add_tail(&irspiocbq->cq_event.list, 13314 &phba->sli4_hba.sp_queue_event); 13315 phba->hba_flag |= HBA_SP_QUEUE_EVT; 13316 spin_unlock_irqrestore(&phba->hbalock, iflags); 13317 13318 return true; 13319 } 13320 13321 /** 13322 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 13323 * @phba: Pointer to HBA context object. 13324 * @wcqe: Pointer to work-queue completion queue entry. 13325 * 13326 * This routine handles slow-path WQ entry consumed event by invoking the 13327 * proper WQ release routine to the slow-path WQ. 13328 **/ 13329 static void 13330 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 13331 struct lpfc_wcqe_release *wcqe) 13332 { 13333 /* sanity check on queue memory */ 13334 if (unlikely(!phba->sli4_hba.els_wq)) 13335 return; 13336 /* Check for the slow-path ELS work queue */ 13337 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 13338 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 13339 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13340 else 13341 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13342 "2579 Slow-path wqe consume event carries " 13343 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 13344 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 13345 phba->sli4_hba.els_wq->queue_id); 13346 } 13347 13348 /** 13349 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 13350 * @phba: Pointer to HBA context object. 13351 * @cq: Pointer to a WQ completion queue. 13352 * @wcqe: Pointer to work-queue completion queue entry. 13353 * 13354 * This routine handles an XRI abort event. 13355 * 13356 * Return: true if work posted to worker thread, otherwise false. 
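 *
 * FCP and ELS/NVME-LS aborts are queued as events for the worker thread;
 * NVME aborts are handled inline and no work is posted.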
13357 **/ 13358 static bool 13359 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 13360 struct lpfc_queue *cq, 13361 struct sli4_wcqe_xri_aborted *wcqe) 13362 { 13363 bool workposted = false; 13364 struct lpfc_cq_event *cq_event; 13365 unsigned long iflags; 13366 13367 switch (cq->subtype) { 13368 case LPFC_FCP: 13369 cq_event = lpfc_cq_event_setup( 13370 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13371 if (!cq_event) 13372 return false; 13373 spin_lock_irqsave(&phba->hbalock, iflags); 13374 list_add_tail(&cq_event->list, 13375 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 13376 /* Set the fcp xri abort event flag */ 13377 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 13378 spin_unlock_irqrestore(&phba->hbalock, iflags); 13379 workposted = true; 13380 break; 13381 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 13382 case LPFC_ELS: 13383 cq_event = lpfc_cq_event_setup( 13384 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 13385 if (!cq_event) 13386 return false; 13387 spin_lock_irqsave(&phba->hbalock, iflags); 13388 list_add_tail(&cq_event->list, 13389 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 13390 /* Set the els xri abort event flag */ 13391 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 13392 spin_unlock_irqrestore(&phba->hbalock, iflags); 13393 workposted = true; 13394 break; 13395 case LPFC_NVME: 13396 /* Notify aborted XRI for NVME work queue */ 13397 if (phba->nvmet_support) 13398 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 13399 else 13400 lpfc_sli4_nvme_xri_aborted(phba, wcqe); 13401 13402 workposted = false; 13403 break; 13404 default: 13405 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13406 "0603 Invalid CQ subtype %d: " 13407 "%08x %08x %08x %08x\n", 13408 cq->subtype, wcqe->word0, wcqe->parameter, 13409 wcqe->word2, wcqe->word3); 13410 workposted = false; 13411 break; 13412 } 13413 return workposted; 13414 } 13415 13416 /** 13417 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 13418 * @phba: Pointer to HBA context object. 13419 * @rcqe: Pointer to receive-queue completion queue entry. 13420 * 13421 * This routine process a receive-queue completion queue entry. 13422 * 13423 * Return: true if work posted to worker thread, otherwise false. 
13424 **/
13425 static bool
13426 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
13427 {
13428 bool workposted = false;
13429 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
13430 struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
13431 struct lpfc_nvmet_tgtport *tgtp;
13432 struct hbq_dmabuf *dma_buf;
13433 uint32_t status, rq_id;
13434 unsigned long iflags;
13435
13436 /* sanity check on queue memory */
13437 if (unlikely(!hrq) || unlikely(!drq))
13438 return workposted;
13439
13440 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13441 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13442 else
13443 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13444 if (rq_id != hrq->queue_id)
13445 goto out;
13446
13447 status = bf_get(lpfc_rcqe_status, rcqe);
13448 switch (status) {
13449 case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13450 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13451 "2537 Receive Frame Truncated!!\n");
 /* fall through */
13452 case FC_STATUS_RQ_SUCCESS:
13453 spin_lock_irqsave(&phba->hbalock, iflags);
13454 lpfc_sli4_rq_release(hrq, drq);
13455 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
13456 if (!dma_buf) {
13457 hrq->RQ_no_buf_found++;
13458 spin_unlock_irqrestore(&phba->hbalock, iflags);
13459 goto out;
13460 }
13461 hrq->RQ_rcv_buf++;
13462 hrq->RQ_buf_posted--;
13463 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
13464
13465 /* save off the frame for the worker thread to process */
13466 list_add_tail(&dma_buf->cq_event.list,
13467 &phba->sli4_hba.sp_queue_event);
13468 /* Frame received */
13469 phba->hba_flag |= HBA_SP_QUEUE_EVT;
13470 spin_unlock_irqrestore(&phba->hbalock, iflags);
13471 workposted = true;
13472 break;
13473 case FC_STATUS_INSUFF_BUF_FRM_DISC:
13474 if (phba->nvmet_support) {
13475 tgtp = phba->targetport->private;
13476 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13477 "6402 RQE Error x%x, posted %d err_cnt "
13478 "%d: %x %x %x\n",
13479 status, hrq->RQ_buf_posted,
13480 hrq->RQ_no_posted_buf,
13481 atomic_read(&tgtp->rcv_fcp_cmd_in),
13482 atomic_read(&tgtp->rcv_fcp_cmd_out),
13483 atomic_read(&tgtp->xmt_fcp_release));
13484 }
13485 /* fallthrough */
13486
13487 case FC_STATUS_INSUFF_BUF_NEED_BUF:
13488 hrq->RQ_no_posted_buf++;
13489 /* Post more buffers if possible */
13490 spin_lock_irqsave(&phba->hbalock, iflags);
13491 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
13492 spin_unlock_irqrestore(&phba->hbalock, iflags);
13493 workposted = true;
13494 break;
13495 }
13496 out:
13497 return workposted;
13498 }
13499
13500 /**
13501 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
13502 * @phba: Pointer to HBA context object.
13503 * @cq: Pointer to the completion queue.
13504 * @cqe: Pointer to a completion queue entry.
13505 *
13506 * This routine processes a slow-path work-queue or receive-queue completion
13507 * queue entry.
13508 *
13509 * Return: true if work posted to worker thread, otherwise false.
13510 **/ 13511 static bool 13512 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13513 struct lpfc_cqe *cqe) 13514 { 13515 struct lpfc_cqe cqevt; 13516 bool workposted = false; 13517 13518 /* Copy the work queue CQE and convert endian order if needed */ 13519 lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 13520 13521 /* Check and process for different type of WCQE and dispatch */ 13522 switch (bf_get(lpfc_cqe_code, &cqevt)) { 13523 case CQE_CODE_COMPL_WQE: 13524 /* Process the WQ/RQ complete event */ 13525 phba->last_completion_time = jiffies; 13526 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 13527 (struct lpfc_wcqe_complete *)&cqevt); 13528 break; 13529 case CQE_CODE_RELEASE_WQE: 13530 /* Process the WQ release event */ 13531 lpfc_sli4_sp_handle_rel_wcqe(phba, 13532 (struct lpfc_wcqe_release *)&cqevt); 13533 break; 13534 case CQE_CODE_XRI_ABORTED: 13535 /* Process the WQ XRI abort event */ 13536 phba->last_completion_time = jiffies; 13537 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13538 (struct sli4_wcqe_xri_aborted *)&cqevt); 13539 break; 13540 case CQE_CODE_RECEIVE: 13541 case CQE_CODE_RECEIVE_V1: 13542 /* Process the RQ event */ 13543 phba->last_completion_time = jiffies; 13544 workposted = lpfc_sli4_sp_handle_rcqe(phba, 13545 (struct lpfc_rcqe *)&cqevt); 13546 break; 13547 default: 13548 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13549 "0388 Not a valid WCQE code: x%x\n", 13550 bf_get(lpfc_cqe_code, &cqevt)); 13551 break; 13552 } 13553 return workposted; 13554 } 13555 13556 /** 13557 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13558 * @phba: Pointer to HBA context object. 13559 * @eqe: Pointer to fast-path event queue entry. 13560 * 13561 * This routine process a event queue entry from the slow-path event queue. 13562 * It will check the MajorCode and MinorCode to determine this is for a 13563 * completion event on a completion queue, if not, an error shall be logged 13564 * and just return. Otherwise, it will get to the corresponding completion 13565 * queue and process all the entries on that completion queue, rearm the 13566 * completion queue, and then return. 13567 * 13568 **/ 13569 static void 13570 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13571 struct lpfc_queue *speq) 13572 { 13573 struct lpfc_queue *cq = NULL, *childq; 13574 uint16_t cqid; 13575 13576 /* Get the reference to the corresponding CQ */ 13577 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13578 13579 list_for_each_entry(childq, &speq->child_list, list) { 13580 if (childq->queue_id == cqid) { 13581 cq = childq; 13582 break; 13583 } 13584 } 13585 if (unlikely(!cq)) { 13586 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13587 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13588 "0365 Slow-path CQ identifier " 13589 "(%d) does not exist\n", cqid); 13590 return; 13591 } 13592 13593 /* Save EQ associated with this CQ */ 13594 cq->assoc_qp = speq; 13595 13596 if (!queue_work(phba->wq, &cq->spwork)) 13597 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13598 "0390 Cannot schedule soft IRQ " 13599 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13600 cqid, cq->queue_id, smp_processor_id()); 13601 } 13602 13603 /** 13604 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13605 * @phba: Pointer to HBA context object. 13606 * 13607 * This routine process a event queue entry from the slow-path event queue. 
13608 * It will check the MajorCode and MinorCode to determine whether this is for
13609 * a completion event on a completion queue; if not, an error shall be logged
13610 * and the routine will just return. Otherwise, it will get to the corresponding
13611 * completion queue and process all the entries on that completion queue, rearm
13612 * the completion queue, and then return.
13613 *
13614 **/
13615 static void
13616 lpfc_sli4_sp_process_cq(struct work_struct *work)
13617 {
13618 struct lpfc_queue *cq =
13619 container_of(work, struct lpfc_queue, spwork);
13620 struct lpfc_hba *phba = cq->phba;
13621 struct lpfc_cqe *cqe;
13622 bool workposted = false;
13623 int ccount = 0;
13624
13625 /* Process all the entries to the CQ */
13626 switch (cq->type) {
13627 case LPFC_MCQ:
13628 while ((cqe = lpfc_sli4_cq_get(cq))) {
13629 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13630 if (!(++ccount % cq->entry_repost))
13631 break;
13632 cq->CQ_mbox++;
13633 }
13634 break;
13635 case LPFC_WCQ:
13636 while ((cqe = lpfc_sli4_cq_get(cq))) {
13637 if (cq->subtype == LPFC_FCP ||
13638 cq->subtype == LPFC_NVME) {
13639 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13640 if (phba->ktime_on)
13641 cq->isr_timestamp = ktime_get_ns();
13642 else
13643 cq->isr_timestamp = 0;
13644 #endif
13645 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13646 cqe);
13647 } else {
13648 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13649 cqe);
13650 }
13651 if (!(++ccount % cq->entry_repost))
13652 break;
13653 }
13654
13655 /* Track the max number of CQEs processed in 1 EQ */
13656 if (ccount > cq->CQ_max_cqe)
13657 cq->CQ_max_cqe = ccount;
13658 break;
13659 default:
13660 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13661 "0370 Invalid completion queue type (%d)\n",
13662 cq->type);
13663 return;
13664 }
13665
13666 /* Catch the no cq entry condition, log an error */
13667 if (unlikely(ccount == 0))
13668 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13669 "0371 No entry from the CQ: identifier "
13670 "(x%x), type (%d)\n", cq->queue_id, cq->type);
13671
13672 /* In any case, flush and re-arm the CQ */
13673 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM);
13674
13675 /* wake up worker thread if there is work to be done */
13676 if (workposted)
13677 lpfc_worker_wake_up(phba);
13678 }
13679
13680 /**
13681 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13682 * @phba: Pointer to HBA context object.
13683 * @cq: Pointer to associated CQ.
13684 * @wcqe: Pointer to work-queue completion queue entry.
13685 *
13686 * This routine processes a fast-path work queue completion entry from a
13687 * fast-path event queue for FCP command response completion.
13688 **/
13689 static void
13690 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13691 struct lpfc_wcqe_complete *wcqe)
13692 {
13693 struct lpfc_sli_ring *pring = cq->pring;
13694 struct lpfc_iocbq *cmdiocbq;
13695 struct lpfc_iocbq irspiocbq;
13696 unsigned long iflags;
13697
13698 /* Check for response status */
13699 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13700 /* If resource errors reported from HBA, reduce queue
13701 * depth of the SCSI device.
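		 * A completion of IOSTAT_LOCAL_REJECT with IOERR_NO_RESOURCES
		 * is treated as resource exhaustion and triggers the
		 * ramp-down below.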
13702 */
13703 if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13704 IOSTAT_LOCAL_REJECT)) &&
13705 ((wcqe->parameter & IOERR_PARAM_MASK) ==
13706 IOERR_NO_RESOURCES))
13707 phba->lpfc_rampdown_queue_depth(phba);
13708
13709 /* Log the error status */
13710 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13711 "0373 FCP CQE error: status=x%x: "
13712 "CQE: %08x %08x %08x %08x\n",
13713 bf_get(lpfc_wcqe_c_status, wcqe),
13714 wcqe->word0, wcqe->total_data_placed,
13715 wcqe->parameter, wcqe->word3);
13716 }
13717
13718 /* Look up the FCP command IOCB and create pseudo response IOCB */
13719 spin_lock_irqsave(&pring->ring_lock, iflags);
13720 pring->stats.iocb_event++;
13721 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13722 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13723 spin_unlock_irqrestore(&pring->ring_lock, iflags);
13724 if (unlikely(!cmdiocbq)) {
13725 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13726 "0374 FCP complete with no corresponding "
13727 "cmdiocb: iotag (%d)\n",
13728 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13729 return;
13730 }
13731 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13732 cmdiocbq->isr_timestamp = cq->isr_timestamp;
13733 #endif
13734 if (cmdiocbq->iocb_cmpl == NULL) {
13735 if (cmdiocbq->wqe_cmpl) {
13736 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13737 spin_lock_irqsave(&phba->hbalock, iflags);
13738 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13739 spin_unlock_irqrestore(&phba->hbalock, iflags);
13740 }
13741
13742 /* Pass the cmd_iocb and the wcqe to the upper layer */
13743 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13744 return;
13745 }
13746 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13747 "0375 FCP cmdiocb not callback function "
13748 "iotag: (%d)\n",
13749 bf_get(lpfc_wcqe_c_request_tag, wcqe));
13750 return;
13751 }
13752
13753 /* Fake the irspiocb and copy necessary response information */
13754 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13755
13756 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13757 spin_lock_irqsave(&phba->hbalock, iflags);
13758 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13759 spin_unlock_irqrestore(&phba->hbalock, iflags);
13760 }
13761
13762 /* Pass the cmd_iocb and the rsp state to the upper layer */
13763 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13764 }
13765
13766 /**
13767 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13768 * @phba: Pointer to HBA context object.
13769 * @cq: Pointer to completion queue.
13770 * @wcqe: Pointer to work-queue completion queue entry.
13771 *
13772 * This routine handles a fast-path WQ entry consumed event by invoking the
13773 * proper WQ release routine for the fast-path WQ.
13774 **/ 13775 static void 13776 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13777 struct lpfc_wcqe_release *wcqe) 13778 { 13779 struct lpfc_queue *childwq; 13780 bool wqid_matched = false; 13781 uint16_t hba_wqid; 13782 13783 /* Check for fast-path FCP work queue release */ 13784 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13785 list_for_each_entry(childwq, &cq->child_list, list) { 13786 if (childwq->queue_id == hba_wqid) { 13787 lpfc_sli4_wq_release(childwq, 13788 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13789 if (childwq->q_flag & HBA_NVMET_WQFULL) 13790 lpfc_nvmet_wqfull_process(phba, childwq); 13791 wqid_matched = true; 13792 break; 13793 } 13794 } 13795 /* Report warning log message if no match found */ 13796 if (wqid_matched != true) 13797 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13798 "2580 Fast-path wqe consume event carries " 13799 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13800 } 13801 13802 /** 13803 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13804 * @phba: Pointer to HBA context object. 13805 * @rcqe: Pointer to receive-queue completion queue entry. 13806 * 13807 * This routine process a receive-queue completion queue entry. 13808 * 13809 * Return: true if work posted to worker thread, otherwise false. 13810 **/ 13811 static bool 13812 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13813 struct lpfc_rcqe *rcqe) 13814 { 13815 bool workposted = false; 13816 struct lpfc_queue *hrq; 13817 struct lpfc_queue *drq; 13818 struct rqb_dmabuf *dma_buf; 13819 struct fc_frame_header *fc_hdr; 13820 struct lpfc_nvmet_tgtport *tgtp; 13821 uint32_t status, rq_id; 13822 unsigned long iflags; 13823 uint32_t fctl, idx; 13824 13825 if ((phba->nvmet_support == 0) || 13826 (phba->sli4_hba.nvmet_cqset == NULL)) 13827 return workposted; 13828 13829 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13830 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13831 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13832 13833 /* sanity check on queue memory */ 13834 if (unlikely(!hrq) || unlikely(!drq)) 13835 return workposted; 13836 13837 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13838 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13839 else 13840 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13841 13842 if ((phba->nvmet_support == 0) || 13843 (rq_id != hrq->queue_id)) 13844 return workposted; 13845 13846 status = bf_get(lpfc_rcqe_status, rcqe); 13847 switch (status) { 13848 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13849 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13850 "6126 Receive Frame Truncated!!\n"); 13851 /* Drop thru */ 13852 case FC_STATUS_RQ_SUCCESS: 13853 spin_lock_irqsave(&phba->hbalock, iflags); 13854 lpfc_sli4_rq_release(hrq, drq); 13855 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13856 if (!dma_buf) { 13857 hrq->RQ_no_buf_found++; 13858 spin_unlock_irqrestore(&phba->hbalock, iflags); 13859 goto out; 13860 } 13861 spin_unlock_irqrestore(&phba->hbalock, iflags); 13862 hrq->RQ_rcv_buf++; 13863 hrq->RQ_buf_posted--; 13864 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13865 13866 /* Just some basic sanity checks on FCP Command frame */ 13867 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13868 fc_hdr->fh_f_ctl[1] << 8 | 13869 fc_hdr->fh_f_ctl[2]); 13870 if (((fctl & 13871 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13872 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13873 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13874 goto drop; 13875 13876 if (fc_hdr->fh_type == FC_TYPE_FCP) 
{ 13877 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13878 lpfc_nvmet_unsol_fcp_event( 13879 phba, idx, dma_buf, 13880 cq->isr_timestamp); 13881 return false; 13882 } 13883 drop: 13884 lpfc_in_buf_free(phba, &dma_buf->dbuf); 13885 break; 13886 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13887 if (phba->nvmet_support) { 13888 tgtp = phba->targetport->private; 13889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13890 "6401 RQE Error x%x, posted %d err_cnt " 13891 "%d: %x %x %x\n", 13892 status, hrq->RQ_buf_posted, 13893 hrq->RQ_no_posted_buf, 13894 atomic_read(&tgtp->rcv_fcp_cmd_in), 13895 atomic_read(&tgtp->rcv_fcp_cmd_out), 13896 atomic_read(&tgtp->xmt_fcp_release)); 13897 } 13898 /* fallthrough */ 13899 13900 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13901 hrq->RQ_no_posted_buf++; 13902 /* Post more buffers if possible */ 13903 break; 13904 } 13905 out: 13906 return workposted; 13907 } 13908 13909 /** 13910 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13911 * @cq: Pointer to the completion queue. 13912 * @eqe: Pointer to fast-path completion queue entry. 13913 * 13914 * This routine process a fast-path work queue completion entry from fast-path 13915 * event queue for FCP command response completion. 13916 **/ 13917 static int 13918 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13919 struct lpfc_cqe *cqe) 13920 { 13921 struct lpfc_wcqe_release wcqe; 13922 bool workposted = false; 13923 13924 /* Copy the work queue CQE and convert endian order if needed */ 13925 lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13926 13927 /* Check and process for different type of WCQE and dispatch */ 13928 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13929 case CQE_CODE_COMPL_WQE: 13930 case CQE_CODE_NVME_ERSP: 13931 cq->CQ_wq++; 13932 /* Process the WQ complete event */ 13933 phba->last_completion_time = jiffies; 13934 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 13935 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13936 (struct lpfc_wcqe_complete *)&wcqe); 13937 if (cq->subtype == LPFC_NVME_LS) 13938 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13939 (struct lpfc_wcqe_complete *)&wcqe); 13940 break; 13941 case CQE_CODE_RELEASE_WQE: 13942 cq->CQ_release_wqe++; 13943 /* Process the WQ release event */ 13944 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 13945 (struct lpfc_wcqe_release *)&wcqe); 13946 break; 13947 case CQE_CODE_XRI_ABORTED: 13948 cq->CQ_xri_aborted++; 13949 /* Process the WQ XRI abort event */ 13950 phba->last_completion_time = jiffies; 13951 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13952 (struct sli4_wcqe_xri_aborted *)&wcqe); 13953 break; 13954 case CQE_CODE_RECEIVE_V1: 13955 case CQE_CODE_RECEIVE: 13956 phba->last_completion_time = jiffies; 13957 if (cq->subtype == LPFC_NVMET) { 13958 workposted = lpfc_sli4_nvmet_handle_rcqe( 13959 phba, cq, (struct lpfc_rcqe *)&wcqe); 13960 } 13961 break; 13962 default: 13963 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13964 "0144 Not a valid CQE code: x%x\n", 13965 bf_get(lpfc_wcqe_c_code, &wcqe)); 13966 break; 13967 } 13968 return workposted; 13969 } 13970 13971 /** 13972 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 13973 * @phba: Pointer to HBA context object. 13974 * @eqe: Pointer to fast-path event queue entry. 13975 * 13976 * This routine process a event queue entry from the fast-path event queue. 
13977 * It will check the MajorCode and MinorCode to determine this is for a 13978 * completion event on a completion queue, if not, an error shall be logged 13979 * and just return. Otherwise, it will get to the corresponding completion 13980 * queue and process all the entries on the completion queue, rearm the 13981 * completion queue, and then return. 13982 **/ 13983 static void 13984 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13985 uint32_t qidx) 13986 { 13987 struct lpfc_queue *cq = NULL; 13988 uint16_t cqid, id; 13989 13990 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13991 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13992 "0366 Not a valid completion " 13993 "event: majorcode=x%x, minorcode=x%x\n", 13994 bf_get_le32(lpfc_eqe_major_code, eqe), 13995 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13996 return; 13997 } 13998 13999 /* Get the reference to the corresponding CQ */ 14000 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14001 14002 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 14003 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 14004 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 14005 /* Process NVMET unsol rcv */ 14006 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 14007 goto process_cq; 14008 } 14009 } 14010 14011 if (phba->sli4_hba.nvme_cq_map && 14012 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { 14013 /* Process NVME / NVMET command completion */ 14014 cq = phba->sli4_hba.nvme_cq[qidx]; 14015 goto process_cq; 14016 } 14017 14018 if (phba->sli4_hba.fcp_cq_map && 14019 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { 14020 /* Process FCP command completion */ 14021 cq = phba->sli4_hba.fcp_cq[qidx]; 14022 goto process_cq; 14023 } 14024 14025 if (phba->sli4_hba.nvmels_cq && 14026 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 14027 /* Process NVME unsol rcv */ 14028 cq = phba->sli4_hba.nvmels_cq; 14029 } 14030 14031 /* Otherwise this is a Slow path event */ 14032 if (cq == NULL) { 14033 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); 14034 return; 14035 } 14036 14037 process_cq: 14038 if (unlikely(cqid != cq->queue_id)) { 14039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14040 "0368 Miss-matched fast-path completion " 14041 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 14042 cqid, cq->queue_id); 14043 return; 14044 } 14045 14046 /* Save EQ associated with this CQ */ 14047 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; 14048 14049 if (!queue_work(phba->wq, &cq->irqwork)) 14050 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14051 "0363 Cannot schedule soft IRQ " 14052 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14053 cqid, cq->queue_id, smp_processor_id()); 14054 } 14055 14056 /** 14057 * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 14058 * @phba: Pointer to HBA context object. 14059 * @eqe: Pointer to fast-path event queue entry. 14060 * 14061 * This routine process a event queue entry from the fast-path event queue. 14062 * It will check the MajorCode and MinorCode to determine this is for a 14063 * completion event on a completion queue, if not, an error shall be logged 14064 * and just return. Otherwise, it will get to the corresponding completion 14065 * queue and process all the entries on the completion queue, rearm the 14066 * completion queue, and then return. 
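 *
 * Editor's illustration (a sketch, not driver code): this routine runs from
 * the per-HBA workqueue. The fast-path EQ handler defers CQ processing to it
 * with queue_work(), and the work handler recovers the queue from the
 * embedded work_struct, e.g.:
 *
 *	queue_work(phba->wq, &cq->irqwork);			(EQ handler side)
 *	cq = container_of(work, struct lpfc_queue, irqwork);	(work handler side)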
14067 **/ 14068 static void 14069 lpfc_sli4_hba_process_cq(struct work_struct *work) 14070 { 14071 struct lpfc_queue *cq = 14072 container_of(work, struct lpfc_queue, irqwork); 14073 struct lpfc_hba *phba = cq->phba; 14074 struct lpfc_cqe *cqe; 14075 bool workposted = false; 14076 int ccount = 0; 14077 14078 /* Process all the entries to the CQ */ 14079 while ((cqe = lpfc_sli4_cq_get(cq))) { 14080 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 14081 if (phba->ktime_on) 14082 cq->isr_timestamp = ktime_get_ns(); 14083 else 14084 cq->isr_timestamp = 0; 14085 #endif 14086 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe); 14087 if (!(++ccount % cq->entry_repost)) 14088 break; 14089 } 14090 14091 /* Track the max number of CQEs processed in 1 EQ */ 14092 if (ccount > cq->CQ_max_cqe) 14093 cq->CQ_max_cqe = ccount; 14094 cq->assoc_qp->EQ_cqe_cnt += ccount; 14095 14096 /* Catch the no cq entry condition */ 14097 if (unlikely(ccount == 0)) 14098 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14099 "0369 No entry from fast-path completion " 14100 "queue fcpcqid=%d\n", cq->queue_id); 14101 14102 /* In any case, flush and re-arm the CQ */ 14103 phba->sli4_hba.sli4_cq_release(cq, LPFC_QUEUE_REARM); 14104 14105 /* wake up worker thread if there is work to be done */ 14106 if (workposted) 14107 lpfc_worker_wake_up(phba); 14108 } 14109 14110 static void 14111 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 14112 { 14113 struct lpfc_eqe *eqe; 14114 14115 /* walk all the EQ entries and drop on the floor */ 14116 while ((eqe = lpfc_sli4_eq_get(eq))) 14117 ; 14118 14119 /* Clear and re-arm the EQ */ 14120 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); 14121 } 14122 14123 14124 /** 14125 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue 14126 * entry 14127 * @phba: Pointer to HBA context object. 14128 * @eqe: Pointer to fast-path event queue entry. 14129 * 14130 * This routine processes an event queue entry from the Flash Optimized Fabric 14131 * event queue. It will check the MajorCode and MinorCode to determine whether 14132 * this is a completion event for a completion queue; if not, an error is 14133 * logged and the routine returns. Otherwise, it will get to the corresponding 14134 * completion queue and process all the entries on the completion queue, rearm 14135 * the completion queue, and then return.
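 *
 * Editor's note (illustrative only): a usable completion EQE has a zero major
 * code and carries the completion queue id in its resource id field, which
 * must match the OAS CQ, mirroring the checks done below:
 *
 *	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0)
 *		return;
 *	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
 *	if (cqid != phba->sli4_hba.oas_cq->queue_id)
 *		return;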
14136 **/ 14137 static void 14138 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 14139 { 14140 struct lpfc_queue *cq; 14141 uint16_t cqid; 14142 14143 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 14144 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14145 "9147 Not a valid completion " 14146 "event: majorcode=x%x, minorcode=x%x\n", 14147 bf_get_le32(lpfc_eqe_major_code, eqe), 14148 bf_get_le32(lpfc_eqe_minor_code, eqe)); 14149 return; 14150 } 14151 14152 /* Get the reference to the corresponding CQ */ 14153 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 14154 14155 /* Next check for OAS */ 14156 cq = phba->sli4_hba.oas_cq; 14157 if (unlikely(!cq)) { 14158 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 14159 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14160 "9148 OAS completion queue " 14161 "does not exist\n"); 14162 return; 14163 } 14164 14165 if (unlikely(cqid != cq->queue_id)) { 14166 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14167 "9149 Miss-matched fast-path compl " 14168 "queue id: eqcqid=%d, fcpcqid=%d\n", 14169 cqid, cq->queue_id); 14170 return; 14171 } 14172 14173 /* Save EQ associated with this CQ */ 14174 cq->assoc_qp = phba->sli4_hba.fof_eq; 14175 14176 /* CQ work will be processed on CPU affinitized to this IRQ */ 14177 if (!queue_work(phba->wq, &cq->irqwork)) 14178 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14179 "0367 Cannot schedule soft IRQ " 14180 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 14181 cqid, cq->queue_id, smp_processor_id()); 14182 } 14183 14184 /** 14185 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device 14186 * @irq: Interrupt number. 14187 * @dev_id: The device context pointer. 14188 * 14189 * This function is directly called from the PCI layer as an interrupt 14190 * service routine when device with SLI-4 interface spec is enabled with 14191 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric 14192 * IOCB ring event in the HBA. However, when the device is enabled with either 14193 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 14194 * device-level interrupt handler. When the PCI slot is in error recovery 14195 * or the HBA is undergoing initialization, the interrupt handler will not 14196 * process the interrupt. The Flash Optimized Fabric ring event are handled in 14197 * the intrrupt context. This function is called without any lock held. 14198 * It gets the hbalock to access and update SLI data structures. Note that, 14199 * the EQ to CQ are one-to-one map such that the EQ index is 14200 * equal to that of CQ index. 14201 * 14202 * This function returns IRQ_HANDLED when interrupt is handled else it 14203 * returns IRQ_NONE. 
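 *
 * Editor's sketch (not driver code): like the other fast-path interrupt
 * handlers, this ISR drains the EQ in batches bounded by entry_repost before
 * rearming it, roughly:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq))) {
 *		lpfc_sli4_fof_handle_eqe(phba, eqe);
 *		if (!(++ecount % eq->entry_repost))
 *			break;
 *		eq->EQ_processed++;
 *	}
 *	phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM);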
14204 **/ 14205 irqreturn_t 14206 lpfc_sli4_fof_intr_handler(int irq, void *dev_id) 14207 { 14208 struct lpfc_hba *phba; 14209 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14210 struct lpfc_queue *eq; 14211 struct lpfc_eqe *eqe; 14212 unsigned long iflag; 14213 int ecount = 0; 14214 14215 /* Get the driver's phba structure from the dev_id */ 14216 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14217 phba = hba_eq_hdl->phba; 14218 14219 if (unlikely(!phba)) 14220 return IRQ_NONE; 14221 14222 /* Get to the EQ struct associated with this vector */ 14223 eq = phba->sli4_hba.fof_eq; 14224 if (unlikely(!eq)) 14225 return IRQ_NONE; 14226 14227 /* Check device state for handling interrupt */ 14228 if (unlikely(lpfc_intr_state_check(phba))) { 14229 /* Check again for link_state with lock held */ 14230 spin_lock_irqsave(&phba->hbalock, iflag); 14231 if (phba->link_state < LPFC_LINK_DOWN) 14232 /* Flush, clear interrupt, and rearm the EQ */ 14233 lpfc_sli4_eq_flush(phba, eq); 14234 spin_unlock_irqrestore(&phba->hbalock, iflag); 14235 return IRQ_NONE; 14236 } 14237 14238 /* 14239 * Process all the event on FCP fast-path EQ 14240 */ 14241 while ((eqe = lpfc_sli4_eq_get(eq))) { 14242 lpfc_sli4_fof_handle_eqe(phba, eqe); 14243 if (!(++ecount % eq->entry_repost)) 14244 break; 14245 eq->EQ_processed++; 14246 } 14247 14248 /* Track the max number of EQEs processed in 1 intr */ 14249 if (ecount > eq->EQ_max_eqe) 14250 eq->EQ_max_eqe = ecount; 14251 14252 14253 if (unlikely(ecount == 0)) { 14254 eq->EQ_no_entry++; 14255 14256 if (phba->intr_type == MSIX) 14257 /* MSI-X treated interrupt served as no EQ share INT */ 14258 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14259 "9145 MSI-X interrupt with no EQE\n"); 14260 else { 14261 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14262 "9146 ISR interrupt with no EQE\n"); 14263 /* Non MSI-X treated on interrupt as EQ share INT */ 14264 return IRQ_NONE; 14265 } 14266 } 14267 /* Always clear and re-arm the fast-path EQ */ 14268 phba->sli4_hba.sli4_eq_release(eq, LPFC_QUEUE_REARM); 14269 return IRQ_HANDLED; 14270 } 14271 14272 /** 14273 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 14274 * @irq: Interrupt number. 14275 * @dev_id: The device context pointer. 14276 * 14277 * This function is directly called from the PCI layer as an interrupt 14278 * service routine when device with SLI-4 interface spec is enabled with 14279 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 14280 * ring event in the HBA. However, when the device is enabled with either 14281 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 14282 * device-level interrupt handler. When the PCI slot is in error recovery 14283 * or the HBA is undergoing initialization, the interrupt handler will not 14284 * process the interrupt. The SCSI FCP fast-path ring event are handled in 14285 * the intrrupt context. This function is called without any lock held. 14286 * It gets the hbalock to access and update SLI data structures. Note that, 14287 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 14288 * equal to that of FCP CQ index. 14289 * 14290 * The link attention and ELS ring attention events are handled 14291 * by the worker thread. The interrupt handler signals the worker thread 14292 * and returns for these events. This function is called without any lock 14293 * held. It gets the hbalock to access and update SLI data structures. 
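 *
 * Editor's illustration (a sketch of the gating done below, not new driver
 * behaviour): when lpfc_fcp_look_ahead is set, the handler claims the EQ with
 * an atomic counter before touching it and releases the claim on every exit
 * path:
 *
 *	if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
 *		phba->sli4_hba.sli4_eq_clr_intr(fpeq);
 *	else {
 *		atomic_inc(&hba_eq_hdl->hba_eq_in_use);
 *		return IRQ_NONE;
 *	}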
14294 * 14295 * This function returns IRQ_HANDLED when interrupt is handled else it 14296 * returns IRQ_NONE. 14297 **/ 14298 irqreturn_t 14299 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 14300 { 14301 struct lpfc_hba *phba; 14302 struct lpfc_hba_eq_hdl *hba_eq_hdl; 14303 struct lpfc_queue *fpeq; 14304 struct lpfc_eqe *eqe; 14305 unsigned long iflag; 14306 int ecount = 0; 14307 int hba_eqidx; 14308 14309 /* Get the driver's phba structure from the dev_id */ 14310 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 14311 phba = hba_eq_hdl->phba; 14312 hba_eqidx = hba_eq_hdl->idx; 14313 14314 if (unlikely(!phba)) 14315 return IRQ_NONE; 14316 if (unlikely(!phba->sli4_hba.hba_eq)) 14317 return IRQ_NONE; 14318 14319 /* Get to the EQ struct associated with this vector */ 14320 fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; 14321 if (unlikely(!fpeq)) 14322 return IRQ_NONE; 14323 14324 if (lpfc_fcp_look_ahead) { 14325 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) 14326 phba->sli4_hba.sli4_eq_clr_intr(fpeq); 14327 else { 14328 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14329 return IRQ_NONE; 14330 } 14331 } 14332 14333 /* Check device state for handling interrupt */ 14334 if (unlikely(lpfc_intr_state_check(phba))) { 14335 /* Check again for link_state with lock held */ 14336 spin_lock_irqsave(&phba->hbalock, iflag); 14337 if (phba->link_state < LPFC_LINK_DOWN) 14338 /* Flush, clear interrupt, and rearm the EQ */ 14339 lpfc_sli4_eq_flush(phba, fpeq); 14340 spin_unlock_irqrestore(&phba->hbalock, iflag); 14341 if (lpfc_fcp_look_ahead) 14342 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14343 return IRQ_NONE; 14344 } 14345 14346 /* 14347 * Process all the event on FCP fast-path EQ 14348 */ 14349 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 14350 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); 14351 if (!(++ecount % fpeq->entry_repost)) 14352 break; 14353 fpeq->EQ_processed++; 14354 } 14355 14356 /* Track the max number of EQEs processed in 1 intr */ 14357 if (ecount > fpeq->EQ_max_eqe) 14358 fpeq->EQ_max_eqe = ecount; 14359 14360 /* Always clear and re-arm the fast-path EQ */ 14361 phba->sli4_hba.sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 14362 14363 if (unlikely(ecount == 0)) { 14364 fpeq->EQ_no_entry++; 14365 14366 if (lpfc_fcp_look_ahead) { 14367 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14368 return IRQ_NONE; 14369 } 14370 14371 if (phba->intr_type == MSIX) 14372 /* MSI-X treated interrupt served as no EQ share INT */ 14373 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14374 "0358 MSI-X interrupt with no EQE\n"); 14375 else 14376 /* Non MSI-X treated on interrupt as EQ share INT */ 14377 return IRQ_NONE; 14378 } 14379 14380 if (lpfc_fcp_look_ahead) 14381 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 14382 14383 return IRQ_HANDLED; 14384 } /* lpfc_sli4_fp_intr_handler */ 14385 14386 /** 14387 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 14388 * @irq: Interrupt number. 14389 * @dev_id: The device context pointer. 14390 * 14391 * This function is the device-level interrupt handler to device with SLI-4 14392 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 14393 * interrupt mode is enabled and there is an event in the HBA which requires 14394 * driver attention. This function invokes the slow-path interrupt attention 14395 * handling function and fast-path interrupt attention handling function in 14396 * turn to process the relevant HBA attention events. This function is called 14397 * without any lock held. 
It gets the hbalock to access and update SLI data 14398 * structures. 14399 * 14400 * This function returns IRQ_HANDLED when interrupt is handled, else it 14401 * returns IRQ_NONE. 14402 **/ 14403 irqreturn_t 14404 lpfc_sli4_intr_handler(int irq, void *dev_id) 14405 { 14406 struct lpfc_hba *phba; 14407 irqreturn_t hba_irq_rc; 14408 bool hba_handled = false; 14409 int qidx; 14410 14411 /* Get the driver's phba structure from the dev_id */ 14412 phba = (struct lpfc_hba *)dev_id; 14413 14414 if (unlikely(!phba)) 14415 return IRQ_NONE; 14416 14417 /* 14418 * Invoke fast-path host attention interrupt handling as appropriate. 14419 */ 14420 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { 14421 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 14422 &phba->sli4_hba.hba_eq_hdl[qidx]); 14423 if (hba_irq_rc == IRQ_HANDLED) 14424 hba_handled |= true; 14425 } 14426 14427 if (phba->cfg_fof) { 14428 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, 14429 &phba->sli4_hba.hba_eq_hdl[qidx]); 14430 if (hba_irq_rc == IRQ_HANDLED) 14431 hba_handled |= true; 14432 } 14433 14434 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 14435 } /* lpfc_sli4_intr_handler */ 14436 14437 /** 14438 * lpfc_sli4_queue_free - free a queue structure and associated memory 14439 * @queue: The queue structure to free. 14440 * 14441 * This function frees a queue structure and the DMAable memory used for 14442 * the host resident queue. This function must be called after destroying the 14443 * queue on the HBA. 14444 **/ 14445 void 14446 lpfc_sli4_queue_free(struct lpfc_queue *queue) 14447 { 14448 struct lpfc_dmabuf *dmabuf; 14449 14450 if (!queue) 14451 return; 14452 14453 while (!list_empty(&queue->page_list)) { 14454 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 14455 list); 14456 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 14457 dmabuf->virt, dmabuf->phys); 14458 kfree(dmabuf); 14459 } 14460 if (queue->rqbp) { 14461 lpfc_free_rq_buffer(queue->phba, queue); 14462 kfree(queue->rqbp); 14463 } 14464 14465 if (!list_empty(&queue->wq_list)) 14466 list_del(&queue->wq_list); 14467 14468 kfree(queue); 14469 return; 14470 } 14471 14472 /** 14473 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 14474 * @phba: The HBA that this queue is being created on. 14475 * @page_size: The size of a queue page 14476 * @entry_size: The size of each queue entry for this queue. 14477 * @entry count: The number of entries that this queue will handle. 14478 * 14479 * This function allocates a queue structure and the DMAable memory used for 14480 * the host resident queue. This function must be called before creating the 14481 * queue on the HBA. 
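 *
 * Usage sketch (editor's addition; page_size, entry_size, entry_count and
 * imax are caller-supplied placeholders): allocate the host memory first,
 * then create the queue on the port, and free the structure if creation
 * fails, e.g.:
 *
 *	q = lpfc_sli4_queue_alloc(phba, page_size, entry_size, entry_count);
 *	if (!q)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, q, imax))
 *		lpfc_sli4_queue_free(q);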
14482 **/ 14483 struct lpfc_queue * 14484 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 14485 uint32_t entry_size, uint32_t entry_count) 14486 { 14487 struct lpfc_queue *queue; 14488 struct lpfc_dmabuf *dmabuf; 14489 int x, total_qe_count; 14490 void *dma_pointer; 14491 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14492 14493 if (!phba->sli4_hba.pc_sli4_params.supported) 14494 hw_page_size = page_size; 14495 14496 queue = kzalloc(sizeof(struct lpfc_queue) + 14497 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 14498 if (!queue) 14499 return NULL; 14500 queue->page_count = (ALIGN(entry_size * entry_count, 14501 hw_page_size))/hw_page_size; 14502 14503 /* If needed, Adjust page count to match the max the adapter supports */ 14504 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) 14505 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; 14506 14507 INIT_LIST_HEAD(&queue->list); 14508 INIT_LIST_HEAD(&queue->wq_list); 14509 INIT_LIST_HEAD(&queue->wqfull_list); 14510 INIT_LIST_HEAD(&queue->page_list); 14511 INIT_LIST_HEAD(&queue->child_list); 14512 14513 /* Set queue parameters now. If the system cannot provide memory 14514 * resources, the free routine needs to know what was allocated. 14515 */ 14516 queue->entry_size = entry_size; 14517 queue->entry_count = entry_count; 14518 queue->page_size = hw_page_size; 14519 queue->phba = phba; 14520 14521 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 14522 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 14523 if (!dmabuf) 14524 goto out_fail; 14525 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 14526 hw_page_size, &dmabuf->phys, 14527 GFP_KERNEL); 14528 if (!dmabuf->virt) { 14529 kfree(dmabuf); 14530 goto out_fail; 14531 } 14532 dmabuf->buffer_tag = x; 14533 list_add_tail(&dmabuf->list, &queue->page_list); 14534 /* initialize queue's entry array */ 14535 dma_pointer = dmabuf->virt; 14536 for (; total_qe_count < entry_count && 14537 dma_pointer < (hw_page_size + dmabuf->virt); 14538 total_qe_count++, dma_pointer += entry_size) { 14539 queue->qe[total_qe_count].address = dma_pointer; 14540 } 14541 } 14542 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); 14543 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); 14544 14545 /* entry_repost will be set during q creation */ 14546 14547 return queue; 14548 out_fail: 14549 lpfc_sli4_queue_free(queue); 14550 return NULL; 14551 } 14552 14553 /** 14554 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 14555 * @phba: HBA structure that indicates port to create a queue on. 14556 * @pci_barset: PCI BAR set flag. 14557 * 14558 * This function shall perform iomap of the specified PCI BAR address to host 14559 * memory address if not already done so and return it. The returned host 14560 * memory address can be NULL. 14561 */ 14562 static void __iomem * 14563 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 14564 { 14565 if (!phba->pcidev) 14566 return NULL; 14567 14568 switch (pci_barset) { 14569 case WQ_PCI_BAR_0_AND_1: 14570 return phba->pci_bar0_memmap_p; 14571 case WQ_PCI_BAR_2_AND_3: 14572 return phba->pci_bar2_memmap_p; 14573 case WQ_PCI_BAR_4_AND_5: 14574 return phba->pci_bar4_memmap_p; 14575 default: 14576 break; 14577 } 14578 return NULL; 14579 } 14580 14581 /** 14582 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs 14583 * @phba: HBA structure that indicates port to create a queue on. 
14584 * @startq: The starting FCP EQ to modify 14585 * 14586 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 14587 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be 14588 * updated in one mailbox command. 14589 * 14590 * The @phba struct is used to send mailbox command to HBA. The @startq 14591 * is used to get the starting FCP EQ to change. 14592 * This function is asynchronous and will wait for the mailbox 14593 * command to finish before continuing. 14594 * 14595 * On success this function will return a zero. If unable to allocate enough 14596 * memory this function will return -ENOMEM. If the queue create mailbox command 14597 * fails this function will return -ENXIO. 14598 **/ 14599 int 14600 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14601 uint32_t numq, uint32_t imax) 14602 { 14603 struct lpfc_mbx_modify_eq_delay *eq_delay; 14604 LPFC_MBOXQ_t *mbox; 14605 struct lpfc_queue *eq; 14606 int cnt, rc, length, status = 0; 14607 uint32_t shdr_status, shdr_add_status; 14608 uint32_t result, val; 14609 int qidx; 14610 union lpfc_sli4_cfg_shdr *shdr; 14611 uint16_t dmult; 14612 14613 if (startq >= phba->io_channel_irqs) 14614 return 0; 14615 14616 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14617 if (!mbox) 14618 return -ENOMEM; 14619 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14620 sizeof(struct lpfc_sli4_cfg_mhdr)); 14621 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14622 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14623 length, LPFC_SLI4_MBX_EMBED); 14624 eq_delay = &mbox->u.mqe.un.eq_delay; 14625 14626 /* Calculate delay multiper from maximum interrupt per second */ 14627 result = imax / phba->io_channel_irqs; 14628 if (result > LPFC_DMULT_CONST || result == 0) 14629 dmult = 0; 14630 else 14631 dmult = LPFC_DMULT_CONST/result - 1; 14632 if (dmult > LPFC_DMULT_MAX) 14633 dmult = LPFC_DMULT_MAX; 14634 14635 cnt = 0; 14636 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { 14637 eq = phba->sli4_hba.hba_eq[qidx]; 14638 if (!eq) 14639 continue; 14640 eq->q_mode = imax; 14641 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14642 eq_delay->u.request.eq[cnt].phase = 0; 14643 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14644 cnt++; 14645 14646 /* q_mode is only used for auto_imax */ 14647 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14648 /* Use EQ Delay Register method for q_mode */ 14649 14650 /* Convert for EQ Delay register */ 14651 val = phba->cfg_fcp_imax; 14652 if (val) { 14653 /* First, interrupts per sec per EQ */ 14654 val = phba->cfg_fcp_imax / 14655 phba->io_channel_irqs; 14656 14657 /* us delay between each interrupt */ 14658 val = LPFC_SEC_TO_USEC / val; 14659 } 14660 eq->q_mode = val; 14661 } else { 14662 eq->q_mode = imax; 14663 } 14664 14665 if (cnt >= numq) 14666 break; 14667 } 14668 eq_delay->u.request.num_eq = cnt; 14669 14670 mbox->vport = phba->pport; 14671 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14672 mbox->context1 = NULL; 14673 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14674 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14675 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14676 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14677 if (shdr_status || shdr_add_status || rc) { 14678 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14679 "2512 MODIFY_EQ_DELAY mailbox failed with " 14680 "status x%x add_status x%x, mbx status x%x\n", 14681 shdr_status, shdr_add_status, rc); 14682 status = -ENXIO; 14683 } 14684 
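	/*
	 * Worked example (editor's note): with cfg_fcp_imax = 80000 and
	 * io_channel_irqs = 8, the EQ Delay Register path above targets
	 * 10000 interrupts/sec per EQ, i.e. a per-EQ delay of
	 * LPFC_SEC_TO_USEC / 10000 (100us if LPFC_SEC_TO_USEC is one
	 * second expressed in microseconds).
	 */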
mempool_free(mbox, phba->mbox_mem_pool); 14685 return status; 14686 } 14687 14688 /** 14689 * lpfc_eq_create - Create an Event Queue on the HBA 14690 * @phba: HBA structure that indicates port to create a queue on. 14691 * @eq: The queue structure to use to create the event queue. 14692 * @imax: The maximum interrupt per second limit. 14693 * 14694 * This function creates an event queue, as detailed in @eq, on a port, 14695 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 14696 * 14697 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14698 * is used to get the entry count and entry size that are necessary to 14699 * determine the number of pages to allocate and use for this queue. This 14700 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14701 * event queue. This function is asynchronous and will wait for the mailbox 14702 * command to finish before continuing. 14703 * 14704 * On success this function will return a zero. If unable to allocate enough 14705 * memory this function will return -ENOMEM. If the queue create mailbox command 14706 * fails this function will return -ENXIO. 14707 **/ 14708 int 14709 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14710 { 14711 struct lpfc_mbx_eq_create *eq_create; 14712 LPFC_MBOXQ_t *mbox; 14713 int rc, length, status = 0; 14714 struct lpfc_dmabuf *dmabuf; 14715 uint32_t shdr_status, shdr_add_status; 14716 union lpfc_sli4_cfg_shdr *shdr; 14717 uint16_t dmult; 14718 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14719 14720 /* sanity check on queue memory */ 14721 if (!eq) 14722 return -ENODEV; 14723 if (!phba->sli4_hba.pc_sli4_params.supported) 14724 hw_page_size = SLI4_PAGE_SIZE; 14725 14726 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14727 if (!mbox) 14728 return -ENOMEM; 14729 length = (sizeof(struct lpfc_mbx_eq_create) - 14730 sizeof(struct lpfc_sli4_cfg_mhdr)); 14731 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14732 LPFC_MBOX_OPCODE_EQ_CREATE, 14733 length, LPFC_SLI4_MBX_EMBED); 14734 eq_create = &mbox->u.mqe.un.eq_create; 14735 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14736 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14737 eq->page_count); 14738 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14739 LPFC_EQE_SIZE); 14740 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14741 14742 /* Use version 2 of CREATE_EQ if eqav is set */ 14743 if (phba->sli4_hba.pc_sli4_params.eqav) { 14744 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14745 LPFC_Q_CREATE_VERSION_2); 14746 bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context, 14747 phba->sli4_hba.pc_sli4_params.eqav); 14748 } 14749 14750 /* don't setup delay multiplier using EQ_CREATE */ 14751 dmult = 0; 14752 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14753 dmult); 14754 switch (eq->entry_count) { 14755 default: 14756 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14757 "0360 Unsupported EQ count. 
(%d)\n", 14758 eq->entry_count); 14759 if (eq->entry_count < 256) 14760 return -EINVAL; 14761 /* otherwise default to smallest count (drop through) */ 14762 case 256: 14763 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14764 LPFC_EQ_CNT_256); 14765 break; 14766 case 512: 14767 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14768 LPFC_EQ_CNT_512); 14769 break; 14770 case 1024: 14771 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14772 LPFC_EQ_CNT_1024); 14773 break; 14774 case 2048: 14775 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14776 LPFC_EQ_CNT_2048); 14777 break; 14778 case 4096: 14779 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14780 LPFC_EQ_CNT_4096); 14781 break; 14782 } 14783 list_for_each_entry(dmabuf, &eq->page_list, list) { 14784 memset(dmabuf->virt, 0, hw_page_size); 14785 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14786 putPaddrLow(dmabuf->phys); 14787 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14788 putPaddrHigh(dmabuf->phys); 14789 } 14790 mbox->vport = phba->pport; 14791 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14792 mbox->context1 = NULL; 14793 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14794 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14795 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14796 if (shdr_status || shdr_add_status || rc) { 14797 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14798 "2500 EQ_CREATE mailbox failed with " 14799 "status x%x add_status x%x, mbx status x%x\n", 14800 shdr_status, shdr_add_status, rc); 14801 status = -ENXIO; 14802 } 14803 eq->type = LPFC_EQ; 14804 eq->subtype = LPFC_NONE; 14805 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14806 if (eq->queue_id == 0xFFFF) 14807 status = -ENXIO; 14808 eq->host_index = 0; 14809 eq->hba_index = 0; 14810 eq->entry_repost = LPFC_EQ_REPOST; 14811 14812 mempool_free(mbox, phba->mbox_mem_pool); 14813 return status; 14814 } 14815 14816 /** 14817 * lpfc_cq_create - Create a Completion Queue on the HBA 14818 * @phba: HBA structure that indicates port to create a queue on. 14819 * @cq: The queue structure to use to create the completion queue. 14820 * @eq: The event queue to bind this completion queue to. 14821 * 14822 * This function creates a completion queue, as detailed in @wq, on a port, 14823 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14824 * 14825 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14826 * is used to get the entry count and entry size that are necessary to 14827 * determine the number of pages to allocate and use for this queue. The @eq 14828 * is used to indicate which event queue to bind this completion queue to. This 14829 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14830 * completion queue. This function is asynchronous and will wait for the mailbox 14831 * command to finish before continuing. 14832 * 14833 * On success this function will return a zero. If unable to allocate enough 14834 * memory this function will return -ENOMEM. If the queue create mailbox command 14835 * fails this function will return -ENXIO. 
14836 **/ 14837 int 14838 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14839 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14840 { 14841 struct lpfc_mbx_cq_create *cq_create; 14842 struct lpfc_dmabuf *dmabuf; 14843 LPFC_MBOXQ_t *mbox; 14844 int rc, length, status = 0; 14845 uint32_t shdr_status, shdr_add_status; 14846 union lpfc_sli4_cfg_shdr *shdr; 14847 14848 /* sanity check on queue memory */ 14849 if (!cq || !eq) 14850 return -ENODEV; 14851 14852 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14853 if (!mbox) 14854 return -ENOMEM; 14855 length = (sizeof(struct lpfc_mbx_cq_create) - 14856 sizeof(struct lpfc_sli4_cfg_mhdr)); 14857 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14858 LPFC_MBOX_OPCODE_CQ_CREATE, 14859 length, LPFC_SLI4_MBX_EMBED); 14860 cq_create = &mbox->u.mqe.un.cq_create; 14861 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14862 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14863 cq->page_count); 14864 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14865 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14866 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14867 phba->sli4_hba.pc_sli4_params.cqv); 14868 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14869 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14870 (cq->page_size / SLI4_PAGE_SIZE)); 14871 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14872 eq->queue_id); 14873 bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context, 14874 phba->sli4_hba.pc_sli4_params.cqav); 14875 } else { 14876 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14877 eq->queue_id); 14878 } 14879 switch (cq->entry_count) { 14880 case 2048: 14881 case 4096: 14882 if (phba->sli4_hba.pc_sli4_params.cqv == 14883 LPFC_Q_CREATE_VERSION_2) { 14884 cq_create->u.request.context.lpfc_cq_context_count = 14885 cq->entry_count; 14886 bf_set(lpfc_cq_context_count, 14887 &cq_create->u.request.context, 14888 LPFC_CQ_CNT_WORD7); 14889 break; 14890 } 14891 /* Fall Thru */ 14892 default: 14893 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14894 "0361 Unsupported CQ count: " 14895 "entry cnt %d sz %d pg cnt %d\n", 14896 cq->entry_count, cq->entry_size, 14897 cq->page_count); 14898 if (cq->entry_count < 256) { 14899 status = -EINVAL; 14900 goto out; 14901 } 14902 /* otherwise default to smallest count (drop through) */ 14903 case 256: 14904 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14905 LPFC_CQ_CNT_256); 14906 break; 14907 case 512: 14908 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14909 LPFC_CQ_CNT_512); 14910 break; 14911 case 1024: 14912 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14913 LPFC_CQ_CNT_1024); 14914 break; 14915 } 14916 list_for_each_entry(dmabuf, &cq->page_list, list) { 14917 memset(dmabuf->virt, 0, cq->page_size); 14918 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14919 putPaddrLow(dmabuf->phys); 14920 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14921 putPaddrHigh(dmabuf->phys); 14922 } 14923 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14924 14925 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 14926 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14927 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14928 if (shdr_status || shdr_add_status || rc) { 14929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14930 "2501 CQ_CREATE mailbox failed with " 14931 "status x%x add_status x%x, mbx status x%x\n", 14932 shdr_status, shdr_add_status, rc); 14933 status = -ENXIO; 14934 goto out; 14935 } 14936 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14937 if (cq->queue_id == 0xFFFF) { 14938 status = -ENXIO; 14939 goto out; 14940 } 14941 /* link the cq onto the parent eq child list */ 14942 list_add_tail(&cq->list, &eq->child_list); 14943 /* Set up completion queue's type and subtype */ 14944 cq->type = type; 14945 cq->subtype = subtype; 14946 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14947 cq->assoc_qid = eq->queue_id; 14948 cq->host_index = 0; 14949 cq->hba_index = 0; 14950 cq->entry_repost = LPFC_CQ_REPOST; 14951 14952 out: 14953 mempool_free(mbox, phba->mbox_mem_pool); 14954 return status; 14955 } 14956 14957 /** 14958 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ 14959 * @phba: HBA structure that indicates port to create a queue on. 14960 * @cqp: The queue structure array to use to create the completion queues. 14961 * @eqp: The event queue array to bind these completion queues to. 14962 * 14963 * This function creates a set of completion queue, s to support MRQ 14964 * as detailed in @cqp, on a port, 14965 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. 14966 * 14967 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14968 * is used to get the entry count and entry size that are necessary to 14969 * determine the number of pages to allocate and use for this queue. The @eq 14970 * is used to indicate which event queue to bind this completion queue to. This 14971 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the 14972 * completion queue. This function is asynchronous and will wait for the mailbox 14973 * command to finish before continuing. 14974 * 14975 * On success this function will return a zero. If unable to allocate enough 14976 * memory this function will return -ENOMEM. If the queue create mailbox command 14977 * fails this function will return -ENXIO. 
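 *
 * Editor's sketch (illustrative only; LPFC_WCQ is assumed to be the driver's
 * work-CQ type constant): @cqp and @eqp must each supply phba->cfg_nvmet_mrq
 * entries, e.g.:
 *
 *	rc = lpfc_cq_create_set(phba, phba->sli4_hba.nvmet_cqset,
 *				phba->sli4_hba.hba_eq, LPFC_WCQ, LPFC_NVMET);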
14978 **/ 14979 int 14980 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14981 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) 14982 { 14983 struct lpfc_queue *cq; 14984 struct lpfc_queue *eq; 14985 struct lpfc_mbx_cq_create_set *cq_set; 14986 struct lpfc_dmabuf *dmabuf; 14987 LPFC_MBOXQ_t *mbox; 14988 int rc, length, alloclen, status = 0; 14989 int cnt, idx, numcq, page_idx = 0; 14990 uint32_t shdr_status, shdr_add_status; 14991 union lpfc_sli4_cfg_shdr *shdr; 14992 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14993 14994 /* sanity check on queue memory */ 14995 numcq = phba->cfg_nvmet_mrq; 14996 if (!cqp || !eqp || !numcq) 14997 return -ENODEV; 14998 14999 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15000 if (!mbox) 15001 return -ENOMEM; 15002 15003 length = sizeof(struct lpfc_mbx_cq_create_set); 15004 length += ((numcq * cqp[0]->page_count) * 15005 sizeof(struct dma_address)); 15006 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15007 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 15008 LPFC_SLI4_MBX_NEMBED); 15009 if (alloclen < length) { 15010 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15011 "3098 Allocated DMA memory size (%d) is " 15012 "less than the requested DMA memory size " 15013 "(%d)\n", alloclen, length); 15014 status = -ENOMEM; 15015 goto out; 15016 } 15017 cq_set = mbox->sge_array->addr[0]; 15018 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 15019 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 15020 15021 for (idx = 0; idx < numcq; idx++) { 15022 cq = cqp[idx]; 15023 eq = eqp[idx]; 15024 if (!cq || !eq) { 15025 status = -ENOMEM; 15026 goto out; 15027 } 15028 if (!phba->sli4_hba.pc_sli4_params.supported) 15029 hw_page_size = cq->page_size; 15030 15031 switch (idx) { 15032 case 0: 15033 bf_set(lpfc_mbx_cq_create_set_page_size, 15034 &cq_set->u.request, 15035 (hw_page_size / SLI4_PAGE_SIZE)); 15036 bf_set(lpfc_mbx_cq_create_set_num_pages, 15037 &cq_set->u.request, cq->page_count); 15038 bf_set(lpfc_mbx_cq_create_set_evt, 15039 &cq_set->u.request, 1); 15040 bf_set(lpfc_mbx_cq_create_set_valid, 15041 &cq_set->u.request, 1); 15042 bf_set(lpfc_mbx_cq_create_set_cqe_size, 15043 &cq_set->u.request, 0); 15044 bf_set(lpfc_mbx_cq_create_set_num_cq, 15045 &cq_set->u.request, numcq); 15046 bf_set(lpfc_mbx_cq_create_set_autovalid, 15047 &cq_set->u.request, 15048 phba->sli4_hba.pc_sli4_params.cqav); 15049 switch (cq->entry_count) { 15050 case 2048: 15051 case 4096: 15052 if (phba->sli4_hba.pc_sli4_params.cqv == 15053 LPFC_Q_CREATE_VERSION_2) { 15054 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15055 &cq_set->u.request, 15056 cq->entry_count); 15057 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15058 &cq_set->u.request, 15059 LPFC_CQ_CNT_WORD7); 15060 break; 15061 } 15062 /* Fall Thru */ 15063 default: 15064 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15065 "3118 Bad CQ count. 
(%d)\n", 15066 cq->entry_count); 15067 if (cq->entry_count < 256) { 15068 status = -EINVAL; 15069 goto out; 15070 } 15071 /* otherwise default to smallest (drop thru) */ 15072 case 256: 15073 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15074 &cq_set->u.request, LPFC_CQ_CNT_256); 15075 break; 15076 case 512: 15077 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15078 &cq_set->u.request, LPFC_CQ_CNT_512); 15079 break; 15080 case 1024: 15081 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 15082 &cq_set->u.request, LPFC_CQ_CNT_1024); 15083 break; 15084 } 15085 bf_set(lpfc_mbx_cq_create_set_eq_id0, 15086 &cq_set->u.request, eq->queue_id); 15087 break; 15088 case 1: 15089 bf_set(lpfc_mbx_cq_create_set_eq_id1, 15090 &cq_set->u.request, eq->queue_id); 15091 break; 15092 case 2: 15093 bf_set(lpfc_mbx_cq_create_set_eq_id2, 15094 &cq_set->u.request, eq->queue_id); 15095 break; 15096 case 3: 15097 bf_set(lpfc_mbx_cq_create_set_eq_id3, 15098 &cq_set->u.request, eq->queue_id); 15099 break; 15100 case 4: 15101 bf_set(lpfc_mbx_cq_create_set_eq_id4, 15102 &cq_set->u.request, eq->queue_id); 15103 break; 15104 case 5: 15105 bf_set(lpfc_mbx_cq_create_set_eq_id5, 15106 &cq_set->u.request, eq->queue_id); 15107 break; 15108 case 6: 15109 bf_set(lpfc_mbx_cq_create_set_eq_id6, 15110 &cq_set->u.request, eq->queue_id); 15111 break; 15112 case 7: 15113 bf_set(lpfc_mbx_cq_create_set_eq_id7, 15114 &cq_set->u.request, eq->queue_id); 15115 break; 15116 case 8: 15117 bf_set(lpfc_mbx_cq_create_set_eq_id8, 15118 &cq_set->u.request, eq->queue_id); 15119 break; 15120 case 9: 15121 bf_set(lpfc_mbx_cq_create_set_eq_id9, 15122 &cq_set->u.request, eq->queue_id); 15123 break; 15124 case 10: 15125 bf_set(lpfc_mbx_cq_create_set_eq_id10, 15126 &cq_set->u.request, eq->queue_id); 15127 break; 15128 case 11: 15129 bf_set(lpfc_mbx_cq_create_set_eq_id11, 15130 &cq_set->u.request, eq->queue_id); 15131 break; 15132 case 12: 15133 bf_set(lpfc_mbx_cq_create_set_eq_id12, 15134 &cq_set->u.request, eq->queue_id); 15135 break; 15136 case 13: 15137 bf_set(lpfc_mbx_cq_create_set_eq_id13, 15138 &cq_set->u.request, eq->queue_id); 15139 break; 15140 case 14: 15141 bf_set(lpfc_mbx_cq_create_set_eq_id14, 15142 &cq_set->u.request, eq->queue_id); 15143 break; 15144 case 15: 15145 bf_set(lpfc_mbx_cq_create_set_eq_id15, 15146 &cq_set->u.request, eq->queue_id); 15147 break; 15148 } 15149 15150 /* link the cq onto the parent eq child list */ 15151 list_add_tail(&cq->list, &eq->child_list); 15152 /* Set up completion queue's type and subtype */ 15153 cq->type = type; 15154 cq->subtype = subtype; 15155 cq->assoc_qid = eq->queue_id; 15156 cq->host_index = 0; 15157 cq->hba_index = 0; 15158 cq->entry_repost = LPFC_CQ_REPOST; 15159 cq->chann = idx; 15160 15161 rc = 0; 15162 list_for_each_entry(dmabuf, &cq->page_list, list) { 15163 memset(dmabuf->virt, 0, hw_page_size); 15164 cnt = page_idx + dmabuf->buffer_tag; 15165 cq_set->u.request.page[cnt].addr_lo = 15166 putPaddrLow(dmabuf->phys); 15167 cq_set->u.request.page[cnt].addr_hi = 15168 putPaddrHigh(dmabuf->phys); 15169 rc++; 15170 } 15171 page_idx += rc; 15172 } 15173 15174 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15175 15176 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15177 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15178 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15179 if (shdr_status || shdr_add_status || rc) { 15180 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15181 "3119 CQ_CREATE_SET mailbox failed with " 15182 "status x%x add_status x%x, mbx status x%x\n", 15183 shdr_status, shdr_add_status, rc); 15184 status = -ENXIO; 15185 goto out; 15186 } 15187 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 15188 if (rc == 0xFFFF) { 15189 status = -ENXIO; 15190 goto out; 15191 } 15192 15193 for (idx = 0; idx < numcq; idx++) { 15194 cq = cqp[idx]; 15195 cq->queue_id = rc + idx; 15196 } 15197 15198 out: 15199 lpfc_sli4_mbox_cmd_free(phba, mbox); 15200 return status; 15201 } 15202 15203 /** 15204 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 15205 * @phba: HBA structure that indicates port to create a queue on. 15206 * @mq: The queue structure to use to create the mailbox queue. 15207 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 15208 * @cq: The completion queue to associate with this cq. 15209 * 15210 * This function provides failback (fb) functionality when the 15211 * mq_create_ext fails on older FW generations. It's purpose is identical 15212 * to mq_create_ext otherwise. 15213 * 15214 * This routine cannot fail as all attributes were previously accessed and 15215 * initialized in mq_create_ext. 15216 **/ 15217 static void 15218 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 15219 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 15220 { 15221 struct lpfc_mbx_mq_create *mq_create; 15222 struct lpfc_dmabuf *dmabuf; 15223 int length; 15224 15225 length = (sizeof(struct lpfc_mbx_mq_create) - 15226 sizeof(struct lpfc_sli4_cfg_mhdr)); 15227 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15228 LPFC_MBOX_OPCODE_MQ_CREATE, 15229 length, LPFC_SLI4_MBX_EMBED); 15230 mq_create = &mbox->u.mqe.un.mq_create; 15231 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 15232 mq->page_count); 15233 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 15234 cq->queue_id); 15235 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 15236 switch (mq->entry_count) { 15237 case 16: 15238 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15239 LPFC_MQ_RING_SIZE_16); 15240 break; 15241 case 32: 15242 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15243 LPFC_MQ_RING_SIZE_32); 15244 break; 15245 case 64: 15246 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15247 LPFC_MQ_RING_SIZE_64); 15248 break; 15249 case 128: 15250 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 15251 LPFC_MQ_RING_SIZE_128); 15252 break; 15253 } 15254 list_for_each_entry(dmabuf, &mq->page_list, list) { 15255 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15256 putPaddrLow(dmabuf->phys); 15257 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15258 putPaddrHigh(dmabuf->phys); 15259 } 15260 } 15261 15262 /** 15263 * lpfc_mq_create - Create a mailbox Queue on the HBA 15264 * @phba: HBA structure that indicates port to create a queue on. 15265 * @mq: The queue structure to use to create the mailbox queue. 15266 * @cq: The completion queue to associate with this cq. 15267 * @subtype: The queue's subtype. 15268 * 15269 * This function creates a mailbox queue, as detailed in @mq, on a port, 15270 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 
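 *
 * Editor's note (illustration of the flow below, not new behaviour): the
 * routine first issues MQ_CREATE_EXT so asynchronous events are registered;
 * if that mailbox fails on older firmware it falls back to plain MQ_CREATE
 * via lpfc_mq_create_fb_init(), roughly:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	if (rc != MBX_SUCCESS) {
 *		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	}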
15271 * 15272 * The @phba struct is used to send mailbox command to HBA. The @cq struct 15273 * is used to get the entry count and entry size that are necessary to 15274 * determine the number of pages to allocate and use for this queue. This 15275 * function will send the MQ_CREATE mailbox command to the HBA to setup the 15276 * mailbox queue. This function is asynchronous and will wait for the mailbox 15277 * command to finish before continuing. 15278 * 15279 * On success this function will return a zero. If unable to allocate enough 15280 * memory this function will return -ENOMEM. If the queue create mailbox command 15281 * fails this function will return -ENXIO. 15282 **/ 15283 int32_t 15284 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 15285 struct lpfc_queue *cq, uint32_t subtype) 15286 { 15287 struct lpfc_mbx_mq_create *mq_create; 15288 struct lpfc_mbx_mq_create_ext *mq_create_ext; 15289 struct lpfc_dmabuf *dmabuf; 15290 LPFC_MBOXQ_t *mbox; 15291 int rc, length, status = 0; 15292 uint32_t shdr_status, shdr_add_status; 15293 union lpfc_sli4_cfg_shdr *shdr; 15294 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15295 15296 /* sanity check on queue memory */ 15297 if (!mq || !cq) 15298 return -ENODEV; 15299 if (!phba->sli4_hba.pc_sli4_params.supported) 15300 hw_page_size = SLI4_PAGE_SIZE; 15301 15302 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15303 if (!mbox) 15304 return -ENOMEM; 15305 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 15306 sizeof(struct lpfc_sli4_cfg_mhdr)); 15307 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15308 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 15309 length, LPFC_SLI4_MBX_EMBED); 15310 15311 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 15312 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 15313 bf_set(lpfc_mbx_mq_create_ext_num_pages, 15314 &mq_create_ext->u.request, mq->page_count); 15315 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 15316 &mq_create_ext->u.request, 1); 15317 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 15318 &mq_create_ext->u.request, 1); 15319 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 15320 &mq_create_ext->u.request, 1); 15321 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 15322 &mq_create_ext->u.request, 1); 15323 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 15324 &mq_create_ext->u.request, 1); 15325 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 15326 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15327 phba->sli4_hba.pc_sli4_params.mqv); 15328 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 15329 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 15330 cq->queue_id); 15331 else 15332 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 15333 cq->queue_id); 15334 switch (mq->entry_count) { 15335 default: 15336 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15337 "0362 Unsupported MQ count. 
(%d)\n", 15338 mq->entry_count); 15339 if (mq->entry_count < 16) { 15340 status = -EINVAL; 15341 goto out; 15342 } 15343 /* otherwise default to smallest count (drop through) */ 15344 case 16: 15345 bf_set(lpfc_mq_context_ring_size, 15346 &mq_create_ext->u.request.context, 15347 LPFC_MQ_RING_SIZE_16); 15348 break; 15349 case 32: 15350 bf_set(lpfc_mq_context_ring_size, 15351 &mq_create_ext->u.request.context, 15352 LPFC_MQ_RING_SIZE_32); 15353 break; 15354 case 64: 15355 bf_set(lpfc_mq_context_ring_size, 15356 &mq_create_ext->u.request.context, 15357 LPFC_MQ_RING_SIZE_64); 15358 break; 15359 case 128: 15360 bf_set(lpfc_mq_context_ring_size, 15361 &mq_create_ext->u.request.context, 15362 LPFC_MQ_RING_SIZE_128); 15363 break; 15364 } 15365 list_for_each_entry(dmabuf, &mq->page_list, list) { 15366 memset(dmabuf->virt, 0, hw_page_size); 15367 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 15368 putPaddrLow(dmabuf->phys); 15369 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 15370 putPaddrHigh(dmabuf->phys); 15371 } 15372 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15373 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15374 &mq_create_ext->u.response); 15375 if (rc != MBX_SUCCESS) { 15376 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15377 "2795 MQ_CREATE_EXT failed with " 15378 "status x%x. Failback to MQ_CREATE.\n", 15379 rc); 15380 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 15381 mq_create = &mbox->u.mqe.un.mq_create; 15382 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15383 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 15384 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 15385 &mq_create->u.response); 15386 } 15387 15388 /* The IOCTL status is embedded in the mailbox subheader. */ 15389 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15390 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15391 if (shdr_status || shdr_add_status || rc) { 15392 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15393 "2502 MQ_CREATE mailbox failed with " 15394 "status x%x add_status x%x, mbx status x%x\n", 15395 shdr_status, shdr_add_status, rc); 15396 status = -ENXIO; 15397 goto out; 15398 } 15399 if (mq->queue_id == 0xFFFF) { 15400 status = -ENXIO; 15401 goto out; 15402 } 15403 mq->type = LPFC_MQ; 15404 mq->assoc_qid = cq->queue_id; 15405 mq->subtype = subtype; 15406 mq->host_index = 0; 15407 mq->hba_index = 0; 15408 mq->entry_repost = LPFC_MQ_REPOST; 15409 15410 /* link the mq onto the parent cq child list */ 15411 list_add_tail(&mq->list, &cq->child_list); 15412 out: 15413 mempool_free(mbox, phba->mbox_mem_pool); 15414 return status; 15415 } 15416 15417 /** 15418 * lpfc_wq_create - Create a Work Queue on the HBA 15419 * @phba: HBA structure that indicates port to create a queue on. 15420 * @wq: The queue structure to use to create the work queue. 15421 * @cq: The completion queue to bind this work queue to. 15422 * @subtype: The subtype of the work queue indicating its functionality. 15423 * 15424 * This function creates a work queue, as detailed in @wq, on a port, described 15425 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 15426 * 15427 * The @phba struct is used to send mailbox command to HBA. The @wq struct 15428 * is used to get the entry count and entry size that are necessary to 15429 * determine the number of pages to allocate and use for this queue. The @cq 15430 * is used to indicate which completion queue to bind this work queue to. 
This 15431 * function will send the WQ_CREATE mailbox command to the HBA to setup the 15432 * work queue. This function is asynchronous and will wait for the mailbox 15433 * command to finish before continuing. 15434 * 15435 * On success this function will return a zero. If unable to allocate enough 15436 * memory this function will return -ENOMEM. If the queue create mailbox command 15437 * fails this function will return -ENXIO. 15438 **/ 15439 int 15440 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 15441 struct lpfc_queue *cq, uint32_t subtype) 15442 { 15443 struct lpfc_mbx_wq_create *wq_create; 15444 struct lpfc_dmabuf *dmabuf; 15445 LPFC_MBOXQ_t *mbox; 15446 int rc, length, status = 0; 15447 uint32_t shdr_status, shdr_add_status; 15448 union lpfc_sli4_cfg_shdr *shdr; 15449 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15450 struct dma_address *page; 15451 void __iomem *bar_memmap_p; 15452 uint32_t db_offset; 15453 uint16_t pci_barset; 15454 uint8_t dpp_barset; 15455 uint32_t dpp_offset; 15456 unsigned long pg_addr; 15457 uint8_t wq_create_version; 15458 15459 /* sanity check on queue memory */ 15460 if (!wq || !cq) 15461 return -ENODEV; 15462 if (!phba->sli4_hba.pc_sli4_params.supported) 15463 hw_page_size = wq->page_size; 15464 15465 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15466 if (!mbox) 15467 return -ENOMEM; 15468 length = (sizeof(struct lpfc_mbx_wq_create) - 15469 sizeof(struct lpfc_sli4_cfg_mhdr)); 15470 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15471 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 15472 length, LPFC_SLI4_MBX_EMBED); 15473 wq_create = &mbox->u.mqe.un.wq_create; 15474 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 15475 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 15476 wq->page_count); 15477 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 15478 cq->queue_id); 15479 15480 /* wqv is the earliest version supported, NOT the latest */ 15481 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15482 phba->sli4_hba.pc_sli4_params.wqv); 15483 15484 if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) || 15485 (wq->page_size > SLI4_PAGE_SIZE)) 15486 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15487 else 15488 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15489 15490 15491 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 15492 wq_create_version = LPFC_Q_CREATE_VERSION_1; 15493 else 15494 wq_create_version = LPFC_Q_CREATE_VERSION_0; 15495 15496 switch (wq_create_version) { 15497 case LPFC_Q_CREATE_VERSION_1: 15498 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 15499 wq->entry_count); 15500 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15501 LPFC_Q_CREATE_VERSION_1); 15502 15503 switch (wq->entry_size) { 15504 default: 15505 case 64: 15506 bf_set(lpfc_mbx_wq_create_wqe_size, 15507 &wq_create->u.request_1, 15508 LPFC_WQ_WQE_SIZE_64); 15509 break; 15510 case 128: 15511 bf_set(lpfc_mbx_wq_create_wqe_size, 15512 &wq_create->u.request_1, 15513 LPFC_WQ_WQE_SIZE_128); 15514 break; 15515 } 15516 /* Request DPP by default */ 15517 bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1); 15518 bf_set(lpfc_mbx_wq_create_page_size, 15519 &wq_create->u.request_1, 15520 (wq->page_size / SLI4_PAGE_SIZE)); 15521 page = wq_create->u.request_1.page; 15522 break; 15523 default: 15524 page = wq_create->u.request.page; 15525 break; 15526 } 15527 15528 list_for_each_entry(dmabuf, &wq->page_list, list) { 15529 memset(dmabuf->virt, 0, hw_page_size); 15530 
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 15531 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 15532 } 15533 15534 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15535 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 15536 15537 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15538 /* The IOCTL status is embedded in the mailbox subheader. */ 15539 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15540 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15541 if (shdr_status || shdr_add_status || rc) { 15542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15543 "2503 WQ_CREATE mailbox failed with " 15544 "status x%x add_status x%x, mbx status x%x\n", 15545 shdr_status, shdr_add_status, rc); 15546 status = -ENXIO; 15547 goto out; 15548 } 15549 15550 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) 15551 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, 15552 &wq_create->u.response); 15553 else 15554 wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, 15555 &wq_create->u.response_1); 15556 15557 if (wq->queue_id == 0xFFFF) { 15558 status = -ENXIO; 15559 goto out; 15560 } 15561 15562 wq->db_format = LPFC_DB_LIST_FORMAT; 15563 if (wq_create_version == LPFC_Q_CREATE_VERSION_0) { 15564 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15565 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15566 &wq_create->u.response); 15567 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15568 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15569 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15570 "3265 WQ[%d] doorbell format " 15571 "not supported: x%x\n", 15572 wq->queue_id, wq->db_format); 15573 status = -EINVAL; 15574 goto out; 15575 } 15576 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15577 &wq_create->u.response); 15578 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15579 pci_barset); 15580 if (!bar_memmap_p) { 15581 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15582 "3263 WQ[%d] failed to memmap " 15583 "pci barset:x%x\n", 15584 wq->queue_id, pci_barset); 15585 status = -ENOMEM; 15586 goto out; 15587 } 15588 db_offset = wq_create->u.response.doorbell_offset; 15589 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15590 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15591 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15592 "3252 WQ[%d] doorbell offset " 15593 "not supported: x%x\n", 15594 wq->queue_id, db_offset); 15595 status = -EINVAL; 15596 goto out; 15597 } 15598 wq->db_regaddr = bar_memmap_p + db_offset; 15599 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15600 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15601 "format:x%x\n", wq->queue_id, 15602 pci_barset, db_offset, wq->db_format); 15603 } else 15604 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15605 } else { 15606 /* Check if DPP was honored by the firmware */ 15607 wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, 15608 &wq_create->u.response_1); 15609 if (wq->dpp_enable) { 15610 pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set, 15611 &wq_create->u.response_1); 15612 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15613 pci_barset); 15614 if (!bar_memmap_p) { 15615 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15616 "3267 WQ[%d] failed to memmap " 15617 "pci barset:x%x\n", 15618 wq->queue_id, pci_barset); 15619 status = -ENOMEM; 15620 goto out; 15621 } 15622 db_offset = wq_create->u.response_1.doorbell_offset; 15623 wq->db_regaddr = bar_memmap_p + db_offset; 15624 wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, 15625 &wq_create->u.response_1); 15626 dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar, 15627 
&wq_create->u.response_1); 15628 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, 15629 dpp_barset); 15630 if (!bar_memmap_p) { 15631 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15632 "3268 WQ[%d] failed to memmap " 15633 "pci barset:x%x\n", 15634 wq->queue_id, dpp_barset); 15635 status = -ENOMEM; 15636 goto out; 15637 } 15638 dpp_offset = wq_create->u.response_1.dpp_offset; 15639 wq->dpp_regaddr = bar_memmap_p + dpp_offset; 15640 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15641 "3271 WQ[%d]: barset:x%x, offset:x%x, " 15642 "dpp_id:x%x dpp_barset:x%x " 15643 "dpp_offset:x%x\n", 15644 wq->queue_id, pci_barset, db_offset, 15645 wq->dpp_id, dpp_barset, dpp_offset); 15646 15647 /* Enable combined writes for DPP aperture */ 15648 pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; 15649 #ifdef CONFIG_X86 15650 rc = set_memory_wc(pg_addr, 1); 15651 if (rc) { 15652 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15653 "3272 Cannot setup Combined " 15654 "Write on WQ[%d] - disable DPP\n", 15655 wq->queue_id); 15656 phba->cfg_enable_dpp = 0; 15657 } 15658 #else 15659 phba->cfg_enable_dpp = 0; 15660 #endif 15661 } else 15662 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15663 } 15664 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15665 if (wq->pring == NULL) { 15666 status = -ENOMEM; 15667 goto out; 15668 } 15669 wq->type = LPFC_WQ; 15670 wq->assoc_qid = cq->queue_id; 15671 wq->subtype = subtype; 15672 wq->host_index = 0; 15673 wq->hba_index = 0; 15674 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 15675 15676 /* link the wq onto the parent cq child list */ 15677 list_add_tail(&wq->list, &cq->child_list); 15678 out: 15679 mempool_free(mbox, phba->mbox_mem_pool); 15680 return status; 15681 } 15682 15683 /** 15684 * lpfc_rq_create - Create a Receive Queue on the HBA 15685 * @phba: HBA structure that indicates port to create a queue on. 15686 * @hrq: The queue structure to use to create the header receive queue. 15687 * @drq: The queue structure to use to create the data receive queue. 15688 * @cq: The completion queue to bind this work queue to. 15689 * 15690 * This function creates a receive buffer queue pair , as detailed in @hrq and 15691 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15692 * to the HBA. 15693 * 15694 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15695 * struct is used to get the entry count that is necessary to determine the 15696 * number of pages to use for this queue. The @cq is used to indicate which 15697 * completion queue to bind received buffers that are posted to these queues to. 15698 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15699 * receive queue pair. This function is asynchronous and will wait for the 15700 * mailbox command to finish before continuing. 15701 * 15702 * On success this function will return a zero. If unable to allocate enough 15703 * memory this function will return -ENOMEM. If the queue create mailbox command 15704 * fails this function will return -ENXIO. 
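 *
 * A minimal caller sketch (illustrative; LPFC_ELS is only an example
 * subtype). Note that @hrq and @drq must be sized with identical entry
 * counts or this routine returns -EINVAL:
 *
 *   rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_ELS);
 *   if (rc)
 *           return rc;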
15705 **/ 15706 int 15707 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15708 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15709 { 15710 struct lpfc_mbx_rq_create *rq_create; 15711 struct lpfc_dmabuf *dmabuf; 15712 LPFC_MBOXQ_t *mbox; 15713 int rc, length, status = 0; 15714 uint32_t shdr_status, shdr_add_status; 15715 union lpfc_sli4_cfg_shdr *shdr; 15716 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15717 void __iomem *bar_memmap_p; 15718 uint32_t db_offset; 15719 uint16_t pci_barset; 15720 15721 /* sanity check on queue memory */ 15722 if (!hrq || !drq || !cq) 15723 return -ENODEV; 15724 if (!phba->sli4_hba.pc_sli4_params.supported) 15725 hw_page_size = SLI4_PAGE_SIZE; 15726 15727 if (hrq->entry_count != drq->entry_count) 15728 return -EINVAL; 15729 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15730 if (!mbox) 15731 return -ENOMEM; 15732 length = (sizeof(struct lpfc_mbx_rq_create) - 15733 sizeof(struct lpfc_sli4_cfg_mhdr)); 15734 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15735 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15736 length, LPFC_SLI4_MBX_EMBED); 15737 rq_create = &mbox->u.mqe.un.rq_create; 15738 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15739 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15740 phba->sli4_hba.pc_sli4_params.rqv); 15741 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15742 bf_set(lpfc_rq_context_rqe_count_1, 15743 &rq_create->u.request.context, 15744 hrq->entry_count); 15745 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15746 bf_set(lpfc_rq_context_rqe_size, 15747 &rq_create->u.request.context, 15748 LPFC_RQE_SIZE_8); 15749 bf_set(lpfc_rq_context_page_size, 15750 &rq_create->u.request.context, 15751 LPFC_RQ_PAGE_SIZE_4096); 15752 } else { 15753 switch (hrq->entry_count) { 15754 default: 15755 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15756 "2535 Unsupported RQ count. (%d)\n", 15757 hrq->entry_count); 15758 if (hrq->entry_count < 512) { 15759 status = -EINVAL; 15760 goto out; 15761 } 15762 /* otherwise default to smallest count (drop through) */ 15763 case 512: 15764 bf_set(lpfc_rq_context_rqe_count, 15765 &rq_create->u.request.context, 15766 LPFC_RQ_RING_SIZE_512); 15767 break; 15768 case 1024: 15769 bf_set(lpfc_rq_context_rqe_count, 15770 &rq_create->u.request.context, 15771 LPFC_RQ_RING_SIZE_1024); 15772 break; 15773 case 2048: 15774 bf_set(lpfc_rq_context_rqe_count, 15775 &rq_create->u.request.context, 15776 LPFC_RQ_RING_SIZE_2048); 15777 break; 15778 case 4096: 15779 bf_set(lpfc_rq_context_rqe_count, 15780 &rq_create->u.request.context, 15781 LPFC_RQ_RING_SIZE_4096); 15782 break; 15783 } 15784 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15785 LPFC_HDR_BUF_SIZE); 15786 } 15787 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15788 cq->queue_id); 15789 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15790 hrq->page_count); 15791 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15792 memset(dmabuf->virt, 0, hw_page_size); 15793 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15794 putPaddrLow(dmabuf->phys); 15795 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15796 putPaddrHigh(dmabuf->phys); 15797 } 15798 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15799 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15800 15801 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15802 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15803 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15804 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15805 if (shdr_status || shdr_add_status || rc) { 15806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15807 "2504 RQ_CREATE mailbox failed with " 15808 "status x%x add_status x%x, mbx status x%x\n", 15809 shdr_status, shdr_add_status, rc); 15810 status = -ENXIO; 15811 goto out; 15812 } 15813 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15814 if (hrq->queue_id == 0xFFFF) { 15815 status = -ENXIO; 15816 goto out; 15817 } 15818 15819 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15820 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15821 &rq_create->u.response); 15822 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15823 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15824 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15825 "3262 RQ [%d] doorbell format not " 15826 "supported: x%x\n", hrq->queue_id, 15827 hrq->db_format); 15828 status = -EINVAL; 15829 goto out; 15830 } 15831 15832 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15833 &rq_create->u.response); 15834 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15835 if (!bar_memmap_p) { 15836 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15837 "3269 RQ[%d] failed to memmap pci " 15838 "barset:x%x\n", hrq->queue_id, 15839 pci_barset); 15840 status = -ENOMEM; 15841 goto out; 15842 } 15843 15844 db_offset = rq_create->u.response.doorbell_offset; 15845 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15846 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15847 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15848 "3270 RQ[%d] doorbell offset not " 15849 "supported: x%x\n", hrq->queue_id, 15850 db_offset); 15851 status = -EINVAL; 15852 goto out; 15853 } 15854 hrq->db_regaddr = bar_memmap_p + db_offset; 15855 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15856 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15857 "format:x%x\n", hrq->queue_id, pci_barset, 15858 db_offset, hrq->db_format); 15859 } else { 15860 hrq->db_format = LPFC_DB_RING_FORMAT; 15861 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15862 } 15863 hrq->type = LPFC_HRQ; 15864 hrq->assoc_qid = cq->queue_id; 15865 hrq->subtype = subtype; 15866 hrq->host_index = 0; 15867 hrq->hba_index = 0; 15868 hrq->entry_repost = LPFC_RQ_REPOST; 15869 15870 /* now create the data queue */ 15871 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15872 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15873 length, LPFC_SLI4_MBX_EMBED); 15874 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15875 phba->sli4_hba.pc_sli4_params.rqv); 15876 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15877 bf_set(lpfc_rq_context_rqe_count_1, 15878 &rq_create->u.request.context, hrq->entry_count); 15879 if (subtype == LPFC_NVMET) 15880 rq_create->u.request.context.buffer_size = 15881 LPFC_NVMET_DATA_BUF_SIZE; 15882 else 15883 rq_create->u.request.context.buffer_size = 15884 LPFC_DATA_BUF_SIZE; 15885 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15886 LPFC_RQE_SIZE_8); 15887 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15888 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15889 } else { 15890 switch (drq->entry_count) { 15891 default: 15892 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15893 "2536 Unsupported RQ count. 
(%d)\n", 15894 drq->entry_count); 15895 if (drq->entry_count < 512) { 15896 status = -EINVAL; 15897 goto out; 15898 } 15899 /* otherwise default to smallest count (drop through) */ 15900 case 512: 15901 bf_set(lpfc_rq_context_rqe_count, 15902 &rq_create->u.request.context, 15903 LPFC_RQ_RING_SIZE_512); 15904 break; 15905 case 1024: 15906 bf_set(lpfc_rq_context_rqe_count, 15907 &rq_create->u.request.context, 15908 LPFC_RQ_RING_SIZE_1024); 15909 break; 15910 case 2048: 15911 bf_set(lpfc_rq_context_rqe_count, 15912 &rq_create->u.request.context, 15913 LPFC_RQ_RING_SIZE_2048); 15914 break; 15915 case 4096: 15916 bf_set(lpfc_rq_context_rqe_count, 15917 &rq_create->u.request.context, 15918 LPFC_RQ_RING_SIZE_4096); 15919 break; 15920 } 15921 if (subtype == LPFC_NVMET) 15922 bf_set(lpfc_rq_context_buf_size, 15923 &rq_create->u.request.context, 15924 LPFC_NVMET_DATA_BUF_SIZE); 15925 else 15926 bf_set(lpfc_rq_context_buf_size, 15927 &rq_create->u.request.context, 15928 LPFC_DATA_BUF_SIZE); 15929 } 15930 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15931 cq->queue_id); 15932 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15933 drq->page_count); 15934 list_for_each_entry(dmabuf, &drq->page_list, list) { 15935 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15936 putPaddrLow(dmabuf->phys); 15937 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15938 putPaddrHigh(dmabuf->phys); 15939 } 15940 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15941 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15942 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15943 /* The IOCTL status is embedded in the mailbox subheader. */ 15944 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15945 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15946 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15947 if (shdr_status || shdr_add_status || rc) { 15948 status = -ENXIO; 15949 goto out; 15950 } 15951 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15952 if (drq->queue_id == 0xFFFF) { 15953 status = -ENXIO; 15954 goto out; 15955 } 15956 drq->type = LPFC_DRQ; 15957 drq->assoc_qid = cq->queue_id; 15958 drq->subtype = subtype; 15959 drq->host_index = 0; 15960 drq->hba_index = 0; 15961 drq->entry_repost = LPFC_RQ_REPOST; 15962 15963 /* link the header and data RQs onto the parent cq child list */ 15964 list_add_tail(&hrq->list, &cq->child_list); 15965 list_add_tail(&drq->list, &cq->child_list); 15966 15967 out: 15968 mempool_free(mbox, phba->mbox_mem_pool); 15969 return status; 15970 } 15971 15972 /** 15973 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA 15974 * @phba: HBA structure that indicates port to create a queue on. 15975 * @hrqp: The queue structure array to use to create the header receive queues. 15976 * @drqp: The queue structure array to use to create the data receive queues. 15977 * @cqp: The completion queue array to bind these receive queues to. 15978 * 15979 * This function creates a receive buffer queue pair , as detailed in @hrq and 15980 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15981 * to the HBA. 15982 * 15983 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15984 * struct is used to get the entry count that is necessary to determine the 15985 * number of pages to use for this queue. The @cq is used to indicate which 15986 * completion queue to bind received buffers that are posted to these queues to. 
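 *
 * A sketch of the resulting queue id layout (derived from the assignment
 * made at the end of this routine; base_qid stands for the queue id
 * returned in the mailbox response and phba->cfg_nvmet_mrq is the number
 * of MRQ pairs):
 *
 *   for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
 *           hrqp[idx]->queue_id = base_qid + (2 * idx);
 *           drqp[idx]->queue_id = base_qid + (2 * idx) + 1;
 *   }
 *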
15987 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15988 * receive queue pair. This function is asynchronous and will wait for the 15989 * mailbox command to finish before continuing. 15990 * 15991 * On success this function will return a zero. If unable to allocate enough 15992 * memory this function will return -ENOMEM. If the queue create mailbox command 15993 * fails this function will return -ENXIO. 15994 **/ 15995 int 15996 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15997 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15998 uint32_t subtype) 15999 { 16000 struct lpfc_queue *hrq, *drq, *cq; 16001 struct lpfc_mbx_rq_create_v2 *rq_create; 16002 struct lpfc_dmabuf *dmabuf; 16003 LPFC_MBOXQ_t *mbox; 16004 int rc, length, alloclen, status = 0; 16005 int cnt, idx, numrq, page_idx = 0; 16006 uint32_t shdr_status, shdr_add_status; 16007 union lpfc_sli4_cfg_shdr *shdr; 16008 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 16009 16010 numrq = phba->cfg_nvmet_mrq; 16011 /* sanity check on array memory */ 16012 if (!hrqp || !drqp || !cqp || !numrq) 16013 return -ENODEV; 16014 if (!phba->sli4_hba.pc_sli4_params.supported) 16015 hw_page_size = SLI4_PAGE_SIZE; 16016 16017 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16018 if (!mbox) 16019 return -ENOMEM; 16020 16021 length = sizeof(struct lpfc_mbx_rq_create_v2); 16022 length += ((2 * numrq * hrqp[0]->page_count) * 16023 sizeof(struct dma_address)); 16024 16025 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16026 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 16027 LPFC_SLI4_MBX_NEMBED); 16028 if (alloclen < length) { 16029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16030 "3099 Allocated DMA memory size (%d) is " 16031 "less than the requested DMA memory size " 16032 "(%d)\n", alloclen, length); 16033 status = -ENOMEM; 16034 goto out; 16035 } 16036 16037 16038 16039 rq_create = mbox->sge_array->addr[0]; 16040 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 16041 16042 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 16043 cnt = 0; 16044 16045 for (idx = 0; idx < numrq; idx++) { 16046 hrq = hrqp[idx]; 16047 drq = drqp[idx]; 16048 cq = cqp[idx]; 16049 16050 /* sanity check on queue memory */ 16051 if (!hrq || !drq || !cq) { 16052 status = -ENODEV; 16053 goto out; 16054 } 16055 16056 if (hrq->entry_count != drq->entry_count) { 16057 status = -EINVAL; 16058 goto out; 16059 } 16060 16061 if (idx == 0) { 16062 bf_set(lpfc_mbx_rq_create_num_pages, 16063 &rq_create->u.request, 16064 hrq->page_count); 16065 bf_set(lpfc_mbx_rq_create_rq_cnt, 16066 &rq_create->u.request, (numrq * 2)); 16067 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 16068 1); 16069 bf_set(lpfc_rq_context_base_cq, 16070 &rq_create->u.request.context, 16071 cq->queue_id); 16072 bf_set(lpfc_rq_context_data_size, 16073 &rq_create->u.request.context, 16074 LPFC_NVMET_DATA_BUF_SIZE); 16075 bf_set(lpfc_rq_context_hdr_size, 16076 &rq_create->u.request.context, 16077 LPFC_HDR_BUF_SIZE); 16078 bf_set(lpfc_rq_context_rqe_count_1, 16079 &rq_create->u.request.context, 16080 hrq->entry_count); 16081 bf_set(lpfc_rq_context_rqe_size, 16082 &rq_create->u.request.context, 16083 LPFC_RQE_SIZE_8); 16084 bf_set(lpfc_rq_context_page_size, 16085 &rq_create->u.request.context, 16086 (PAGE_SIZE/SLI4_PAGE_SIZE)); 16087 } 16088 rc = 0; 16089 list_for_each_entry(dmabuf, &hrq->page_list, list) { 16090 memset(dmabuf->virt, 0, hw_page_size); 16091 cnt = page_idx + dmabuf->buffer_tag; 
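			/* cnt indexes one flat page array shared by every
			 * header and data RQ carried in this command;
			 * page_idx is the running count of slots already used
			 */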
16092 rq_create->u.request.page[cnt].addr_lo = 16093 putPaddrLow(dmabuf->phys); 16094 rq_create->u.request.page[cnt].addr_hi = 16095 putPaddrHigh(dmabuf->phys); 16096 rc++; 16097 } 16098 page_idx += rc; 16099 16100 rc = 0; 16101 list_for_each_entry(dmabuf, &drq->page_list, list) { 16102 memset(dmabuf->virt, 0, hw_page_size); 16103 cnt = page_idx + dmabuf->buffer_tag; 16104 rq_create->u.request.page[cnt].addr_lo = 16105 putPaddrLow(dmabuf->phys); 16106 rq_create->u.request.page[cnt].addr_hi = 16107 putPaddrHigh(dmabuf->phys); 16108 rc++; 16109 } 16110 page_idx += rc; 16111 16112 hrq->db_format = LPFC_DB_RING_FORMAT; 16113 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16114 hrq->type = LPFC_HRQ; 16115 hrq->assoc_qid = cq->queue_id; 16116 hrq->subtype = subtype; 16117 hrq->host_index = 0; 16118 hrq->hba_index = 0; 16119 hrq->entry_repost = LPFC_RQ_REPOST; 16120 16121 drq->db_format = LPFC_DB_RING_FORMAT; 16122 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 16123 drq->type = LPFC_DRQ; 16124 drq->assoc_qid = cq->queue_id; 16125 drq->subtype = subtype; 16126 drq->host_index = 0; 16127 drq->hba_index = 0; 16128 drq->entry_repost = LPFC_RQ_REPOST; 16129 16130 list_add_tail(&hrq->list, &cq->child_list); 16131 list_add_tail(&drq->list, &cq->child_list); 16132 } 16133 16134 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16135 /* The IOCTL status is embedded in the mailbox subheader. */ 16136 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16137 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16138 if (shdr_status || shdr_add_status || rc) { 16139 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16140 "3120 RQ_CREATE mailbox failed with " 16141 "status x%x add_status x%x, mbx status x%x\n", 16142 shdr_status, shdr_add_status, rc); 16143 status = -ENXIO; 16144 goto out; 16145 } 16146 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 16147 if (rc == 0xFFFF) { 16148 status = -ENXIO; 16149 goto out; 16150 } 16151 16152 /* Initialize all RQs with associated queue id */ 16153 for (idx = 0; idx < numrq; idx++) { 16154 hrq = hrqp[idx]; 16155 hrq->queue_id = rc + (2 * idx); 16156 drq = drqp[idx]; 16157 drq->queue_id = rc + (2 * idx) + 1; 16158 } 16159 16160 out: 16161 lpfc_sli4_mbox_cmd_free(phba, mbox); 16162 return status; 16163 } 16164 16165 /** 16166 * lpfc_eq_destroy - Destroy an event Queue on the HBA 16167 * @eq: The queue structure associated with the queue to destroy. 16168 * 16169 * This function destroys a queue, as detailed in @eq by sending an mailbox 16170 * command, specific to the type of queue, to the HBA. 16171 * 16172 * The @eq struct is used to get the queue ID of the queue to destroy. 16173 * 16174 * On success this function will return a zero. If the queue destroy mailbox 16175 * command fails this function will return -ENXIO. 
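 *
 * A minimal caller sketch (illustrative). Note that the EQ is unlinked
 * from any queue list it is on even when the mailbox command fails:
 *
 *   rc = lpfc_eq_destroy(phba, eq);
 *   if (rc)
 *           return rc;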
16176 **/ 16177 int 16178 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 16179 { 16180 LPFC_MBOXQ_t *mbox; 16181 int rc, length, status = 0; 16182 uint32_t shdr_status, shdr_add_status; 16183 union lpfc_sli4_cfg_shdr *shdr; 16184 16185 /* sanity check on queue memory */ 16186 if (!eq) 16187 return -ENODEV; 16188 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 16189 if (!mbox) 16190 return -ENOMEM; 16191 length = (sizeof(struct lpfc_mbx_eq_destroy) - 16192 sizeof(struct lpfc_sli4_cfg_mhdr)); 16193 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16194 LPFC_MBOX_OPCODE_EQ_DESTROY, 16195 length, LPFC_SLI4_MBX_EMBED); 16196 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 16197 eq->queue_id); 16198 mbox->vport = eq->phba->pport; 16199 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16200 16201 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 16202 /* The IOCTL status is embedded in the mailbox subheader. */ 16203 shdr = (union lpfc_sli4_cfg_shdr *) 16204 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 16205 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16206 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16207 if (shdr_status || shdr_add_status || rc) { 16208 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16209 "2505 EQ_DESTROY mailbox failed with " 16210 "status x%x add_status x%x, mbx status x%x\n", 16211 shdr_status, shdr_add_status, rc); 16212 status = -ENXIO; 16213 } 16214 16215 /* Remove eq from any list */ 16216 list_del_init(&eq->list); 16217 mempool_free(mbox, eq->phba->mbox_mem_pool); 16218 return status; 16219 } 16220 16221 /** 16222 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 16223 * @cq: The queue structure associated with the queue to destroy. 16224 * 16225 * This function destroys a queue, as detailed in @cq by sending an mailbox 16226 * command, specific to the type of queue, to the HBA. 16227 * 16228 * The @cq struct is used to get the queue ID of the queue to destroy. 16229 * 16230 * On success this function will return a zero. If the queue destroy mailbox 16231 * command fails this function will return -ENXIO. 16232 **/ 16233 int 16234 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 16235 { 16236 LPFC_MBOXQ_t *mbox; 16237 int rc, length, status = 0; 16238 uint32_t shdr_status, shdr_add_status; 16239 union lpfc_sli4_cfg_shdr *shdr; 16240 16241 /* sanity check on queue memory */ 16242 if (!cq) 16243 return -ENODEV; 16244 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 16245 if (!mbox) 16246 return -ENOMEM; 16247 length = (sizeof(struct lpfc_mbx_cq_destroy) - 16248 sizeof(struct lpfc_sli4_cfg_mhdr)); 16249 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16250 LPFC_MBOX_OPCODE_CQ_DESTROY, 16251 length, LPFC_SLI4_MBX_EMBED); 16252 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 16253 cq->queue_id); 16254 mbox->vport = cq->phba->pport; 16255 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16256 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 16257 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16258 shdr = (union lpfc_sli4_cfg_shdr *) 16259 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 16260 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16261 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16262 if (shdr_status || shdr_add_status || rc) { 16263 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16264 "2506 CQ_DESTROY mailbox failed with " 16265 "status x%x add_status x%x, mbx status x%x\n", 16266 shdr_status, shdr_add_status, rc); 16267 status = -ENXIO; 16268 } 16269 /* Remove cq from any list */ 16270 list_del_init(&cq->list); 16271 mempool_free(mbox, cq->phba->mbox_mem_pool); 16272 return status; 16273 } 16274 16275 /** 16276 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 16277 * @qm: The queue structure associated with the queue to destroy. 16278 * 16279 * This function destroys a queue, as detailed in @mq by sending an mailbox 16280 * command, specific to the type of queue, to the HBA. 16281 * 16282 * The @mq struct is used to get the queue ID of the queue to destroy. 16283 * 16284 * On success this function will return a zero. If the queue destroy mailbox 16285 * command fails this function will return -ENXIO. 16286 **/ 16287 int 16288 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 16289 { 16290 LPFC_MBOXQ_t *mbox; 16291 int rc, length, status = 0; 16292 uint32_t shdr_status, shdr_add_status; 16293 union lpfc_sli4_cfg_shdr *shdr; 16294 16295 /* sanity check on queue memory */ 16296 if (!mq) 16297 return -ENODEV; 16298 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 16299 if (!mbox) 16300 return -ENOMEM; 16301 length = (sizeof(struct lpfc_mbx_mq_destroy) - 16302 sizeof(struct lpfc_sli4_cfg_mhdr)); 16303 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16304 LPFC_MBOX_OPCODE_MQ_DESTROY, 16305 length, LPFC_SLI4_MBX_EMBED); 16306 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 16307 mq->queue_id); 16308 mbox->vport = mq->phba->pport; 16309 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16310 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 16311 /* The IOCTL status is embedded in the mailbox subheader. */ 16312 shdr = (union lpfc_sli4_cfg_shdr *) 16313 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 16314 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16315 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16316 if (shdr_status || shdr_add_status || rc) { 16317 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16318 "2507 MQ_DESTROY mailbox failed with " 16319 "status x%x add_status x%x, mbx status x%x\n", 16320 shdr_status, shdr_add_status, rc); 16321 status = -ENXIO; 16322 } 16323 /* Remove mq from any list */ 16324 list_del_init(&mq->list); 16325 mempool_free(mbox, mq->phba->mbox_mem_pool); 16326 return status; 16327 } 16328 16329 /** 16330 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 16331 * @wq: The queue structure associated with the queue to destroy. 16332 * 16333 * This function destroys a queue, as detailed in @wq by sending an mailbox 16334 * command, specific to the type of queue, to the HBA. 16335 * 16336 * The @wq struct is used to get the queue ID of the queue to destroy. 16337 * 16338 * On success this function will return a zero. If the queue destroy mailbox 16339 * command fails this function will return -ENXIO. 
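 *
 * A minimal caller sketch (illustrative). The ring structure allocated by
 * lpfc_wq_create() (wq->pring) is freed here regardless of the mailbox
 * outcome, so the caller must not reference it afterwards:
 *
 *   rc = lpfc_wq_destroy(phba, wq);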
16340 **/ 16341 int 16342 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 16343 { 16344 LPFC_MBOXQ_t *mbox; 16345 int rc, length, status = 0; 16346 uint32_t shdr_status, shdr_add_status; 16347 union lpfc_sli4_cfg_shdr *shdr; 16348 16349 /* sanity check on queue memory */ 16350 if (!wq) 16351 return -ENODEV; 16352 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 16353 if (!mbox) 16354 return -ENOMEM; 16355 length = (sizeof(struct lpfc_mbx_wq_destroy) - 16356 sizeof(struct lpfc_sli4_cfg_mhdr)); 16357 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16358 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 16359 length, LPFC_SLI4_MBX_EMBED); 16360 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 16361 wq->queue_id); 16362 mbox->vport = wq->phba->pport; 16363 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16364 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 16365 shdr = (union lpfc_sli4_cfg_shdr *) 16366 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 16367 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16368 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16369 if (shdr_status || shdr_add_status || rc) { 16370 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16371 "2508 WQ_DESTROY mailbox failed with " 16372 "status x%x add_status x%x, mbx status x%x\n", 16373 shdr_status, shdr_add_status, rc); 16374 status = -ENXIO; 16375 } 16376 /* Remove wq from any list */ 16377 list_del_init(&wq->list); 16378 kfree(wq->pring); 16379 wq->pring = NULL; 16380 mempool_free(mbox, wq->phba->mbox_mem_pool); 16381 return status; 16382 } 16383 16384 /** 16385 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 16386 * @rq: The queue structure associated with the queue to destroy. 16387 * 16388 * This function destroys a queue, as detailed in @rq by sending an mailbox 16389 * command, specific to the type of queue, to the HBA. 16390 * 16391 * The @rq struct is used to get the queue ID of the queue to destroy. 16392 * 16393 * On success this function will return a zero. If the queue destroy mailbox 16394 * command fails this function will return -ENXIO. 16395 **/ 16396 int 16397 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 16398 struct lpfc_queue *drq) 16399 { 16400 LPFC_MBOXQ_t *mbox; 16401 int rc, length, status = 0; 16402 uint32_t shdr_status, shdr_add_status; 16403 union lpfc_sli4_cfg_shdr *shdr; 16404 16405 /* sanity check on queue memory */ 16406 if (!hrq || !drq) 16407 return -ENODEV; 16408 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 16409 if (!mbox) 16410 return -ENOMEM; 16411 length = (sizeof(struct lpfc_mbx_rq_destroy) - 16412 sizeof(struct lpfc_sli4_cfg_mhdr)); 16413 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16414 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 16415 length, LPFC_SLI4_MBX_EMBED); 16416 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16417 hrq->queue_id); 16418 mbox->vport = hrq->phba->pport; 16419 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16420 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 16421 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 16422 shdr = (union lpfc_sli4_cfg_shdr *) 16423 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16424 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16425 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16426 if (shdr_status || shdr_add_status || rc) { 16427 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16428 "2509 RQ_DESTROY mailbox failed with " 16429 "status x%x add_status x%x, mbx status x%x\n", 16430 shdr_status, shdr_add_status, rc); 16431 if (rc != MBX_TIMEOUT) 16432 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16433 return -ENXIO; 16434 } 16435 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 16436 drq->queue_id); 16437 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 16438 shdr = (union lpfc_sli4_cfg_shdr *) 16439 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 16440 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16441 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16442 if (shdr_status || shdr_add_status || rc) { 16443 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16444 "2510 RQ_DESTROY mailbox failed with " 16445 "status x%x add_status x%x, mbx status x%x\n", 16446 shdr_status, shdr_add_status, rc); 16447 status = -ENXIO; 16448 } 16449 list_del_init(&hrq->list); 16450 list_del_init(&drq->list); 16451 mempool_free(mbox, hrq->phba->mbox_mem_pool); 16452 return status; 16453 } 16454 16455 /** 16456 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 16457 * @phba: The HBA structure for which this call is being executed. 16458 * @pdma_phys_addr0: Physical address of the 1st SGL page. 16459 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 16460 * @xritag: the xritag that ties this io to the SGL pages. 16461 * 16462 * This routine will post the sgl pages for the IO that has the xritag 16463 * that is in the iocbq structure. The xritag is assigned during iocbq 16464 * creation and persists for as long as the driver is loaded. 16465 * If the caller has fewer than 256 scatter gather segments to map, then 16466 * pdma_phys_addr1 should be 0. 16467 * If the caller needs to map more than 256 scatter gather segments, then 16468 * pdma_phys_addr1 should be a valid physical address. 16469 * Physical addresses for SGLs must be 64-byte aligned. 16470 * If two SGL pages are mapped, the first one must have 256 entries and 16471 * the second can have between 1 and 256 entries.
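 *
 * A minimal caller sketch (illustrative; a single SGL page is posted, so
 * the second page address is 0; sglq stands for a driver struct lpfc_sglq
 * entry):
 *
 *   rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);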
16472 * 16473 * Return codes: 16474 * 0 - Success 16475 * -ENXIO, -ENOMEM - Failure 16476 **/ 16477 int 16478 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 16479 dma_addr_t pdma_phys_addr0, 16480 dma_addr_t pdma_phys_addr1, 16481 uint16_t xritag) 16482 { 16483 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 16484 LPFC_MBOXQ_t *mbox; 16485 int rc; 16486 uint32_t shdr_status, shdr_add_status; 16487 uint32_t mbox_tmo; 16488 union lpfc_sli4_cfg_shdr *shdr; 16489 16490 if (xritag == NO_XRI) { 16491 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16492 "0364 Invalid param:\n"); 16493 return -EINVAL; 16494 } 16495 16496 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16497 if (!mbox) 16498 return -ENOMEM; 16499 16500 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16501 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 16502 sizeof(struct lpfc_mbx_post_sgl_pages) - 16503 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16504 16505 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 16506 &mbox->u.mqe.un.post_sgl_pages; 16507 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 16508 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 16509 16510 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 16511 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 16512 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 16513 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 16514 16515 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 16516 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 16517 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 16518 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 16519 if (!phba->sli4_hba.intr_enable) 16520 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16521 else { 16522 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16523 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16524 } 16525 /* The IOCTL status is embedded in the mailbox subheader. */ 16526 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 16527 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16528 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16529 if (rc != MBX_TIMEOUT) 16530 mempool_free(mbox, phba->mbox_mem_pool); 16531 if (shdr_status || shdr_add_status || rc) { 16532 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16533 "2511 POST_SGL mailbox failed with " 16534 "status x%x add_status x%x, mbx status x%x\n", 16535 shdr_status, shdr_add_status, rc); 16536 } 16537 return 0; 16538 } 16539 16540 /** 16541 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 16542 * @phba: pointer to lpfc hba data structure. 16543 * 16544 * This routine is invoked to post rpi header templates to the 16545 * HBA consistent with the SLI-4 interface spec. This routine 16546 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 16547 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 16548 * 16549 * Returns 16550 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 16551 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 16552 **/ 16553 static uint16_t 16554 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 16555 { 16556 unsigned long xri; 16557 16558 /* 16559 * Fetch the next logical xri. Because this index is logical, 16560 * the driver starts at 0 each time. 
16561 */ 16562 spin_lock_irq(&phba->hbalock); 16563 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 16564 phba->sli4_hba.max_cfg_param.max_xri, 0); 16565 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 16566 spin_unlock_irq(&phba->hbalock); 16567 return NO_XRI; 16568 } else { 16569 set_bit(xri, phba->sli4_hba.xri_bmask); 16570 phba->sli4_hba.max_cfg_param.xri_used++; 16571 } 16572 spin_unlock_irq(&phba->hbalock); 16573 return xri; 16574 } 16575 16576 /** 16577 * lpfc_sli4_free_xri - Release an xri for reuse. 16578 * @phba: pointer to lpfc hba data structure. 16579 * 16580 * This routine is invoked to release an xri to the pool of 16581 * available rpis maintained by the driver. 16582 **/ 16583 static void 16584 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16585 { 16586 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 16587 phba->sli4_hba.max_cfg_param.xri_used--; 16588 } 16589 } 16590 16591 /** 16592 * lpfc_sli4_free_xri - Release an xri for reuse. 16593 * @phba: pointer to lpfc hba data structure. 16594 * 16595 * This routine is invoked to release an xri to the pool of 16596 * available rpis maintained by the driver. 16597 **/ 16598 void 16599 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 16600 { 16601 spin_lock_irq(&phba->hbalock); 16602 __lpfc_sli4_free_xri(phba, xri); 16603 spin_unlock_irq(&phba->hbalock); 16604 } 16605 16606 /** 16607 * lpfc_sli4_next_xritag - Get an xritag for the io 16608 * @phba: Pointer to HBA context object. 16609 * 16610 * This function gets an xritag for the iocb. If there is no unused xritag 16611 * it will return 0xffff. 16612 * The function returns the allocated xritag if successful, else returns zero. 16613 * Zero is not a valid xritag. 16614 * The caller is not required to hold any lock. 16615 **/ 16616 uint16_t 16617 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 16618 { 16619 uint16_t xri_index; 16620 16621 xri_index = lpfc_sli4_alloc_xri(phba); 16622 if (xri_index == NO_XRI) 16623 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 16624 "2004 Failed to allocate XRI.last XRITAG is %d" 16625 " Max XRI is %d, Used XRI is %d\n", 16626 xri_index, 16627 phba->sli4_hba.max_cfg_param.max_xri, 16628 phba->sli4_hba.max_cfg_param.xri_used); 16629 return xri_index; 16630 } 16631 16632 /** 16633 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port. 16634 * @phba: pointer to lpfc hba data structure. 16635 * @post_sgl_list: pointer to els sgl entry list. 16636 * @count: number of els sgl entries on the list. 16637 * 16638 * This routine is invoked to post a block of driver's sgl pages to the 16639 * HBA using non-embedded mailbox command. No Lock is held. This routine 16640 * is only called when the driver is loading and after all IO has been 16641 * stopped. 
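 *
 * The whole non-embedded request must fit in a single SLI4 page. An
 * illustrative restatement of the size check performed below:
 *
 *   reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
 *            sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
 *   if (reqlen > SLI4_PAGE_SIZE)
 *           return -ENOMEM;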
16642 **/ 16643 static int 16644 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16645 struct list_head *post_sgl_list, 16646 int post_cnt) 16647 { 16648 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16649 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16650 struct sgl_page_pairs *sgl_pg_pairs; 16651 void *viraddr; 16652 LPFC_MBOXQ_t *mbox; 16653 uint32_t reqlen, alloclen, pg_pairs; 16654 uint32_t mbox_tmo; 16655 uint16_t xritag_start = 0; 16656 int rc = 0; 16657 uint32_t shdr_status, shdr_add_status; 16658 union lpfc_sli4_cfg_shdr *shdr; 16659 16660 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16661 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16662 if (reqlen > SLI4_PAGE_SIZE) { 16663 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16664 "2559 Block sgl registration required DMA " 16665 "size (%d) great than a page\n", reqlen); 16666 return -ENOMEM; 16667 } 16668 16669 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16670 if (!mbox) 16671 return -ENOMEM; 16672 16673 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16674 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16675 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16676 LPFC_SLI4_MBX_NEMBED); 16677 16678 if (alloclen < reqlen) { 16679 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16680 "0285 Allocated DMA memory size (%d) is " 16681 "less than the requested DMA memory " 16682 "size (%d)\n", alloclen, reqlen); 16683 lpfc_sli4_mbox_cmd_free(phba, mbox); 16684 return -ENOMEM; 16685 } 16686 /* Set up the SGL pages in the non-embedded DMA pages */ 16687 viraddr = mbox->sge_array->addr[0]; 16688 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16689 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16690 16691 pg_pairs = 0; 16692 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16693 /* Set up the sge entry */ 16694 sgl_pg_pairs->sgl_pg0_addr_lo = 16695 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16696 sgl_pg_pairs->sgl_pg0_addr_hi = 16697 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16698 sgl_pg_pairs->sgl_pg1_addr_lo = 16699 cpu_to_le32(putPaddrLow(0)); 16700 sgl_pg_pairs->sgl_pg1_addr_hi = 16701 cpu_to_le32(putPaddrHigh(0)); 16702 16703 /* Keep the first xritag on the list */ 16704 if (pg_pairs == 0) 16705 xritag_start = sglq_entry->sli4_xritag; 16706 sgl_pg_pairs++; 16707 pg_pairs++; 16708 } 16709 16710 /* Complete initialization and perform endian conversion. */ 16711 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16712 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16713 sgl->word0 = cpu_to_le32(sgl->word0); 16714 16715 if (!phba->sli4_hba.intr_enable) 16716 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16717 else { 16718 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16719 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16720 } 16721 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16722 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16723 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16724 if (rc != MBX_TIMEOUT) 16725 lpfc_sli4_mbox_cmd_free(phba, mbox); 16726 if (shdr_status || shdr_add_status || rc) { 16727 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16728 "2513 POST_SGL_BLOCK mailbox command failed " 16729 "status x%x add_status x%x mbx status x%x\n", 16730 shdr_status, shdr_add_status, rc); 16731 rc = -ENXIO; 16732 } 16733 return rc; 16734 } 16735 16736 /** 16737 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 16738 * @phba: pointer to lpfc hba data structure. 
16739 * @sblist: pointer to scsi buffer list. 16740 * @count: number of scsi buffers on the list. 16741 * 16742 * This routine is invoked to post a block of @count scsi sgl pages from a 16743 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 16744 * No Lock is held. 16745 * 16746 **/ 16747 int 16748 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 16749 struct list_head *sblist, 16750 int count) 16751 { 16752 struct lpfc_scsi_buf *psb; 16753 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16754 struct sgl_page_pairs *sgl_pg_pairs; 16755 void *viraddr; 16756 LPFC_MBOXQ_t *mbox; 16757 uint32_t reqlen, alloclen, pg_pairs; 16758 uint32_t mbox_tmo; 16759 uint16_t xritag_start = 0; 16760 int rc = 0; 16761 uint32_t shdr_status, shdr_add_status; 16762 dma_addr_t pdma_phys_bpl1; 16763 union lpfc_sli4_cfg_shdr *shdr; 16764 16765 /* Calculate the requested length of the dma memory */ 16766 reqlen = count * sizeof(struct sgl_page_pairs) + 16767 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16768 if (reqlen > SLI4_PAGE_SIZE) { 16769 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16770 "0217 Block sgl registration required DMA " 16771 "size (%d) great than a page\n", reqlen); 16772 return -ENOMEM; 16773 } 16774 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16775 if (!mbox) { 16776 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16777 "0283 Failed to allocate mbox cmd memory\n"); 16778 return -ENOMEM; 16779 } 16780 16781 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16782 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16783 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16784 LPFC_SLI4_MBX_NEMBED); 16785 16786 if (alloclen < reqlen) { 16787 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16788 "2561 Allocated DMA memory size (%d) is " 16789 "less than the requested DMA memory " 16790 "size (%d)\n", alloclen, reqlen); 16791 lpfc_sli4_mbox_cmd_free(phba, mbox); 16792 return -ENOMEM; 16793 } 16794 16795 /* Get the first SGE entry from the non-embedded DMA memory */ 16796 viraddr = mbox->sge_array->addr[0]; 16797 16798 /* Set up the SGL pages in the non-embedded DMA pages */ 16799 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16800 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16801 16802 pg_pairs = 0; 16803 list_for_each_entry(psb, sblist, list) { 16804 /* Set up the sge entry */ 16805 sgl_pg_pairs->sgl_pg0_addr_lo = 16806 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 16807 sgl_pg_pairs->sgl_pg0_addr_hi = 16808 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 16809 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16810 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 16811 else 16812 pdma_phys_bpl1 = 0; 16813 sgl_pg_pairs->sgl_pg1_addr_lo = 16814 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16815 sgl_pg_pairs->sgl_pg1_addr_hi = 16816 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16817 /* Keep the first xritag on the list */ 16818 if (pg_pairs == 0) 16819 xritag_start = psb->cur_iocbq.sli4_xritag; 16820 sgl_pg_pairs++; 16821 pg_pairs++; 16822 } 16823 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16824 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16825 /* Perform endian conversion if necessary */ 16826 sgl->word0 = cpu_to_le32(sgl->word0); 16827 16828 if (!phba->sli4_hba.intr_enable) 16829 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16830 else { 16831 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16832 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16833 } 16834 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16835 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 16836 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16837 if (rc != MBX_TIMEOUT) 16838 lpfc_sli4_mbox_cmd_free(phba, mbox); 16839 if (shdr_status || shdr_add_status || rc) { 16840 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16841 "2564 POST_SGL_BLOCK mailbox command failed " 16842 "status x%x add_status x%x mbx status x%x\n", 16843 shdr_status, shdr_add_status, rc); 16844 rc = -ENXIO; 16845 } 16846 return rc; 16847 } 16848 16849 /** 16850 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16851 * @phba: pointer to lpfc_hba struct that the frame was received on 16852 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16853 * 16854 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16855 * valid type of frame that the LPFC driver will handle. This function will 16856 * return a zero if the frame is a valid frame or a non zero value when the 16857 * frame does not pass the check. 16858 **/ 16859 static int 16860 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16861 { 16862 /* make rctl_names static to save stack space */ 16863 struct fc_vft_header *fc_vft_hdr; 16864 uint32_t *header = (uint32_t *) fc_hdr; 16865 16866 #define FC_RCTL_MDS_DIAGS 0xF4 16867 16868 switch (fc_hdr->fh_r_ctl) { 16869 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16870 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16871 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16872 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16873 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16874 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16875 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16876 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16877 case FC_RCTL_ELS_REQ: /* extended link services request */ 16878 case FC_RCTL_ELS_REP: /* extended link services reply */ 16879 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16880 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16881 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16882 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16883 case FC_RCTL_BA_RMC: /* remove connection */ 16884 case FC_RCTL_BA_ACC: /* basic accept */ 16885 case FC_RCTL_BA_RJT: /* basic reject */ 16886 case FC_RCTL_BA_PRMT: 16887 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16888 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16889 case FC_RCTL_P_RJT: /* port reject */ 16890 case FC_RCTL_F_RJT: /* fabric reject */ 16891 case FC_RCTL_P_BSY: /* port busy */ 16892 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16893 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16894 case FC_RCTL_LCR: /* link credit reset */ 16895 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16896 case FC_RCTL_END: /* end */ 16897 break; 16898 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16899 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16900 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16901 return lpfc_fc_frame_check(phba, fc_hdr); 16902 default: 16903 goto drop; 16904 } 16905 16906 #define FC_TYPE_VENDOR_UNIQUE 0xFF 16907 16908 switch (fc_hdr->fh_type) { 16909 case FC_TYPE_BLS: 16910 case FC_TYPE_ELS: 16911 case FC_TYPE_FCP: 16912 case FC_TYPE_CT: 16913 case FC_TYPE_NVME: 16914 case FC_TYPE_VENDOR_UNIQUE: 16915 break; 16916 case FC_TYPE_IP: 16917 case FC_TYPE_ILS: 16918 default: 16919 goto drop; 16920 } 16921 16922 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16923 "2538 Received frame rctl:x%x, type:x%x, " 16924 "frame 
Data:%08x %08x %08x %08x %08x %08x %08x\n", 16925 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16926 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16927 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16928 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16929 be32_to_cpu(header[6])); 16930 return 0; 16931 drop: 16932 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16933 "2539 Dropped frame rctl:x%x type:x%x\n", 16934 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16935 return 1; 16936 } 16937 16938 /** 16939 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16940 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16941 * 16942 * This function processes the FC header to retrieve the VFI from the VF 16943 * header, if one exists. This function will return the VFI if one exists 16944 * or 0 if no VSAN Header exists. 16945 **/ 16946 static uint32_t 16947 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16948 { 16949 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16950 16951 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16952 return 0; 16953 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16954 } 16955 16956 /** 16957 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16958 * @phba: Pointer to the HBA structure to search for the vport on 16959 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16960 * @fcfi: The FC Fabric ID that the frame came from 16961 * 16962 * This function searches the @phba for a vport that matches the content of the 16963 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16964 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 16965 * returns the matching vport pointer or NULL if unable to match frame to a 16966 * vport. 16967 **/ 16968 static struct lpfc_vport * 16969 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 16970 uint16_t fcfi, uint32_t did) 16971 { 16972 struct lpfc_vport **vports; 16973 struct lpfc_vport *vport = NULL; 16974 int i; 16975 16976 if (did == Fabric_DID) 16977 return phba->pport; 16978 if ((phba->pport->fc_flag & FC_PT2PT) && 16979 !(phba->link_state == LPFC_HBA_READY)) 16980 return phba->pport; 16981 16982 vports = lpfc_create_vport_work_array(phba); 16983 if (vports != NULL) { 16984 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 16985 if (phba->fcf.fcfi == fcfi && 16986 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 16987 vports[i]->fc_myDID == did) { 16988 vport = vports[i]; 16989 break; 16990 } 16991 } 16992 } 16993 lpfc_destroy_vport_work_array(phba, vports); 16994 return vport; 16995 } 16996 16997 /** 16998 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 16999 * @vport: The vport to work on. 17000 * 17001 * This function updates the receive sequence time stamp for this vport. The 17002 * receive sequence time stamp indicates the time that the last frame of the 17003 * the sequence that has been idle for the longest amount of time was received. 17004 * the driver uses this time stamp to indicate if any received sequences have 17005 * timed out. 
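 *
 * In effect the vport stamp simply mirrors the time_stamp of the oldest
 * sequence still queued on rcv_buffer_list (oldest_seq_dmabuf below is an
 * illustrative name for that head entry):
 *
 *   vport->rcv_buffer_time_stamp = oldest_seq_dmabuf->time_stamp;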
17006 **/ 17007 static void 17008 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 17009 { 17010 struct lpfc_dmabuf *h_buf; 17011 struct hbq_dmabuf *dmabuf = NULL; 17012 17013 /* get the oldest sequence on the rcv list */ 17014 h_buf = list_get_first(&vport->rcv_buffer_list, 17015 struct lpfc_dmabuf, list); 17016 if (!h_buf) 17017 return; 17018 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17019 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 17020 } 17021 17022 /** 17023 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 17024 * @vport: The vport that the received sequences were sent to. 17025 * 17026 * This function cleans up all outstanding received sequences. This is called 17027 * by the driver when a link event or user action invalidates all the received 17028 * sequences. 17029 **/ 17030 void 17031 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 17032 { 17033 struct lpfc_dmabuf *h_buf, *hnext; 17034 struct lpfc_dmabuf *d_buf, *dnext; 17035 struct hbq_dmabuf *dmabuf = NULL; 17036 17037 /* start with the oldest sequence on the rcv list */ 17038 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17039 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17040 list_del_init(&dmabuf->hbuf.list); 17041 list_for_each_entry_safe(d_buf, dnext, 17042 &dmabuf->dbuf.list, list) { 17043 list_del_init(&d_buf->list); 17044 lpfc_in_buf_free(vport->phba, d_buf); 17045 } 17046 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17047 } 17048 } 17049 17050 /** 17051 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 17052 * @vport: The vport that the received sequences were sent to. 17053 * 17054 * This function determines whether any received sequences have timed out by 17055 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 17056 * indicates that there is at least one timed out sequence this routine will 17057 * go through the received sequences one at a time from most inactive to most 17058 * active to determine which ones need to be cleaned up. Once it has determined 17059 * that a sequence needs to be cleaned up it will simply free up the resources 17060 * without sending an abort. 
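 *
 * The per-sequence timeout test used below is, in sketch form:
 *
 *   timeout = msecs_to_jiffies(vport->phba->fc_edtov) + dmabuf->time_stamp;
 *   if (time_before(jiffies, timeout))
 *           break;  /* this and all newer sequences have not timed out */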
17061 **/ 17062 void 17063 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 17064 { 17065 struct lpfc_dmabuf *h_buf, *hnext; 17066 struct lpfc_dmabuf *d_buf, *dnext; 17067 struct hbq_dmabuf *dmabuf = NULL; 17068 unsigned long timeout; 17069 int abort_count = 0; 17070 17071 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17072 vport->rcv_buffer_time_stamp); 17073 if (list_empty(&vport->rcv_buffer_list) || 17074 time_before(jiffies, timeout)) 17075 return; 17076 /* start with the oldest sequence on the rcv list */ 17077 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 17078 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17079 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 17080 dmabuf->time_stamp); 17081 if (time_before(jiffies, timeout)) 17082 break; 17083 abort_count++; 17084 list_del_init(&dmabuf->hbuf.list); 17085 list_for_each_entry_safe(d_buf, dnext, 17086 &dmabuf->dbuf.list, list) { 17087 list_del_init(&d_buf->list); 17088 lpfc_in_buf_free(vport->phba, d_buf); 17089 } 17090 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 17091 } 17092 if (abort_count) 17093 lpfc_update_rcv_time_stamp(vport); 17094 } 17095 17096 /** 17097 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 17098 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 17099 * 17100 * This function searches through the existing incomplete sequences that have 17101 * been sent to this @vport. If the frame matches one of the incomplete 17102 * sequences then the dbuf in the @dmabuf is added to the list of frames that 17103 * make up that sequence. If no sequence is found that matches this frame then 17104 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 17105 * This function returns a pointer to the first dmabuf in the sequence list that 17106 * the frame was linked to. 17107 **/ 17108 static struct hbq_dmabuf * 17109 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 17110 { 17111 struct fc_frame_header *new_hdr; 17112 struct fc_frame_header *temp_hdr; 17113 struct lpfc_dmabuf *d_buf; 17114 struct lpfc_dmabuf *h_buf; 17115 struct hbq_dmabuf *seq_dmabuf = NULL; 17116 struct hbq_dmabuf *temp_dmabuf = NULL; 17117 uint8_t found = 0; 17118 17119 INIT_LIST_HEAD(&dmabuf->dbuf.list); 17120 dmabuf->time_stamp = jiffies; 17121 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17122 17123 /* Use the hdr_buf to find the sequence that this frame belongs to */ 17124 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 17125 temp_hdr = (struct fc_frame_header *)h_buf->virt; 17126 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 17127 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 17128 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 17129 continue; 17130 /* found a pending sequence that matches this frame */ 17131 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 17132 break; 17133 } 17134 if (!seq_dmabuf) { 17135 /* 17136 * This indicates first frame received for this sequence. 17137 * Queue the buffer on the vport's rcv_buffer_list. 
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described by
 * the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it shall free up
 * all the frames from the partially assembled sequence.
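 *
 * A frame is considered part of the same sequence when its fh_seq_id,
 * fh_ox_id and 3-byte fh_s_id all match those of a header buffer already
 * queued on the vport's rcv_buffer_list; only a sequence matched this way
 * has its queued data buffers freed.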
 *
 * Return
 * true  -- if a matching partially assembled sequence is present and all of
 *          its frames were freed;
 * false -- if there is no matching partially assembled sequence present, so
 *          nothing was aborted in the lower layer driver.
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a sequence that has already been assembled
 * and passed to the upper level protocol, described by the information from
 * the basic abort @dmabuf. It checks whether such a pending context exists
 * at the upper level protocol. If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if a matching pending context of the sequence was cleaned up
 *          at the ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at the ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Give all interested upper level protocols a chance to handle it */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
17282 **/ 17283 static void 17284 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 17285 struct lpfc_iocbq *cmd_iocbq, 17286 struct lpfc_iocbq *rsp_iocbq) 17287 { 17288 struct lpfc_nodelist *ndlp; 17289 17290 if (cmd_iocbq) { 17291 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 17292 lpfc_nlp_put(ndlp); 17293 lpfc_nlp_not_used(ndlp); 17294 lpfc_sli_release_iocbq(phba, cmd_iocbq); 17295 } 17296 17297 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 17298 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 17299 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17300 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 17301 rsp_iocbq->iocb.ulpStatus, 17302 rsp_iocbq->iocb.un.ulpWord[4]); 17303 } 17304 17305 /** 17306 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 17307 * @phba: Pointer to HBA context object. 17308 * @xri: xri id in transaction. 17309 * 17310 * This function validates the xri maps to the known range of XRIs allocated an 17311 * used by the driver. 17312 **/ 17313 uint16_t 17314 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 17315 uint16_t xri) 17316 { 17317 uint16_t i; 17318 17319 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 17320 if (xri == phba->sli4_hba.xri_ids[i]) 17321 return i; 17322 } 17323 return NO_XRI; 17324 } 17325 17326 /** 17327 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 17328 * @phba: Pointer to HBA context object. 17329 * @fc_hdr: pointer to a FC frame header. 17330 * 17331 * This function sends a basic response to a previous unsol sequence abort 17332 * event after aborting the sequence handling. 17333 **/ 17334 void 17335 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 17336 struct fc_frame_header *fc_hdr, bool aborted) 17337 { 17338 struct lpfc_hba *phba = vport->phba; 17339 struct lpfc_iocbq *ctiocb = NULL; 17340 struct lpfc_nodelist *ndlp; 17341 uint16_t oxid, rxid, xri, lxri; 17342 uint32_t sid, fctl; 17343 IOCB_t *icmd; 17344 int rc; 17345 17346 if (!lpfc_is_link_up(phba)) 17347 return; 17348 17349 sid = sli4_sid_from_fc_hdr(fc_hdr); 17350 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 17351 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 17352 17353 ndlp = lpfc_findnode_did(vport, sid); 17354 if (!ndlp) { 17355 ndlp = lpfc_nlp_init(vport, sid); 17356 if (!ndlp) { 17357 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17358 "1268 Failed to allocate ndlp for " 17359 "oxid:x%x SID:x%x\n", oxid, sid); 17360 return; 17361 } 17362 /* Put ndlp onto pport node list */ 17363 lpfc_enqueue_node(vport, ndlp); 17364 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 17365 /* re-setup ndlp without removing from node list */ 17366 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 17367 if (!ndlp) { 17368 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 17369 "3275 Failed to active ndlp found " 17370 "for oxid:x%x SID:x%x\n", oxid, sid); 17371 return; 17372 } 17373 } 17374 17375 /* Allocate buffer for rsp iocb */ 17376 ctiocb = lpfc_sli_get_iocbq(phba); 17377 if (!ctiocb) 17378 return; 17379 17380 /* Extract the F_CTL field from FC_HDR */ 17381 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 17382 17383 icmd = &ctiocb->iocb; 17384 icmd->un.xseq64.bdl.bdeSize = 0; 17385 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 17386 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 17387 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 17388 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 17389 17390 /* Fill in the rest of iocb fields */ 17391 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 17392 icmd->ulpBdeCount = 0; 17393 icmd->ulpLe = 1; 17394 icmd->ulpClass = CLASS3; 17395 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 17396 ctiocb->context1 = lpfc_nlp_get(ndlp); 17397 17398 ctiocb->iocb_cmpl = NULL; 17399 ctiocb->vport = phba->pport; 17400 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 17401 ctiocb->sli4_lxritag = NO_XRI; 17402 ctiocb->sli4_xritag = NO_XRI; 17403 17404 if (fctl & FC_FC_EX_CTX) 17405 /* Exchange responder sent the abort so we 17406 * own the oxid. 17407 */ 17408 xri = oxid; 17409 else 17410 xri = rxid; 17411 lxri = lpfc_sli4_xri_inrange(phba, xri); 17412 if (lxri != NO_XRI) 17413 lpfc_set_rrq_active(phba, ndlp, lxri, 17414 (xri == oxid) ? rxid : oxid, 0); 17415 /* For BA_ABTS from exchange responder, if the logical xri with 17416 * the oxid maps to the FCP XRI range, the port no longer has 17417 * that exchange context, send a BLS_RJT. Override the IOCB for 17418 * a BA_RJT. 17419 */ 17420 if ((fctl & FC_FC_EX_CTX) && 17421 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 17422 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17423 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17424 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17425 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17426 } 17427 17428 /* If BA_ABTS failed to abort a partially assembled receive sequence, 17429 * the driver no longer has that exchange, send a BLS_RJT. Override 17430 * the IOCB for a BA_RJT. 17431 */ 17432 if (aborted == false) { 17433 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 17434 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 17435 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 17436 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 17437 } 17438 17439 if (fctl & FC_FC_EX_CTX) { 17440 /* ABTS sent by responder to CT exchange, construction 17441 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 17442 * field and RX_ID from ABTS for RX_ID field. 17443 */ 17444 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 17445 } else { 17446 /* ABTS sent by initiator to CT exchange, construction 17447 * of BA_ACC will need to allocate a new XRI as for the 17448 * XRI_TAG field. 17449 */ 17450 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 17451 } 17452 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 17453 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 17454 17455 /* Xmit CT abts response on exchange <xid> */ 17456 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 17457 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 17458 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 17459 17460 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 17461 if (rc == IOCB_ERROR) { 17462 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 17463 "2925 Failed to issue CT ABTS RSP x%x on " 17464 "xri x%x, Data x%x\n", 17465 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 17466 phba->link_state); 17467 lpfc_nlp_put(ndlp); 17468 ctiocb->context1 = NULL; 17469 lpfc_sli_release_iocbq(phba, ctiocb); 17470 } 17471 } 17472 17473 /** 17474 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 17475 * @vport: Pointer to the vport on which this sequence was received 17476 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17477 * 17478 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 17479 * receive sequence is only partially assembed by the driver, it shall abort 17480 * the partially assembled frames for the sequence. 
Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the upper layer protocol (ULP), it marks the per-oxid status to indicate
 * that the unsolicited sequence has been aborted. After that, it responds to
 * the abort with either a basic accept (BA_ACC) or a basic reject (BA_RJT).
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function checks for three things: 1) that the first frame
 * has a sequence count of zero, 2) that there is a frame with the last frame
 * of sequence bit set, and 3) that there are no holes in the sequence count.
 * The function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure the first frame has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success.
*/ 17560 if (fctl & FC_FC_END_SEQ) 17561 return 1; 17562 } 17563 return 0; 17564 } 17565 17566 /** 17567 * lpfc_prep_seq - Prep sequence for ULP processing 17568 * @vport: Pointer to the vport on which this sequence was received 17569 * @dmabuf: pointer to a dmabuf that describes the FC sequence 17570 * 17571 * This function takes a sequence, described by a list of frames, and creates 17572 * a list of iocbq structures to describe the sequence. This iocbq list will be 17573 * used to issue to the generic unsolicited sequence handler. This routine 17574 * returns a pointer to the first iocbq in the list. If the function is unable 17575 * to allocate an iocbq then it throw out the received frames that were not 17576 * able to be described and return a pointer to the first iocbq. If unable to 17577 * allocate any iocbqs (including the first) this function will return NULL. 17578 **/ 17579 static struct lpfc_iocbq * 17580 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 17581 { 17582 struct hbq_dmabuf *hbq_buf; 17583 struct lpfc_dmabuf *d_buf, *n_buf; 17584 struct lpfc_iocbq *first_iocbq, *iocbq; 17585 struct fc_frame_header *fc_hdr; 17586 uint32_t sid; 17587 uint32_t len, tot_len; 17588 struct ulp_bde64 *pbde; 17589 17590 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17591 /* remove from receive buffer list */ 17592 list_del_init(&seq_dmabuf->hbuf.list); 17593 lpfc_update_rcv_time_stamp(vport); 17594 /* get the Remote Port's SID */ 17595 sid = sli4_sid_from_fc_hdr(fc_hdr); 17596 tot_len = 0; 17597 /* Get an iocbq struct to fill in. */ 17598 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 17599 if (first_iocbq) { 17600 /* Initialize the first IOCB. */ 17601 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 17602 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 17603 first_iocbq->vport = vport; 17604 17605 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 17606 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 17607 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 17608 first_iocbq->iocb.un.rcvels.parmRo = 17609 sli4_did_from_fc_hdr(fc_hdr); 17610 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 17611 } else 17612 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 17613 first_iocbq->iocb.ulpContext = NO_XRI; 17614 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17615 be16_to_cpu(fc_hdr->fh_ox_id); 17616 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 17617 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17618 vport->phba->vpi_ids[vport->vpi]; 17619 /* put the first buffer into the first IOCBq */ 17620 tot_len = bf_get(lpfc_rcqe_length, 17621 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17622 17623 first_iocbq->context2 = &seq_dmabuf->dbuf; 17624 first_iocbq->context3 = NULL; 17625 first_iocbq->iocb.ulpBdeCount = 1; 17626 if (tot_len > LPFC_DATA_BUF_SIZE) 17627 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17628 LPFC_DATA_BUF_SIZE; 17629 else 17630 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17631 17632 first_iocbq->iocb.un.rcvels.remoteID = sid; 17633 17634 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17635 } 17636 iocbq = first_iocbq; 17637 /* 17638 * Each IOCBq can have two Buffers assigned, so go through the list 17639 * of buffers for this sequence and save two buffers in each IOCBq 17640 */ 17641 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17642 if (!iocbq) { 17643 lpfc_in_buf_free(vport->phba, d_buf); 17644 continue; 17645 } 17646 if (!iocbq->context3) { 17647 iocbq->context3 = d_buf; 17648 iocbq->iocb.ulpBdeCount++; 17649 /* We need to get the size out of the right CQE */ 17650 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17651 len = bf_get(lpfc_rcqe_length, 17652 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17653 pbde = (struct ulp_bde64 *) 17654 &iocbq->iocb.unsli3.sli3Words[4]; 17655 if (len > LPFC_DATA_BUF_SIZE) 17656 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17657 else 17658 pbde->tus.f.bdeSize = len; 17659 17660 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17661 tot_len += len; 17662 } else { 17663 iocbq = lpfc_sli_get_iocbq(vport->phba); 17664 if (!iocbq) { 17665 if (first_iocbq) { 17666 first_iocbq->iocb.ulpStatus = 17667 IOSTAT_FCP_RSP_ERROR; 17668 first_iocbq->iocb.un.ulpWord[4] = 17669 IOERR_NO_RESOURCES; 17670 } 17671 lpfc_in_buf_free(vport->phba, d_buf); 17672 continue; 17673 } 17674 /* We need to get the size out of the right CQE */ 17675 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17676 len = bf_get(lpfc_rcqe_length, 17677 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17678 iocbq->context2 = d_buf; 17679 iocbq->context3 = NULL; 17680 iocbq->iocb.ulpBdeCount = 1; 17681 if (len > LPFC_DATA_BUF_SIZE) 17682 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17683 LPFC_DATA_BUF_SIZE; 17684 else 17685 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17686 17687 tot_len += len; 17688 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17689 17690 iocbq->iocb.un.rcvels.remoteID = sid; 17691 list_add_tail(&iocbq->list, &first_iocbq->list); 17692 } 17693 } 17694 return first_iocbq; 17695 } 17696 17697 static void 17698 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17699 struct hbq_dmabuf *seq_dmabuf) 17700 { 17701 struct fc_frame_header *fc_hdr; 17702 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17703 struct lpfc_hba *phba = vport->phba; 17704 17705 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17706 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17707 if (!iocbq) { 17708 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17709 "2707 Ring %d handler: Failed to allocate " 17710 "iocb Rctl x%x Type x%x received\n", 17711 LPFC_ELS_RING, 17712 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17713 return; 17714 } 17715 if (!lpfc_complete_unsol_iocb(phba, 17716 phba->sli4_hba.els_wq->pring, 17717 iocbq, fc_hdr->fh_r_ctl, 17718 fc_hdr->fh_type)) 17719 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17720 "2540 Ring %d handler: unexpected Rctl " 17721 "x%x Type x%x received\n", 17722 LPFC_ELS_RING, 17723 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17724 17725 /* Free iocb created in lpfc_prep_seq */ 17726 list_for_each_entry_safe(curr_iocb, next_iocb, 17727 &iocbq->list, list) { 17728 list_del_init(&curr_iocb->list); 17729 lpfc_sli_release_iocbq(phba, curr_iocb); 17730 } 17731 lpfc_sli_release_iocbq(phba, iocbq); 17732 } 17733 17734 static void 17735 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17736 struct lpfc_iocbq *rspiocb) 17737 { 17738 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17739 17740 if (pcmd && pcmd->virt) 17741 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17742 kfree(pcmd); 17743 lpfc_sli_release_iocbq(phba, cmdiocb); 17744 } 17745 17746 static void 17747 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17748 struct hbq_dmabuf *dmabuf) 17749 { 17750 struct fc_frame_header *fc_hdr; 17751 struct lpfc_hba *phba = vport->phba; 17752 struct lpfc_iocbq *iocbq = NULL; 17753 union lpfc_wqe *wqe; 17754 struct lpfc_dmabuf *pcmd = NULL; 17755 uint32_t frame_len; 17756 int rc; 17757 17758 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17759 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17760 17761 /* Send the received frame back */ 17762 iocbq = lpfc_sli_get_iocbq(phba); 17763 if (!iocbq) 17764 goto exit; 17765 17766 /* Allocate buffer for command payload */ 17767 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17768 if (pcmd) 17769 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17770 &pcmd->phys); 17771 if (!pcmd || !pcmd->virt) 17772 goto exit; 17773 17774 INIT_LIST_HEAD(&pcmd->list); 17775 17776 /* copyin the payload */ 17777 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17778 17779 /* fill in BDE's for command */ 17780 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17781 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17782 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17783 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17784 17785 iocbq->context2 = pcmd; 17786 iocbq->vport = vport; 17787 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17788 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17789 17790 /* 17791 * Setup rest of the iocb as though it were a WQE 17792 * Build the SEND_FRAME WQE 17793 */ 17794 wqe = (union lpfc_wqe *)&iocbq->iocb; 17795 17796 wqe->send_frame.frame_len = frame_len; 17797 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17798 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17799 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17800 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17801 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17802 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17803 17804 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17805 iocbq->iocb.ulpLe = 1; 17806 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17807 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17808 if (rc == IOCB_ERROR) 17809 goto exit; 17810 17811 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17812 return; 17813 17814 exit: 17815 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 17816 "2023 Unable to process MDS loopback frame\n"); 17817 if (pcmd && pcmd->virt) 17818 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17819 kfree(pcmd); 17820 if (iocbq) 17821 lpfc_sli_release_iocbq(phba, iocbq); 17822 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17823 } 17824 17825 /** 17826 * lpfc_sli4_handle_received_buffer - Handle received buffers from 
firmware 17827 * @phba: Pointer to HBA context object. 17828 * 17829 * This function is called with no lock held. This function processes all 17830 * the received buffers and gives it to upper layers when a received buffer 17831 * indicates that it is the final frame in the sequence. The interrupt 17832 * service routine processes received buffers at interrupt contexts. 17833 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17834 * appropriate receive function when the final frame in a sequence is received. 17835 **/ 17836 void 17837 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17838 struct hbq_dmabuf *dmabuf) 17839 { 17840 struct hbq_dmabuf *seq_dmabuf; 17841 struct fc_frame_header *fc_hdr; 17842 struct lpfc_vport *vport; 17843 uint32_t fcfi; 17844 uint32_t did; 17845 17846 /* Process each received buffer */ 17847 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17848 17849 /* check to see if this a valid type of frame */ 17850 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17851 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17852 return; 17853 } 17854 17855 if ((bf_get(lpfc_cqe_code, 17856 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17857 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17858 &dmabuf->cq_event.cqe.rcqe_cmpl); 17859 else 17860 fcfi = bf_get(lpfc_rcqe_fcf_id, 17861 &dmabuf->cq_event.cqe.rcqe_cmpl); 17862 17863 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 17864 vport = phba->pport; 17865 /* Handle MDS Loopback frames */ 17866 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17867 return; 17868 } 17869 17870 /* d_id this frame is directed to */ 17871 did = sli4_did_from_fc_hdr(fc_hdr); 17872 17873 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17874 if (!vport) { 17875 /* throw out the frame */ 17876 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17877 return; 17878 } 17879 17880 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17881 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17882 (did != Fabric_DID)) { 17883 /* 17884 * Throw out the frame if we are not pt2pt. 17885 * The pt2pt protocol allows for discovery frames 17886 * to be received without a registered VPI. 17887 */ 17888 if (!(vport->fc_flag & FC_PT2PT) || 17889 (phba->link_state == LPFC_HBA_READY)) { 17890 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17891 return; 17892 } 17893 } 17894 17895 /* Handle the basic abort sequence (BA_ABTS) event */ 17896 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17897 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17898 return; 17899 } 17900 17901 /* Link this frame */ 17902 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17903 if (!seq_dmabuf) { 17904 /* unable to add frame to vport - throw it out */ 17905 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17906 return; 17907 } 17908 /* If not last frame in sequence continue processing frames. */ 17909 if (!lpfc_seq_complete(seq_dmabuf)) 17910 return; 17911 17912 /* Send the complete sequence to the upper layer protocol */ 17913 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17914 } 17915 17916 /** 17917 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17918 * @phba: pointer to lpfc hba data structure. 17919 * 17920 * This routine is invoked to post rpi header templates to the 17921 * HBA consistent with the SLI-4 interface spec. This routine 17922 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17923 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17924 * 17925 * This routine does not require any locks. 
It's usage is expected 17926 * to be driver load or reset recovery when the driver is 17927 * sequential. 17928 * 17929 * Return codes 17930 * 0 - successful 17931 * -EIO - The mailbox failed to complete successfully. 17932 * When this error occurs, the driver is not guaranteed 17933 * to have any rpi regions posted to the device and 17934 * must either attempt to repost the regions or take a 17935 * fatal error. 17936 **/ 17937 int 17938 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17939 { 17940 struct lpfc_rpi_hdr *rpi_page; 17941 uint32_t rc = 0; 17942 uint16_t lrpi = 0; 17943 17944 /* SLI4 ports that support extents do not require RPI headers. */ 17945 if (!phba->sli4_hba.rpi_hdrs_in_use) 17946 goto exit; 17947 if (phba->sli4_hba.extents_in_use) 17948 return -EIO; 17949 17950 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17951 /* 17952 * Assign the rpi headers a physical rpi only if the driver 17953 * has not initialized those resources. A port reset only 17954 * needs the headers posted. 17955 */ 17956 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 17957 LPFC_RPI_RSRC_RDY) 17958 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17959 17960 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 17961 if (rc != MBX_SUCCESS) { 17962 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17963 "2008 Error %d posting all rpi " 17964 "headers\n", rc); 17965 rc = -EIO; 17966 break; 17967 } 17968 } 17969 17970 exit: 17971 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 17972 LPFC_RPI_RSRC_RDY); 17973 return rc; 17974 } 17975 17976 /** 17977 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 17978 * @phba: pointer to lpfc hba data structure. 17979 * @rpi_page: pointer to the rpi memory region. 17980 * 17981 * This routine is invoked to post a single rpi header to the 17982 * HBA consistent with the SLI-4 interface spec. This memory region 17983 * maps up to 64 rpi context regions. 17984 * 17985 * Return codes 17986 * 0 - successful 17987 * -ENOMEM - No available memory 17988 * -EIO - The mailbox failed to complete successfully. 17989 **/ 17990 int 17991 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 17992 { 17993 LPFC_MBOXQ_t *mboxq; 17994 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 17995 uint32_t rc = 0; 17996 uint32_t shdr_status, shdr_add_status; 17997 union lpfc_sli4_cfg_shdr *shdr; 17998 17999 /* SLI4 ports that support extents do not require RPI headers. */ 18000 if (!phba->sli4_hba.rpi_hdrs_in_use) 18001 return rc; 18002 if (phba->sli4_hba.extents_in_use) 18003 return -EIO; 18004 18005 /* The port is notified of the header region via a mailbox command. */ 18006 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18007 if (!mboxq) { 18008 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18009 "2001 Unable to allocate memory for issuing " 18010 "SLI_CONFIG_SPECIAL mailbox command\n"); 18011 return -ENOMEM; 18012 } 18013 18014 /* Post all rpi memory regions to the port. */ 18015 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 18016 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18017 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 18018 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 18019 sizeof(struct lpfc_sli4_cfg_mhdr), 18020 LPFC_SLI4_MBX_EMBED); 18021 18022 18023 /* Post the physical rpi to the port for this rpi header. 
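	 * The template carries the starting physical rpi, the page count and
	 * the DMA address of the rpi header region; the completion status is
	 * read back from the mailbox sub-header after the command is issued.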
	 */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else {
		/*
		 * The next_rpi stores the next logical module-64 rpi value
		 * used to post physical rpis in subsequent rpi postings.
		 */
		spin_lock_irq(&phba->hbalock);
		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's rpi bitmask. If the allocation leaves the driver running low on
 * unused rpis, an additional rpi header region is created and posted to the
 * port so that more rpis become available.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the memory regions that back the
 * driver's rpi bitmask and rpi id array.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume the rpi associated with a remote node
 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: optional completion handler for the RESUME_RPI mailbox command.
 * @arg: completion handler context.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to the port
 * for the rpi associated with @ndlp. It returns 0 if the mailbox command was
 * issued, -ENOMEM if no mailbox could be allocated, and -EIO if the command
 * could not be issued.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resumption via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct the RESUME_RPI mailbox command for this node's rpi. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
18232 * 18233 * Returns: 18234 * 0 success 18235 * -Evalue otherwise 18236 **/ 18237 int 18238 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 18239 { 18240 LPFC_MBOXQ_t *mboxq; 18241 int rc = 0; 18242 int retval = MBX_SUCCESS; 18243 uint32_t mbox_tmo; 18244 struct lpfc_hba *phba = vport->phba; 18245 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18246 if (!mboxq) 18247 return -ENOMEM; 18248 lpfc_init_vpi(phba, mboxq, vport->vpi); 18249 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 18250 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 18251 if (rc != MBX_SUCCESS) { 18252 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 18253 "2022 INIT VPI Mailbox failed " 18254 "status %d, mbxStatus x%x\n", rc, 18255 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 18256 retval = -EIO; 18257 } 18258 if (rc != MBX_TIMEOUT) 18259 mempool_free(mboxq, vport->phba->mbox_mem_pool); 18260 18261 return retval; 18262 } 18263 18264 /** 18265 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 18266 * @phba: pointer to lpfc hba data structure. 18267 * @mboxq: Pointer to mailbox object. 18268 * 18269 * This routine is invoked to manually add a single FCF record. The caller 18270 * must pass a completely initialized FCF_Record. This routine takes 18271 * care of the nonembedded mailbox operations. 18272 **/ 18273 static void 18274 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 18275 { 18276 void *virt_addr; 18277 union lpfc_sli4_cfg_shdr *shdr; 18278 uint32_t shdr_status, shdr_add_status; 18279 18280 virt_addr = mboxq->sge_array->addr[0]; 18281 /* The IOCTL status is embedded in the mailbox subheader. */ 18282 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 18283 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 18284 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 18285 18286 if ((shdr_status || shdr_add_status) && 18287 (shdr_status != STATUS_FCF_IN_USE)) 18288 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18289 "2558 ADD_FCF_RECORD mailbox failed with " 18290 "status x%x add_status x%x\n", 18291 shdr_status, shdr_add_status); 18292 18293 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18294 } 18295 18296 /** 18297 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 18298 * @phba: pointer to lpfc hba data structure. 18299 * @fcf_record: pointer to the initialized fcf record to add. 18300 * 18301 * This routine is invoked to manually add a single FCF record. The caller 18302 * must pass a completely initialized FCF_Record. This routine takes 18303 * care of the nonembedded mailbox operations. 
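 *
 * The non-embedded payload is laid out as the SLI4 config sub-header,
 * followed by a 32-bit FCF index, followed by the FCF record itself; both
 * the index and the record are copied in with lpfc_sli_pcimem_bcopy() so
 * that the data is endian correct for the port.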
18304 **/ 18305 int 18306 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 18307 { 18308 int rc = 0; 18309 LPFC_MBOXQ_t *mboxq; 18310 uint8_t *bytep; 18311 void *virt_addr; 18312 struct lpfc_mbx_sge sge; 18313 uint32_t alloc_len, req_len; 18314 uint32_t fcfindex; 18315 18316 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18317 if (!mboxq) { 18318 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18319 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 18320 return -ENOMEM; 18321 } 18322 18323 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 18324 sizeof(uint32_t); 18325 18326 /* Allocate DMA memory and set up the non-embedded mailbox command */ 18327 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 18328 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 18329 req_len, LPFC_SLI4_MBX_NEMBED); 18330 if (alloc_len < req_len) { 18331 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18332 "2523 Allocated DMA memory size (x%x) is " 18333 "less than the requested DMA memory " 18334 "size (x%x)\n", alloc_len, req_len); 18335 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18336 return -ENOMEM; 18337 } 18338 18339 /* 18340 * Get the first SGE entry from the non-embedded DMA memory. This 18341 * routine only uses a single SGE. 18342 */ 18343 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 18344 virt_addr = mboxq->sge_array->addr[0]; 18345 /* 18346 * Configure the FCF record for FCFI 0. This is the driver's 18347 * hardcoded default and gets used in nonFIP mode. 18348 */ 18349 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 18350 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 18351 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 18352 18353 /* 18354 * Copy the fcf_index and the FCF Record Data. The data starts after 18355 * the FCoE header plus word10. The data copy needs to be endian 18356 * correct. 18357 */ 18358 bytep += sizeof(uint32_t); 18359 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 18360 mboxq->vport = phba->pport; 18361 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 18362 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18363 if (rc == MBX_NOT_FINISHED) { 18364 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18365 "2515 ADD_FCF_RECORD mailbox failed with " 18366 "status 0x%x\n", rc); 18367 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18368 rc = -EIO; 18369 } else 18370 rc = 0; 18371 18372 return rc; 18373 } 18374 18375 /** 18376 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 18377 * @phba: pointer to lpfc hba data structure. 18378 * @fcf_record: pointer to the fcf record to write the default data. 18379 * @fcf_index: FCF table entry index. 18380 * 18381 * This routine is invoked to build the driver's default FCF record. The 18382 * values used are hardcoded. This routine handles memory initialization. 
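 *
 * The VLAN bitmap holds one bit per VLAN id: a valid vlan_id of 100, for
 * example, sets bit (100 % 8) = 4 of vlan_bitmap[100 / 8] = vlan_bitmap[12].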
18383 * 18384 **/ 18385 void 18386 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 18387 struct fcf_record *fcf_record, 18388 uint16_t fcf_index) 18389 { 18390 memset(fcf_record, 0, sizeof(struct fcf_record)); 18391 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 18392 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 18393 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 18394 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 18395 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 18396 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 18397 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 18398 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 18399 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 18400 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 18401 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 18402 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 18403 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 18404 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 18405 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 18406 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 18407 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 18408 /* Set the VLAN bit map */ 18409 if (phba->valid_vlan) { 18410 fcf_record->vlan_bitmap[phba->vlan_id / 8] 18411 = 1 << (phba->vlan_id % 8); 18412 } 18413 } 18414 18415 /** 18416 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 18417 * @phba: pointer to lpfc hba data structure. 18418 * @fcf_index: FCF table entry offset. 18419 * 18420 * This routine is invoked to scan the entire FCF table by reading FCF 18421 * record and processing it one at a time starting from the @fcf_index 18422 * for initial FCF discovery or fast FCF failover rediscovery. 18423 * 18424 * Return 0 if the mailbox command is submitted successfully, none 0 18425 * otherwise. 
18426 **/ 18427 int 18428 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18429 { 18430 int rc = 0, error; 18431 LPFC_MBOXQ_t *mboxq; 18432 18433 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 18434 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 18435 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18436 if (!mboxq) { 18437 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18438 "2000 Failed to allocate mbox for " 18439 "READ_FCF cmd\n"); 18440 error = -ENOMEM; 18441 goto fail_fcf_scan; 18442 } 18443 /* Construct the read FCF record mailbox command */ 18444 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18445 if (rc) { 18446 error = -EINVAL; 18447 goto fail_fcf_scan; 18448 } 18449 /* Issue the mailbox command asynchronously */ 18450 mboxq->vport = phba->pport; 18451 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 18452 18453 spin_lock_irq(&phba->hbalock); 18454 phba->hba_flag |= FCF_TS_INPROG; 18455 spin_unlock_irq(&phba->hbalock); 18456 18457 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18458 if (rc == MBX_NOT_FINISHED) 18459 error = -EIO; 18460 else { 18461 /* Reset eligible FCF count for new scan */ 18462 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 18463 phba->fcf.eligible_fcf_cnt = 0; 18464 error = 0; 18465 } 18466 fail_fcf_scan: 18467 if (error) { 18468 if (mboxq) 18469 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18470 /* FCF scan failed, clear FCF_TS_INPROG flag */ 18471 spin_lock_irq(&phba->hbalock); 18472 phba->hba_flag &= ~FCF_TS_INPROG; 18473 spin_unlock_irq(&phba->hbalock); 18474 } 18475 return error; 18476 } 18477 18478 /** 18479 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 18480 * @phba: pointer to lpfc hba data structure. 18481 * @fcf_index: FCF table entry offset. 18482 * 18483 * This routine is invoked to read an FCF record indicated by @fcf_index 18484 * and to use it for FLOGI roundrobin FCF failover. 18485 * 18486 * Return 0 if the mailbox command is submitted successfully, none 0 18487 * otherwise. 18488 **/ 18489 int 18490 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18491 { 18492 int rc = 0, error; 18493 LPFC_MBOXQ_t *mboxq; 18494 18495 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18496 if (!mboxq) { 18497 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18498 "2763 Failed to allocate mbox for " 18499 "READ_FCF cmd\n"); 18500 error = -ENOMEM; 18501 goto fail_fcf_read; 18502 } 18503 /* Construct the read FCF record mailbox command */ 18504 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18505 if (rc) { 18506 error = -EINVAL; 18507 goto fail_fcf_read; 18508 } 18509 /* Issue the mailbox command asynchronously */ 18510 mboxq->vport = phba->pport; 18511 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 18512 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18513 if (rc == MBX_NOT_FINISHED) 18514 error = -EIO; 18515 else 18516 error = 0; 18517 18518 fail_fcf_read: 18519 if (error && mboxq) 18520 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18521 return error; 18522 } 18523 18524 /** 18525 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 18526 * @phba: pointer to lpfc hba data structure. 18527 * @fcf_index: FCF table entry offset. 18528 * 18529 * This routine is invoked to read an FCF record indicated by @fcf_index to 18530 * determine whether it's eligible for FLOGI roundrobin failover list. 18531 * 18532 * Return 0 if the mailbox command is submitted successfully, none 0 18533 * otherwise. 
18534 **/ 18535 int 18536 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 18537 { 18538 int rc = 0, error; 18539 LPFC_MBOXQ_t *mboxq; 18540 18541 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18542 if (!mboxq) { 18543 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 18544 "2758 Failed to allocate mbox for " 18545 "READ_FCF cmd\n"); 18546 error = -ENOMEM; 18547 goto fail_fcf_read; 18548 } 18549 /* Construct the read FCF record mailbox command */ 18550 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 18551 if (rc) { 18552 error = -EINVAL; 18553 goto fail_fcf_read; 18554 } 18555 /* Issue the mailbox command asynchronously */ 18556 mboxq->vport = phba->pport; 18557 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 18558 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 18559 if (rc == MBX_NOT_FINISHED) 18560 error = -EIO; 18561 else 18562 error = 0; 18563 18564 fail_fcf_read: 18565 if (error && mboxq) 18566 lpfc_sli4_mbox_cmd_free(phba, mboxq); 18567 return error; 18568 } 18569 18570 /** 18571 * lpfc_check_next_fcf_pri_level 18572 * phba pointer to the lpfc_hba struct for this port. 18573 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 18574 * routine when the rr_bmask is empty. The FCF indecies are put into the 18575 * rr_bmask based on their priority level. Starting from the highest priority 18576 * to the lowest. The most likely FCF candidate will be in the highest 18577 * priority group. When this routine is called it searches the fcf_pri list for 18578 * next lowest priority group and repopulates the rr_bmask with only those 18579 * fcf_indexes. 18580 * returns: 18581 * 1=success 0=failure 18582 **/ 18583 static int 18584 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 18585 { 18586 uint16_t next_fcf_pri; 18587 uint16_t last_index; 18588 struct lpfc_fcf_pri *fcf_pri; 18589 int rc; 18590 int ret = 0; 18591 18592 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 18593 LPFC_SLI4_FCF_TBL_INDX_MAX); 18594 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18595 "3060 Last IDX %d\n", last_index); 18596 18597 /* Verify the priority list has 2 or more entries */ 18598 spin_lock_irq(&phba->hbalock); 18599 if (list_empty(&phba->fcf.fcf_pri_list) || 18600 list_is_singular(&phba->fcf.fcf_pri_list)) { 18601 spin_unlock_irq(&phba->hbalock); 18602 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18603 "3061 Last IDX %d\n", last_index); 18604 return 0; /* Empty rr list */ 18605 } 18606 spin_unlock_irq(&phba->hbalock); 18607 18608 next_fcf_pri = 0; 18609 /* 18610 * Clear the rr_bmask and set all of the bits that are at this 18611 * priority. 18612 */ 18613 memset(phba->fcf.fcf_rr_bmask, 0, 18614 sizeof(*phba->fcf.fcf_rr_bmask)); 18615 spin_lock_irq(&phba->hbalock); 18616 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18617 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 18618 continue; 18619 /* 18620 * the 1st priority that has not FLOGI failed 18621 * will be the highest. 18622 */ 18623 if (!next_fcf_pri) 18624 next_fcf_pri = fcf_pri->fcf_rec.priority; 18625 spin_unlock_irq(&phba->hbalock); 18626 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18627 rc = lpfc_sli4_fcf_rr_index_set(phba, 18628 fcf_pri->fcf_rec.fcf_index); 18629 if (rc) 18630 return 0; 18631 } 18632 spin_lock_irq(&phba->hbalock); 18633 } 18634 /* 18635 * if next_fcf_pri was not set above and the list is not empty then 18636 * we have failed flogis on all of them. So reset flogi failed 18637 * and start at the beginning. 
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * The first priority that has not failed FLOGI will
			 * be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index is equal to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search starts from the next bit of the currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If the next fcf index is not found, check if there are
		 * lower priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
				phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to set in the roundrobin bmask.
 *
 * This routine sets the FCF record index into the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to clear from the roundrobin bmask.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
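 * The matching entry is also removed from the fcf_pri_list so that it is
 * no longer considered by lpfc_check_next_fcf_pri_level().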
18781 **/ 18782 void 18783 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18784 { 18785 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18786 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18787 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18788 "2762 FCF (x%x) reached driver's book " 18789 "keeping dimension:x%x\n", 18790 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18791 return; 18792 } 18793 /* Clear the eligible FCF record index bmask */ 18794 spin_lock_irq(&phba->hbalock); 18795 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18796 list) { 18797 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18798 list_del_init(&fcf_pri->list); 18799 break; 18800 } 18801 } 18802 spin_unlock_irq(&phba->hbalock); 18803 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18804 18805 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18806 "2791 Clear FCF (x%x) from roundrobin failover " 18807 "bmask\n", fcf_index); 18808 } 18809 18810 /** 18811 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18812 * @phba: pointer to lpfc hba data structure. 18813 * 18814 * This routine is the completion routine for the rediscover FCF table mailbox 18815 * command. If the mailbox command returned failure, it will try to stop the 18816 * FCF rediscover wait timer. 18817 **/ 18818 static void 18819 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18820 { 18821 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18822 uint32_t shdr_status, shdr_add_status; 18823 18824 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18825 18826 shdr_status = bf_get(lpfc_mbox_hdr_status, 18827 &redisc_fcf->header.cfg_shdr.response); 18828 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18829 &redisc_fcf->header.cfg_shdr.response); 18830 if (shdr_status || shdr_add_status) { 18831 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18832 "2746 Requesting for FCF rediscovery failed " 18833 "status x%x add_status x%x\n", 18834 shdr_status, shdr_add_status); 18835 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18836 spin_lock_irq(&phba->hbalock); 18837 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18838 spin_unlock_irq(&phba->hbalock); 18839 /* 18840 * CVL event triggered FCF rediscover request failed, 18841 * last resort to re-try current registered FCF entry. 18842 */ 18843 lpfc_retry_pport_discovery(phba); 18844 } else { 18845 spin_lock_irq(&phba->hbalock); 18846 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18847 spin_unlock_irq(&phba->hbalock); 18848 /* 18849 * DEAD FCF event triggered FCF rediscover request 18850 * failed, last resort to fail over as a link down 18851 * to FCF registration. 18852 */ 18853 lpfc_sli4_fcf_dead_failthrough(phba); 18854 } 18855 } else { 18856 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18857 "2775 Start FCF rediscover quiescent timer\n"); 18858 /* 18859 * Start FCF rediscovery wait timer for pending FCF 18860 * before rescan FCF record table. 18861 */ 18862 lpfc_fcf_redisc_wait_start_timer(phba); 18863 } 18864 18865 mempool_free(mbox, phba->mbox_mem_pool); 18866 } 18867 18868 /** 18869 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18870 * @phba: pointer to lpfc hba data structure. 18871 * 18872 * This routine is invoked to request for rediscovery of the entire FCF table 18873 * by the port. 
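 *
 * Returns 0 on success, -ENOMEM if the mailbox could not be allocated, or
 * -EIO if the mailbox command could not be issued.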
18874 **/ 18875 int 18876 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18877 { 18878 LPFC_MBOXQ_t *mbox; 18879 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18880 int rc, length; 18881 18882 /* Cancel retry delay timers to all vports before FCF rediscover */ 18883 lpfc_cancel_all_vport_retry_delay_timer(phba); 18884 18885 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18886 if (!mbox) { 18887 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18888 "2745 Failed to allocate mbox for " 18889 "requesting FCF rediscover.\n"); 18890 return -ENOMEM; 18891 } 18892 18893 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18894 sizeof(struct lpfc_sli4_cfg_mhdr)); 18895 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18896 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18897 length, LPFC_SLI4_MBX_EMBED); 18898 18899 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18900 /* Set count to 0 for invalidating the entire FCF database */ 18901 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18902 18903 /* Issue the mailbox command asynchronously */ 18904 mbox->vport = phba->pport; 18905 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18906 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18907 18908 if (rc == MBX_NOT_FINISHED) { 18909 mempool_free(mbox, phba->mbox_mem_pool); 18910 return -EIO; 18911 } 18912 return 0; 18913 } 18914 18915 /** 18916 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18917 * @phba: pointer to lpfc hba data structure. 18918 * 18919 * This function is the failover routine as a last resort to the FCF DEAD 18920 * event when driver failed to perform fast FCF failover. 18921 **/ 18922 void 18923 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18924 { 18925 uint32_t link_state; 18926 18927 /* 18928 * Last resort as FCF DEAD event failover will treat this as 18929 * a link down, but save the link state because we don't want 18930 * it to be changed to Link Down unless it is already down. 18931 */ 18932 link_state = phba->link_state; 18933 lpfc_linkdown(phba); 18934 phba->link_state = link_state; 18935 18936 /* Unregister FCF if no devices connected to it */ 18937 lpfc_unregister_unused_fcf(phba); 18938 } 18939 18940 /** 18941 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18942 * @phba: pointer to lpfc hba data structure. 18943 * @rgn23_data: pointer to configure region 23 data. 18944 * 18945 * This function gets SLI3 port configure region 23 data through memory dump 18946 * mailbox command. When it successfully retrieves data, the size of the data 18947 * will be returned, otherwise, 0 will be returned. 
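 * The region is read with dump memory mailbox commands (lpfc_dump_mem)
 * issued in polling mode until up to DMP_RGN23_SIZE bytes have been copied
 * into @rgn23_data.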
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * Dump mem may return a zero when finished or we got a
		 * mailbox error; either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configuration region 23 data.
 *
 * This function gets SLI4 port configuration region 23 data through memory
 * dump mailbox command. When it successfully retrieves data, the size of the
 * data will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLVs for the port status to
 * decide if the user disabled the port. If the TLV indicates the port is
 * disabled, the hba_flag is set accordingly.
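 * On SLI4 ports with interface type 0 the region is not read and the flag
 * is left unchanged.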
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not the driver specific TLV or the driver id
		 * is not the linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
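 * Each BDE covers at most SLI4_PAGE_SIZE bytes and no more than
 * LPFC_MBX_WR_CONFIG_MAX_BDE BDEs are placed in a single mailbox command.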
 *
 * Return 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Return a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
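 * Node references and DMA buffers held by the cleaned-up REG_LOGIN
 * commands are released as part of the cleanup.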
19247 **/ 19248 void 19249 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 19250 { 19251 struct lpfc_hba *phba = vport->phba; 19252 LPFC_MBOXQ_t *mb, *nextmb; 19253 struct lpfc_dmabuf *mp; 19254 struct lpfc_nodelist *ndlp; 19255 struct lpfc_nodelist *act_mbx_ndlp = NULL; 19256 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 19257 LIST_HEAD(mbox_cmd_list); 19258 uint8_t restart_loop; 19259 19260 /* Clean up internally queued mailbox commands with the vport */ 19261 spin_lock_irq(&phba->hbalock); 19262 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 19263 if (mb->vport != vport) 19264 continue; 19265 19266 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19267 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19268 continue; 19269 19270 list_del(&mb->list); 19271 list_add_tail(&mb->list, &mbox_cmd_list); 19272 } 19273 /* Clean up active mailbox command with the vport */ 19274 mb = phba->sli.mbox_active; 19275 if (mb && (mb->vport == vport)) { 19276 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 19277 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 19278 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19279 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19280 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 19281 /* Put reference count for delayed processing */ 19282 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 19283 /* Unregister the RPI when mailbox complete */ 19284 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19285 } 19286 } 19287 /* Cleanup any mailbox completions which are not yet processed */ 19288 do { 19289 restart_loop = 0; 19290 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 19291 /* 19292 * If this mailox is already processed or it is 19293 * for another vport ignore it. 19294 */ 19295 if ((mb->vport != vport) || 19296 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 19297 continue; 19298 19299 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 19300 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 19301 continue; 19302 19303 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 19304 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19305 ndlp = (struct lpfc_nodelist *)mb->context2; 19306 /* Unregister the RPI when mailbox complete */ 19307 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 19308 restart_loop = 1; 19309 spin_unlock_irq(&phba->hbalock); 19310 spin_lock(shost->host_lock); 19311 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19312 spin_unlock(shost->host_lock); 19313 spin_lock_irq(&phba->hbalock); 19314 break; 19315 } 19316 } 19317 } while (restart_loop); 19318 19319 spin_unlock_irq(&phba->hbalock); 19320 19321 /* Release the cleaned-up mailbox commands */ 19322 while (!list_empty(&mbox_cmd_list)) { 19323 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 19324 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 19325 mp = (struct lpfc_dmabuf *) (mb->context1); 19326 if (mp) { 19327 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 19328 kfree(mp); 19329 } 19330 ndlp = (struct lpfc_nodelist *) mb->context2; 19331 mb->context2 = NULL; 19332 if (ndlp) { 19333 spin_lock(shost->host_lock); 19334 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19335 spin_unlock(shost->host_lock); 19336 lpfc_nlp_put(ndlp); 19337 } 19338 } 19339 mempool_free(mb, phba->mbox_mem_pool); 19340 } 19341 19342 /* Release the ndlp with the cleaned-up active mailbox command */ 19343 if (act_mbx_ndlp) { 19344 spin_lock(shost->host_lock); 19345 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 19346 spin_unlock(shost->host_lock); 19347 lpfc_nlp_put(act_mbx_ndlp); 19348 } 19349 } 19350 19351 /** 19352 * lpfc_drain_txq - Drain the txq 19353 * @phba: Pointer to HBA context object. 
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs available. This congestion can occur with
 * large vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	uint32_t txq_cnt = 0;
	struct lpfc_queue *wq;

	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQEs are posted only to the first WQ */
		wq = phba->sli4_hba.fcp_wq[0];
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources are secured,
		 * attempt to issue the request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sge's.
If the WQE contains a single 19464 * BDE then it is converted to a single sli_sge. 19465 * The WQE is still in cpu endianness so the contents of 19466 * the bpl can be used without byte swapping. 19467 * 19468 * Returns valid XRI = Success, NO_XRI = Failure. 19469 */ 19470 static uint16_t 19471 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq, 19472 struct lpfc_sglq *sglq) 19473 { 19474 uint16_t xritag = NO_XRI; 19475 struct ulp_bde64 *bpl = NULL; 19476 struct ulp_bde64 bde; 19477 struct sli4_sge *sgl = NULL; 19478 struct lpfc_dmabuf *dmabuf; 19479 union lpfc_wqe128 *wqe; 19480 int numBdes = 0; 19481 int i = 0; 19482 uint32_t offset = 0; /* accumulated offset in the sg request list */ 19483 int inbound = 0; /* number of sg reply entries inbound from firmware */ 19484 uint32_t cmd; 19485 19486 if (!pwqeq || !sglq) 19487 return xritag; 19488 19489 sgl = (struct sli4_sge *)sglq->sgl; 19490 wqe = &pwqeq->wqe; 19491 pwqeq->iocb.ulpIoTag = pwqeq->iotag; 19492 19493 cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com); 19494 if (cmd == CMD_XMIT_BLS_RSP64_WQE) 19495 return sglq->sli4_xritag; 19496 numBdes = pwqeq->rsvd2; 19497 if (numBdes) { 19498 /* The addrHigh and addrLow fields within the WQE 19499 * have not been byteswapped yet so there is no 19500 * need to swap them back. 19501 */ 19502 if (pwqeq->context3) 19503 dmabuf = (struct lpfc_dmabuf *)pwqeq->context3; 19504 else 19505 return xritag; 19506 19507 bpl = (struct ulp_bde64 *)dmabuf->virt; 19508 if (!bpl) 19509 return xritag; 19510 19511 for (i = 0; i < numBdes; i++) { 19512 /* Should already be byte swapped. */ 19513 sgl->addr_hi = bpl->addrHigh; 19514 sgl->addr_lo = bpl->addrLow; 19515 19516 sgl->word2 = le32_to_cpu(sgl->word2); 19517 if ((i+1) == numBdes) 19518 bf_set(lpfc_sli4_sge_last, sgl, 1); 19519 else 19520 bf_set(lpfc_sli4_sge_last, sgl, 0); 19521 /* swap the size field back to the cpu so we 19522 * can assign it to the sgl. 19523 */ 19524 bde.tus.w = le32_to_cpu(bpl->tus.w); 19525 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 19526 /* The offsets in the sgl need to be accumulated 19527 * separately for the request and reply lists. 19528 * The request is always first, the reply follows. 19529 */ 19530 switch (cmd) { 19531 case CMD_GEN_REQUEST64_WQE: 19532 /* add up the reply sg entries */ 19533 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 19534 inbound++; 19535 /* first inbound? reset the offset */ 19536 if (inbound == 1) 19537 offset = 0; 19538 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19539 bf_set(lpfc_sli4_sge_type, sgl, 19540 LPFC_SGE_TYPE_DATA); 19541 offset += bde.tus.f.bdeSize; 19542 break; 19543 case CMD_FCP_TRSP64_WQE: 19544 bf_set(lpfc_sli4_sge_offset, sgl, 0); 19545 bf_set(lpfc_sli4_sge_type, sgl, 19546 LPFC_SGE_TYPE_DATA); 19547 break; 19548 case CMD_FCP_TSEND64_WQE: 19549 case CMD_FCP_TRECEIVE64_WQE: 19550 bf_set(lpfc_sli4_sge_type, sgl, 19551 bpl->tus.f.bdeFlags); 19552 if (i < 3) 19553 offset = 0; 19554 else 19555 offset += bde.tus.f.bdeSize; 19556 bf_set(lpfc_sli4_sge_offset, sgl, offset); 19557 break; 19558 } 19559 sgl->word2 = cpu_to_le32(sgl->word2); 19560 bpl++; 19561 sgl++; 19562 } 19563 } else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) { 19564 /* The addrHigh and addrLow fields of the BDE have not 19565 * been byteswapped yet so they need to be swapped 19566 * before putting them in the sgl. 
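		 * Only a single SGE is needed here and it is marked as the
		 * last entry in the list.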
19567 */ 19568 sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh); 19569 sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow); 19570 sgl->word2 = le32_to_cpu(sgl->word2); 19571 bf_set(lpfc_sli4_sge_last, sgl, 1); 19572 sgl->word2 = cpu_to_le32(sgl->word2); 19573 sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize); 19574 } 19575 return sglq->sli4_xritag; 19576 } 19577 19578 /** 19579 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE) 19580 * @phba: Pointer to HBA context object. 19581 * @ring_number: Base sli ring number 19582 * @pwqe: Pointer to command WQE. 19583 **/ 19584 int 19585 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number, 19586 struct lpfc_iocbq *pwqe) 19587 { 19588 union lpfc_wqe128 *wqe = &pwqe->wqe; 19589 struct lpfc_nvmet_rcv_ctx *ctxp; 19590 struct lpfc_queue *wq; 19591 struct lpfc_sglq *sglq; 19592 struct lpfc_sli_ring *pring; 19593 unsigned long iflags; 19594 uint32_t ret = 0; 19595 19596 /* NVME_LS and NVME_LS ABTS requests. */ 19597 if (pwqe->iocb_flag & LPFC_IO_NVME_LS) { 19598 pring = phba->sli4_hba.nvmels_wq->pring; 19599 spin_lock_irqsave(&pring->ring_lock, iflags); 19600 sglq = __lpfc_sli_get_els_sglq(phba, pwqe); 19601 if (!sglq) { 19602 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19603 return WQE_BUSY; 19604 } 19605 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19606 pwqe->sli4_xritag = sglq->sli4_xritag; 19607 if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) { 19608 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19609 return WQE_ERROR; 19610 } 19611 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19612 pwqe->sli4_xritag); 19613 ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe); 19614 if (ret) { 19615 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19616 return ret; 19617 } 19618 19619 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19620 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19621 return 0; 19622 } 19623 19624 /* NVME_FCREQ and NVME_ABTS requests */ 19625 if (pwqe->iocb_flag & LPFC_IO_NVME) { 19626 /* Get the IO distribution (hba_wqidx) for WQ assignment. */ 19627 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; 19628 19629 spin_lock_irqsave(&pring->ring_lock, iflags); 19630 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; 19631 bf_set(wqe_cqid, &wqe->generic.wqe_com, 19632 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); 19633 ret = lpfc_sli4_wq_put(wq, wqe); 19634 if (ret) { 19635 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19636 return ret; 19637 } 19638 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19639 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19640 return 0; 19641 } 19642 19643 /* NVMET requests */ 19644 if (pwqe->iocb_flag & LPFC_IO_NVMET) { 19645 /* Get the IO distribution (hba_wqidx) for WQ assignment. 
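		 * NVMET WQEs use the sglq attached to the receive context
		 * (ctxp->ctxbuf) when no XRI has been assigned yet.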
*/ 19646 pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring; 19647 19648 spin_lock_irqsave(&pring->ring_lock, iflags); 19649 ctxp = pwqe->context2; 19650 sglq = ctxp->ctxbuf->sglq; 19651 if (pwqe->sli4_xritag == NO_XRI) { 19652 pwqe->sli4_lxritag = sglq->sli4_lxritag; 19653 pwqe->sli4_xritag = sglq->sli4_xritag; 19654 } 19655 bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, 19656 pwqe->sli4_xritag); 19657 wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]; 19658 bf_set(wqe_cqid, &wqe->generic.wqe_com, 19659 phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id); 19660 ret = lpfc_sli4_wq_put(wq, wqe); 19661 if (ret) { 19662 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19663 return ret; 19664 } 19665 lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); 19666 spin_unlock_irqrestore(&pring->ring_lock, iflags); 19667 return 0; 19668 } 19669 return WQE_ERROR; 19670 } 19671