1 2 /******************************************************************* 3 * This file is part of the Emulex Linux Device Driver for * 4 * Fibre Channel Host Bus Adapters. * 5 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * 6 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * 7 * Copyright (C) 2004-2016 Emulex. All rights reserved. * 8 * EMULEX and SLI are trademarks of Emulex. * 9 * www.broadcom.com * 10 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 11 * * 12 * This program is free software; you can redistribute it and/or * 13 * modify it under the terms of version 2 of the GNU General * 14 * Public License as published by the Free Software Foundation. * 15 * This program is distributed in the hope that it will be useful. * 16 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 17 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 18 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 19 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 20 * TO BE LEGALLY INVALID. See the GNU General Public License for * 21 * more details, a copy of which can be found in the file COPYING * 22 * included with this package. * 23 *******************************************************************/ 24 25 #include <linux/blkdev.h> 26 #include <linux/pci.h> 27 #include <linux/interrupt.h> 28 #include <linux/delay.h> 29 #include <linux/slab.h> 30 #include <linux/lockdep.h> 31 32 #include <scsi/scsi.h> 33 #include <scsi/scsi_cmnd.h> 34 #include <scsi/scsi_device.h> 35 #include <scsi/scsi_host.h> 36 #include <scsi/scsi_transport_fc.h> 37 #include <scsi/fc/fc_fs.h> 38 #include <linux/aer.h> 39 40 #include <linux/nvme-fc-driver.h> 41 42 #include "lpfc_hw4.h" 43 #include "lpfc_hw.h" 44 #include "lpfc_sli.h" 45 #include "lpfc_sli4.h" 46 #include "lpfc_nl.h" 47 #include "lpfc_disc.h" 48 #include "lpfc.h" 49 #include "lpfc_scsi.h" 50 #include "lpfc_nvme.h" 51 #include "lpfc_nvmet.h" 52 #include "lpfc_crtn.h" 53 #include "lpfc_logmsg.h" 54 #include "lpfc_compat.h" 55 #include "lpfc_debugfs.h" 56 #include "lpfc_vport.h" 57 #include "lpfc_version.h" 58 59 /* There are only four IOCB completion types. */ 60 typedef enum _lpfc_iocb_type { 61 LPFC_UNKNOWN_IOCB, 62 LPFC_UNSOL_IOCB, 63 LPFC_SOL_IOCB, 64 LPFC_ABORT_IOCB 65 } lpfc_iocb_type; 66 67 68 /* Provide function prototypes local to this module. 
 */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
				   struct lpfc_cqe *);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_eqe *eqe, uint32_t qidx);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring,
				   struct lpfc_iocbq *cmdiocb);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}

/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if @q is invalid, and -EBUSY if no entries are
 * available on @q (the queue is full).
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	/* ensure WQE bcopy flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the MQ Doorbell to signal the
 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE.
 * If no valid EQEs are in the Queue (no more work to do), or the Queue is
 * full of EQEs that have been processed, but not popped back to the HBA,
 * then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}

/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each event queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
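 *
 * As an informal illustration (not part of the original documentation):
 * with entry_count = 256, host_index = 10 and hba_index = 14, the loop
 * below clears the valid bit in entries 10 through 13, leaves host_index
 * equal to hba_index (14), and rings the doorbell with num_released = 4,
 * plus the arm bits when @arm is true.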
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA, then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}

/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries.
 * The @arm parameter indicates that the queue should be rearmed when ringing
 * the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}

/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entry on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entries. This function returns the index that the hrqe was copied to if
 * successful. If no entries are available on the queues then this function
 * will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
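 *
 * Informal usage sketch (an added illustration, mirroring the SLI4 HBQ
 * posting path in lpfc_sli_hbq_to_firmware_s4() later in this file): one
 * header/data RQE pair is posted per receive buffer,
 *
 *	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * and the doorbell below is only rung once every hq->entry_repost postings.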
470 **/ 471 int 472 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 473 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 474 { 475 struct lpfc_rqe *temp_hrqe; 476 struct lpfc_rqe *temp_drqe; 477 struct lpfc_register doorbell; 478 int hq_put_index; 479 int dq_put_index; 480 481 /* sanity check on queue memory */ 482 if (unlikely(!hq) || unlikely(!dq)) 483 return -ENOMEM; 484 hq_put_index = hq->host_index; 485 dq_put_index = dq->host_index; 486 temp_hrqe = hq->qe[hq_put_index].rqe; 487 temp_drqe = dq->qe[dq_put_index].rqe; 488 489 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 490 return -EINVAL; 491 if (hq_put_index != dq_put_index) 492 return -EINVAL; 493 /* If the host has not yet processed the next entry then we are done */ 494 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index) 495 return -EBUSY; 496 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 497 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 498 499 /* Update the host index to point to the next slot */ 500 hq->host_index = ((hq_put_index + 1) % hq->entry_count); 501 dq->host_index = ((dq_put_index + 1) % dq->entry_count); 502 hq->RQ_buf_posted++; 503 504 /* Ring The Header Receive Queue Doorbell */ 505 if (!(hq->host_index % hq->entry_repost)) { 506 doorbell.word0 = 0; 507 if (hq->db_format == LPFC_DB_RING_FORMAT) { 508 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 509 hq->entry_repost); 510 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 511 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 512 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 513 hq->entry_repost); 514 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 515 hq->host_index); 516 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 517 } else { 518 return -EINVAL; 519 } 520 writel(doorbell.word0, hq->db_regaddr); 521 } 522 return hq_put_index; 523 } 524 525 /** 526 * lpfc_sli4_rq_release - Updates internal hba index for RQ 527 * @q: The Header Receive Queue to operate on. 528 * 529 * This routine will update the HBA index of a queue to reflect consumption of 530 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 531 * consumed an entry the host calls this function to update the queue's 532 * internal pointers. This routine returns the number of entries that were 533 * consumed by the HBA. 534 **/ 535 static uint32_t 536 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 537 { 538 /* sanity check on queue memory */ 539 if (unlikely(!hq) || unlikely(!dq)) 540 return 0; 541 542 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 543 return 0; 544 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 545 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 546 return 1; 547 } 548 549 /** 550 * lpfc_cmd_iocb - Get next command iocb entry in the ring 551 * @phba: Pointer to HBA context object. 552 * @pring: Pointer to driver SLI ring object. 553 * 554 * This function returns pointer to next command iocb entry 555 * in the command ring. The caller must hold hbalock to prevent 556 * other threads consume the next command iocb. 557 * SLI-2/SLI-3 provide different sized iocbs. 558 **/ 559 static inline IOCB_t * 560 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 561 { 562 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 563 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 564 } 565 566 /** 567 * lpfc_resp_iocb - Get next response iocb entry in the ring 568 * @phba: Pointer to HBA context object. 569 * @pring: Pointer to driver SLI ring object. 
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
671 */ 672 if ((!ndlp) && rrq->ndlp) 673 ndlp = rrq->ndlp; 674 675 if (!ndlp) 676 goto out; 677 678 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 679 rrq->send_rrq = 0; 680 rrq->xritag = 0; 681 rrq->rrq_stop_time = 0; 682 } 683 out: 684 mempool_free(rrq, phba->rrq_pool); 685 } 686 687 /** 688 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 689 * @phba: Pointer to HBA context object. 690 * 691 * This function is called with hbalock held. This function 692 * Checks if stop_time (ratov from setting rrq active) has 693 * been reached, if it has and the send_rrq flag is set then 694 * it will call lpfc_send_rrq. If the send_rrq flag is not set 695 * then it will just call the routine to clear the rrq and 696 * free the rrq resource. 697 * The timer is set to the next rrq that is going to expire before 698 * leaving the routine. 699 * 700 **/ 701 void 702 lpfc_handle_rrq_active(struct lpfc_hba *phba) 703 { 704 struct lpfc_node_rrq *rrq; 705 struct lpfc_node_rrq *nextrrq; 706 unsigned long next_time; 707 unsigned long iflags; 708 LIST_HEAD(send_rrq); 709 710 spin_lock_irqsave(&phba->hbalock, iflags); 711 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 712 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 713 list_for_each_entry_safe(rrq, nextrrq, 714 &phba->active_rrq_list, list) { 715 if (time_after(jiffies, rrq->rrq_stop_time)) 716 list_move(&rrq->list, &send_rrq); 717 else if (time_before(rrq->rrq_stop_time, next_time)) 718 next_time = rrq->rrq_stop_time; 719 } 720 spin_unlock_irqrestore(&phba->hbalock, iflags); 721 if ((!list_empty(&phba->active_rrq_list)) && 722 (!(phba->pport->load_flag & FC_UNLOADING))) 723 mod_timer(&phba->rrq_tmr, next_time); 724 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 725 list_del(&rrq->list); 726 if (!rrq->send_rrq) 727 /* this call will free the rrq */ 728 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 729 else if (lpfc_send_rrq(phba, rrq)) { 730 /* if we send the rrq then the completion handler 731 * will clear the bit in the xribitmap. 732 */ 733 lpfc_clr_rrq_active(phba, rrq->xritag, 734 rrq); 735 } 736 } 737 } 738 739 /** 740 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 741 * @vport: Pointer to vport context object. 742 * @xri: The xri used in the exchange. 743 * @did: The targets DID for this exchange. 744 * 745 * returns NULL = rrq not found in the phba->active_rrq_list. 746 * rrq = rrq for this xri and target. 747 **/ 748 struct lpfc_node_rrq * 749 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 750 { 751 struct lpfc_hba *phba = vport->phba; 752 struct lpfc_node_rrq *rrq; 753 struct lpfc_node_rrq *nextrrq; 754 unsigned long iflags; 755 756 if (phba->sli_rev != LPFC_SLI_REV4) 757 return NULL; 758 spin_lock_irqsave(&phba->hbalock, iflags); 759 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 760 if (rrq->vport == vport && rrq->xritag == xri && 761 rrq->nlp_DID == did){ 762 list_del(&rrq->list); 763 spin_unlock_irqrestore(&phba->hbalock, iflags); 764 return rrq; 765 } 766 } 767 spin_unlock_irqrestore(&phba->hbalock, iflags); 768 return NULL; 769 } 770 771 /** 772 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 773 * @vport: Pointer to vport context object. 774 * @ndlp: Pointer to the lpfc_node_list structure. 775 * If ndlp is NULL Remove all active RRQs for this vport from the 776 * phba->active_rrq_list and clear the rrq. 777 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	lockdep_assert_held(&phba->hbalock);
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 = rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
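	 * (Added note: this bit is what lpfc_test_rrq_active() and
	 * __lpfc_sli_get_els_sglq() check, so the same xri is not handed
	 * back out while an RRQ may still be outstanding.)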
869 */ 870 if (NLP_CHK_FREE_REQ(ndlp)) 871 goto out; 872 873 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 874 goto out; 875 876 if (!ndlp->active_rrqs_xri_bitmap) 877 goto out; 878 879 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 880 goto out; 881 882 spin_unlock_irqrestore(&phba->hbalock, iflags); 883 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 884 if (!rrq) { 885 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 886 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 887 " DID:0x%x Send:%d\n", 888 xritag, rxid, ndlp->nlp_DID, send_rrq); 889 return -EINVAL; 890 } 891 if (phba->cfg_enable_rrq == 1) 892 rrq->send_rrq = send_rrq; 893 else 894 rrq->send_rrq = 0; 895 rrq->xritag = xritag; 896 rrq->rrq_stop_time = jiffies + 897 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 898 rrq->ndlp = ndlp; 899 rrq->nlp_DID = ndlp->nlp_DID; 900 rrq->vport = ndlp->vport; 901 rrq->rxid = rxid; 902 spin_lock_irqsave(&phba->hbalock, iflags); 903 empty = list_empty(&phba->active_rrq_list); 904 list_add_tail(&rrq->list, &phba->active_rrq_list); 905 phba->hba_flag |= HBA_RRQ_ACTIVE; 906 if (empty) 907 lpfc_worker_wake_up(phba); 908 spin_unlock_irqrestore(&phba->hbalock, iflags); 909 return 0; 910 out: 911 spin_unlock_irqrestore(&phba->hbalock, iflags); 912 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 913 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 914 " DID:0x%x Send:%d\n", 915 xritag, rxid, ndlp->nlp_DID, send_rrq); 916 return -EINVAL; 917 } 918 919 /** 920 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool 921 * @phba: Pointer to HBA context object. 922 * @piocb: Pointer to the iocbq. 923 * 924 * This function is called with the ring lock held. This function 925 * gets a new driver sglq object from the sglq list. If the 926 * list is not empty then it is successful, it returns pointer to the newly 927 * allocated sglq object else it returns NULL. 928 **/ 929 static struct lpfc_sglq * 930 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 931 { 932 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list; 933 struct lpfc_sglq *sglq = NULL; 934 struct lpfc_sglq *start_sglq = NULL; 935 struct lpfc_scsi_buf *lpfc_cmd; 936 struct lpfc_nodelist *ndlp; 937 int found = 0; 938 939 lockdep_assert_held(&phba->hbalock); 940 941 if (piocbq->iocb_flag & LPFC_IO_FCP) { 942 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 943 ndlp = lpfc_cmd->rdata->pnode; 944 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 945 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) { 946 ndlp = piocbq->context_un.ndlp; 947 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) { 948 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK) 949 ndlp = NULL; 950 else 951 ndlp = piocbq->context_un.ndlp; 952 } else { 953 ndlp = piocbq->context1; 954 } 955 956 spin_lock(&phba->sli4_hba.sgl_list_lock); 957 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list); 958 start_sglq = sglq; 959 while (!found) { 960 if (!sglq) 961 break; 962 if (ndlp && ndlp->active_rrqs_xri_bitmap && 963 test_bit(sglq->sli4_lxritag, 964 ndlp->active_rrqs_xri_bitmap)) { 965 /* This xri has an rrq outstanding for this DID. 966 * put it back in the list and get another xri. 
967 */ 968 list_add_tail(&sglq->list, lpfc_els_sgl_list); 969 sglq = NULL; 970 list_remove_head(lpfc_els_sgl_list, sglq, 971 struct lpfc_sglq, list); 972 if (sglq == start_sglq) { 973 list_add_tail(&sglq->list, lpfc_els_sgl_list); 974 sglq = NULL; 975 break; 976 } else 977 continue; 978 } 979 sglq->ndlp = ndlp; 980 found = 1; 981 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 982 sglq->state = SGL_ALLOCATED; 983 } 984 spin_unlock(&phba->sli4_hba.sgl_list_lock); 985 return sglq; 986 } 987 988 /** 989 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool 990 * @phba: Pointer to HBA context object. 991 * @piocb: Pointer to the iocbq. 992 * 993 * This function is called with the sgl_list lock held. This function 994 * gets a new driver sglq object from the sglq list. If the 995 * list is not empty then it is successful, it returns pointer to the newly 996 * allocated sglq object else it returns NULL. 997 **/ 998 struct lpfc_sglq * 999 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 1000 { 1001 struct list_head *lpfc_nvmet_sgl_list; 1002 struct lpfc_sglq *sglq = NULL; 1003 1004 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list; 1005 1006 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock); 1007 1008 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list); 1009 if (!sglq) 1010 return NULL; 1011 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 1012 sglq->state = SGL_ALLOCATED; 1013 return sglq; 1014 } 1015 1016 /** 1017 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 1018 * @phba: Pointer to HBA context object. 1019 * 1020 * This function is called with no lock held. This function 1021 * allocates a new driver iocb object from the iocb pool. If the 1022 * allocation is successful, it returns pointer to the newly 1023 * allocated iocb object else it returns NULL. 1024 **/ 1025 struct lpfc_iocbq * 1026 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 1027 { 1028 struct lpfc_iocbq * iocbq = NULL; 1029 unsigned long iflags; 1030 1031 spin_lock_irqsave(&phba->hbalock, iflags); 1032 iocbq = __lpfc_sli_get_iocbq(phba); 1033 spin_unlock_irqrestore(&phba->hbalock, iflags); 1034 return iocbq; 1035 } 1036 1037 /** 1038 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 1039 * @phba: Pointer to HBA context object. 1040 * @iocbq: Pointer to driver iocb object. 1041 * 1042 * This function is called with hbalock held to release driver 1043 * iocb object to the iocb pool. The iotag in the iocb object 1044 * does not change for each use of the iocb object. This function 1045 * clears all other fields of the iocb object when it is freed. 1046 * The sqlq structure that holds the xritag and phys and virtual 1047 * mappings for the scatter gather list is retrieved from the 1048 * active array of sglq. The get of the sglq pointer also clears 1049 * the entry in the array. If the status of the IO indiactes that 1050 * this IO was aborted then the sglq entry it put on the 1051 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 1052 * IO has good status or fails for any other reason then the sglq 1053 * entry is added to the free list (lpfc_els_sgl_list). 
1054 **/ 1055 static void 1056 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1057 { 1058 struct lpfc_sglq *sglq; 1059 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1060 unsigned long iflag = 0; 1061 struct lpfc_sli_ring *pring; 1062 1063 lockdep_assert_held(&phba->hbalock); 1064 1065 if (iocbq->sli4_xritag == NO_XRI) 1066 sglq = NULL; 1067 else 1068 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1069 1070 1071 if (sglq) { 1072 if (iocbq->iocb_flag & LPFC_IO_NVMET) { 1073 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1074 iflag); 1075 sglq->state = SGL_FREED; 1076 sglq->ndlp = NULL; 1077 list_add_tail(&sglq->list, 1078 &phba->sli4_hba.lpfc_nvmet_sgl_list); 1079 spin_unlock_irqrestore( 1080 &phba->sli4_hba.sgl_list_lock, iflag); 1081 goto out; 1082 } 1083 1084 pring = phba->sli4_hba.els_wq->pring; 1085 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1086 (sglq->state != SGL_XRI_ABORTED)) { 1087 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1088 iflag); 1089 list_add(&sglq->list, 1090 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1091 spin_unlock_irqrestore( 1092 &phba->sli4_hba.sgl_list_lock, iflag); 1093 } else { 1094 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, 1095 iflag); 1096 sglq->state = SGL_FREED; 1097 sglq->ndlp = NULL; 1098 list_add_tail(&sglq->list, 1099 &phba->sli4_hba.lpfc_els_sgl_list); 1100 spin_unlock_irqrestore( 1101 &phba->sli4_hba.sgl_list_lock, iflag); 1102 1103 /* Check if TXQ queue needs to be serviced */ 1104 if (!list_empty(&pring->txq)) 1105 lpfc_worker_wake_up(phba); 1106 } 1107 } 1108 1109 out: 1110 /* 1111 * Clean all volatile data fields, preserve iotag and node struct. 1112 */ 1113 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1114 iocbq->sli4_lxritag = NO_XRI; 1115 iocbq->sli4_xritag = NO_XRI; 1116 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | 1117 LPFC_IO_NVME_LS); 1118 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1119 } 1120 1121 1122 /** 1123 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1124 * @phba: Pointer to HBA context object. 1125 * @iocbq: Pointer to driver iocb object. 1126 * 1127 * This function is called with hbalock held to release driver 1128 * iocb object to the iocb pool. The iotag in the iocb object 1129 * does not change for each use of the iocb object. This function 1130 * clears all other fields of the iocb object when it is freed. 1131 **/ 1132 static void 1133 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1134 { 1135 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1136 1137 lockdep_assert_held(&phba->hbalock); 1138 1139 /* 1140 * Clean all volatile data fields, preserve iotag and node struct. 1141 */ 1142 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1143 iocbq->sli4_xritag = NO_XRI; 1144 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1145 } 1146 1147 /** 1148 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1149 * @phba: Pointer to HBA context object. 1150 * @iocbq: Pointer to driver iocb object. 1151 * 1152 * This function is called with hbalock held to release driver 1153 * iocb object to the iocb pool. The iotag in the iocb object 1154 * does not change for each use of the iocb object. This function 1155 * clears all other fields of the iocb object when it is freed. 
1156 **/ 1157 static void 1158 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1159 { 1160 lockdep_assert_held(&phba->hbalock); 1161 1162 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1163 phba->iocb_cnt--; 1164 } 1165 1166 /** 1167 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1168 * @phba: Pointer to HBA context object. 1169 * @iocbq: Pointer to driver iocb object. 1170 * 1171 * This function is called with no lock held to release the iocb to 1172 * iocb pool. 1173 **/ 1174 void 1175 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1176 { 1177 unsigned long iflags; 1178 1179 /* 1180 * Clean all volatile data fields, preserve iotag and node struct. 1181 */ 1182 spin_lock_irqsave(&phba->hbalock, iflags); 1183 __lpfc_sli_release_iocbq(phba, iocbq); 1184 spin_unlock_irqrestore(&phba->hbalock, iflags); 1185 } 1186 1187 /** 1188 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1189 * @phba: Pointer to HBA context object. 1190 * @iocblist: List of IOCBs. 1191 * @ulpstatus: ULP status in IOCB command field. 1192 * @ulpWord4: ULP word-4 in IOCB command field. 1193 * 1194 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1195 * on the list by invoking the complete callback function associated with the 1196 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1197 * fields. 1198 **/ 1199 void 1200 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1201 uint32_t ulpstatus, uint32_t ulpWord4) 1202 { 1203 struct lpfc_iocbq *piocb; 1204 1205 while (!list_empty(iocblist)) { 1206 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1207 if (!piocb->iocb_cmpl) 1208 lpfc_sli_release_iocbq(phba, piocb); 1209 else { 1210 piocb->iocb.ulpStatus = ulpstatus; 1211 piocb->iocb.un.ulpWord[4] = ulpWord4; 1212 (piocb->iocb_cmpl) (phba, piocb, piocb); 1213 } 1214 } 1215 return; 1216 } 1217 1218 /** 1219 * lpfc_sli_iocb_cmd_type - Get the iocb type 1220 * @iocb_cmnd: iocb command code. 1221 * 1222 * This function is called by ring event handler function to get the iocb type. 1223 * This function translates the iocb command to an iocb command type used to 1224 * decide the final disposition of each completed IOCB. 1225 * The function returns 1226 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1227 * LPFC_SOL_IOCB if it is a solicited iocb completion 1228 * LPFC_ABORT_IOCB if it is an abort iocb 1229 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1230 * 1231 * The caller is not required to hold any lock. 
1232 **/ 1233 static lpfc_iocb_type 1234 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1235 { 1236 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1237 1238 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1239 return 0; 1240 1241 switch (iocb_cmnd) { 1242 case CMD_XMIT_SEQUENCE_CR: 1243 case CMD_XMIT_SEQUENCE_CX: 1244 case CMD_XMIT_BCAST_CN: 1245 case CMD_XMIT_BCAST_CX: 1246 case CMD_ELS_REQUEST_CR: 1247 case CMD_ELS_REQUEST_CX: 1248 case CMD_CREATE_XRI_CR: 1249 case CMD_CREATE_XRI_CX: 1250 case CMD_GET_RPI_CN: 1251 case CMD_XMIT_ELS_RSP_CX: 1252 case CMD_GET_RPI_CR: 1253 case CMD_FCP_IWRITE_CR: 1254 case CMD_FCP_IWRITE_CX: 1255 case CMD_FCP_IREAD_CR: 1256 case CMD_FCP_IREAD_CX: 1257 case CMD_FCP_ICMND_CR: 1258 case CMD_FCP_ICMND_CX: 1259 case CMD_FCP_TSEND_CX: 1260 case CMD_FCP_TRSP_CX: 1261 case CMD_FCP_TRECEIVE_CX: 1262 case CMD_FCP_AUTO_TRSP_CX: 1263 case CMD_ADAPTER_MSG: 1264 case CMD_ADAPTER_DUMP: 1265 case CMD_XMIT_SEQUENCE64_CR: 1266 case CMD_XMIT_SEQUENCE64_CX: 1267 case CMD_XMIT_BCAST64_CN: 1268 case CMD_XMIT_BCAST64_CX: 1269 case CMD_ELS_REQUEST64_CR: 1270 case CMD_ELS_REQUEST64_CX: 1271 case CMD_FCP_IWRITE64_CR: 1272 case CMD_FCP_IWRITE64_CX: 1273 case CMD_FCP_IREAD64_CR: 1274 case CMD_FCP_IREAD64_CX: 1275 case CMD_FCP_ICMND64_CR: 1276 case CMD_FCP_ICMND64_CX: 1277 case CMD_FCP_TSEND64_CX: 1278 case CMD_FCP_TRSP64_CX: 1279 case CMD_FCP_TRECEIVE64_CX: 1280 case CMD_GEN_REQUEST64_CR: 1281 case CMD_GEN_REQUEST64_CX: 1282 case CMD_XMIT_ELS_RSP64_CX: 1283 case DSSCMD_IWRITE64_CR: 1284 case DSSCMD_IWRITE64_CX: 1285 case DSSCMD_IREAD64_CR: 1286 case DSSCMD_IREAD64_CX: 1287 type = LPFC_SOL_IOCB; 1288 break; 1289 case CMD_ABORT_XRI_CN: 1290 case CMD_ABORT_XRI_CX: 1291 case CMD_CLOSE_XRI_CN: 1292 case CMD_CLOSE_XRI_CX: 1293 case CMD_XRI_ABORTED_CX: 1294 case CMD_ABORT_MXRI64_CN: 1295 case CMD_XMIT_BLS_RSP64_CX: 1296 type = LPFC_ABORT_IOCB; 1297 break; 1298 case CMD_RCV_SEQUENCE_CX: 1299 case CMD_RCV_ELS_REQ_CX: 1300 case CMD_RCV_SEQUENCE64_CX: 1301 case CMD_RCV_ELS_REQ64_CX: 1302 case CMD_ASYNC_STATUS: 1303 case CMD_IOCB_RCV_SEQ64_CX: 1304 case CMD_IOCB_RCV_ELS64_CX: 1305 case CMD_IOCB_RCV_CONT64_CX: 1306 case CMD_IOCB_RET_XRI64_CX: 1307 type = LPFC_UNSOL_IOCB; 1308 break; 1309 case CMD_IOCB_XMIT_MSEQ64_CR: 1310 case CMD_IOCB_XMIT_MSEQ64_CX: 1311 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1312 case CMD_IOCB_RCV_ELS_LIST64_CX: 1313 case CMD_IOCB_CLOSE_EXTENDED_CN: 1314 case CMD_IOCB_ABORT_EXTENDED_CN: 1315 case CMD_IOCB_RET_HBQE64_CN: 1316 case CMD_IOCB_FCP_IBIDIR64_CR: 1317 case CMD_IOCB_FCP_IBIDIR64_CX: 1318 case CMD_IOCB_FCP_ITASKMGT64_CX: 1319 case CMD_IOCB_LOGENTRY_CN: 1320 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1321 printk("%s - Unhandled SLI-3 Command x%x\n", 1322 __func__, iocb_cmnd); 1323 type = LPFC_UNKNOWN_IOCB; 1324 break; 1325 default: 1326 type = LPFC_UNKNOWN_IOCB; 1327 break; 1328 } 1329 1330 return type; 1331 } 1332 1333 /** 1334 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1335 * @phba: Pointer to HBA context object. 1336 * 1337 * This function is called from SLI initialization code 1338 * to configure every ring of the HBA's SLI interface. The 1339 * caller is not required to hold any lock. This function issues 1340 * a config_ring mailbox command for each ring. 1341 * This function returns zero if successful else returns a negative 1342 * error code. 
1343 **/ 1344 static int 1345 lpfc_sli_ring_map(struct lpfc_hba *phba) 1346 { 1347 struct lpfc_sli *psli = &phba->sli; 1348 LPFC_MBOXQ_t *pmb; 1349 MAILBOX_t *pmbox; 1350 int i, rc, ret = 0; 1351 1352 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1353 if (!pmb) 1354 return -ENOMEM; 1355 pmbox = &pmb->u.mb; 1356 phba->link_state = LPFC_INIT_MBX_CMDS; 1357 for (i = 0; i < psli->num_rings; i++) { 1358 lpfc_config_ring(phba, i, pmb); 1359 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1360 if (rc != MBX_SUCCESS) { 1361 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1362 "0446 Adapter failed to init (%d), " 1363 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1364 "ring %d\n", 1365 rc, pmbox->mbxCommand, 1366 pmbox->mbxStatus, i); 1367 phba->link_state = LPFC_HBA_ERROR; 1368 ret = -ENXIO; 1369 break; 1370 } 1371 } 1372 mempool_free(pmb, phba->mbox_mem_pool); 1373 return ret; 1374 } 1375 1376 /** 1377 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1378 * @phba: Pointer to HBA context object. 1379 * @pring: Pointer to driver SLI ring object. 1380 * @piocb: Pointer to the driver iocb object. 1381 * 1382 * This function is called with hbalock held. The function adds the 1383 * new iocb to txcmplq of the given ring. This function always returns 1384 * 0. If this function is called for ELS ring, this function checks if 1385 * there is a vport associated with the ELS command. This function also 1386 * starts els_tmofunc timer if this is an ELS command. 1387 **/ 1388 static int 1389 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1390 struct lpfc_iocbq *piocb) 1391 { 1392 lockdep_assert_held(&phba->hbalock); 1393 1394 BUG_ON(!piocb); 1395 1396 list_add_tail(&piocb->list, &pring->txcmplq); 1397 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1398 1399 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1400 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1401 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 1402 BUG_ON(!piocb->vport); 1403 if (!(piocb->vport->load_flag & FC_UNLOADING)) 1404 mod_timer(&piocb->vport->els_tmofunc, 1405 jiffies + 1406 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1407 } 1408 1409 return 0; 1410 } 1411 1412 /** 1413 * lpfc_sli_ringtx_get - Get first element of the txq 1414 * @phba: Pointer to HBA context object. 1415 * @pring: Pointer to driver SLI ring object. 1416 * 1417 * This function is called with hbalock held to get next 1418 * iocb in txq of the given ring. If there is any iocb in 1419 * the txq, the function returns first iocb in the list after 1420 * removing the iocb from the list, else it returns NULL. 1421 **/ 1422 struct lpfc_iocbq * 1423 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1424 { 1425 struct lpfc_iocbq *cmd_iocb; 1426 1427 lockdep_assert_held(&phba->hbalock); 1428 1429 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1430 return cmd_iocb; 1431 } 1432 1433 /** 1434 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1435 * @phba: Pointer to HBA context object. 1436 * @pring: Pointer to driver SLI ring object. 1437 * 1438 * This function is called with hbalock held and the caller must post the 1439 * iocb without releasing the lock. If the caller releases the lock, 1440 * iocb slot returned by the function is not guaranteed to be available. 1441 * The function returns pointer to the next available iocb slot if there 1442 * is available slot in the ring, else it returns NULL. 
1443 * If the get index of the ring is ahead of the put index, the function 1444 * will post an error attention event to the worker thread to take the 1445 * HBA to offline state. 1446 **/ 1447 static IOCB_t * 1448 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1449 { 1450 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1451 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1452 1453 lockdep_assert_held(&phba->hbalock); 1454 1455 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1456 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1457 pring->sli.sli3.next_cmdidx = 0; 1458 1459 if (unlikely(pring->sli.sli3.local_getidx == 1460 pring->sli.sli3.next_cmdidx)) { 1461 1462 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1463 1464 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1465 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1466 "0315 Ring %d issue: portCmdGet %d " 1467 "is bigger than cmd ring %d\n", 1468 pring->ringno, 1469 pring->sli.sli3.local_getidx, 1470 max_cmd_idx); 1471 1472 phba->link_state = LPFC_HBA_ERROR; 1473 /* 1474 * All error attention handlers are posted to 1475 * worker thread 1476 */ 1477 phba->work_ha |= HA_ERATT; 1478 phba->work_hs = HS_FFER3; 1479 1480 lpfc_worker_wake_up(phba); 1481 1482 return NULL; 1483 } 1484 1485 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1486 return NULL; 1487 } 1488 1489 return lpfc_cmd_iocb(phba, pring); 1490 } 1491 1492 /** 1493 * lpfc_sli_next_iotag - Get an iotag for the iocb 1494 * @phba: Pointer to HBA context object. 1495 * @iocbq: Pointer to driver iocb object. 1496 * 1497 * This function gets an iotag for the iocb. If there is no unused iotag and 1498 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1499 * array and assigns a new iotag. 1500 * The function returns the allocated iotag if successful, else returns zero. 1501 * Zero is not a valid iotag. 1502 * The caller is not required to hold any lock. 
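 *
 * Informal sketch of the growth path (an added illustration): when the next
 * tag would fall outside the current table, a new array of
 * iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT pointers is allocated, the
 * old entries are copied across, and the next tag is handed out from the
 * enlarged table. A return value of zero always means no iotag could be
 * assigned.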
1503 **/ 1504 uint16_t 1505 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1506 { 1507 struct lpfc_iocbq **new_arr; 1508 struct lpfc_iocbq **old_arr; 1509 size_t new_len; 1510 struct lpfc_sli *psli = &phba->sli; 1511 uint16_t iotag; 1512 1513 spin_lock_irq(&phba->hbalock); 1514 iotag = psli->last_iotag; 1515 if(++iotag < psli->iocbq_lookup_len) { 1516 psli->last_iotag = iotag; 1517 psli->iocbq_lookup[iotag] = iocbq; 1518 spin_unlock_irq(&phba->hbalock); 1519 iocbq->iotag = iotag; 1520 return iotag; 1521 } else if (psli->iocbq_lookup_len < (0xffff 1522 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1523 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1524 spin_unlock_irq(&phba->hbalock); 1525 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1526 GFP_KERNEL); 1527 if (new_arr) { 1528 spin_lock_irq(&phba->hbalock); 1529 old_arr = psli->iocbq_lookup; 1530 if (new_len <= psli->iocbq_lookup_len) { 1531 /* highly unprobable case */ 1532 kfree(new_arr); 1533 iotag = psli->last_iotag; 1534 if(++iotag < psli->iocbq_lookup_len) { 1535 psli->last_iotag = iotag; 1536 psli->iocbq_lookup[iotag] = iocbq; 1537 spin_unlock_irq(&phba->hbalock); 1538 iocbq->iotag = iotag; 1539 return iotag; 1540 } 1541 spin_unlock_irq(&phba->hbalock); 1542 return 0; 1543 } 1544 if (psli->iocbq_lookup) 1545 memcpy(new_arr, old_arr, 1546 ((psli->last_iotag + 1) * 1547 sizeof (struct lpfc_iocbq *))); 1548 psli->iocbq_lookup = new_arr; 1549 psli->iocbq_lookup_len = new_len; 1550 psli->last_iotag = iotag; 1551 psli->iocbq_lookup[iotag] = iocbq; 1552 spin_unlock_irq(&phba->hbalock); 1553 iocbq->iotag = iotag; 1554 kfree(old_arr); 1555 return iotag; 1556 } 1557 } else 1558 spin_unlock_irq(&phba->hbalock); 1559 1560 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1561 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1562 psli->last_iotag); 1563 1564 return 0; 1565 } 1566 1567 /** 1568 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1569 * @phba: Pointer to HBA context object. 1570 * @pring: Pointer to driver SLI ring object. 1571 * @iocb: Pointer to iocb slot in the ring. 1572 * @nextiocb: Pointer to driver iocb object which need to be 1573 * posted to firmware. 1574 * 1575 * This function is called with hbalock held to post a new iocb to 1576 * the firmware. This function copies the new iocb to ring iocb slot and 1577 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1578 * a completion call back for this iocb else the function will free the 1579 * iocb object. 1580 **/ 1581 static void 1582 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1583 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1584 { 1585 lockdep_assert_held(&phba->hbalock); 1586 /* 1587 * Set up an iotag 1588 */ 1589 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1590 1591 1592 if (pring->ringno == LPFC_ELS_RING) { 1593 lpfc_debugfs_slow_ring_trc(phba, 1594 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1595 *(((uint32_t *) &nextiocb->iocb) + 4), 1596 *(((uint32_t *) &nextiocb->iocb) + 6), 1597 *(((uint32_t *) &nextiocb->iocb) + 7)); 1598 } 1599 1600 /* 1601 * Issue iocb command to adapter 1602 */ 1603 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1604 wmb(); 1605 pring->stats.iocb_cmd++; 1606 1607 /* 1608 * If there is no completion routine to call, we can release the 1609 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1610 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 
1611 */ 1612 if (nextiocb->iocb_cmpl) 1613 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1614 else 1615 __lpfc_sli_release_iocbq(phba, nextiocb); 1616 1617 /* 1618 * Let the HBA know what IOCB slot will be the next one the 1619 * driver will put a command into. 1620 */ 1621 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1622 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1623 } 1624 1625 /** 1626 * lpfc_sli_update_full_ring - Update the chip attention register 1627 * @phba: Pointer to HBA context object. 1628 * @pring: Pointer to driver SLI ring object. 1629 * 1630 * The caller is not required to hold any lock for calling this function. 1631 * This function updates the chip attention bits for the ring to inform firmware 1632 * that there are pending work to be done for this ring and requests an 1633 * interrupt when there is space available in the ring. This function is 1634 * called when the driver is unable to post more iocbs to the ring due 1635 * to unavailability of space in the ring. 1636 **/ 1637 static void 1638 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1639 { 1640 int ringno = pring->ringno; 1641 1642 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1643 1644 wmb(); 1645 1646 /* 1647 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1648 * The HBA will tell us when an IOCB entry is available. 1649 */ 1650 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1651 readl(phba->CAregaddr); /* flush */ 1652 1653 pring->stats.iocb_cmd_full++; 1654 } 1655 1656 /** 1657 * lpfc_sli_update_ring - Update chip attention register 1658 * @phba: Pointer to HBA context object. 1659 * @pring: Pointer to driver SLI ring object. 1660 * 1661 * This function updates the chip attention register bit for the 1662 * given ring to inform HBA that there is more work to be done 1663 * in this ring. The caller is not required to hold any lock. 1664 **/ 1665 static void 1666 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1667 { 1668 int ringno = pring->ringno; 1669 1670 /* 1671 * Tell the HBA that there is work to do in this ring. 1672 */ 1673 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1674 wmb(); 1675 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1676 readl(phba->CAregaddr); /* flush */ 1677 } 1678 } 1679 1680 /** 1681 * lpfc_sli_resume_iocb - Process iocbs in the txq 1682 * @phba: Pointer to HBA context object. 1683 * @pring: Pointer to driver SLI ring object. 1684 * 1685 * This function is called with hbalock held to post pending iocbs 1686 * in the txq to the firmware. This function is called when driver 1687 * detects space available in the ring. 1688 **/ 1689 static void 1690 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1691 { 1692 IOCB_t *iocb; 1693 struct lpfc_iocbq *nextiocb; 1694 1695 lockdep_assert_held(&phba->hbalock); 1696 1697 /* 1698 * Check to see if: 1699 * (a) there is anything on the txq to send 1700 * (b) link is up 1701 * (c) link attention events can be processed (fcp ring only) 1702 * (d) IOCB processing is not blocked by the outstanding mbox command. 
1703 */ 1704 1705 if (lpfc_is_link_up(phba) && 1706 (!list_empty(&pring->txq)) && 1707 (pring->ringno != LPFC_FCP_RING || 1708 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1709 1710 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1711 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1712 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1713 1714 if (iocb) 1715 lpfc_sli_update_ring(phba, pring); 1716 else 1717 lpfc_sli_update_full_ring(phba, pring); 1718 } 1719 1720 return; 1721 } 1722 1723 /** 1724 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1725 * @phba: Pointer to HBA context object. 1726 * @hbqno: HBQ number. 1727 * 1728 * This function is called with hbalock held to get the next 1729 * available slot for the given HBQ. If there is free slot 1730 * available for the HBQ it will return pointer to the next available 1731 * HBQ entry else it will return NULL. 1732 **/ 1733 static struct lpfc_hbq_entry * 1734 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1735 { 1736 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1737 1738 lockdep_assert_held(&phba->hbalock); 1739 1740 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1741 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1742 hbqp->next_hbqPutIdx = 0; 1743 1744 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1745 uint32_t raw_index = phba->hbq_get[hbqno]; 1746 uint32_t getidx = le32_to_cpu(raw_index); 1747 1748 hbqp->local_hbqGetIdx = getidx; 1749 1750 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1751 lpfc_printf_log(phba, KERN_ERR, 1752 LOG_SLI | LOG_VPORT, 1753 "1802 HBQ %d: local_hbqGetIdx " 1754 "%u is > than hbqp->entry_count %u\n", 1755 hbqno, hbqp->local_hbqGetIdx, 1756 hbqp->entry_count); 1757 1758 phba->link_state = LPFC_HBA_ERROR; 1759 return NULL; 1760 } 1761 1762 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1763 return NULL; 1764 } 1765 1766 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1767 hbqp->hbqPutIdx; 1768 } 1769 1770 /** 1771 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1772 * @phba: Pointer to HBA context object. 1773 * 1774 * This function is called with no lock held to free all the 1775 * hbq buffers while uninitializing the SLI interface. It also 1776 * frees the HBQ buffers returned by the firmware but not yet 1777 * processed by the upper layers. 1778 **/ 1779 void 1780 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1781 { 1782 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1783 struct hbq_dmabuf *hbq_buf; 1784 unsigned long flags; 1785 int i, hbq_count; 1786 1787 hbq_count = lpfc_sli_hbq_count(); 1788 /* Return all memory used by all HBQs */ 1789 spin_lock_irqsave(&phba->hbalock, flags); 1790 for (i = 0; i < hbq_count; ++i) { 1791 list_for_each_entry_safe(dmabuf, next_dmabuf, 1792 &phba->hbqs[i].hbq_buffer_list, list) { 1793 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1794 list_del(&hbq_buf->dbuf.list); 1795 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1796 } 1797 phba->hbqs[i].buffer_count = 0; 1798 } 1799 1800 /* Mark the HBQs not in use */ 1801 phba->hbq_in_use = 0; 1802 spin_unlock_irqrestore(&phba->hbalock, flags); 1803 } 1804 1805 /** 1806 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1807 * @phba: Pointer to HBA context object. 1808 * @hbqno: HBQ number. 1809 * @hbq_buf: Pointer to HBQ buffer. 1810 * 1811 * This function is called with the hbalock held to post a 1812 * hbq buffer to the firmware. If the function finds an empty 1813 * slot in the HBQ, it will post the buffer. 
The function will return 1814 * pointer to the hbq entry if it successfully post the buffer 1815 * else it will return NULL. 1816 **/ 1817 static int 1818 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1819 struct hbq_dmabuf *hbq_buf) 1820 { 1821 lockdep_assert_held(&phba->hbalock); 1822 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1823 } 1824 1825 /** 1826 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1827 * @phba: Pointer to HBA context object. 1828 * @hbqno: HBQ number. 1829 * @hbq_buf: Pointer to HBQ buffer. 1830 * 1831 * This function is called with the hbalock held to post a hbq buffer to the 1832 * firmware. If the function finds an empty slot in the HBQ, it will post the 1833 * buffer and place it on the hbq_buffer_list. The function will return zero if 1834 * it successfully post the buffer else it will return an error. 1835 **/ 1836 static int 1837 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1838 struct hbq_dmabuf *hbq_buf) 1839 { 1840 struct lpfc_hbq_entry *hbqe; 1841 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1842 1843 lockdep_assert_held(&phba->hbalock); 1844 /* Get next HBQ entry slot to use */ 1845 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1846 if (hbqe) { 1847 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1848 1849 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1850 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1851 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size; 1852 hbqe->bde.tus.f.bdeFlags = 0; 1853 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1854 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1855 /* Sync SLIM */ 1856 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1857 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1858 /* flush */ 1859 readl(phba->hbq_put + hbqno); 1860 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1861 return 0; 1862 } else 1863 return -ENOMEM; 1864 } 1865 1866 /** 1867 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1868 * @phba: Pointer to HBA context object. 1869 * @hbqno: HBQ number. 1870 * @hbq_buf: Pointer to HBQ buffer. 1871 * 1872 * This function is called with the hbalock held to post an RQE to the SLI4 1873 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1874 * the hbq_buffer_list and return zero, otherwise it will return an error. 1875 **/ 1876 static int 1877 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1878 struct hbq_dmabuf *hbq_buf) 1879 { 1880 int rc; 1881 struct lpfc_rqe hrqe; 1882 struct lpfc_rqe drqe; 1883 struct lpfc_queue *hrq; 1884 struct lpfc_queue *drq; 1885 1886 if (hbqno != LPFC_ELS_HBQ) 1887 return 1; 1888 hrq = phba->sli4_hba.hdr_rq; 1889 drq = phba->sli4_hba.dat_rq; 1890 1891 lockdep_assert_held(&phba->hbalock); 1892 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1893 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1894 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1895 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1896 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 1897 if (rc < 0) 1898 return rc; 1899 hbq_buf->tag = (rc | (hbqno << 16)); 1900 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1901 return 0; 1902 } 1903 1904 /* HBQ for ELS and CT traffic. 
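 * The defaults below size the ELS HBQ at 256 entries, seed it with 40
 * buffers at initialization on SLI3 (the full entry count on SLI4) and
 * top it up in batches of 40; see lpfc_sli_hbqbuf_init_hbqs and
 * lpfc_sli_hbqbuf_add_hbqs below.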
*/ 1905 static struct lpfc_hbq_init lpfc_els_hbq = { 1906 .rn = 1, 1907 .entry_count = 256, 1908 .mask_count = 0, 1909 .profile = 0, 1910 .ring_mask = (1 << LPFC_ELS_RING), 1911 .buffer_count = 0, 1912 .init_count = 40, 1913 .add_count = 40, 1914 }; 1915 1916 /* Array of HBQs */ 1917 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1918 &lpfc_els_hbq, 1919 }; 1920 1921 /** 1922 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1923 * @phba: Pointer to HBA context object. 1924 * @hbqno: HBQ number. 1925 * @count: Number of HBQ buffers to be posted. 1926 * 1927 * This function is called with no lock held to post more hbq buffers to the 1928 * given HBQ. The function returns the number of HBQ buffers successfully 1929 * posted. 1930 **/ 1931 static int 1932 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1933 { 1934 uint32_t i, posted = 0; 1935 unsigned long flags; 1936 struct hbq_dmabuf *hbq_buffer; 1937 LIST_HEAD(hbq_buf_list); 1938 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1939 return 0; 1940 1941 if ((phba->hbqs[hbqno].buffer_count + count) > 1942 lpfc_hbq_defs[hbqno]->entry_count) 1943 count = lpfc_hbq_defs[hbqno]->entry_count - 1944 phba->hbqs[hbqno].buffer_count; 1945 if (!count) 1946 return 0; 1947 /* Allocate HBQ entries */ 1948 for (i = 0; i < count; i++) { 1949 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1950 if (!hbq_buffer) 1951 break; 1952 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1953 } 1954 /* Check whether HBQ is still in use */ 1955 spin_lock_irqsave(&phba->hbalock, flags); 1956 if (!phba->hbq_in_use) 1957 goto err; 1958 while (!list_empty(&hbq_buf_list)) { 1959 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1960 dbuf.list); 1961 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1962 (hbqno << 16)); 1963 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1964 phba->hbqs[hbqno].buffer_count++; 1965 posted++; 1966 } else 1967 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1968 } 1969 spin_unlock_irqrestore(&phba->hbalock, flags); 1970 return posted; 1971 err: 1972 spin_unlock_irqrestore(&phba->hbalock, flags); 1973 while (!list_empty(&hbq_buf_list)) { 1974 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1975 dbuf.list); 1976 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1977 } 1978 return 0; 1979 } 1980 1981 /** 1982 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1983 * @phba: Pointer to HBA context object. 1984 * @qno: HBQ number. 1985 * 1986 * This function posts more buffers to the HBQ. This function 1987 * is called with no lock held. The function returns the number of HBQ entries 1988 * successfully allocated. 1989 **/ 1990 int 1991 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1992 { 1993 if (phba->sli_rev == LPFC_SLI_REV4) 1994 return 0; 1995 else 1996 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1997 lpfc_hbq_defs[qno]->add_count); 1998 } 1999 2000 /** 2001 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 2002 * @phba: Pointer to HBA context object. 2003 * @qno: HBQ queue number. 2004 * 2005 * This function is called from SLI initialization code path with 2006 * no lock held to post initial HBQ buffers to firmware. The 2007 * function returns the number of HBQ entries successfully allocated. 
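 *
 * A minimal usage sketch (illustrative only; the exact call site lives
 * in the SLI bring-up path and is not shown here):
 *
 *   for (qno = 0; qno < lpfc_sli_hbq_count(); qno++)
 *       lpfc_sli_hbqbuf_init_hbqs(phba, qno);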
2008 **/ 2009 static int 2010 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 2011 { 2012 if (phba->sli_rev == LPFC_SLI_REV4) 2013 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2014 lpfc_hbq_defs[qno]->entry_count); 2015 else 2016 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 2017 lpfc_hbq_defs[qno]->init_count); 2018 } 2019 2020 /** 2021 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 2022 * @phba: Pointer to HBA context object. 2023 * @hbqno: HBQ number. 2024 * 2025 * This function removes the first hbq buffer on an hbq list and returns a 2026 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2027 **/ 2028 static struct hbq_dmabuf * 2029 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 2030 { 2031 struct lpfc_dmabuf *d_buf; 2032 2033 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 2034 if (!d_buf) 2035 return NULL; 2036 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2037 } 2038 2039 /** 2040 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list 2041 * @phba: Pointer to HBA context object. 2042 * @hbqno: HBQ number. 2043 * 2044 * This function removes the first RQ buffer on an RQ buffer list and returns a 2045 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 2046 **/ 2047 static struct rqb_dmabuf * 2048 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq) 2049 { 2050 struct lpfc_dmabuf *h_buf; 2051 struct lpfc_rqb *rqbp; 2052 2053 rqbp = hrq->rqbp; 2054 list_remove_head(&rqbp->rqb_buffer_list, h_buf, 2055 struct lpfc_dmabuf, list); 2056 if (!h_buf) 2057 return NULL; 2058 rqbp->buffer_count--; 2059 return container_of(h_buf, struct rqb_dmabuf, hbuf); 2060 } 2061 2062 /** 2063 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2064 * @phba: Pointer to HBA context object. 2065 * @tag: Tag of the hbq buffer. 2066 * 2067 * This function searches for the hbq buffer associated with the given tag in 2068 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer 2069 * otherwise it returns NULL. 2070 **/ 2071 static struct hbq_dmabuf * 2072 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2073 { 2074 struct lpfc_dmabuf *d_buf; 2075 struct hbq_dmabuf *hbq_buf; 2076 uint32_t hbqno; 2077 2078 hbqno = tag >> 16; 2079 if (hbqno >= LPFC_MAX_HBQS) 2080 return NULL; 2081 2082 spin_lock_irq(&phba->hbalock); 2083 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2084 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2085 if (hbq_buf->tag == tag) { 2086 spin_unlock_irq(&phba->hbalock); 2087 return hbq_buf; 2088 } 2089 } 2090 spin_unlock_irq(&phba->hbalock); 2091 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2092 "1803 Bad hbq tag. Data: x%x x%x\n", 2093 tag, phba->hbqs[tag >> 16].buffer_count); 2094 return NULL; 2095 } 2096 2097 /** 2098 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2099 * @phba: Pointer to HBA context object. 2100 * @hbq_buffer: Pointer to HBQ buffer. 2101 * 2102 * This function is called with hbalock. This function gives back 2103 * the hbq buffer to firmware. If the HBQ does not have space to 2104 * post the buffer, it will free the buffer. 
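 *
 * Buffer tags are encoded as (index | (hbqno << 16)) when posted, so
 * this routine recovers the owning queue with a shift (as done below):
 *
 *   hbqno = hbq_buffer->tag >> 16;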
2105 **/ 2106 void 2107 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2108 { 2109 uint32_t hbqno; 2110 2111 if (hbq_buffer) { 2112 hbqno = hbq_buffer->tag >> 16; 2113 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2114 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2115 } 2116 } 2117 2118 /** 2119 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2120 * @mbxCommand: mailbox command code. 2121 * 2122 * This function is called by the mailbox event handler function to verify 2123 * that the completed mailbox command is a legitimate mailbox command. If the 2124 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2125 * and the mailbox event handler will take the HBA offline. 2126 **/ 2127 static int 2128 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2129 { 2130 uint8_t ret; 2131 2132 switch (mbxCommand) { 2133 case MBX_LOAD_SM: 2134 case MBX_READ_NV: 2135 case MBX_WRITE_NV: 2136 case MBX_WRITE_VPARMS: 2137 case MBX_RUN_BIU_DIAG: 2138 case MBX_INIT_LINK: 2139 case MBX_DOWN_LINK: 2140 case MBX_CONFIG_LINK: 2141 case MBX_CONFIG_RING: 2142 case MBX_RESET_RING: 2143 case MBX_READ_CONFIG: 2144 case MBX_READ_RCONFIG: 2145 case MBX_READ_SPARM: 2146 case MBX_READ_STATUS: 2147 case MBX_READ_RPI: 2148 case MBX_READ_XRI: 2149 case MBX_READ_REV: 2150 case MBX_READ_LNK_STAT: 2151 case MBX_REG_LOGIN: 2152 case MBX_UNREG_LOGIN: 2153 case MBX_CLEAR_LA: 2154 case MBX_DUMP_MEMORY: 2155 case MBX_DUMP_CONTEXT: 2156 case MBX_RUN_DIAGS: 2157 case MBX_RESTART: 2158 case MBX_UPDATE_CFG: 2159 case MBX_DOWN_LOAD: 2160 case MBX_DEL_LD_ENTRY: 2161 case MBX_RUN_PROGRAM: 2162 case MBX_SET_MASK: 2163 case MBX_SET_VARIABLE: 2164 case MBX_UNREG_D_ID: 2165 case MBX_KILL_BOARD: 2166 case MBX_CONFIG_FARP: 2167 case MBX_BEACON: 2168 case MBX_LOAD_AREA: 2169 case MBX_RUN_BIU_DIAG64: 2170 case MBX_CONFIG_PORT: 2171 case MBX_READ_SPARM64: 2172 case MBX_READ_RPI64: 2173 case MBX_REG_LOGIN64: 2174 case MBX_READ_TOPOLOGY: 2175 case MBX_WRITE_WWN: 2176 case MBX_SET_DEBUG: 2177 case MBX_LOAD_EXP_ROM: 2178 case MBX_ASYNCEVT_ENABLE: 2179 case MBX_REG_VPI: 2180 case MBX_UNREG_VPI: 2181 case MBX_HEARTBEAT: 2182 case MBX_PORT_CAPABILITIES: 2183 case MBX_PORT_IOV_CONTROL: 2184 case MBX_SLI4_CONFIG: 2185 case MBX_SLI4_REQ_FTRS: 2186 case MBX_REG_FCFI: 2187 case MBX_UNREG_FCFI: 2188 case MBX_REG_VFI: 2189 case MBX_UNREG_VFI: 2190 case MBX_INIT_VPI: 2191 case MBX_INIT_VFI: 2192 case MBX_RESUME_RPI: 2193 case MBX_READ_EVENT_LOG_STATUS: 2194 case MBX_READ_EVENT_LOG: 2195 case MBX_SECURITY_MGMT: 2196 case MBX_AUTH_PORT: 2197 case MBX_ACCESS_VDATA: 2198 ret = mbxCommand; 2199 break; 2200 default: 2201 ret = MBX_SHUTDOWN; 2202 break; 2203 } 2204 return ret; 2205 } 2206 2207 /** 2208 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2209 * @phba: Pointer to HBA context object. 2210 * @pmboxq: Pointer to mailbox command. 2211 * 2212 * This is completion handler function for mailbox commands issued from 2213 * lpfc_sli_issue_mbox_wait function. This function is called by the 2214 * mailbox event handler function with no lock held. This function 2215 * will wake up thread waiting on the wait queue pointed by context1 2216 * of the mailbox. 2217 **/ 2218 void 2219 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2220 { 2221 wait_queue_head_t *pdone_q; 2222 unsigned long drvr_flag; 2223 2224 /* 2225 * If pdone_q is empty, the driver thread gave up waiting and 2226 * continued running. 
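 *
 * Sketch of how the waiting side is expected to pair with this wake-up
 * (illustrative only; lpfc_sli_issue_mbox_wait holds the authoritative
 * version, and the timeout below is a placeholder):
 *
 *   DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *   pmboxq->context1 = &done_q;
 *   wait_event_interruptible_timeout(done_q,
 *           pmboxq->mbox_flag & LPFC_MBX_WAKE,
 *           msecs_to_jiffies(timeout * 1000));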
2227 */ 2228 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2229 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2230 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2231 if (pdone_q) 2232 wake_up_interruptible(pdone_q); 2233 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2234 return; 2235 } 2236 2237 2238 /** 2239 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2240 * @phba: Pointer to HBA context object. 2241 * @pmb: Pointer to mailbox object. 2242 * 2243 * This function is the default mailbox completion handler. It 2244 * frees the memory resources associated with the completed mailbox 2245 * command. If the completed command is a REG_LOGIN mailbox command, 2246 * this function will issue a UREG_LOGIN to re-claim the RPI. 2247 **/ 2248 void 2249 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2250 { 2251 struct lpfc_vport *vport = pmb->vport; 2252 struct lpfc_dmabuf *mp; 2253 struct lpfc_nodelist *ndlp; 2254 struct Scsi_Host *shost; 2255 uint16_t rpi, vpi; 2256 int rc; 2257 2258 mp = (struct lpfc_dmabuf *) (pmb->context1); 2259 2260 if (mp) { 2261 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2262 kfree(mp); 2263 } 2264 2265 /* 2266 * If a REG_LOGIN succeeded after node is destroyed or node 2267 * is in re-discovery driver need to cleanup the RPI. 2268 */ 2269 if (!(phba->pport->load_flag & FC_UNLOADING) && 2270 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2271 !pmb->u.mb.mbxStatus) { 2272 rpi = pmb->u.mb.un.varWords[0]; 2273 vpi = pmb->u.mb.un.varRegLogin.vpi; 2274 lpfc_unreg_login(phba, vpi, rpi, pmb); 2275 pmb->vport = vport; 2276 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2277 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2278 if (rc != MBX_NOT_FINISHED) 2279 return; 2280 } 2281 2282 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2283 !(phba->pport->load_flag & FC_UNLOADING) && 2284 !pmb->u.mb.mbxStatus) { 2285 shost = lpfc_shost_from_vport(vport); 2286 spin_lock_irq(shost->host_lock); 2287 vport->vpi_state |= LPFC_VPI_REGISTERED; 2288 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2289 spin_unlock_irq(shost->host_lock); 2290 } 2291 2292 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2293 ndlp = (struct lpfc_nodelist *)pmb->context2; 2294 lpfc_nlp_put(ndlp); 2295 pmb->context2 = NULL; 2296 } 2297 2298 /* Check security permission status on INIT_LINK mailbox command */ 2299 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2300 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2301 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2302 "2860 SLI authentication is required " 2303 "for INIT_LINK but has not done yet\n"); 2304 2305 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2306 lpfc_sli4_mbox_cmd_free(phba, pmb); 2307 else 2308 mempool_free(pmb, phba->mbox_mem_pool); 2309 } 2310 /** 2311 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler 2312 * @phba: Pointer to HBA context object. 2313 * @pmb: Pointer to mailbox object. 2314 * 2315 * This function is the unreg rpi mailbox completion handler. It 2316 * frees the memory resources associated with the completed mailbox 2317 * command. An additional refrenece is put on the ndlp to prevent 2318 * lpfc_nlp_release from freeing the rpi bit in the bitmask before 2319 * the unreg mailbox command completes, this routine puts the 2320 * reference back. 
2321 * 2322 **/ 2323 void 2324 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2325 { 2326 struct lpfc_vport *vport = pmb->vport; 2327 struct lpfc_nodelist *ndlp; 2328 2329 ndlp = pmb->context1; 2330 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { 2331 if (phba->sli_rev == LPFC_SLI_REV4 && 2332 (bf_get(lpfc_sli_intf_if_type, 2333 &phba->sli4_hba.sli_intf) == 2334 LPFC_SLI_INTF_IF_TYPE_2)) { 2335 if (ndlp) { 2336 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 2337 "0010 UNREG_LOGIN vpi:%x " 2338 "rpi:%x DID:%x map:%x %p\n", 2339 vport->vpi, ndlp->nlp_rpi, 2340 ndlp->nlp_DID, 2341 ndlp->nlp_usg_map, ndlp); 2342 ndlp->nlp_flag &= ~NLP_LOGO_ACC; 2343 lpfc_nlp_put(ndlp); 2344 } 2345 } 2346 } 2347 2348 mempool_free(pmb, phba->mbox_mem_pool); 2349 } 2350 2351 /** 2352 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2353 * @phba: Pointer to HBA context object. 2354 * 2355 * This function is called with no lock held. This function processes all 2356 * the completed mailbox commands and gives it to upper layers. The interrupt 2357 * service routine processes mailbox completion interrupt and adds completed 2358 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2359 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2360 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2361 * function returns the mailbox commands to the upper layer by calling the 2362 * completion handler function of each mailbox. 2363 **/ 2364 int 2365 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2366 { 2367 MAILBOX_t *pmbox; 2368 LPFC_MBOXQ_t *pmb; 2369 int rc; 2370 LIST_HEAD(cmplq); 2371 2372 phba->sli.slistat.mbox_event++; 2373 2374 /* Get all completed mailboxe buffers into the cmplq */ 2375 spin_lock_irq(&phba->hbalock); 2376 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2377 spin_unlock_irq(&phba->hbalock); 2378 2379 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2380 do { 2381 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2382 if (pmb == NULL) 2383 break; 2384 2385 pmbox = &pmb->u.mb; 2386 2387 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2388 if (pmb->vport) { 2389 lpfc_debugfs_disc_trc(pmb->vport, 2390 LPFC_DISC_TRC_MBOX_VPORT, 2391 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2392 (uint32_t)pmbox->mbxCommand, 2393 pmbox->un.varWords[0], 2394 pmbox->un.varWords[1]); 2395 } 2396 else { 2397 lpfc_debugfs_disc_trc(phba->pport, 2398 LPFC_DISC_TRC_MBOX, 2399 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2400 (uint32_t)pmbox->mbxCommand, 2401 pmbox->un.varWords[0], 2402 pmbox->un.varWords[1]); 2403 } 2404 } 2405 2406 /* 2407 * It is a fatal error if unknown mbox command completion. 2408 */ 2409 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2410 MBX_SHUTDOWN) { 2411 /* Unknown mailbox command compl */ 2412 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2413 "(%d):0323 Unknown Mailbox command " 2414 "x%x (x%x/x%x) Cmpl\n", 2415 pmb->vport ? 
pmb->vport->vpi : 0, 2416 pmbox->mbxCommand, 2417 lpfc_sli_config_mbox_subsys_get(phba, 2418 pmb), 2419 lpfc_sli_config_mbox_opcode_get(phba, 2420 pmb)); 2421 phba->link_state = LPFC_HBA_ERROR; 2422 phba->work_hs = HS_FFER3; 2423 lpfc_handle_eratt(phba); 2424 continue; 2425 } 2426 2427 if (pmbox->mbxStatus) { 2428 phba->sli.slistat.mbox_stat_err++; 2429 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2430 /* Mbox cmd cmpl error - RETRYing */ 2431 lpfc_printf_log(phba, KERN_INFO, 2432 LOG_MBOX | LOG_SLI, 2433 "(%d):0305 Mbox cmd cmpl " 2434 "error - RETRYing Data: x%x " 2435 "(x%x/x%x) x%x x%x x%x\n", 2436 pmb->vport ? pmb->vport->vpi : 0, 2437 pmbox->mbxCommand, 2438 lpfc_sli_config_mbox_subsys_get(phba, 2439 pmb), 2440 lpfc_sli_config_mbox_opcode_get(phba, 2441 pmb), 2442 pmbox->mbxStatus, 2443 pmbox->un.varWords[0], 2444 pmb->vport->port_state); 2445 pmbox->mbxStatus = 0; 2446 pmbox->mbxOwner = OWN_HOST; 2447 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2448 if (rc != MBX_NOT_FINISHED) 2449 continue; 2450 } 2451 } 2452 2453 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2454 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2455 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2456 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2457 "x%x x%x x%x\n", 2458 pmb->vport ? pmb->vport->vpi : 0, 2459 pmbox->mbxCommand, 2460 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2461 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2462 pmb->mbox_cmpl, 2463 *((uint32_t *) pmbox), 2464 pmbox->un.varWords[0], 2465 pmbox->un.varWords[1], 2466 pmbox->un.varWords[2], 2467 pmbox->un.varWords[3], 2468 pmbox->un.varWords[4], 2469 pmbox->un.varWords[5], 2470 pmbox->un.varWords[6], 2471 pmbox->un.varWords[7], 2472 pmbox->un.varWords[8], 2473 pmbox->un.varWords[9], 2474 pmbox->un.varWords[10]); 2475 2476 if (pmb->mbox_cmpl) 2477 pmb->mbox_cmpl(phba,pmb); 2478 } while (1); 2479 return 0; 2480 } 2481 2482 /** 2483 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2484 * @phba: Pointer to HBA context object. 2485 * @pring: Pointer to driver SLI ring object. 2486 * @tag: buffer tag. 2487 * 2488 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2489 * is set in the tag the buffer is posted for a particular exchange, 2490 * the function will return the buffer without replacing the buffer. 2491 * If the buffer is for unsolicited ELS or CT traffic, this function 2492 * returns the buffer and also posts another buffer to the firmware. 2493 **/ 2494 static struct lpfc_dmabuf * 2495 lpfc_sli_get_buff(struct lpfc_hba *phba, 2496 struct lpfc_sli_ring *pring, 2497 uint32_t tag) 2498 { 2499 struct hbq_dmabuf *hbq_entry; 2500 2501 if (tag & QUE_BUFTAG_BIT) 2502 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2503 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2504 if (!hbq_entry) 2505 return NULL; 2506 return &hbq_entry->dbuf; 2507 } 2508 2509 /** 2510 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2511 * @phba: Pointer to HBA context object. 2512 * @pring: Pointer to driver SLI ring object. 2513 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2514 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2515 * @fch_type: the type for the first frame of the sequence. 2516 * 2517 * This function is called with no lock held. This function uses the r_ctl and 2518 * type of the received sequence to find the correct callback function to call 2519 * to process the sequence. 
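 *
 * Dispatch order, in outline (mirrors the code below):
 *   1. FC_TYPE_NVME sequences are handed to lpfc_nvmet_unsol_ls_event().
 *   2. If prt[0] carries a profile, its unsol handler receives everything.
 *   3. Otherwise prt[] is scanned for an exact rctl/type match.
 *   4. With no match the routine returns 0 so the caller can log the frame.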
2520 **/ 2521 static int 2522 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2523 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2524 uint32_t fch_type) 2525 { 2526 int i; 2527 2528 switch (fch_type) { 2529 case FC_TYPE_NVME: 2530 lpfc_nvmet_unsol_ls_event(phba, pring, saveq); 2531 return 1; 2532 default: 2533 break; 2534 } 2535 2536 /* unSolicited Responses */ 2537 if (pring->prt[0].profile) { 2538 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2539 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2540 saveq); 2541 return 1; 2542 } 2543 /* We must search, based on rctl / type 2544 for the right routine */ 2545 for (i = 0; i < pring->num_mask; i++) { 2546 if ((pring->prt[i].rctl == fch_r_ctl) && 2547 (pring->prt[i].type == fch_type)) { 2548 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2549 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2550 (phba, pring, saveq); 2551 return 1; 2552 } 2553 } 2554 return 0; 2555 } 2556 2557 /** 2558 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2559 * @phba: Pointer to HBA context object. 2560 * @pring: Pointer to driver SLI ring object. 2561 * @saveq: Pointer to the unsolicited iocb. 2562 * 2563 * This function is called with no lock held by the ring event handler 2564 * when there is an unsolicited iocb posted to the response ring by the 2565 * firmware. This function gets the buffer associated with the iocbs 2566 * and calls the event handler for the ring. This function handles both 2567 * qring buffers and hbq buffers. 2568 * When the function returns 1 the caller can free the iocb object otherwise 2569 * upper layer functions will free the iocb objects. 2570 **/ 2571 static int 2572 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2573 struct lpfc_iocbq *saveq) 2574 { 2575 IOCB_t * irsp; 2576 WORD5 * w5p; 2577 uint32_t Rctl, Type; 2578 struct lpfc_iocbq *iocbq; 2579 struct lpfc_dmabuf *dmzbuf; 2580 2581 irsp = &(saveq->iocb); 2582 2583 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2584 if (pring->lpfc_sli_rcv_async_status) 2585 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2586 else 2587 lpfc_printf_log(phba, 2588 KERN_WARNING, 2589 LOG_SLI, 2590 "0316 Ring %d handler: unexpected " 2591 "ASYNC_STATUS iocb received evt_code " 2592 "0x%x\n", 2593 pring->ringno, 2594 irsp->un.asyncstat.evt_code); 2595 return 1; 2596 } 2597 2598 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2599 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2600 if (irsp->ulpBdeCount > 0) { 2601 dmzbuf = lpfc_sli_get_buff(phba, pring, 2602 irsp->un.ulpWord[3]); 2603 lpfc_in_buf_free(phba, dmzbuf); 2604 } 2605 2606 if (irsp->ulpBdeCount > 1) { 2607 dmzbuf = lpfc_sli_get_buff(phba, pring, 2608 irsp->unsli3.sli3Words[3]); 2609 lpfc_in_buf_free(phba, dmzbuf); 2610 } 2611 2612 if (irsp->ulpBdeCount > 2) { 2613 dmzbuf = lpfc_sli_get_buff(phba, pring, 2614 irsp->unsli3.sli3Words[7]); 2615 lpfc_in_buf_free(phba, dmzbuf); 2616 } 2617 2618 return 1; 2619 } 2620 2621 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2622 if (irsp->ulpBdeCount != 0) { 2623 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2624 irsp->un.ulpWord[3]); 2625 if (!saveq->context2) 2626 lpfc_printf_log(phba, 2627 KERN_ERR, 2628 LOG_SLI, 2629 "0341 Ring %d Cannot find buffer for " 2630 "an unsolicited iocb. 
tag 0x%x\n", 2631 pring->ringno, 2632 irsp->un.ulpWord[3]); 2633 } 2634 if (irsp->ulpBdeCount == 2) { 2635 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2636 irsp->unsli3.sli3Words[7]); 2637 if (!saveq->context3) 2638 lpfc_printf_log(phba, 2639 KERN_ERR, 2640 LOG_SLI, 2641 "0342 Ring %d Cannot find buffer for an" 2642 " unsolicited iocb. tag 0x%x\n", 2643 pring->ringno, 2644 irsp->unsli3.sli3Words[7]); 2645 } 2646 list_for_each_entry(iocbq, &saveq->list, list) { 2647 irsp = &(iocbq->iocb); 2648 if (irsp->ulpBdeCount != 0) { 2649 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2650 irsp->un.ulpWord[3]); 2651 if (!iocbq->context2) 2652 lpfc_printf_log(phba, 2653 KERN_ERR, 2654 LOG_SLI, 2655 "0343 Ring %d Cannot find " 2656 "buffer for an unsolicited iocb" 2657 ". tag 0x%x\n", pring->ringno, 2658 irsp->un.ulpWord[3]); 2659 } 2660 if (irsp->ulpBdeCount == 2) { 2661 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2662 irsp->unsli3.sli3Words[7]); 2663 if (!iocbq->context3) 2664 lpfc_printf_log(phba, 2665 KERN_ERR, 2666 LOG_SLI, 2667 "0344 Ring %d Cannot find " 2668 "buffer for an unsolicited " 2669 "iocb. tag 0x%x\n", 2670 pring->ringno, 2671 irsp->unsli3.sli3Words[7]); 2672 } 2673 } 2674 } 2675 if (irsp->ulpBdeCount != 0 && 2676 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2677 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2678 int found = 0; 2679 2680 /* search continue save q for same XRI */ 2681 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2682 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2683 saveq->iocb.unsli3.rcvsli3.ox_id) { 2684 list_add_tail(&saveq->list, &iocbq->list); 2685 found = 1; 2686 break; 2687 } 2688 } 2689 if (!found) 2690 list_add_tail(&saveq->clist, 2691 &pring->iocb_continue_saveq); 2692 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2693 list_del_init(&iocbq->clist); 2694 saveq = iocbq; 2695 irsp = &(saveq->iocb); 2696 } else 2697 return 0; 2698 } 2699 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2700 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2701 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2702 Rctl = FC_RCTL_ELS_REQ; 2703 Type = FC_TYPE_ELS; 2704 } else { 2705 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2706 Rctl = w5p->hcsw.Rctl; 2707 Type = w5p->hcsw.Type; 2708 2709 /* Firmware Workaround */ 2710 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2711 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2712 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2713 Rctl = FC_RCTL_ELS_REQ; 2714 Type = FC_TYPE_ELS; 2715 w5p->hcsw.Rctl = Rctl; 2716 w5p->hcsw.Type = Type; 2717 } 2718 } 2719 2720 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2721 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2722 "0313 Ring %d handler: unexpected Rctl x%x " 2723 "Type x%x received\n", 2724 pring->ringno, Rctl, Type); 2725 2726 return 1; 2727 } 2728 2729 /** 2730 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2731 * @phba: Pointer to HBA context object. 2732 * @pring: Pointer to driver SLI ring object. 2733 * @prspiocb: Pointer to response iocb object. 2734 * 2735 * This function looks up the iocb_lookup table to get the command iocb 2736 * corresponding to the given response iocb using the iotag of the 2737 * response iocb. This function is called with the hbalock held 2738 * for sli3 devices or the ring_lock for sli4 devices. 2739 * This function returns the command iocb object if it finds the command 2740 * iocb else returns NULL. 
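 *
 * Typical call pattern (sketch; SLI3 shown, SLI4 callers take the
 * ring_lock instead):
 *
 *   spin_lock_irqsave(&phba->hbalock, iflag);
 *   cmdiocb = lpfc_sli_iocbq_lookup(phba, pring, rspiocb);
 *   spin_unlock_irqrestore(&phba->hbalock, iflag);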
2741 **/ 2742 static struct lpfc_iocbq * 2743 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2744 struct lpfc_sli_ring *pring, 2745 struct lpfc_iocbq *prspiocb) 2746 { 2747 struct lpfc_iocbq *cmd_iocb = NULL; 2748 uint16_t iotag; 2749 lockdep_assert_held(&phba->hbalock); 2750 2751 iotag = prspiocb->iocb.ulpIoTag; 2752 2753 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2754 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2755 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2756 /* remove from txcmpl queue list */ 2757 list_del_init(&cmd_iocb->list); 2758 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2759 return cmd_iocb; 2760 } 2761 } 2762 2763 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2764 "0317 iotag x%x is out of " 2765 "range: max iotag x%x wd0 x%x\n", 2766 iotag, phba->sli.last_iotag, 2767 *(((uint32_t *) &prspiocb->iocb) + 7)); 2768 return NULL; 2769 } 2770 2771 /** 2772 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2773 * @phba: Pointer to HBA context object. 2774 * @pring: Pointer to driver SLI ring object. 2775 * @iotag: IOCB tag. 2776 * 2777 * This function looks up the iocb_lookup table to get the command iocb 2778 * corresponding to the given iotag. This function is called with the 2779 * hbalock held. 2780 * This function returns the command iocb object if it finds the command 2781 * iocb else returns NULL. 2782 **/ 2783 static struct lpfc_iocbq * 2784 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2785 struct lpfc_sli_ring *pring, uint16_t iotag) 2786 { 2787 struct lpfc_iocbq *cmd_iocb = NULL; 2788 2789 lockdep_assert_held(&phba->hbalock); 2790 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2791 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2792 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2793 /* remove from txcmpl queue list */ 2794 list_del_init(&cmd_iocb->list); 2795 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2796 return cmd_iocb; 2797 } 2798 } 2799 2800 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2801 "0372 iotag x%x lookup error: max iotag (x%x) " 2802 "iocb_flag x%x\n", 2803 iotag, phba->sli.last_iotag, 2804 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff); 2805 return NULL; 2806 } 2807 2808 /** 2809 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2810 * @phba: Pointer to HBA context object. 2811 * @pring: Pointer to driver SLI ring object. 2812 * @saveq: Pointer to the response iocb to be processed. 2813 * 2814 * This function is called by the ring event handler for non-fcp 2815 * rings when there is a new response iocb in the response ring. 2816 * The caller is not required to hold any locks. This function 2817 * gets the command iocb associated with the response iocb and 2818 * calls the completion handler for the command iocb. If there 2819 * is no completion handler, the function will free the resources 2820 * associated with command iocb. If the response iocb is for 2821 * an already aborted command iocb, the status of the completion 2822 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2823 * This function always returns 1. 
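 * Note that the command iocb lookup below is protected by the ring_lock
 * on SLI4 ports and by the hbalock on earlier revisions, matching the
 * lock that lpfc_sli_iocbq_lookup expects its caller to hold.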
2824 **/ 2825 static int 2826 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2827 struct lpfc_iocbq *saveq) 2828 { 2829 struct lpfc_iocbq *cmdiocbp; 2830 int rc = 1; 2831 unsigned long iflag; 2832 2833 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2834 if (phba->sli_rev == LPFC_SLI_REV4) 2835 spin_lock_irqsave(&pring->ring_lock, iflag); 2836 else 2837 spin_lock_irqsave(&phba->hbalock, iflag); 2838 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2839 if (phba->sli_rev == LPFC_SLI_REV4) 2840 spin_unlock_irqrestore(&pring->ring_lock, iflag); 2841 else 2842 spin_unlock_irqrestore(&phba->hbalock, iflag); 2843 2844 if (cmdiocbp) { 2845 if (cmdiocbp->iocb_cmpl) { 2846 /* 2847 * If an ELS command failed send an event to mgmt 2848 * application. 2849 */ 2850 if (saveq->iocb.ulpStatus && 2851 (pring->ringno == LPFC_ELS_RING) && 2852 (cmdiocbp->iocb.ulpCommand == 2853 CMD_ELS_REQUEST64_CR)) 2854 lpfc_send_els_failure_event(phba, 2855 cmdiocbp, saveq); 2856 2857 /* 2858 * Post all ELS completions to the worker thread. 2859 * All other are passed to the completion callback. 2860 */ 2861 if (pring->ringno == LPFC_ELS_RING) { 2862 if ((phba->sli_rev < LPFC_SLI_REV4) && 2863 (cmdiocbp->iocb_flag & 2864 LPFC_DRIVER_ABORTED)) { 2865 spin_lock_irqsave(&phba->hbalock, 2866 iflag); 2867 cmdiocbp->iocb_flag &= 2868 ~LPFC_DRIVER_ABORTED; 2869 spin_unlock_irqrestore(&phba->hbalock, 2870 iflag); 2871 saveq->iocb.ulpStatus = 2872 IOSTAT_LOCAL_REJECT; 2873 saveq->iocb.un.ulpWord[4] = 2874 IOERR_SLI_ABORTED; 2875 2876 /* Firmware could still be in progress 2877 * of DMAing payload, so don't free data 2878 * buffer till after a hbeat. 2879 */ 2880 spin_lock_irqsave(&phba->hbalock, 2881 iflag); 2882 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2883 spin_unlock_irqrestore(&phba->hbalock, 2884 iflag); 2885 } 2886 if (phba->sli_rev == LPFC_SLI_REV4) { 2887 if (saveq->iocb_flag & 2888 LPFC_EXCHANGE_BUSY) { 2889 /* Set cmdiocb flag for the 2890 * exchange busy so sgl (xri) 2891 * will not be released until 2892 * the abort xri is received 2893 * from hba. 2894 */ 2895 spin_lock_irqsave( 2896 &phba->hbalock, iflag); 2897 cmdiocbp->iocb_flag |= 2898 LPFC_EXCHANGE_BUSY; 2899 spin_unlock_irqrestore( 2900 &phba->hbalock, iflag); 2901 } 2902 if (cmdiocbp->iocb_flag & 2903 LPFC_DRIVER_ABORTED) { 2904 /* 2905 * Clear LPFC_DRIVER_ABORTED 2906 * bit in case it was driver 2907 * initiated abort. 2908 */ 2909 spin_lock_irqsave( 2910 &phba->hbalock, iflag); 2911 cmdiocbp->iocb_flag &= 2912 ~LPFC_DRIVER_ABORTED; 2913 spin_unlock_irqrestore( 2914 &phba->hbalock, iflag); 2915 cmdiocbp->iocb.ulpStatus = 2916 IOSTAT_LOCAL_REJECT; 2917 cmdiocbp->iocb.un.ulpWord[4] = 2918 IOERR_ABORT_REQUESTED; 2919 /* 2920 * For SLI4, irsiocb contains 2921 * NO_XRI in sli_xritag, it 2922 * shall not affect releasing 2923 * sgl (xri) process. 2924 */ 2925 saveq->iocb.ulpStatus = 2926 IOSTAT_LOCAL_REJECT; 2927 saveq->iocb.un.ulpWord[4] = 2928 IOERR_SLI_ABORTED; 2929 spin_lock_irqsave( 2930 &phba->hbalock, iflag); 2931 saveq->iocb_flag |= 2932 LPFC_DELAY_MEM_FREE; 2933 spin_unlock_irqrestore( 2934 &phba->hbalock, iflag); 2935 } 2936 } 2937 } 2938 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2939 } else 2940 lpfc_sli_release_iocbq(phba, cmdiocbp); 2941 } else { 2942 /* 2943 * Unknown initiating command based on the response iotag. 2944 * This could be the case on the ELS ring because of 2945 * lpfc_els_abort(). 
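	 * Non-ELS rings treat this as unexpected and log message 0322;
	 * on the ELS ring it is tolerated silently, since lpfc_els_abort()
	 * may already have completed and reclaimed the originating command.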
2946 */ 2947 if (pring->ringno != LPFC_ELS_RING) { 2948 /* 2949 * Ring <ringno> handler: unexpected completion IoTag 2950 * <IoTag> 2951 */ 2952 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2953 "0322 Ring %d handler: " 2954 "unexpected completion IoTag x%x " 2955 "Data: x%x x%x x%x x%x\n", 2956 pring->ringno, 2957 saveq->iocb.ulpIoTag, 2958 saveq->iocb.ulpStatus, 2959 saveq->iocb.un.ulpWord[4], 2960 saveq->iocb.ulpCommand, 2961 saveq->iocb.ulpContext); 2962 } 2963 } 2964 2965 return rc; 2966 } 2967 2968 /** 2969 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2970 * @phba: Pointer to HBA context object. 2971 * @pring: Pointer to driver SLI ring object. 2972 * 2973 * This function is called from the iocb ring event handlers when 2974 * put pointer is ahead of the get pointer for a ring. This function signal 2975 * an error attention condition to the worker thread and the worker 2976 * thread will transition the HBA to offline state. 2977 **/ 2978 static void 2979 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2980 { 2981 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2982 /* 2983 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2984 * rsp ring <portRspMax> 2985 */ 2986 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2987 "0312 Ring %d handler: portRspPut %d " 2988 "is bigger than rsp ring %d\n", 2989 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2990 pring->sli.sli3.numRiocb); 2991 2992 phba->link_state = LPFC_HBA_ERROR; 2993 2994 /* 2995 * All error attention handlers are posted to 2996 * worker thread 2997 */ 2998 phba->work_ha |= HA_ERATT; 2999 phba->work_hs = HS_FFER3; 3000 3001 lpfc_worker_wake_up(phba); 3002 3003 return; 3004 } 3005 3006 /** 3007 * lpfc_poll_eratt - Error attention polling timer timeout handler 3008 * @ptr: Pointer to address of HBA context object. 3009 * 3010 * This function is invoked by the Error Attention polling timer when the 3011 * timer times out. It will check the SLI Error Attention register for 3012 * possible attention events. If so, it will post an Error Attention event 3013 * and wake up worker thread to process it. Otherwise, it will set up the 3014 * Error Attention polling timer for the next poll. 3015 **/ 3016 void lpfc_poll_eratt(struct timer_list *t) 3017 { 3018 struct lpfc_hba *phba; 3019 uint32_t eratt = 0; 3020 uint64_t sli_intr, cnt; 3021 3022 phba = from_timer(phba, t, eratt_poll); 3023 3024 /* Here we will also keep track of interrupts per sec of the hba */ 3025 sli_intr = phba->sli.slistat.sli_intr; 3026 3027 if (phba->sli.slistat.sli_prev_intr > sli_intr) 3028 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 3029 sli_intr); 3030 else 3031 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 3032 3033 /* 64-bit integer division not supported on 32-bit x86 - use do_div */ 3034 do_div(cnt, phba->eratt_poll_interval); 3035 phba->sli.slistat.sli_ips = cnt; 3036 3037 phba->sli.slistat.sli_prev_intr = sli_intr; 3038 3039 /* Check chip HA register for error event */ 3040 eratt = lpfc_sli_check_eratt(phba); 3041 3042 if (eratt) 3043 /* Tell the worker thread there is work to do */ 3044 lpfc_worker_wake_up(phba); 3045 else 3046 /* Restart the timer for next eratt poll */ 3047 mod_timer(&phba->eratt_poll, 3048 jiffies + 3049 msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 3050 return; 3051 } 3052 3053 3054 /** 3055 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 3056 * @phba: Pointer to HBA context object. 
3057 * @pring: Pointer to driver SLI ring object. 3058 * @mask: Host attention register mask for this ring. 3059 * 3060 * This function is called from the interrupt context when there is a ring 3061 * event for the fcp ring. The caller does not hold any lock. 3062 * The function processes each response iocb in the response ring until it 3063 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 3064 * LE bit set. The function will call the completion handler of the command iocb 3065 * if the response iocb indicates a completion for a command iocb or it is 3066 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 3067 * function if this is an unsolicited iocb. 3068 * This routine presumes LPFC_FCP_RING handling and doesn't bother 3069 * to check it explicitly. 3070 */ 3071 int 3072 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 3073 struct lpfc_sli_ring *pring, uint32_t mask) 3074 { 3075 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 3076 IOCB_t *irsp = NULL; 3077 IOCB_t *entry = NULL; 3078 struct lpfc_iocbq *cmdiocbq = NULL; 3079 struct lpfc_iocbq rspiocbq; 3080 uint32_t status; 3081 uint32_t portRspPut, portRspMax; 3082 int rc = 1; 3083 lpfc_iocb_type type; 3084 unsigned long iflag; 3085 uint32_t rsp_cmpl = 0; 3086 3087 spin_lock_irqsave(&phba->hbalock, iflag); 3088 pring->stats.iocb_event++; 3089 3090 /* 3091 * The next available response entry should never exceed the maximum 3092 * entries. If it does, treat it as an adapter hardware error. 3093 */ 3094 portRspMax = pring->sli.sli3.numRiocb; 3095 portRspPut = le32_to_cpu(pgp->rspPutInx); 3096 if (unlikely(portRspPut >= portRspMax)) { 3097 lpfc_sli_rsp_pointers_error(phba, pring); 3098 spin_unlock_irqrestore(&phba->hbalock, iflag); 3099 return 1; 3100 } 3101 if (phba->fcp_ring_in_use) { 3102 spin_unlock_irqrestore(&phba->hbalock, iflag); 3103 return 1; 3104 } else 3105 phba->fcp_ring_in_use = 1; 3106 3107 rmb(); 3108 while (pring->sli.sli3.rspidx != portRspPut) { 3109 /* 3110 * Fetch an entry off the ring and copy it into a local data 3111 * structure. The copy involves a byte-swap since the 3112 * network byte order and pci byte orders are different. 3113 */ 3114 entry = lpfc_resp_iocb(phba, pring); 3115 phba->last_completion_time = jiffies; 3116 3117 if (++pring->sli.sli3.rspidx >= portRspMax) 3118 pring->sli.sli3.rspidx = 0; 3119 3120 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3121 (uint32_t *) &rspiocbq.iocb, 3122 phba->iocb_rsp_size); 3123 INIT_LIST_HEAD(&(rspiocbq.list)); 3124 irsp = &rspiocbq.iocb; 3125 3126 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3127 pring->stats.iocb_rsp++; 3128 rsp_cmpl++; 3129 3130 if (unlikely(irsp->ulpStatus)) { 3131 /* 3132 * If resource errors reported from HBA, reduce 3133 * queuedepths of the SCSI device. 
3134 */ 3135 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3136 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3137 IOERR_NO_RESOURCES)) { 3138 spin_unlock_irqrestore(&phba->hbalock, iflag); 3139 phba->lpfc_rampdown_queue_depth(phba); 3140 spin_lock_irqsave(&phba->hbalock, iflag); 3141 } 3142 3143 /* Rsp ring <ringno> error: IOCB */ 3144 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3145 "0336 Rsp Ring %d error: IOCB Data: " 3146 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3147 pring->ringno, 3148 irsp->un.ulpWord[0], 3149 irsp->un.ulpWord[1], 3150 irsp->un.ulpWord[2], 3151 irsp->un.ulpWord[3], 3152 irsp->un.ulpWord[4], 3153 irsp->un.ulpWord[5], 3154 *(uint32_t *)&irsp->un1, 3155 *((uint32_t *)&irsp->un1 + 1)); 3156 } 3157 3158 switch (type) { 3159 case LPFC_ABORT_IOCB: 3160 case LPFC_SOL_IOCB: 3161 /* 3162 * Idle exchange closed via ABTS from port. No iocb 3163 * resources need to be recovered. 3164 */ 3165 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3166 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3167 "0333 IOCB cmd 0x%x" 3168 " processed. Skipping" 3169 " completion\n", 3170 irsp->ulpCommand); 3171 break; 3172 } 3173 3174 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3175 &rspiocbq); 3176 if (unlikely(!cmdiocbq)) 3177 break; 3178 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3179 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3180 if (cmdiocbq->iocb_cmpl) { 3181 spin_unlock_irqrestore(&phba->hbalock, iflag); 3182 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3183 &rspiocbq); 3184 spin_lock_irqsave(&phba->hbalock, iflag); 3185 } 3186 break; 3187 case LPFC_UNSOL_IOCB: 3188 spin_unlock_irqrestore(&phba->hbalock, iflag); 3189 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3190 spin_lock_irqsave(&phba->hbalock, iflag); 3191 break; 3192 default: 3193 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3194 char adaptermsg[LPFC_MAX_ADPTMSG]; 3195 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3196 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3197 MAX_MSG_DATA); 3198 dev_warn(&((phba->pcidev)->dev), 3199 "lpfc%d: %s\n", 3200 phba->brd_no, adaptermsg); 3201 } else { 3202 /* Unknown IOCB command */ 3203 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3204 "0334 Unknown IOCB command " 3205 "Data: x%x, x%x x%x x%x x%x\n", 3206 type, irsp->ulpCommand, 3207 irsp->ulpStatus, 3208 irsp->ulpIoTag, 3209 irsp->ulpContext); 3210 } 3211 break; 3212 } 3213 3214 /* 3215 * The response IOCB has been processed. Update the ring 3216 * pointer in SLIM. If the port response put pointer has not 3217 * been updated, sync the pgp->rspPutInx and fetch the new port 3218 * response put pointer. 
3219 */ 3220 writel(pring->sli.sli3.rspidx, 3221 &phba->host_gp[pring->ringno].rspGetInx); 3222 3223 if (pring->sli.sli3.rspidx == portRspPut) 3224 portRspPut = le32_to_cpu(pgp->rspPutInx); 3225 } 3226 3227 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3228 pring->stats.iocb_rsp_full++; 3229 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3230 writel(status, phba->CAregaddr); 3231 readl(phba->CAregaddr); 3232 } 3233 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3234 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3235 pring->stats.iocb_cmd_empty++; 3236 3237 /* Force update of the local copy of cmdGetInx */ 3238 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3239 lpfc_sli_resume_iocb(phba, pring); 3240 3241 if ((pring->lpfc_sli_cmd_available)) 3242 (pring->lpfc_sli_cmd_available) (phba, pring); 3243 3244 } 3245 3246 phba->fcp_ring_in_use = 0; 3247 spin_unlock_irqrestore(&phba->hbalock, iflag); 3248 return rc; 3249 } 3250 3251 /** 3252 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3253 * @phba: Pointer to HBA context object. 3254 * @pring: Pointer to driver SLI ring object. 3255 * @rspiocbp: Pointer to driver response IOCB object. 3256 * 3257 * This function is called from the worker thread when there is a slow-path 3258 * response IOCB to process. This function chains all the response iocbs until 3259 * seeing the iocb with the LE bit set. The function will call 3260 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3261 * completion of a command iocb. The function will call the 3262 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3263 * The function frees the resources or calls the completion handler if this 3264 * iocb is an abort completion. The function returns NULL when the response 3265 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3266 * this function shall chain the iocb on to the iocb_continueq and return the 3267 * response iocb passed in. 3268 **/ 3269 static struct lpfc_iocbq * 3270 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3271 struct lpfc_iocbq *rspiocbp) 3272 { 3273 struct lpfc_iocbq *saveq; 3274 struct lpfc_iocbq *cmdiocbp; 3275 struct lpfc_iocbq *next_iocb; 3276 IOCB_t *irsp = NULL; 3277 uint32_t free_saveq; 3278 uint8_t iocb_cmd_type; 3279 lpfc_iocb_type type; 3280 unsigned long iflag; 3281 int rc; 3282 3283 spin_lock_irqsave(&phba->hbalock, iflag); 3284 /* First add the response iocb to the countinueq list */ 3285 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3286 pring->iocb_continueq_cnt++; 3287 3288 /* Now, determine whether the list is completed for processing */ 3289 irsp = &rspiocbp->iocb; 3290 if (irsp->ulpLe) { 3291 /* 3292 * By default, the driver expects to free all resources 3293 * associated with this iocb completion. 3294 */ 3295 free_saveq = 1; 3296 saveq = list_get_first(&pring->iocb_continueq, 3297 struct lpfc_iocbq, list); 3298 irsp = &(saveq->iocb); 3299 list_del_init(&pring->iocb_continueq); 3300 pring->iocb_continueq_cnt = 0; 3301 3302 pring->stats.iocb_rsp++; 3303 3304 /* 3305 * If resource errors reported from HBA, reduce 3306 * queuedepths of the SCSI device. 
3307 */ 3308 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3309 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3310 IOERR_NO_RESOURCES)) { 3311 spin_unlock_irqrestore(&phba->hbalock, iflag); 3312 phba->lpfc_rampdown_queue_depth(phba); 3313 spin_lock_irqsave(&phba->hbalock, iflag); 3314 } 3315 3316 if (irsp->ulpStatus) { 3317 /* Rsp ring <ringno> error: IOCB */ 3318 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3319 "0328 Rsp Ring %d error: " 3320 "IOCB Data: " 3321 "x%x x%x x%x x%x " 3322 "x%x x%x x%x x%x " 3323 "x%x x%x x%x x%x " 3324 "x%x x%x x%x x%x\n", 3325 pring->ringno, 3326 irsp->un.ulpWord[0], 3327 irsp->un.ulpWord[1], 3328 irsp->un.ulpWord[2], 3329 irsp->un.ulpWord[3], 3330 irsp->un.ulpWord[4], 3331 irsp->un.ulpWord[5], 3332 *(((uint32_t *) irsp) + 6), 3333 *(((uint32_t *) irsp) + 7), 3334 *(((uint32_t *) irsp) + 8), 3335 *(((uint32_t *) irsp) + 9), 3336 *(((uint32_t *) irsp) + 10), 3337 *(((uint32_t *) irsp) + 11), 3338 *(((uint32_t *) irsp) + 12), 3339 *(((uint32_t *) irsp) + 13), 3340 *(((uint32_t *) irsp) + 14), 3341 *(((uint32_t *) irsp) + 15)); 3342 } 3343 3344 /* 3345 * Fetch the IOCB command type and call the correct completion 3346 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3347 * get freed back to the lpfc_iocb_list by the discovery 3348 * kernel thread. 3349 */ 3350 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3351 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3352 switch (type) { 3353 case LPFC_SOL_IOCB: 3354 spin_unlock_irqrestore(&phba->hbalock, iflag); 3355 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3356 spin_lock_irqsave(&phba->hbalock, iflag); 3357 break; 3358 3359 case LPFC_UNSOL_IOCB: 3360 spin_unlock_irqrestore(&phba->hbalock, iflag); 3361 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3362 spin_lock_irqsave(&phba->hbalock, iflag); 3363 if (!rc) 3364 free_saveq = 0; 3365 break; 3366 3367 case LPFC_ABORT_IOCB: 3368 cmdiocbp = NULL; 3369 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3370 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3371 saveq); 3372 if (cmdiocbp) { 3373 /* Call the specified completion routine */ 3374 if (cmdiocbp->iocb_cmpl) { 3375 spin_unlock_irqrestore(&phba->hbalock, 3376 iflag); 3377 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3378 saveq); 3379 spin_lock_irqsave(&phba->hbalock, 3380 iflag); 3381 } else 3382 __lpfc_sli_release_iocbq(phba, 3383 cmdiocbp); 3384 } 3385 break; 3386 3387 case LPFC_UNKNOWN_IOCB: 3388 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3389 char adaptermsg[LPFC_MAX_ADPTMSG]; 3390 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3391 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3392 MAX_MSG_DATA); 3393 dev_warn(&((phba->pcidev)->dev), 3394 "lpfc%d: %s\n", 3395 phba->brd_no, adaptermsg); 3396 } else { 3397 /* Unknown IOCB command */ 3398 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3399 "0335 Unknown IOCB " 3400 "command Data: x%x " 3401 "x%x x%x x%x\n", 3402 irsp->ulpCommand, 3403 irsp->ulpStatus, 3404 irsp->ulpIoTag, 3405 irsp->ulpContext); 3406 } 3407 break; 3408 } 3409 3410 if (free_saveq) { 3411 list_for_each_entry_safe(rspiocbp, next_iocb, 3412 &saveq->list, list) { 3413 list_del_init(&rspiocbp->list); 3414 __lpfc_sli_release_iocbq(phba, rspiocbp); 3415 } 3416 __lpfc_sli_release_iocbq(phba, saveq); 3417 } 3418 rspiocbp = NULL; 3419 } 3420 spin_unlock_irqrestore(&phba->hbalock, iflag); 3421 return rspiocbp; 3422 } 3423 3424 /** 3425 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3426 * @phba: Pointer to HBA context object. 3427 * @pring: Pointer to driver SLI ring object. 
3428 * @mask: Host attention register mask for this ring. 3429 * 3430 * This routine wraps the actual slow_ring event process routine from the 3431 * API jump table function pointer from the lpfc_hba struct. 3432 **/ 3433 void 3434 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3435 struct lpfc_sli_ring *pring, uint32_t mask) 3436 { 3437 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3438 } 3439 3440 /** 3441 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3442 * @phba: Pointer to HBA context object. 3443 * @pring: Pointer to driver SLI ring object. 3444 * @mask: Host attention register mask for this ring. 3445 * 3446 * This function is called from the worker thread when there is a ring event 3447 * for non-fcp rings. The caller does not hold any lock. The function will 3448 * remove each response iocb in the response ring and calls the handle 3449 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3450 **/ 3451 static void 3452 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3453 struct lpfc_sli_ring *pring, uint32_t mask) 3454 { 3455 struct lpfc_pgp *pgp; 3456 IOCB_t *entry; 3457 IOCB_t *irsp = NULL; 3458 struct lpfc_iocbq *rspiocbp = NULL; 3459 uint32_t portRspPut, portRspMax; 3460 unsigned long iflag; 3461 uint32_t status; 3462 3463 pgp = &phba->port_gp[pring->ringno]; 3464 spin_lock_irqsave(&phba->hbalock, iflag); 3465 pring->stats.iocb_event++; 3466 3467 /* 3468 * The next available response entry should never exceed the maximum 3469 * entries. If it does, treat it as an adapter hardware error. 3470 */ 3471 portRspMax = pring->sli.sli3.numRiocb; 3472 portRspPut = le32_to_cpu(pgp->rspPutInx); 3473 if (portRspPut >= portRspMax) { 3474 /* 3475 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3476 * rsp ring <portRspMax> 3477 */ 3478 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3479 "0303 Ring %d handler: portRspPut %d " 3480 "is bigger than rsp ring %d\n", 3481 pring->ringno, portRspPut, portRspMax); 3482 3483 phba->link_state = LPFC_HBA_ERROR; 3484 spin_unlock_irqrestore(&phba->hbalock, iflag); 3485 3486 phba->work_hs = HS_FFER3; 3487 lpfc_handle_eratt(phba); 3488 3489 return; 3490 } 3491 3492 rmb(); 3493 while (pring->sli.sli3.rspidx != portRspPut) { 3494 /* 3495 * Build a completion list and call the appropriate handler. 3496 * The process is to get the next available response iocb, get 3497 * a free iocb from the list, copy the response data into the 3498 * free iocb, insert to the continuation list, and update the 3499 * next response index to slim. This process makes response 3500 * iocb's in the ring available to DMA as fast as possible but 3501 * pays a penalty for a copy operation. Since the iocb is 3502 * only 32 bytes, this penalty is considered small relative to 3503 * the PCI reads for register values and a slim write. When 3504 * the ulpLe field is set, the entire Command has been 3505 * received. 3506 */ 3507 entry = lpfc_resp_iocb(phba, pring); 3508 3509 phba->last_completion_time = jiffies; 3510 rspiocbp = __lpfc_sli_get_iocbq(phba); 3511 if (rspiocbp == NULL) { 3512 printk(KERN_ERR "%s: out of buffers! 
Failing " 3513 "completion.\n", __func__); 3514 break; 3515 } 3516 3517 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3518 phba->iocb_rsp_size); 3519 irsp = &rspiocbp->iocb; 3520 3521 if (++pring->sli.sli3.rspidx >= portRspMax) 3522 pring->sli.sli3.rspidx = 0; 3523 3524 if (pring->ringno == LPFC_ELS_RING) { 3525 lpfc_debugfs_slow_ring_trc(phba, 3526 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3527 *(((uint32_t *) irsp) + 4), 3528 *(((uint32_t *) irsp) + 6), 3529 *(((uint32_t *) irsp) + 7)); 3530 } 3531 3532 writel(pring->sli.sli3.rspidx, 3533 &phba->host_gp[pring->ringno].rspGetInx); 3534 3535 spin_unlock_irqrestore(&phba->hbalock, iflag); 3536 /* Handle the response IOCB */ 3537 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3538 spin_lock_irqsave(&phba->hbalock, iflag); 3539 3540 /* 3541 * If the port response put pointer has not been updated, sync 3542 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3543 * response put pointer. 3544 */ 3545 if (pring->sli.sli3.rspidx == portRspPut) { 3546 portRspPut = le32_to_cpu(pgp->rspPutInx); 3547 } 3548 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3549 3550 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3551 /* At least one response entry has been freed */ 3552 pring->stats.iocb_rsp_full++; 3553 /* SET RxRE_RSP in Chip Att register */ 3554 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3555 writel(status, phba->CAregaddr); 3556 readl(phba->CAregaddr); /* flush */ 3557 } 3558 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3559 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3560 pring->stats.iocb_cmd_empty++; 3561 3562 /* Force update of the local copy of cmdGetInx */ 3563 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3564 lpfc_sli_resume_iocb(phba, pring); 3565 3566 if ((pring->lpfc_sli_cmd_available)) 3567 (pring->lpfc_sli_cmd_available) (phba, pring); 3568 3569 } 3570 3571 spin_unlock_irqrestore(&phba->hbalock, iflag); 3572 return; 3573 } 3574 3575 /** 3576 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3577 * @phba: Pointer to HBA context object. 3578 * @pring: Pointer to driver SLI ring object. 3579 * @mask: Host attention register mask for this ring. 3580 * 3581 * This function is called from the worker thread when there is a pending 3582 * ELS response iocb on the driver internal slow-path response iocb worker 3583 * queue. The caller does not hold any lock. The function will remove each 3584 * response iocb from the response worker queue and calls the handle 3585 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 
3586 **/ 3587 static void 3588 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3589 struct lpfc_sli_ring *pring, uint32_t mask) 3590 { 3591 struct lpfc_iocbq *irspiocbq; 3592 struct hbq_dmabuf *dmabuf; 3593 struct lpfc_cq_event *cq_event; 3594 unsigned long iflag; 3595 3596 spin_lock_irqsave(&phba->hbalock, iflag); 3597 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3598 spin_unlock_irqrestore(&phba->hbalock, iflag); 3599 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3600 /* Get the response iocb from the head of work queue */ 3601 spin_lock_irqsave(&phba->hbalock, iflag); 3602 list_remove_head(&phba->sli4_hba.sp_queue_event, 3603 cq_event, struct lpfc_cq_event, list); 3604 spin_unlock_irqrestore(&phba->hbalock, iflag); 3605 3606 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3607 case CQE_CODE_COMPL_WQE: 3608 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3609 cq_event); 3610 /* Translate ELS WCQE to response IOCBQ */ 3611 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3612 irspiocbq); 3613 if (irspiocbq) 3614 lpfc_sli_sp_handle_rspiocb(phba, pring, 3615 irspiocbq); 3616 break; 3617 case CQE_CODE_RECEIVE: 3618 case CQE_CODE_RECEIVE_V1: 3619 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3620 cq_event); 3621 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3622 break; 3623 default: 3624 break; 3625 } 3626 } 3627 } 3628 3629 /** 3630 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3631 * @phba: Pointer to HBA context object. 3632 * @pring: Pointer to driver SLI ring object. 3633 * 3634 * This function aborts all iocbs in the given ring and frees all the iocb 3635 * objects in txq. This function issues an abort iocb for all the iocb commands 3636 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3637 * the return of this function. The caller is not required to hold any locks. 3638 **/ 3639 void 3640 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3641 { 3642 LIST_HEAD(completions); 3643 struct lpfc_iocbq *iocb, *next_iocb; 3644 3645 if (pring->ringno == LPFC_ELS_RING) { 3646 lpfc_fabric_abort_hba(phba); 3647 } 3648 3649 /* Error everything on txq and txcmplq 3650 * First do the txq. 3651 */ 3652 if (phba->sli_rev >= LPFC_SLI_REV4) { 3653 spin_lock_irq(&pring->ring_lock); 3654 list_splice_init(&pring->txq, &completions); 3655 pring->txq_cnt = 0; 3656 spin_unlock_irq(&pring->ring_lock); 3657 3658 spin_lock_irq(&phba->hbalock); 3659 /* Next issue ABTS for everything on the txcmplq */ 3660 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3661 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3662 spin_unlock_irq(&phba->hbalock); 3663 } else { 3664 spin_lock_irq(&phba->hbalock); 3665 list_splice_init(&pring->txq, &completions); 3666 pring->txq_cnt = 0; 3667 3668 /* Next issue ABTS for everything on the txcmplq */ 3669 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3670 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3671 spin_unlock_irq(&phba->hbalock); 3672 } 3673 3674 /* Cancel all the IOCBs from the completions list */ 3675 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3676 IOERR_SLI_ABORTED); 3677 } 3678 3679 /** 3680 * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring 3681 * @phba: Pointer to HBA context object. 3682 * @pring: Pointer to driver SLI ring object. 3683 * 3684 * This function aborts all iocbs in the given ring and frees all the iocb 3685 * objects in txq. 
This function issues an abort iocb for all the iocb commands 3686 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3687 * the return of this function. The caller is not required to hold any locks. 3688 **/ 3689 void 3690 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3691 { 3692 LIST_HEAD(completions); 3693 struct lpfc_iocbq *iocb, *next_iocb; 3694 3695 if (pring->ringno == LPFC_ELS_RING) 3696 lpfc_fabric_abort_hba(phba); 3697 3698 spin_lock_irq(&phba->hbalock); 3699 /* Next issue ABTS for everything on the txcmplq */ 3700 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3701 lpfc_sli4_abort_nvme_io(phba, pring, iocb); 3702 spin_unlock_irq(&phba->hbalock); 3703 } 3704 3705 3706 /** 3707 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3708 * @phba: Pointer to HBA context object. 3709 * @pring: Pointer to driver SLI ring object. 3710 * 3711 * This function aborts all iocbs in FCP rings and frees all the iocb 3712 * objects in txq. This function issues an abort iocb for all the iocb commands 3713 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3714 * the return of this function. The caller is not required to hold any locks. 3715 **/ 3716 void 3717 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3718 { 3719 struct lpfc_sli *psli = &phba->sli; 3720 struct lpfc_sli_ring *pring; 3721 uint32_t i; 3722 3723 /* Look on all the FCP Rings for the iotag */ 3724 if (phba->sli_rev >= LPFC_SLI_REV4) { 3725 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3726 pring = phba->sli4_hba.fcp_wq[i]->pring; 3727 lpfc_sli_abort_iocb_ring(phba, pring); 3728 } 3729 } else { 3730 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3731 lpfc_sli_abort_iocb_ring(phba, pring); 3732 } 3733 } 3734 3735 /** 3736 * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings 3737 * @phba: Pointer to HBA context object. 3738 * 3739 * This function aborts all wqes in NVME rings. This function issues an 3740 * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in 3741 * the txcmplq is not guaranteed to complete before the return of this 3742 * function. The caller is not required to hold any locks. 3743 **/ 3744 void 3745 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba) 3746 { 3747 struct lpfc_sli_ring *pring; 3748 uint32_t i; 3749 3750 if (phba->sli_rev < LPFC_SLI_REV4) 3751 return; 3752 3753 /* Abort all IO on each NVME ring. */ 3754 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 3755 pring = phba->sli4_hba.nvme_wq[i]->pring; 3756 lpfc_sli_abort_wqe_ring(phba, pring); 3757 } 3758 } 3759 3760 3761 /** 3762 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3763 * @phba: Pointer to HBA context object. 3764 * 3765 * This function flushes all iocbs in the fcp ring and frees all the iocb 3766 * objects in txq and txcmplq. This function will not issue abort iocbs 3767 * for all the iocb commands in txcmplq, they will just be returned with 3768 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3769 * slot has been permanently disabled. 
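 *
 * For each FCP ring the routine splices the txq and txcmplq onto local
 * lists under the appropriate lock and then cancels every entry with
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_DOWN, so the normal completion handlers
 * still run but no abort exchanges are sent to the port.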
3770 **/ 3771 void 3772 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3773 { 3774 LIST_HEAD(txq); 3775 LIST_HEAD(txcmplq); 3776 struct lpfc_sli *psli = &phba->sli; 3777 struct lpfc_sli_ring *pring; 3778 uint32_t i; 3779 3780 spin_lock_irq(&phba->hbalock); 3781 /* Indicate the I/O queues are flushed */ 3782 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3783 spin_unlock_irq(&phba->hbalock); 3784 3785 /* Look on all the FCP Rings for the iotag */ 3786 if (phba->sli_rev >= LPFC_SLI_REV4) { 3787 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3788 pring = phba->sli4_hba.fcp_wq[i]->pring; 3789 3790 spin_lock_irq(&pring->ring_lock); 3791 /* Retrieve everything on txq */ 3792 list_splice_init(&pring->txq, &txq); 3793 /* Retrieve everything on the txcmplq */ 3794 list_splice_init(&pring->txcmplq, &txcmplq); 3795 pring->txq_cnt = 0; 3796 pring->txcmplq_cnt = 0; 3797 spin_unlock_irq(&pring->ring_lock); 3798 3799 /* Flush the txq */ 3800 lpfc_sli_cancel_iocbs(phba, &txq, 3801 IOSTAT_LOCAL_REJECT, 3802 IOERR_SLI_DOWN); 3803 /* Flush the txcmpq */ 3804 lpfc_sli_cancel_iocbs(phba, &txcmplq, 3805 IOSTAT_LOCAL_REJECT, 3806 IOERR_SLI_DOWN); 3807 } 3808 } else { 3809 pring = &psli->sli3_ring[LPFC_FCP_RING]; 3810 3811 spin_lock_irq(&phba->hbalock); 3812 /* Retrieve everything on txq */ 3813 list_splice_init(&pring->txq, &txq); 3814 /* Retrieve everything on the txcmplq */ 3815 list_splice_init(&pring->txcmplq, &txcmplq); 3816 pring->txq_cnt = 0; 3817 pring->txcmplq_cnt = 0; 3818 spin_unlock_irq(&phba->hbalock); 3819 3820 /* Flush the txq */ 3821 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3822 IOERR_SLI_DOWN); 3823 /* Flush the txcmpq */ 3824 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3825 IOERR_SLI_DOWN); 3826 } 3827 } 3828 3829 /** 3830 * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings 3831 * @phba: Pointer to HBA context object. 3832 * 3833 * This function flushes all wqes in the nvme rings and frees all resources 3834 * in the txcmplq. This function does not issue abort wqes for the IO 3835 * commands in txcmplq, they will just be returned with 3836 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3837 * slot has been permanently disabled. 3838 **/ 3839 void 3840 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) 3841 { 3842 LIST_HEAD(txcmplq); 3843 struct lpfc_sli_ring *pring; 3844 uint32_t i; 3845 3846 if (phba->sli_rev < LPFC_SLI_REV4) 3847 return; 3848 3849 /* Hint to other driver operations that a flush is in progress. */ 3850 spin_lock_irq(&phba->hbalock); 3851 phba->hba_flag |= HBA_NVME_IOQ_FLUSH; 3852 spin_unlock_irq(&phba->hbalock); 3853 3854 /* Cycle through all NVME rings and complete each IO with 3855 * a local driver reason code. This is a flush so no 3856 * abort exchange to FW. 3857 */ 3858 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 3859 pring = phba->sli4_hba.nvme_wq[i]->pring; 3860 3861 /* Retrieve everything on the txcmplq */ 3862 spin_lock_irq(&pring->ring_lock); 3863 list_splice_init(&pring->txcmplq, &txcmplq); 3864 pring->txcmplq_cnt = 0; 3865 spin_unlock_irq(&pring->ring_lock); 3866 3867 /* Flush the txcmpq &&&PAE */ 3868 lpfc_sli_cancel_iocbs(phba, &txcmplq, 3869 IOSTAT_LOCAL_REJECT, 3870 IOERR_SLI_DOWN); 3871 } 3872 } 3873 3874 /** 3875 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3876 * @phba: Pointer to HBA context object. 3877 * @mask: Bit mask to be checked. 
3878 * 3879 * This function reads the host status register and compares 3880 * with the provided bit mask to check if HBA completed 3881 * the restart. This function will wait in a loop for the 3882 * HBA to complete restart. If the HBA does not restart within 3883 * 15 iterations, the function will reset the HBA again. The 3884 * function returns 1 when HBA fail to restart otherwise returns 3885 * zero. 3886 **/ 3887 static int 3888 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3889 { 3890 uint32_t status; 3891 int i = 0; 3892 int retval = 0; 3893 3894 /* Read the HBA Host Status Register */ 3895 if (lpfc_readl(phba->HSregaddr, &status)) 3896 return 1; 3897 3898 /* 3899 * Check status register every 100ms for 5 retries, then every 3900 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3901 * every 2.5 sec for 4. 3902 * Break our of the loop if errors occurred during init. 3903 */ 3904 while (((status & mask) != mask) && 3905 !(status & HS_FFERM) && 3906 i++ < 20) { 3907 3908 if (i <= 5) 3909 msleep(10); 3910 else if (i <= 10) 3911 msleep(500); 3912 else 3913 msleep(2500); 3914 3915 if (i == 15) { 3916 /* Do post */ 3917 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3918 lpfc_sli_brdrestart(phba); 3919 } 3920 /* Read the HBA Host Status Register */ 3921 if (lpfc_readl(phba->HSregaddr, &status)) { 3922 retval = 1; 3923 break; 3924 } 3925 } 3926 3927 /* Check to see if any errors occurred during init */ 3928 if ((status & HS_FFERM) || (i >= 20)) { 3929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3930 "2751 Adapter failed to restart, " 3931 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3932 status, 3933 readl(phba->MBslimaddr + 0xa8), 3934 readl(phba->MBslimaddr + 0xac)); 3935 phba->link_state = LPFC_HBA_ERROR; 3936 retval = 1; 3937 } 3938 3939 return retval; 3940 } 3941 3942 /** 3943 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3944 * @phba: Pointer to HBA context object. 3945 * @mask: Bit mask to be checked. 3946 * 3947 * This function checks the host status register to check if HBA is 3948 * ready. This function will wait in a loop for the HBA to be ready 3949 * If the HBA is not ready , the function will will reset the HBA PCI 3950 * function again. The function returns 1 when HBA fail to be ready 3951 * otherwise returns zero. 3952 **/ 3953 static int 3954 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3955 { 3956 uint32_t status; 3957 int retval = 0; 3958 3959 /* Read the HBA Host Status Register */ 3960 status = lpfc_sli4_post_status_check(phba); 3961 3962 if (status) { 3963 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3964 lpfc_sli_brdrestart(phba); 3965 status = lpfc_sli4_post_status_check(phba); 3966 } 3967 3968 /* Check to see if any errors occurred during init */ 3969 if (status) { 3970 phba->link_state = LPFC_HBA_ERROR; 3971 retval = 1; 3972 } else 3973 phba->sli4_hba.intr_enable = 0; 3974 3975 return retval; 3976 } 3977 3978 /** 3979 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3980 * @phba: Pointer to HBA context object. 3981 * @mask: Bit mask to be checked. 3982 * 3983 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3984 * from the API jump table function pointer from the lpfc_hba struct. 
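 *
 * Illustrative call only; the mask shown is chosen purely as an example:
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;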
3985 **/ 3986 int 3987 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3988 { 3989 return phba->lpfc_sli_brdready(phba, mask); 3990 } 3991 3992 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3993 3994 /** 3995 * lpfc_reset_barrier - Make HBA ready for HBA reset 3996 * @phba: Pointer to HBA context object. 3997 * 3998 * This function is called before resetting an HBA. This function is called 3999 * with hbalock held and requests HBA to quiesce DMAs before a reset. 4000 **/ 4001 void lpfc_reset_barrier(struct lpfc_hba *phba) 4002 { 4003 uint32_t __iomem *resp_buf; 4004 uint32_t __iomem *mbox_buf; 4005 volatile uint32_t mbox; 4006 uint32_t hc_copy, ha_copy, resp_data; 4007 int i; 4008 uint8_t hdrtype; 4009 4010 lockdep_assert_held(&phba->hbalock); 4011 4012 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 4013 if (hdrtype != 0x80 || 4014 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 4015 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 4016 return; 4017 4018 /* 4019 * Tell the other part of the chip to suspend temporarily all 4020 * its DMA activity. 4021 */ 4022 resp_buf = phba->MBslimaddr; 4023 4024 /* Disable the error attention */ 4025 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 4026 return; 4027 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 4028 readl(phba->HCregaddr); /* flush */ 4029 phba->link_flag |= LS_IGNORE_ERATT; 4030 4031 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4032 return; 4033 if (ha_copy & HA_ERATT) { 4034 /* Clear Chip error bit */ 4035 writel(HA_ERATT, phba->HAregaddr); 4036 phba->pport->stopped = 1; 4037 } 4038 4039 mbox = 0; 4040 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 4041 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 4042 4043 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 4044 mbox_buf = phba->MBslimaddr; 4045 writel(mbox, mbox_buf); 4046 4047 for (i = 0; i < 50; i++) { 4048 if (lpfc_readl((resp_buf + 1), &resp_data)) 4049 return; 4050 if (resp_data != ~(BARRIER_TEST_PATTERN)) 4051 mdelay(1); 4052 else 4053 break; 4054 } 4055 resp_data = 0; 4056 if (lpfc_readl((resp_buf + 1), &resp_data)) 4057 return; 4058 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 4059 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 4060 phba->pport->stopped) 4061 goto restore_hc; 4062 else 4063 goto clear_errat; 4064 } 4065 4066 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 4067 resp_data = 0; 4068 for (i = 0; i < 500; i++) { 4069 if (lpfc_readl(resp_buf, &resp_data)) 4070 return; 4071 if (resp_data != mbox) 4072 mdelay(1); 4073 else 4074 break; 4075 } 4076 4077 clear_errat: 4078 4079 while (++i < 500) { 4080 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4081 return; 4082 if (!(ha_copy & HA_ERATT)) 4083 mdelay(1); 4084 else 4085 break; 4086 } 4087 4088 if (readl(phba->HAregaddr) & HA_ERATT) { 4089 writel(HA_ERATT, phba->HAregaddr); 4090 phba->pport->stopped = 1; 4091 } 4092 4093 restore_hc: 4094 phba->link_flag &= ~LS_IGNORE_ERATT; 4095 writel(hc_copy, phba->HCregaddr); 4096 readl(phba->HCregaddr); /* flush */ 4097 } 4098 4099 /** 4100 * lpfc_sli_brdkill - Issue a kill_board mailbox command 4101 * @phba: Pointer to HBA context object. 4102 * 4103 * This function issues a kill_board mailbox command and waits for 4104 * the error attention interrupt. This function is called for stopping 4105 * the firmware processing. The caller is not required to hold any 4106 * locks. This function calls lpfc_hba_down_post function to free 4107 * any pending commands after the kill. The function will return 1 when it 4108 * fails to kill the board else will return 0. 
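 *
 * Success is judged by the host attention register: the routine polls
 * HA_ERATT every 100ms for up to 3 seconds after issuing KILL_BOARD and
 * reports failure if the error attention bit never asserts.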
4109 **/ 4110 int 4111 lpfc_sli_brdkill(struct lpfc_hba *phba) 4112 { 4113 struct lpfc_sli *psli; 4114 LPFC_MBOXQ_t *pmb; 4115 uint32_t status; 4116 uint32_t ha_copy; 4117 int retval; 4118 int i = 0; 4119 4120 psli = &phba->sli; 4121 4122 /* Kill HBA */ 4123 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4124 "0329 Kill HBA Data: x%x x%x\n", 4125 phba->pport->port_state, psli->sli_flag); 4126 4127 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4128 if (!pmb) 4129 return 1; 4130 4131 /* Disable the error attention */ 4132 spin_lock_irq(&phba->hbalock); 4133 if (lpfc_readl(phba->HCregaddr, &status)) { 4134 spin_unlock_irq(&phba->hbalock); 4135 mempool_free(pmb, phba->mbox_mem_pool); 4136 return 1; 4137 } 4138 status &= ~HC_ERINT_ENA; 4139 writel(status, phba->HCregaddr); 4140 readl(phba->HCregaddr); /* flush */ 4141 phba->link_flag |= LS_IGNORE_ERATT; 4142 spin_unlock_irq(&phba->hbalock); 4143 4144 lpfc_kill_board(phba, pmb); 4145 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 4146 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 4147 4148 if (retval != MBX_SUCCESS) { 4149 if (retval != MBX_BUSY) 4150 mempool_free(pmb, phba->mbox_mem_pool); 4151 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4152 "2752 KILL_BOARD command failed retval %d\n", 4153 retval); 4154 spin_lock_irq(&phba->hbalock); 4155 phba->link_flag &= ~LS_IGNORE_ERATT; 4156 spin_unlock_irq(&phba->hbalock); 4157 return 1; 4158 } 4159 4160 spin_lock_irq(&phba->hbalock); 4161 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 4162 spin_unlock_irq(&phba->hbalock); 4163 4164 mempool_free(pmb, phba->mbox_mem_pool); 4165 4166 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 4167 * attention every 100ms for 3 seconds. If we don't get ERATT after 4168 * 3 seconds we still set HBA_ERROR state because the status of the 4169 * board is now undefined. 4170 */ 4171 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4172 return 1; 4173 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 4174 mdelay(100); 4175 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 4176 return 1; 4177 } 4178 4179 del_timer_sync(&psli->mbox_tmo); 4180 if (ha_copy & HA_ERATT) { 4181 writel(HA_ERATT, phba->HAregaddr); 4182 phba->pport->stopped = 1; 4183 } 4184 spin_lock_irq(&phba->hbalock); 4185 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4186 psli->mbox_active = NULL; 4187 phba->link_flag &= ~LS_IGNORE_ERATT; 4188 spin_unlock_irq(&phba->hbalock); 4189 4190 lpfc_hba_down_post(phba); 4191 phba->link_state = LPFC_HBA_ERROR; 4192 4193 return ha_copy & HA_ERATT ? 0 : 1; 4194 } 4195 4196 /** 4197 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 4198 * @phba: Pointer to HBA context object. 4199 * 4200 * This function resets the HBA by writing HC_INITFF to the control 4201 * register. After the HBA resets, this function resets all the iocb ring 4202 * indices. This function disables PCI layer parity checking during 4203 * the reset. 4204 * This function returns 0 always. 4205 * The caller is not required to hold any locks. 4206 **/ 4207 int 4208 lpfc_sli_brdreset(struct lpfc_hba *phba) 4209 { 4210 struct lpfc_sli *psli; 4211 struct lpfc_sli_ring *pring; 4212 uint16_t cfg_value; 4213 int i; 4214 4215 psli = &phba->sli; 4216 4217 /* Reset HBA */ 4218 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4219 "0325 Reset HBA Data: x%x x%x\n", 4220 (phba->pport) ? 
phba->pport->port_state : 0, 4221 psli->sli_flag); 4222 4223 /* perform board reset */ 4224 phba->fc_eventTag = 0; 4225 phba->link_events = 0; 4226 if (phba->pport) { 4227 phba->pport->fc_myDID = 0; 4228 phba->pport->fc_prevDID = 0; 4229 } 4230 4231 /* Turn off parity checking and serr during the physical reset */ 4232 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4233 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4234 (cfg_value & 4235 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4236 4237 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4238 4239 /* Now toggle INITFF bit in the Host Control Register */ 4240 writel(HC_INITFF, phba->HCregaddr); 4241 mdelay(1); 4242 readl(phba->HCregaddr); /* flush */ 4243 writel(0, phba->HCregaddr); 4244 readl(phba->HCregaddr); /* flush */ 4245 4246 /* Restore PCI cmd register */ 4247 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4248 4249 /* Initialize relevant SLI info */ 4250 for (i = 0; i < psli->num_rings; i++) { 4251 pring = &psli->sli3_ring[i]; 4252 pring->flag = 0; 4253 pring->sli.sli3.rspidx = 0; 4254 pring->sli.sli3.next_cmdidx = 0; 4255 pring->sli.sli3.local_getidx = 0; 4256 pring->sli.sli3.cmdidx = 0; 4257 pring->missbufcnt = 0; 4258 } 4259 4260 phba->link_state = LPFC_WARM_START; 4261 return 0; 4262 } 4263 4264 /** 4265 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4266 * @phba: Pointer to HBA context object. 4267 * 4268 * This function resets a SLI4 HBA. This function disables PCI layer parity 4269 * checking during resets the device. The caller is not required to hold 4270 * any locks. 4271 * 4272 * This function returns 0 always. 4273 **/ 4274 int 4275 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4276 { 4277 struct lpfc_sli *psli = &phba->sli; 4278 uint16_t cfg_value; 4279 int rc = 0; 4280 4281 /* Reset HBA */ 4282 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4283 "0295 Reset HBA Data: x%x x%x x%x\n", 4284 phba->pport->port_state, psli->sli_flag, 4285 phba->hba_flag); 4286 4287 /* perform board reset */ 4288 phba->fc_eventTag = 0; 4289 phba->link_events = 0; 4290 phba->pport->fc_myDID = 0; 4291 phba->pport->fc_prevDID = 0; 4292 4293 spin_lock_irq(&phba->hbalock); 4294 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4295 phba->fcf.fcf_flag = 0; 4296 spin_unlock_irq(&phba->hbalock); 4297 4298 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4299 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4300 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4301 return rc; 4302 } 4303 4304 /* Now physically reset the device */ 4305 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4306 "0389 Performing PCI function reset!\n"); 4307 4308 /* Turn off parity checking and serr during the physical reset */ 4309 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4310 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4311 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4312 4313 /* Perform FCoE PCI function reset before freeing queue memory */ 4314 rc = lpfc_pci_function_reset(phba); 4315 4316 /* Restore PCI cmd register */ 4317 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4318 4319 return rc; 4320 } 4321 4322 /** 4323 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4324 * @phba: Pointer to HBA context object. 4325 * 4326 * This function is called in the SLI initialization code path to 4327 * restart the HBA. The caller is not required to hold any lock. 4328 * This function writes MBX_RESTART mailbox command to the SLIM and 4329 * resets the HBA. 
At the end of the function, it calls lpfc_hba_down_post 4330 * function to free any pending commands. The function enables 4331 * POST only during the first initialization. The function returns zero. 4332 * The function does not guarantee completion of MBX_RESTART mailbox 4333 * command before the return of this function. 4334 **/ 4335 static int 4336 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4337 { 4338 MAILBOX_t *mb; 4339 struct lpfc_sli *psli; 4340 volatile uint32_t word0; 4341 void __iomem *to_slim; 4342 uint32_t hba_aer_enabled; 4343 4344 spin_lock_irq(&phba->hbalock); 4345 4346 /* Take PCIe device Advanced Error Reporting (AER) state */ 4347 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4348 4349 psli = &phba->sli; 4350 4351 /* Restart HBA */ 4352 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4353 "0337 Restart HBA Data: x%x x%x\n", 4354 (phba->pport) ? phba->pport->port_state : 0, 4355 psli->sli_flag); 4356 4357 word0 = 0; 4358 mb = (MAILBOX_t *) &word0; 4359 mb->mbxCommand = MBX_RESTART; 4360 mb->mbxHc = 1; 4361 4362 lpfc_reset_barrier(phba); 4363 4364 to_slim = phba->MBslimaddr; 4365 writel(*(uint32_t *) mb, to_slim); 4366 readl(to_slim); /* flush */ 4367 4368 /* Only skip post after fc_ffinit is completed */ 4369 if (phba->pport && phba->pport->port_state) 4370 word0 = 1; /* This is really setting up word1 */ 4371 else 4372 word0 = 0; /* This is really setting up word1 */ 4373 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4374 writel(*(uint32_t *) mb, to_slim); 4375 readl(to_slim); /* flush */ 4376 4377 lpfc_sli_brdreset(phba); 4378 if (phba->pport) 4379 phba->pport->stopped = 0; 4380 phba->link_state = LPFC_INIT_START; 4381 phba->hba_flag = 0; 4382 spin_unlock_irq(&phba->hbalock); 4383 4384 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4385 psli->stats_start = get_seconds(); 4386 4387 /* Give the INITFF and Post time to settle. */ 4388 mdelay(100); 4389 4390 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4391 if (hba_aer_enabled) 4392 pci_disable_pcie_error_reporting(phba->pcidev); 4393 4394 lpfc_hba_down_post(phba); 4395 4396 return 0; 4397 } 4398 4399 /** 4400 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4401 * @phba: Pointer to HBA context object. 4402 * 4403 * This function is called in the SLI initialization code path to restart 4404 * a SLI4 HBA. The caller is not required to hold any lock. 4405 * At the end of the function, it calls lpfc_hba_down_post function to 4406 * free any pending commands. 
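 *
 * The routine also tears down the SLI4 queue set with
 * lpfc_sli4_queue_destroy() and returns the status of the PCI function
 * reset performed by lpfc_sli4_brdreset().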
4407 **/ 4408 static int 4409 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4410 { 4411 struct lpfc_sli *psli = &phba->sli; 4412 uint32_t hba_aer_enabled; 4413 int rc; 4414 4415 /* Restart HBA */ 4416 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4417 "0296 Restart HBA Data: x%x x%x\n", 4418 phba->pport->port_state, psli->sli_flag); 4419 4420 /* Take PCIe device Advanced Error Reporting (AER) state */ 4421 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4422 4423 rc = lpfc_sli4_brdreset(phba); 4424 4425 spin_lock_irq(&phba->hbalock); 4426 phba->pport->stopped = 0; 4427 phba->link_state = LPFC_INIT_START; 4428 phba->hba_flag = 0; 4429 spin_unlock_irq(&phba->hbalock); 4430 4431 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4432 psli->stats_start = get_seconds(); 4433 4434 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4435 if (hba_aer_enabled) 4436 pci_disable_pcie_error_reporting(phba->pcidev); 4437 4438 lpfc_hba_down_post(phba); 4439 lpfc_sli4_queue_destroy(phba); 4440 4441 return rc; 4442 } 4443 4444 /** 4445 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4446 * @phba: Pointer to HBA context object. 4447 * 4448 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4449 * API jump table function pointer from the lpfc_hba struct. 4450 **/ 4451 int 4452 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4453 { 4454 return phba->lpfc_sli_brdrestart(phba); 4455 } 4456 4457 /** 4458 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4459 * @phba: Pointer to HBA context object. 4460 * 4461 * This function is called after a HBA restart to wait for successful 4462 * restart of the HBA. Successful restart of the HBA is indicated by 4463 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4464 * iteration, the function will restart the HBA again. The function returns 4465 * zero if HBA successfully restarted else returns negative error code. 4466 **/ 4467 int 4468 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4469 { 4470 uint32_t status, i = 0; 4471 4472 /* Read the HBA Host Status Register */ 4473 if (lpfc_readl(phba->HSregaddr, &status)) 4474 return -EIO; 4475 4476 /* Check status register to see what current state is */ 4477 i = 0; 4478 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4479 4480 /* Check every 10ms for 10 retries, then every 100ms for 90 4481 * retries, then every 1 sec for 50 retires for a total of 4482 * ~60 seconds before reset the board again and check every 4483 * 1 sec for 50 retries. The up to 60 seconds before the 4484 * board ready is required by the Falcon FIPS zeroization 4485 * complete, and any reset the board in between shall cause 4486 * restart of zeroization, further delay the board ready. 
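		 * In short: poll at 10ms, then 100ms, then 1 second
		 * intervals, issue a single board restart at iteration 150
		 * (roughly the 60 second mark) and give up after 200
		 * iterations.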
4487 */ 4488 if (i++ >= 200) { 4489 /* Adapter failed to init, timeout, status reg 4490 <status> */ 4491 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4492 "0436 Adapter failed to init, " 4493 "timeout, status reg x%x, " 4494 "FW Data: A8 x%x AC x%x\n", status, 4495 readl(phba->MBslimaddr + 0xa8), 4496 readl(phba->MBslimaddr + 0xac)); 4497 phba->link_state = LPFC_HBA_ERROR; 4498 return -ETIMEDOUT; 4499 } 4500 4501 /* Check to see if any errors occurred during init */ 4502 if (status & HS_FFERM) { 4503 /* ERROR: During chipset initialization */ 4504 /* Adapter failed to init, chipset, status reg 4505 <status> */ 4506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4507 "0437 Adapter failed to init, " 4508 "chipset, status reg x%x, " 4509 "FW Data: A8 x%x AC x%x\n", status, 4510 readl(phba->MBslimaddr + 0xa8), 4511 readl(phba->MBslimaddr + 0xac)); 4512 phba->link_state = LPFC_HBA_ERROR; 4513 return -EIO; 4514 } 4515 4516 if (i <= 10) 4517 msleep(10); 4518 else if (i <= 100) 4519 msleep(100); 4520 else 4521 msleep(1000); 4522 4523 if (i == 150) { 4524 /* Do post */ 4525 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4526 lpfc_sli_brdrestart(phba); 4527 } 4528 /* Read the HBA Host Status Register */ 4529 if (lpfc_readl(phba->HSregaddr, &status)) 4530 return -EIO; 4531 } 4532 4533 /* Check to see if any errors occurred during init */ 4534 if (status & HS_FFERM) { 4535 /* ERROR: During chipset initialization */ 4536 /* Adapter failed to init, chipset, status reg <status> */ 4537 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4538 "0438 Adapter failed to init, chipset, " 4539 "status reg x%x, " 4540 "FW Data: A8 x%x AC x%x\n", status, 4541 readl(phba->MBslimaddr + 0xa8), 4542 readl(phba->MBslimaddr + 0xac)); 4543 phba->link_state = LPFC_HBA_ERROR; 4544 return -EIO; 4545 } 4546 4547 /* Clear all interrupt enable conditions */ 4548 writel(0, phba->HCregaddr); 4549 readl(phba->HCregaddr); /* flush */ 4550 4551 /* setup host attn register */ 4552 writel(0xffffffff, phba->HAregaddr); 4553 readl(phba->HAregaddr); /* flush */ 4554 return 0; 4555 } 4556 4557 /** 4558 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4559 * 4560 * This function calculates and returns the number of HBQs required to be 4561 * configured. 4562 **/ 4563 int 4564 lpfc_sli_hbq_count(void) 4565 { 4566 return ARRAY_SIZE(lpfc_hbq_defs); 4567 } 4568 4569 /** 4570 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4571 * 4572 * This function adds the number of hbq entries in every HBQ to get 4573 * the total number of hbq entries required for the HBA and returns 4574 * the total count. 4575 **/ 4576 static int 4577 lpfc_sli_hbq_entry_count(void) 4578 { 4579 int hbq_count = lpfc_sli_hbq_count(); 4580 int count = 0; 4581 int i; 4582 4583 for (i = 0; i < hbq_count; ++i) 4584 count += lpfc_hbq_defs[i]->entry_count; 4585 return count; 4586 } 4587 4588 /** 4589 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4590 * 4591 * This function calculates amount of memory required for all hbq entries 4592 * to be configured and returns the total memory required. 4593 **/ 4594 int 4595 lpfc_sli_hbq_size(void) 4596 { 4597 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4598 } 4599 4600 /** 4601 * lpfc_sli_hbq_setup - configure and initialize HBQs 4602 * @phba: Pointer to HBA context object. 4603 * 4604 * This function is called during the SLI initialization to configure 4605 * all the HBQs and post buffers to the HBQ. The caller is not 4606 * required to hold any locks. 
This function will return zero if successful 4607 * else it will return negative error code. 4608 **/ 4609 static int 4610 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4611 { 4612 int hbq_count = lpfc_sli_hbq_count(); 4613 LPFC_MBOXQ_t *pmb; 4614 MAILBOX_t *pmbox; 4615 uint32_t hbqno; 4616 uint32_t hbq_entry_index; 4617 4618 /* Get a Mailbox buffer to setup mailbox 4619 * commands for HBA initialization 4620 */ 4621 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4622 4623 if (!pmb) 4624 return -ENOMEM; 4625 4626 pmbox = &pmb->u.mb; 4627 4628 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4629 phba->link_state = LPFC_INIT_MBX_CMDS; 4630 phba->hbq_in_use = 1; 4631 4632 hbq_entry_index = 0; 4633 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4634 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4635 phba->hbqs[hbqno].hbqPutIdx = 0; 4636 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4637 phba->hbqs[hbqno].entry_count = 4638 lpfc_hbq_defs[hbqno]->entry_count; 4639 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4640 hbq_entry_index, pmb); 4641 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4642 4643 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4644 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4645 mbxStatus <status>, ring <num> */ 4646 4647 lpfc_printf_log(phba, KERN_ERR, 4648 LOG_SLI | LOG_VPORT, 4649 "1805 Adapter failed to init. " 4650 "Data: x%x x%x x%x\n", 4651 pmbox->mbxCommand, 4652 pmbox->mbxStatus, hbqno); 4653 4654 phba->link_state = LPFC_HBA_ERROR; 4655 mempool_free(pmb, phba->mbox_mem_pool); 4656 return -ENXIO; 4657 } 4658 } 4659 phba->hbq_count = hbq_count; 4660 4661 mempool_free(pmb, phba->mbox_mem_pool); 4662 4663 /* Initially populate or replenish the HBQs */ 4664 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4665 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4666 return 0; 4667 } 4668 4669 /** 4670 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4671 * @phba: Pointer to HBA context object. 4672 * 4673 * This function is called during the SLI initialization to configure 4674 * all the HBQs and post buffers to the HBQ. The caller is not 4675 * required to hold any locks. This function will return zero if successful 4676 * else it will return negative error code. 4677 **/ 4678 static int 4679 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4680 { 4681 phba->hbq_in_use = 1; 4682 phba->hbqs[LPFC_ELS_HBQ].entry_count = 4683 lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; 4684 phba->hbq_count = 1; 4685 lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); 4686 /* Initially populate or replenish the HBQs */ 4687 return 0; 4688 } 4689 4690 /** 4691 * lpfc_sli_config_port - Issue config port mailbox command 4692 * @phba: Pointer to HBA context object. 4693 * @sli_mode: sli mode - 2/3 4694 * 4695 * This function is called by the sli initialization code path 4696 * to issue config_port mailbox command. This function restarts the 4697 * HBA firmware and issues a config_port mailbox command to configure 4698 * the SLI interface in the sli mode specified by sli_mode 4699 * variable. The caller is not required to hold any locks. 4700 * The function returns 0 if successful, else returns negative error 4701 * code. 
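 *
 * At most two restart/CONFIG_PORT attempts are made before the routine
 * gives up; a -ERESTART return from the pre-CONFIG_PORT preparation step
 * triggers the second attempt.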
4702 **/ 4703 int 4704 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4705 { 4706 LPFC_MBOXQ_t *pmb; 4707 uint32_t resetcount = 0, rc = 0, done = 0; 4708 4709 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4710 if (!pmb) { 4711 phba->link_state = LPFC_HBA_ERROR; 4712 return -ENOMEM; 4713 } 4714 4715 phba->sli_rev = sli_mode; 4716 while (resetcount < 2 && !done) { 4717 spin_lock_irq(&phba->hbalock); 4718 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4719 spin_unlock_irq(&phba->hbalock); 4720 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4721 lpfc_sli_brdrestart(phba); 4722 rc = lpfc_sli_chipset_init(phba); 4723 if (rc) 4724 break; 4725 4726 spin_lock_irq(&phba->hbalock); 4727 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4728 spin_unlock_irq(&phba->hbalock); 4729 resetcount++; 4730 4731 /* Call pre CONFIG_PORT mailbox command initialization. A 4732 * value of 0 means the call was successful. Any other 4733 * nonzero value is a failure, but if ERESTART is returned, 4734 * the driver may reset the HBA and try again. 4735 */ 4736 rc = lpfc_config_port_prep(phba); 4737 if (rc == -ERESTART) { 4738 phba->link_state = LPFC_LINK_UNKNOWN; 4739 continue; 4740 } else if (rc) 4741 break; 4742 4743 phba->link_state = LPFC_INIT_MBX_CMDS; 4744 lpfc_config_port(phba, pmb); 4745 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4746 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4747 LPFC_SLI3_HBQ_ENABLED | 4748 LPFC_SLI3_CRP_ENABLED | 4749 LPFC_SLI3_BG_ENABLED | 4750 LPFC_SLI3_DSS_ENABLED); 4751 if (rc != MBX_SUCCESS) { 4752 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4753 "0442 Adapter failed to init, mbxCmd x%x " 4754 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4755 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4756 spin_lock_irq(&phba->hbalock); 4757 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4758 spin_unlock_irq(&phba->hbalock); 4759 rc = -ENXIO; 4760 } else { 4761 /* Allow asynchronous mailbox command to go through */ 4762 spin_lock_irq(&phba->hbalock); 4763 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4764 spin_unlock_irq(&phba->hbalock); 4765 done = 1; 4766 4767 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4768 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4769 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4770 "3110 Port did not grant ASABT\n"); 4771 } 4772 } 4773 if (!done) { 4774 rc = -EINVAL; 4775 goto do_prep_failed; 4776 } 4777 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4778 if (!pmb->u.mb.un.varCfgPort.cMA) { 4779 rc = -ENXIO; 4780 goto do_prep_failed; 4781 } 4782 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4783 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4784 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4785 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4786 phba->max_vpi : phba->max_vports; 4787 4788 } else 4789 phba->max_vpi = 0; 4790 phba->fips_level = 0; 4791 phba->fips_spec_rev = 0; 4792 if (pmb->u.mb.un.varCfgPort.gdss) { 4793 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4794 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4795 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4796 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4797 "2850 Security Crypto Active. 
FIPS x%d " 4798 "(Spec Rev: x%d)", 4799 phba->fips_level, phba->fips_spec_rev); 4800 } 4801 if (pmb->u.mb.un.varCfgPort.sec_err) { 4802 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4803 "2856 Config Port Security Crypto " 4804 "Error: x%x ", 4805 pmb->u.mb.un.varCfgPort.sec_err); 4806 } 4807 if (pmb->u.mb.un.varCfgPort.gerbm) 4808 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4809 if (pmb->u.mb.un.varCfgPort.gcrp) 4810 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4811 4812 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4813 phba->port_gp = phba->mbox->us.s3_pgp.port; 4814 4815 if (phba->cfg_enable_bg) { 4816 if (pmb->u.mb.un.varCfgPort.gbg) 4817 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4818 else 4819 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4820 "0443 Adapter did not grant " 4821 "BlockGuard\n"); 4822 } 4823 } else { 4824 phba->hbq_get = NULL; 4825 phba->port_gp = phba->mbox->us.s2.port; 4826 phba->max_vpi = 0; 4827 } 4828 do_prep_failed: 4829 mempool_free(pmb, phba->mbox_mem_pool); 4830 return rc; 4831 } 4832 4833 4834 /** 4835 * lpfc_sli_hba_setup - SLI initialization function 4836 * @phba: Pointer to HBA context object. 4837 * 4838 * This function is the main SLI initialization function. This function 4839 * is called by the HBA initialization code, HBA reset code and HBA 4840 * error attention handler code. Caller is not required to hold any 4841 * locks. This function issues config_port mailbox command to configure 4842 * the SLI, setup iocb rings and HBQ rings. In the end the function 4843 * calls the config_port_post function to issue init_link mailbox 4844 * command and to start the discovery. The function will return zero 4845 * if successful, else it will return negative error code. 4846 **/ 4847 int 4848 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4849 { 4850 uint32_t rc; 4851 int mode = 3, i; 4852 int longs; 4853 4854 switch (phba->cfg_sli_mode) { 4855 case 2: 4856 if (phba->cfg_enable_npiv) { 4857 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4858 "1824 NPIV enabled: Override sli_mode " 4859 "parameter (%d) to auto (0).\n", 4860 phba->cfg_sli_mode); 4861 break; 4862 } 4863 mode = 2; 4864 break; 4865 case 0: 4866 case 3: 4867 break; 4868 default: 4869 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4870 "1819 Unrecognized sli_mode parameter: %d.\n", 4871 phba->cfg_sli_mode); 4872 4873 break; 4874 } 4875 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 4876 4877 rc = lpfc_sli_config_port(phba, mode); 4878 4879 if (rc && phba->cfg_sli_mode == 3) 4880 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4881 "1820 Unable to select SLI-3. 
" 4882 "Not supported by adapter.\n"); 4883 if (rc && mode != 2) 4884 rc = lpfc_sli_config_port(phba, 2); 4885 else if (rc && mode == 2) 4886 rc = lpfc_sli_config_port(phba, 3); 4887 if (rc) 4888 goto lpfc_sli_hba_setup_error; 4889 4890 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4891 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4892 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4893 if (!rc) { 4894 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4895 "2709 This device supports " 4896 "Advanced Error Reporting (AER)\n"); 4897 spin_lock_irq(&phba->hbalock); 4898 phba->hba_flag |= HBA_AER_ENABLED; 4899 spin_unlock_irq(&phba->hbalock); 4900 } else { 4901 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4902 "2708 This device does not support " 4903 "Advanced Error Reporting (AER): %d\n", 4904 rc); 4905 phba->cfg_aer_support = 0; 4906 } 4907 } 4908 4909 if (phba->sli_rev == 3) { 4910 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4911 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4912 } else { 4913 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4914 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4915 phba->sli3_options = 0; 4916 } 4917 4918 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4919 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4920 phba->sli_rev, phba->max_vpi); 4921 rc = lpfc_sli_ring_map(phba); 4922 4923 if (rc) 4924 goto lpfc_sli_hba_setup_error; 4925 4926 /* Initialize VPIs. */ 4927 if (phba->sli_rev == LPFC_SLI_REV3) { 4928 /* 4929 * The VPI bitmask and physical ID array are allocated 4930 * and initialized once only - at driver load. A port 4931 * reset doesn't need to reinitialize this memory. 4932 */ 4933 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4934 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4935 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4936 GFP_KERNEL); 4937 if (!phba->vpi_bmask) { 4938 rc = -ENOMEM; 4939 goto lpfc_sli_hba_setup_error; 4940 } 4941 4942 phba->vpi_ids = kzalloc( 4943 (phba->max_vpi+1) * sizeof(uint16_t), 4944 GFP_KERNEL); 4945 if (!phba->vpi_ids) { 4946 kfree(phba->vpi_bmask); 4947 rc = -ENOMEM; 4948 goto lpfc_sli_hba_setup_error; 4949 } 4950 for (i = 0; i < phba->max_vpi; i++) 4951 phba->vpi_ids[i] = i; 4952 } 4953 } 4954 4955 /* Init HBQs */ 4956 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4957 rc = lpfc_sli_hbq_setup(phba); 4958 if (rc) 4959 goto lpfc_sli_hba_setup_error; 4960 } 4961 spin_lock_irq(&phba->hbalock); 4962 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4963 spin_unlock_irq(&phba->hbalock); 4964 4965 rc = lpfc_config_port_post(phba); 4966 if (rc) 4967 goto lpfc_sli_hba_setup_error; 4968 4969 return rc; 4970 4971 lpfc_sli_hba_setup_error: 4972 phba->link_state = LPFC_HBA_ERROR; 4973 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4974 "0445 Firmware initialization failed\n"); 4975 return rc; 4976 } 4977 4978 /** 4979 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4980 * @phba: Pointer to HBA context object. 4981 * @mboxq: mailbox pointer. 4982 * This function issue a dump mailbox command to read config region 4983 * 23 and parse the records in the region and populate driver 4984 * data structure. 
4985 **/ 4986 static int 4987 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4988 { 4989 LPFC_MBOXQ_t *mboxq; 4990 struct lpfc_dmabuf *mp; 4991 struct lpfc_mqe *mqe; 4992 uint32_t data_length; 4993 int rc; 4994 4995 /* Program the default value of vlan_id and fc_map */ 4996 phba->valid_vlan = 0; 4997 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4998 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4999 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 5000 5001 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5002 if (!mboxq) 5003 return -ENOMEM; 5004 5005 mqe = &mboxq->u.mqe; 5006 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 5007 rc = -ENOMEM; 5008 goto out_free_mboxq; 5009 } 5010 5011 mp = (struct lpfc_dmabuf *) mboxq->context1; 5012 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5013 5014 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 5015 "(%d):2571 Mailbox cmd x%x Status x%x " 5016 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5017 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 5018 "CQ: x%x x%x x%x x%x\n", 5019 mboxq->vport ? mboxq->vport->vpi : 0, 5020 bf_get(lpfc_mqe_command, mqe), 5021 bf_get(lpfc_mqe_status, mqe), 5022 mqe->un.mb_words[0], mqe->un.mb_words[1], 5023 mqe->un.mb_words[2], mqe->un.mb_words[3], 5024 mqe->un.mb_words[4], mqe->un.mb_words[5], 5025 mqe->un.mb_words[6], mqe->un.mb_words[7], 5026 mqe->un.mb_words[8], mqe->un.mb_words[9], 5027 mqe->un.mb_words[10], mqe->un.mb_words[11], 5028 mqe->un.mb_words[12], mqe->un.mb_words[13], 5029 mqe->un.mb_words[14], mqe->un.mb_words[15], 5030 mqe->un.mb_words[16], mqe->un.mb_words[50], 5031 mboxq->mcqe.word0, 5032 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 5033 mboxq->mcqe.trailer); 5034 5035 if (rc) { 5036 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5037 kfree(mp); 5038 rc = -EIO; 5039 goto out_free_mboxq; 5040 } 5041 data_length = mqe->un.mb_words[5]; 5042 if (data_length > DMP_RGN23_SIZE) { 5043 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5044 kfree(mp); 5045 rc = -EIO; 5046 goto out_free_mboxq; 5047 } 5048 5049 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 5050 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5051 kfree(mp); 5052 rc = 0; 5053 5054 out_free_mboxq: 5055 mempool_free(mboxq, phba->mbox_mem_pool); 5056 return rc; 5057 } 5058 5059 /** 5060 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 5061 * @phba: pointer to lpfc hba data structure. 5062 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 5063 * @vpd: pointer to the memory to hold resulting port vpd data. 5064 * @vpd_size: On input, the number of bytes allocated to @vpd. 5065 * On output, the number of data bytes in @vpd. 5066 * 5067 * This routine executes a READ_REV SLI4 mailbox command. In 5068 * addition, this routine gets the port vpd data. 5069 * 5070 * Return codes 5071 * 0 - successful 5072 * -ENOMEM - could not allocated memory. 5073 **/ 5074 static int 5075 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 5076 uint8_t *vpd, uint32_t *vpd_size) 5077 { 5078 int rc = 0; 5079 uint32_t dma_size; 5080 struct lpfc_dmabuf *dmabuf; 5081 struct lpfc_mqe *mqe; 5082 5083 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 5084 if (!dmabuf) 5085 return -ENOMEM; 5086 5087 /* 5088 * Get a DMA buffer for the vpd data resulting from the READ_REV 5089 * mailbox command. 
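	 * The buffer is sized from the caller-supplied *vpd_size and is
	 * freed again before this routine returns.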
5090 */ 5091 dma_size = *vpd_size; 5092 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size, 5093 &dmabuf->phys, GFP_KERNEL); 5094 if (!dmabuf->virt) { 5095 kfree(dmabuf); 5096 return -ENOMEM; 5097 } 5098 5099 /* 5100 * The SLI4 implementation of READ_REV conflicts at word1, 5101 * bits 31:16 and SLI4 adds vpd functionality not present 5102 * in SLI3. This code corrects the conflicts. 5103 */ 5104 lpfc_read_rev(phba, mboxq); 5105 mqe = &mboxq->u.mqe; 5106 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 5107 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 5108 mqe->un.read_rev.word1 &= 0x0000FFFF; 5109 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 5110 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 5111 5112 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5113 if (rc) { 5114 dma_free_coherent(&phba->pcidev->dev, dma_size, 5115 dmabuf->virt, dmabuf->phys); 5116 kfree(dmabuf); 5117 return -EIO; 5118 } 5119 5120 /* 5121 * The available vpd length cannot be bigger than the 5122 * DMA buffer passed to the port. Catch the less than 5123 * case and update the caller's size. 5124 */ 5125 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 5126 *vpd_size = mqe->un.read_rev.avail_vpd_len; 5127 5128 memcpy(vpd, dmabuf->virt, *vpd_size); 5129 5130 dma_free_coherent(&phba->pcidev->dev, dma_size, 5131 dmabuf->virt, dmabuf->phys); 5132 kfree(dmabuf); 5133 return 0; 5134 } 5135 5136 /** 5137 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 5138 * @phba: pointer to lpfc hba data structure. 5139 * 5140 * This routine retrieves SLI4 device physical port name this PCI function 5141 * is attached to. 5142 * 5143 * Return codes 5144 * 0 - successful 5145 * otherwise - failed to retrieve physical port name 5146 **/ 5147 static int 5148 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 5149 { 5150 LPFC_MBOXQ_t *mboxq; 5151 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 5152 struct lpfc_controller_attribute *cntl_attr; 5153 struct lpfc_mbx_get_port_name *get_port_name; 5154 void *virtaddr = NULL; 5155 uint32_t alloclen, reqlen; 5156 uint32_t shdr_status, shdr_add_status; 5157 union lpfc_sli4_cfg_shdr *shdr; 5158 char cport_name = 0; 5159 int rc; 5160 5161 /* We assume nothing at this point */ 5162 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5163 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 5164 5165 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5166 if (!mboxq) 5167 return -ENOMEM; 5168 /* obtain link type and link number via READ_CONFIG */ 5169 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 5170 lpfc_sli4_read_config(phba); 5171 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 5172 goto retrieve_ppname; 5173 5174 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 5175 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 5176 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5177 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 5178 LPFC_SLI4_MBX_NEMBED); 5179 if (alloclen < reqlen) { 5180 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5181 "3084 Allocated DMA memory size (%d) is " 5182 "less than the requested DMA memory size " 5183 "(%d)\n", alloclen, reqlen); 5184 rc = -ENOMEM; 5185 goto out_free_mboxq; 5186 } 5187 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5188 virtaddr = mboxq->sge_array->addr[0]; 5189 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 5190 shdr = &mbx_cntl_attr->cfg_shdr; 5191 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 5192 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5193 if (shdr_status || shdr_add_status || rc) { 5194 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5195 "3085 Mailbox x%x (x%x/x%x) failed, " 5196 "rc:x%x, status:x%x, add_status:x%x\n", 5197 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5198 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5199 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5200 rc, shdr_status, shdr_add_status); 5201 rc = -ENXIO; 5202 goto out_free_mboxq; 5203 } 5204 cntl_attr = &mbx_cntl_attr->cntl_attr; 5205 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 5206 phba->sli4_hba.lnk_info.lnk_tp = 5207 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 5208 phba->sli4_hba.lnk_info.lnk_no = 5209 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 5210 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5211 "3086 lnk_type:%d, lnk_numb:%d\n", 5212 phba->sli4_hba.lnk_info.lnk_tp, 5213 phba->sli4_hba.lnk_info.lnk_no); 5214 5215 retrieve_ppname: 5216 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 5217 LPFC_MBOX_OPCODE_GET_PORT_NAME, 5218 sizeof(struct lpfc_mbx_get_port_name) - 5219 sizeof(struct lpfc_sli4_cfg_mhdr), 5220 LPFC_SLI4_MBX_EMBED); 5221 get_port_name = &mboxq->u.mqe.un.get_port_name; 5222 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5223 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5224 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5225 phba->sli4_hba.lnk_info.lnk_tp); 5226 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5227 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5228 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5229 if (shdr_status || shdr_add_status || rc) { 5230 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5231 "3087 Mailbox x%x (x%x/x%x) failed: " 5232 "rc:x%x, status:x%x, add_status:x%x\n", 5233 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5234 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5235 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5236 rc, shdr_status, shdr_add_status); 5237 rc = -ENXIO; 5238 goto out_free_mboxq; 5239 } 5240 switch (phba->sli4_hba.lnk_info.lnk_no) { 5241 case LPFC_LINK_NUMBER_0: 5242 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 5243 &get_port_name->u.response); 5244 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5245 break; 5246 case LPFC_LINK_NUMBER_1: 5247 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5248 &get_port_name->u.response); 5249 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5250 break; 5251 case LPFC_LINK_NUMBER_2: 5252 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5253 &get_port_name->u.response); 5254 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5255 break; 5256 case LPFC_LINK_NUMBER_3: 5257 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5258 &get_port_name->u.response); 5259 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5260 break; 5261 default: 5262 break; 5263 } 5264 5265 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5266 phba->Port[0] = cport_name; 5267 phba->Port[1] = '\0'; 5268 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5269 "3091 SLI get port name: %s\n", phba->Port); 5270 } 5271 5272 out_free_mboxq: 5273 if (rc != MBX_TIMEOUT) { 5274 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5275 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5276 else 5277 mempool_free(mboxq, phba->mbox_mem_pool); 5278 } 5279 return rc; 5280 } 5281 5282 /** 5283 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device 
completion and event queues 5284 * @phba: pointer to lpfc hba data structure. 5285 * 5286 * This routine is called to explicitly arm the SLI4 device's completion and 5287 * event queues 5288 **/ 5289 static void 5290 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5291 { 5292 int qidx; 5293 5294 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 5295 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 5296 if (phba->sli4_hba.nvmels_cq) 5297 lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq, 5298 LPFC_QUEUE_REARM); 5299 5300 if (phba->sli4_hba.fcp_cq) 5301 for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) 5302 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx], 5303 LPFC_QUEUE_REARM); 5304 5305 if (phba->sli4_hba.nvme_cq) 5306 for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) 5307 lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx], 5308 LPFC_QUEUE_REARM); 5309 5310 if (phba->cfg_fof) 5311 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 5312 5313 if (phba->sli4_hba.hba_eq) 5314 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) 5315 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx], 5316 LPFC_QUEUE_REARM); 5317 5318 if (phba->nvmet_support) { 5319 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) { 5320 lpfc_sli4_cq_release( 5321 phba->sli4_hba.nvmet_cqset[qidx], 5322 LPFC_QUEUE_REARM); 5323 } 5324 } 5325 5326 if (phba->cfg_fof) 5327 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM); 5328 } 5329 5330 /** 5331 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5332 * @phba: Pointer to HBA context object. 5333 * @type: The resource extent type. 5334 * @extnt_count: buffer to hold port available extent count. 5335 * @extnt_size: buffer to hold element count per extent. 5336 * 5337 * This function calls the port and retrievs the number of available 5338 * extents and their size for a particular extent type. 5339 * 5340 * Returns: 0 if successful. Nonzero otherwise. 5341 **/ 5342 int 5343 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5344 uint16_t *extnt_count, uint16_t *extnt_size) 5345 { 5346 int rc = 0; 5347 uint32_t length; 5348 uint32_t mbox_tmo; 5349 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5350 LPFC_MBOXQ_t *mbox; 5351 5352 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5353 if (!mbox) 5354 return -ENOMEM; 5355 5356 /* Find out how many extents are available for this resource type */ 5357 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5358 sizeof(struct lpfc_sli4_cfg_mhdr)); 5359 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5360 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5361 length, LPFC_SLI4_MBX_EMBED); 5362 5363 /* Send an extents count of 0 - the GET doesn't use it. 
*/ 5364 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5365 LPFC_SLI4_MBX_EMBED); 5366 if (unlikely(rc)) { 5367 rc = -EIO; 5368 goto err_exit; 5369 } 5370 5371 if (!phba->sli4_hba.intr_enable) 5372 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5373 else { 5374 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5375 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5376 } 5377 if (unlikely(rc)) { 5378 rc = -EIO; 5379 goto err_exit; 5380 } 5381 5382 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5383 if (bf_get(lpfc_mbox_hdr_status, 5384 &rsrc_info->header.cfg_shdr.response)) { 5385 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5386 "2930 Failed to get resource extents " 5387 "Status 0x%x Add'l Status 0x%x\n", 5388 bf_get(lpfc_mbox_hdr_status, 5389 &rsrc_info->header.cfg_shdr.response), 5390 bf_get(lpfc_mbox_hdr_add_status, 5391 &rsrc_info->header.cfg_shdr.response)); 5392 rc = -EIO; 5393 goto err_exit; 5394 } 5395 5396 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5397 &rsrc_info->u.rsp); 5398 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5399 &rsrc_info->u.rsp); 5400 5401 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5402 "3162 Retrieved extents type-%d from port: count:%d, " 5403 "size:%d\n", type, *extnt_count, *extnt_size); 5404 5405 err_exit: 5406 mempool_free(mbox, phba->mbox_mem_pool); 5407 return rc; 5408 } 5409 5410 /** 5411 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5412 * @phba: Pointer to HBA context object. 5413 * @type: The extent type to check. 5414 * 5415 * This function reads the current available extents from the port and checks 5416 * if the extent count or extent size has changed since the last access. 5417 * Callers use this routine post port reset to understand if there is a 5418 * extent reprovisioning requirement. 5419 * 5420 * Returns: 5421 * -Error: error indicates problem. 5422 * 1: Extent count or size has changed. 5423 * 0: No changes. 5424 **/ 5425 static int 5426 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5427 { 5428 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5429 uint16_t size_diff, rsrc_ext_size; 5430 int rc = 0; 5431 struct lpfc_rsrc_blks *rsrc_entry; 5432 struct list_head *rsrc_blk_list = NULL; 5433 5434 size_diff = 0; 5435 curr_ext_cnt = 0; 5436 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5437 &rsrc_ext_cnt, 5438 &rsrc_ext_size); 5439 if (unlikely(rc)) 5440 return -EIO; 5441 5442 switch (type) { 5443 case LPFC_RSC_TYPE_FCOE_RPI: 5444 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5445 break; 5446 case LPFC_RSC_TYPE_FCOE_VPI: 5447 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5448 break; 5449 case LPFC_RSC_TYPE_FCOE_XRI: 5450 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5451 break; 5452 case LPFC_RSC_TYPE_FCOE_VFI: 5453 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5454 break; 5455 default: 5456 break; 5457 } 5458 5459 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5460 curr_ext_cnt++; 5461 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5462 size_diff++; 5463 } 5464 5465 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5466 rc = 1; 5467 5468 return rc; 5469 } 5470 5471 /** 5472 * lpfc_sli4_cfg_post_extnts - 5473 * @phba: Pointer to HBA context object. 5474 * @extnt_cnt - number of available extents. 5475 * @type - the extent type (rpi, xri, vfi, vpi). 5476 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5477 * @mbox - pointer to the caller's allocated mailbox structure. 
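 *
 * A minimal calling sketch (not a prescriptive pattern), mirroring how
 * lpfc_sli4_alloc_extent() below drives this helper with a mailbox taken
 * from phba->mbox_mem_pool:
 *
 *   rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
 *   if (unlikely(rc)) {
 *           rc = -EIO;
 *           goto err_exit;
 *   }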
5478 * 5479 * This function executes the extents allocation request. It also 5480 * takes care of the amount of memory needed to allocate or get the 5481 * allocated extents. It is the caller's responsibility to evaluate 5482 * the response. 5483 * 5484 * Returns: 5485 * -Error: Error value describes the condition found. 5486 * 0: if successful 5487 **/ 5488 static int 5489 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5490 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5491 { 5492 int rc = 0; 5493 uint32_t req_len; 5494 uint32_t emb_len; 5495 uint32_t alloc_len, mbox_tmo; 5496 5497 /* Calculate the total requested length of the dma memory */ 5498 req_len = extnt_cnt * sizeof(uint16_t); 5499 5500 /* 5501 * Calculate the size of an embedded mailbox. The uint32_t 5502 * accounts for extents-specific word. 5503 */ 5504 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5505 sizeof(uint32_t); 5506 5507 /* 5508 * Presume the allocation and response will fit into an embedded 5509 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5510 */ 5511 *emb = LPFC_SLI4_MBX_EMBED; 5512 if (req_len > emb_len) { 5513 req_len = extnt_cnt * sizeof(uint16_t) + 5514 sizeof(union lpfc_sli4_cfg_shdr) + 5515 sizeof(uint32_t); 5516 *emb = LPFC_SLI4_MBX_NEMBED; 5517 } 5518 5519 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5520 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5521 req_len, *emb); 5522 if (alloc_len < req_len) { 5523 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5524 "2982 Allocated DMA memory size (x%x) is " 5525 "less than the requested DMA memory " 5526 "size (x%x)\n", alloc_len, req_len); 5527 return -ENOMEM; 5528 } 5529 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5530 if (unlikely(rc)) 5531 return -EIO; 5532 5533 if (!phba->sli4_hba.intr_enable) 5534 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5535 else { 5536 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5537 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5538 } 5539 5540 if (unlikely(rc)) 5541 rc = -EIO; 5542 return rc; 5543 } 5544 5545 /** 5546 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5547 * @phba: Pointer to HBA context object. 5548 * @type: The resource extent type to allocate. 5549 * 5550 * This function allocates the number of elements for the specified 5551 * resource type. 
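 *
 * A nonzero return is treated as failure by its callers. A short usage
 * sketch, following how lpfc_sli4_alloc_resource_identifiers() invokes it
 * once per FCoE resource type:
 *
 *   rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
 *   if (unlikely(rc))
 *           goto err_exit;
 *   rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
 *   if (unlikely(rc))
 *           goto err_exit;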
5552 **/ 5553 static int 5554 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5555 { 5556 bool emb = false; 5557 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5558 uint16_t rsrc_id, rsrc_start, j, k; 5559 uint16_t *ids; 5560 int i, rc; 5561 unsigned long longs; 5562 unsigned long *bmask; 5563 struct lpfc_rsrc_blks *rsrc_blks; 5564 LPFC_MBOXQ_t *mbox; 5565 uint32_t length; 5566 struct lpfc_id_range *id_array = NULL; 5567 void *virtaddr = NULL; 5568 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5569 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5570 struct list_head *ext_blk_list; 5571 5572 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5573 &rsrc_cnt, 5574 &rsrc_size); 5575 if (unlikely(rc)) 5576 return -EIO; 5577 5578 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5579 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5580 "3009 No available Resource Extents " 5581 "for resource type 0x%x: Count: 0x%x, " 5582 "Size 0x%x\n", type, rsrc_cnt, 5583 rsrc_size); 5584 return -ENOMEM; 5585 } 5586 5587 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5588 "2903 Post resource extents type-0x%x: " 5589 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5590 5591 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5592 if (!mbox) 5593 return -ENOMEM; 5594 5595 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5596 if (unlikely(rc)) { 5597 rc = -EIO; 5598 goto err_exit; 5599 } 5600 5601 /* 5602 * Figure out where the response is located. Then get local pointers 5603 * to the response data. The port does not guarantee to respond to 5604 * all extents counts request so update the local variable with the 5605 * allocated count from the port. 5606 */ 5607 if (emb == LPFC_SLI4_MBX_EMBED) { 5608 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5609 id_array = &rsrc_ext->u.rsp.id[0]; 5610 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5611 } else { 5612 virtaddr = mbox->sge_array->addr[0]; 5613 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5614 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5615 id_array = &n_rsrc->id; 5616 } 5617 5618 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5619 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5620 5621 /* 5622 * Based on the resource size and count, correct the base and max 5623 * resource values. 5624 */ 5625 length = sizeof(struct lpfc_rsrc_blks); 5626 switch (type) { 5627 case LPFC_RSC_TYPE_FCOE_RPI: 5628 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5629 sizeof(unsigned long), 5630 GFP_KERNEL); 5631 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5632 rc = -ENOMEM; 5633 goto err_exit; 5634 } 5635 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5636 sizeof(uint16_t), 5637 GFP_KERNEL); 5638 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5639 kfree(phba->sli4_hba.rpi_bmask); 5640 rc = -ENOMEM; 5641 goto err_exit; 5642 } 5643 5644 /* 5645 * The next_rpi was initialized with the maximum available 5646 * count but the port may allocate a smaller number. Catch 5647 * that case and update the next_rpi. 5648 */ 5649 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5650 5651 /* Initialize local ptrs for common extent processing later. 
*/ 5652 bmask = phba->sli4_hba.rpi_bmask; 5653 ids = phba->sli4_hba.rpi_ids; 5654 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5655 break; 5656 case LPFC_RSC_TYPE_FCOE_VPI: 5657 phba->vpi_bmask = kzalloc(longs * 5658 sizeof(unsigned long), 5659 GFP_KERNEL); 5660 if (unlikely(!phba->vpi_bmask)) { 5661 rc = -ENOMEM; 5662 goto err_exit; 5663 } 5664 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5665 sizeof(uint16_t), 5666 GFP_KERNEL); 5667 if (unlikely(!phba->vpi_ids)) { 5668 kfree(phba->vpi_bmask); 5669 rc = -ENOMEM; 5670 goto err_exit; 5671 } 5672 5673 /* Initialize local ptrs for common extent processing later. */ 5674 bmask = phba->vpi_bmask; 5675 ids = phba->vpi_ids; 5676 ext_blk_list = &phba->lpfc_vpi_blk_list; 5677 break; 5678 case LPFC_RSC_TYPE_FCOE_XRI: 5679 phba->sli4_hba.xri_bmask = kzalloc(longs * 5680 sizeof(unsigned long), 5681 GFP_KERNEL); 5682 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5683 rc = -ENOMEM; 5684 goto err_exit; 5685 } 5686 phba->sli4_hba.max_cfg_param.xri_used = 0; 5687 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5688 sizeof(uint16_t), 5689 GFP_KERNEL); 5690 if (unlikely(!phba->sli4_hba.xri_ids)) { 5691 kfree(phba->sli4_hba.xri_bmask); 5692 rc = -ENOMEM; 5693 goto err_exit; 5694 } 5695 5696 /* Initialize local ptrs for common extent processing later. */ 5697 bmask = phba->sli4_hba.xri_bmask; 5698 ids = phba->sli4_hba.xri_ids; 5699 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5700 break; 5701 case LPFC_RSC_TYPE_FCOE_VFI: 5702 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5703 sizeof(unsigned long), 5704 GFP_KERNEL); 5705 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5706 rc = -ENOMEM; 5707 goto err_exit; 5708 } 5709 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5710 sizeof(uint16_t), 5711 GFP_KERNEL); 5712 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5713 kfree(phba->sli4_hba.vfi_bmask); 5714 rc = -ENOMEM; 5715 goto err_exit; 5716 } 5717 5718 /* Initialize local ptrs for common extent processing later. */ 5719 bmask = phba->sli4_hba.vfi_bmask; 5720 ids = phba->sli4_hba.vfi_ids; 5721 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5722 break; 5723 default: 5724 /* Unsupported Opcode. Fail call. */ 5725 id_array = NULL; 5726 bmask = NULL; 5727 ids = NULL; 5728 ext_blk_list = NULL; 5729 goto err_exit; 5730 } 5731 5732 /* 5733 * Complete initializing the extent configuration with the 5734 * allocated ids assigned to this function. The bitmask serves 5735 * as an index into the array and manages the available ids. The 5736 * array just stores the ids communicated to the port via the wqes. 5737 */ 5738 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5739 if ((i % 2) == 0) 5740 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5741 &id_array[k]); 5742 else 5743 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5744 &id_array[k]); 5745 5746 rsrc_blks = kzalloc(length, GFP_KERNEL); 5747 if (unlikely(!rsrc_blks)) { 5748 rc = -ENOMEM; 5749 kfree(bmask); 5750 kfree(ids); 5751 goto err_exit; 5752 } 5753 rsrc_blks->rsrc_start = rsrc_id; 5754 rsrc_blks->rsrc_size = rsrc_size; 5755 list_add_tail(&rsrc_blks->list, ext_blk_list); 5756 rsrc_start = rsrc_id; 5757 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) { 5758 phba->sli4_hba.scsi_xri_start = rsrc_start + 5759 lpfc_sli4_get_iocb_cnt(phba); 5760 phba->sli4_hba.nvme_xri_start = 5761 phba->sli4_hba.scsi_xri_start + 5762 phba->sli4_hba.scsi_xri_max; 5763 } 5764 5765 while (rsrc_id < (rsrc_start + rsrc_size)) { 5766 ids[j] = rsrc_id; 5767 rsrc_id++; 5768 j++; 5769 } 5770 /* Entire word processed. 
Get next word.*/ 5771 if ((i % 2) == 1) 5772 k++; 5773 } 5774 err_exit: 5775 lpfc_sli4_mbox_cmd_free(phba, mbox); 5776 return rc; 5777 } 5778 5779 5780 5781 /** 5782 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5783 * @phba: Pointer to HBA context object. 5784 * @type: the extent's type. 5785 * 5786 * This function deallocates all extents of a particular resource type. 5787 * SLI4 does not allow for deallocating a particular extent range. It 5788 * is the caller's responsibility to release all kernel memory resources. 5789 **/ 5790 static int 5791 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5792 { 5793 int rc; 5794 uint32_t length, mbox_tmo = 0; 5795 LPFC_MBOXQ_t *mbox; 5796 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5797 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5798 5799 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5800 if (!mbox) 5801 return -ENOMEM; 5802 5803 /* 5804 * This function sends an embedded mailbox because it only sends the 5805 * the resource type. All extents of this type are released by the 5806 * port. 5807 */ 5808 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5809 sizeof(struct lpfc_sli4_cfg_mhdr)); 5810 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5811 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5812 length, LPFC_SLI4_MBX_EMBED); 5813 5814 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5815 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5816 LPFC_SLI4_MBX_EMBED); 5817 if (unlikely(rc)) { 5818 rc = -EIO; 5819 goto out_free_mbox; 5820 } 5821 if (!phba->sli4_hba.intr_enable) 5822 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5823 else { 5824 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5825 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5826 } 5827 if (unlikely(rc)) { 5828 rc = -EIO; 5829 goto out_free_mbox; 5830 } 5831 5832 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5833 if (bf_get(lpfc_mbox_hdr_status, 5834 &dealloc_rsrc->header.cfg_shdr.response)) { 5835 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5836 "2919 Failed to release resource extents " 5837 "for type %d - Status 0x%x Add'l Status 0x%x. " 5838 "Resource memory not released.\n", 5839 type, 5840 bf_get(lpfc_mbox_hdr_status, 5841 &dealloc_rsrc->header.cfg_shdr.response), 5842 bf_get(lpfc_mbox_hdr_add_status, 5843 &dealloc_rsrc->header.cfg_shdr.response)); 5844 rc = -EIO; 5845 goto out_free_mbox; 5846 } 5847 5848 /* Release kernel memory resources for the specific type. 
*/ 5849 switch (type) { 5850 case LPFC_RSC_TYPE_FCOE_VPI: 5851 kfree(phba->vpi_bmask); 5852 kfree(phba->vpi_ids); 5853 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5854 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5855 &phba->lpfc_vpi_blk_list, list) { 5856 list_del_init(&rsrc_blk->list); 5857 kfree(rsrc_blk); 5858 } 5859 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5860 break; 5861 case LPFC_RSC_TYPE_FCOE_XRI: 5862 kfree(phba->sli4_hba.xri_bmask); 5863 kfree(phba->sli4_hba.xri_ids); 5864 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5865 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5866 list_del_init(&rsrc_blk->list); 5867 kfree(rsrc_blk); 5868 } 5869 break; 5870 case LPFC_RSC_TYPE_FCOE_VFI: 5871 kfree(phba->sli4_hba.vfi_bmask); 5872 kfree(phba->sli4_hba.vfi_ids); 5873 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5874 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5875 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5876 list_del_init(&rsrc_blk->list); 5877 kfree(rsrc_blk); 5878 } 5879 break; 5880 case LPFC_RSC_TYPE_FCOE_RPI: 5881 /* RPI bitmask and physical id array are cleaned up earlier. */ 5882 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5883 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5884 list_del_init(&rsrc_blk->list); 5885 kfree(rsrc_blk); 5886 } 5887 break; 5888 default: 5889 break; 5890 } 5891 5892 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5893 5894 out_free_mbox: 5895 mempool_free(mbox, phba->mbox_mem_pool); 5896 return rc; 5897 } 5898 5899 static void 5900 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 5901 uint32_t feature) 5902 { 5903 uint32_t len; 5904 5905 len = sizeof(struct lpfc_mbx_set_feature) - 5906 sizeof(struct lpfc_sli4_cfg_mhdr); 5907 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5908 LPFC_MBOX_OPCODE_SET_FEATURES, len, 5909 LPFC_SLI4_MBX_EMBED); 5910 5911 switch (feature) { 5912 case LPFC_SET_UE_RECOVERY: 5913 bf_set(lpfc_mbx_set_feature_UER, 5914 &mbox->u.mqe.un.set_feature, 1); 5915 mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY; 5916 mbox->u.mqe.un.set_feature.param_len = 8; 5917 break; 5918 case LPFC_SET_MDS_DIAGS: 5919 bf_set(lpfc_mbx_set_feature_mds, 5920 &mbox->u.mqe.un.set_feature, 1); 5921 bf_set(lpfc_mbx_set_feature_mds_deep_loopbk, 5922 &mbox->u.mqe.un.set_feature, 1); 5923 mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; 5924 mbox->u.mqe.un.set_feature.param_len = 8; 5925 break; 5926 } 5927 5928 return; 5929 } 5930 5931 /** 5932 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5933 * @phba: Pointer to HBA context object. 5934 * 5935 * This function allocates all SLI4 resource identifiers. 5936 **/ 5937 int 5938 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5939 { 5940 int i, rc, error = 0; 5941 uint16_t count, base; 5942 unsigned long longs; 5943 5944 if (!phba->sli4_hba.rpi_hdrs_in_use) 5945 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5946 if (phba->sli4_hba.extents_in_use) { 5947 /* 5948 * The port supports resource extents. The XRI, VPI, VFI, RPI 5949 * resource extent count must be read and allocated before 5950 * provisioning the resource id arrays. 5951 */ 5952 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5953 LPFC_IDX_RSRC_RDY) { 5954 /* 5955 * Extent-based resources are set - the driver could 5956 * be in a port reset. Figure out if any corrective 5957 * actions need to be taken. 
5958 */ 5959 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5960 LPFC_RSC_TYPE_FCOE_VFI); 5961 if (rc != 0) 5962 error++; 5963 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5964 LPFC_RSC_TYPE_FCOE_VPI); 5965 if (rc != 0) 5966 error++; 5967 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5968 LPFC_RSC_TYPE_FCOE_XRI); 5969 if (rc != 0) 5970 error++; 5971 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5972 LPFC_RSC_TYPE_FCOE_RPI); 5973 if (rc != 0) 5974 error++; 5975 5976 /* 5977 * It's possible that the number of resources 5978 * provided to this port instance changed between 5979 * resets. Detect this condition and reallocate 5980 * resources. Otherwise, there is no action. 5981 */ 5982 if (error) { 5983 lpfc_printf_log(phba, KERN_INFO, 5984 LOG_MBOX | LOG_INIT, 5985 "2931 Detected extent resource " 5986 "change. Reallocating all " 5987 "extents.\n"); 5988 rc = lpfc_sli4_dealloc_extent(phba, 5989 LPFC_RSC_TYPE_FCOE_VFI); 5990 rc = lpfc_sli4_dealloc_extent(phba, 5991 LPFC_RSC_TYPE_FCOE_VPI); 5992 rc = lpfc_sli4_dealloc_extent(phba, 5993 LPFC_RSC_TYPE_FCOE_XRI); 5994 rc = lpfc_sli4_dealloc_extent(phba, 5995 LPFC_RSC_TYPE_FCOE_RPI); 5996 } else 5997 return 0; 5998 } 5999 6000 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 6001 if (unlikely(rc)) 6002 goto err_exit; 6003 6004 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 6005 if (unlikely(rc)) 6006 goto err_exit; 6007 6008 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 6009 if (unlikely(rc)) 6010 goto err_exit; 6011 6012 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 6013 if (unlikely(rc)) 6014 goto err_exit; 6015 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6016 LPFC_IDX_RSRC_RDY); 6017 return rc; 6018 } else { 6019 /* 6020 * The port does not support resource extents. The XRI, VPI, 6021 * VFI, RPI resource ids were determined from READ_CONFIG. 6022 * Just allocate the bitmasks and provision the resource id 6023 * arrays. If a port reset is active, the resources don't 6024 * need any action - just exit. 6025 */ 6026 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 6027 LPFC_IDX_RSRC_RDY) { 6028 lpfc_sli4_dealloc_resource_identifiers(phba); 6029 lpfc_sli4_remove_rpis(phba); 6030 } 6031 /* RPIs. */ 6032 count = phba->sli4_hba.max_cfg_param.max_rpi; 6033 if (count <= 0) { 6034 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6035 "3279 Invalid provisioning of " 6036 "rpi:%d\n", count); 6037 rc = -EINVAL; 6038 goto err_exit; 6039 } 6040 base = phba->sli4_hba.max_cfg_param.rpi_base; 6041 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6042 phba->sli4_hba.rpi_bmask = kzalloc(longs * 6043 sizeof(unsigned long), 6044 GFP_KERNEL); 6045 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 6046 rc = -ENOMEM; 6047 goto err_exit; 6048 } 6049 phba->sli4_hba.rpi_ids = kzalloc(count * 6050 sizeof(uint16_t), 6051 GFP_KERNEL); 6052 if (unlikely(!phba->sli4_hba.rpi_ids)) { 6053 rc = -ENOMEM; 6054 goto free_rpi_bmask; 6055 } 6056 6057 for (i = 0; i < count; i++) 6058 phba->sli4_hba.rpi_ids[i] = base + i; 6059 6060 /* VPIs. 
*/ 6061 count = phba->sli4_hba.max_cfg_param.max_vpi; 6062 if (count <= 0) { 6063 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6064 "3280 Invalid provisioning of " 6065 "vpi:%d\n", count); 6066 rc = -EINVAL; 6067 goto free_rpi_ids; 6068 } 6069 base = phba->sli4_hba.max_cfg_param.vpi_base; 6070 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6071 phba->vpi_bmask = kzalloc(longs * 6072 sizeof(unsigned long), 6073 GFP_KERNEL); 6074 if (unlikely(!phba->vpi_bmask)) { 6075 rc = -ENOMEM; 6076 goto free_rpi_ids; 6077 } 6078 phba->vpi_ids = kzalloc(count * 6079 sizeof(uint16_t), 6080 GFP_KERNEL); 6081 if (unlikely(!phba->vpi_ids)) { 6082 rc = -ENOMEM; 6083 goto free_vpi_bmask; 6084 } 6085 6086 for (i = 0; i < count; i++) 6087 phba->vpi_ids[i] = base + i; 6088 6089 /* XRIs. */ 6090 count = phba->sli4_hba.max_cfg_param.max_xri; 6091 if (count <= 0) { 6092 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6093 "3281 Invalid provisioning of " 6094 "xri:%d\n", count); 6095 rc = -EINVAL; 6096 goto free_vpi_ids; 6097 } 6098 base = phba->sli4_hba.max_cfg_param.xri_base; 6099 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6100 phba->sli4_hba.xri_bmask = kzalloc(longs * 6101 sizeof(unsigned long), 6102 GFP_KERNEL); 6103 if (unlikely(!phba->sli4_hba.xri_bmask)) { 6104 rc = -ENOMEM; 6105 goto free_vpi_ids; 6106 } 6107 phba->sli4_hba.max_cfg_param.xri_used = 0; 6108 phba->sli4_hba.xri_ids = kzalloc(count * 6109 sizeof(uint16_t), 6110 GFP_KERNEL); 6111 if (unlikely(!phba->sli4_hba.xri_ids)) { 6112 rc = -ENOMEM; 6113 goto free_xri_bmask; 6114 } 6115 6116 for (i = 0; i < count; i++) 6117 phba->sli4_hba.xri_ids[i] = base + i; 6118 6119 /* VFIs. */ 6120 count = phba->sli4_hba.max_cfg_param.max_vfi; 6121 if (count <= 0) { 6122 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6123 "3282 Invalid provisioning of " 6124 "vfi:%d\n", count); 6125 rc = -EINVAL; 6126 goto free_xri_ids; 6127 } 6128 base = phba->sli4_hba.max_cfg_param.vfi_base; 6129 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 6130 phba->sli4_hba.vfi_bmask = kzalloc(longs * 6131 sizeof(unsigned long), 6132 GFP_KERNEL); 6133 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 6134 rc = -ENOMEM; 6135 goto free_xri_ids; 6136 } 6137 phba->sli4_hba.vfi_ids = kzalloc(count * 6138 sizeof(uint16_t), 6139 GFP_KERNEL); 6140 if (unlikely(!phba->sli4_hba.vfi_ids)) { 6141 rc = -ENOMEM; 6142 goto free_vfi_bmask; 6143 } 6144 6145 for (i = 0; i < count; i++) 6146 phba->sli4_hba.vfi_ids[i] = base + i; 6147 6148 /* 6149 * Mark all resources ready. An HBA reset doesn't need 6150 * to reset the initialization. 6151 */ 6152 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 6153 LPFC_IDX_RSRC_RDY); 6154 return 0; 6155 } 6156 6157 free_vfi_bmask: 6158 kfree(phba->sli4_hba.vfi_bmask); 6159 phba->sli4_hba.vfi_bmask = NULL; 6160 free_xri_ids: 6161 kfree(phba->sli4_hba.xri_ids); 6162 phba->sli4_hba.xri_ids = NULL; 6163 free_xri_bmask: 6164 kfree(phba->sli4_hba.xri_bmask); 6165 phba->sli4_hba.xri_bmask = NULL; 6166 free_vpi_ids: 6167 kfree(phba->vpi_ids); 6168 phba->vpi_ids = NULL; 6169 free_vpi_bmask: 6170 kfree(phba->vpi_bmask); 6171 phba->vpi_bmask = NULL; 6172 free_rpi_ids: 6173 kfree(phba->sli4_hba.rpi_ids); 6174 phba->sli4_hba.rpi_ids = NULL; 6175 free_rpi_bmask: 6176 kfree(phba->sli4_hba.rpi_bmask); 6177 phba->sli4_hba.rpi_bmask = NULL; 6178 err_exit: 6179 return rc; 6180 } 6181 6182 /** 6183 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 6184 * @phba: Pointer to HBA context object. 
 *
 * This function deallocates all previously allocated SLI4 resource
 * identifiers (rpi, vpi, xri and vfi).
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold the port extent count response.
 * @extnt_size: buffer to hold the port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count. The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		  sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = emb_len;
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2983 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extent count requests, so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_sgl_list - Repost the buffer sgl pages as a block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked list of sgl buffers to post.
 * @cnt: number of buffers on the linked list.
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For single
 * buffer sgls with non-contiguous xris, if any, it uses the embedded SGL
 * post mailbox command for posting.
 *
 * Returns: the number of XRIs actually posted on success, -EIO on failure.
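 *
 * A usage sketch, as in the ELS sgl registration path of
 * lpfc_sli4_hba_setup() later in this file:
 *
 *   rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
 *                                  phba->sli4_hba.els_xri_cnt);
 *   if (rc < 0)
 *           return -ENODEV;           /* error handling simplified here */
 *   phba->sli4_hba.els_xri_cnt = rc;  /* XRIs actually posted */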
6374 **/ 6375 static int 6376 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba, 6377 struct list_head *sgl_list, int cnt) 6378 { 6379 struct lpfc_sglq *sglq_entry = NULL; 6380 struct lpfc_sglq *sglq_entry_next = NULL; 6381 struct lpfc_sglq *sglq_entry_first = NULL; 6382 int status, total_cnt; 6383 int post_cnt = 0, num_posted = 0, block_cnt = 0; 6384 int last_xritag = NO_XRI; 6385 LIST_HEAD(prep_sgl_list); 6386 LIST_HEAD(blck_sgl_list); 6387 LIST_HEAD(allc_sgl_list); 6388 LIST_HEAD(post_sgl_list); 6389 LIST_HEAD(free_sgl_list); 6390 6391 spin_lock_irq(&phba->hbalock); 6392 spin_lock(&phba->sli4_hba.sgl_list_lock); 6393 list_splice_init(sgl_list, &allc_sgl_list); 6394 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6395 spin_unlock_irq(&phba->hbalock); 6396 6397 total_cnt = cnt; 6398 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6399 &allc_sgl_list, list) { 6400 list_del_init(&sglq_entry->list); 6401 block_cnt++; 6402 if ((last_xritag != NO_XRI) && 6403 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6404 /* a hole in xri block, form a sgl posting block */ 6405 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6406 post_cnt = block_cnt - 1; 6407 /* prepare list for next posting block */ 6408 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6409 block_cnt = 1; 6410 } else { 6411 /* prepare list for next posting block */ 6412 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6413 /* enough sgls for non-embed sgl mbox command */ 6414 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6415 list_splice_init(&prep_sgl_list, 6416 &blck_sgl_list); 6417 post_cnt = block_cnt; 6418 block_cnt = 0; 6419 } 6420 } 6421 num_posted++; 6422 6423 /* keep track of last sgl's xritag */ 6424 last_xritag = sglq_entry->sli4_xritag; 6425 6426 /* end of repost sgl list condition for buffers */ 6427 if (num_posted == total_cnt) { 6428 if (post_cnt == 0) { 6429 list_splice_init(&prep_sgl_list, 6430 &blck_sgl_list); 6431 post_cnt = block_cnt; 6432 } else if (block_cnt == 1) { 6433 status = lpfc_sli4_post_sgl(phba, 6434 sglq_entry->phys, 0, 6435 sglq_entry->sli4_xritag); 6436 if (!status) { 6437 /* successful, put sgl to posted list */ 6438 list_add_tail(&sglq_entry->list, 6439 &post_sgl_list); 6440 } else { 6441 /* Failure, put sgl to free list */ 6442 lpfc_printf_log(phba, KERN_WARNING, 6443 LOG_SLI, 6444 "3159 Failed to post " 6445 "sgl, xritag:x%x\n", 6446 sglq_entry->sli4_xritag); 6447 list_add_tail(&sglq_entry->list, 6448 &free_sgl_list); 6449 total_cnt--; 6450 } 6451 } 6452 } 6453 6454 /* continue until a nembed page worth of sgls */ 6455 if (post_cnt == 0) 6456 continue; 6457 6458 /* post the buffer list sgls as a block */ 6459 status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, 6460 post_cnt); 6461 6462 if (!status) { 6463 /* success, put sgl list to posted sgl list */ 6464 list_splice_init(&blck_sgl_list, &post_sgl_list); 6465 } else { 6466 /* Failure, put sgl list to free sgl list */ 6467 sglq_entry_first = list_first_entry(&blck_sgl_list, 6468 struct lpfc_sglq, 6469 list); 6470 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6471 "3160 Failed to post sgl-list, " 6472 "xritag:x%x-x%x\n", 6473 sglq_entry_first->sli4_xritag, 6474 (sglq_entry_first->sli4_xritag + 6475 post_cnt - 1)); 6476 list_splice_init(&blck_sgl_list, &free_sgl_list); 6477 total_cnt -= post_cnt; 6478 } 6479 6480 /* don't reset xirtag due to hole in xri block */ 6481 if (block_cnt == 0) 6482 last_xritag = NO_XRI; 6483 6484 /* reset sgl post count for next round of posting */ 6485 post_cnt = 0; 6486 } 6487 6488 /* free the sgls failed to post */ 
6489 lpfc_free_sgl_list(phba, &free_sgl_list); 6490 6491 /* push sgls posted to the available list */ 6492 if (!list_empty(&post_sgl_list)) { 6493 spin_lock_irq(&phba->hbalock); 6494 spin_lock(&phba->sli4_hba.sgl_list_lock); 6495 list_splice_init(&post_sgl_list, sgl_list); 6496 spin_unlock(&phba->sli4_hba.sgl_list_lock); 6497 spin_unlock_irq(&phba->hbalock); 6498 } else { 6499 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6500 "3161 Failure to post sgl to port.\n"); 6501 return -EIO; 6502 } 6503 6504 /* return the number of XRIs actually posted */ 6505 return total_cnt; 6506 } 6507 6508 void 6509 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 6510 { 6511 uint32_t len; 6512 6513 len = sizeof(struct lpfc_mbx_set_host_data) - 6514 sizeof(struct lpfc_sli4_cfg_mhdr); 6515 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6516 LPFC_MBOX_OPCODE_SET_HOST_DATA, len, 6517 LPFC_SLI4_MBX_EMBED); 6518 6519 mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION; 6520 mbox->u.mqe.un.set_host_data.param_len = 6521 LPFC_HOST_OS_DRIVER_VERSION_SIZE; 6522 snprintf(mbox->u.mqe.un.set_host_data.data, 6523 LPFC_HOST_OS_DRIVER_VERSION_SIZE, 6524 "Linux %s v"LPFC_DRIVER_VERSION, 6525 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC"); 6526 } 6527 6528 int 6529 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, 6530 struct lpfc_queue *drq, int count, int idx) 6531 { 6532 int rc, i; 6533 struct lpfc_rqe hrqe; 6534 struct lpfc_rqe drqe; 6535 struct lpfc_rqb *rqbp; 6536 struct rqb_dmabuf *rqb_buffer; 6537 LIST_HEAD(rqb_buf_list); 6538 6539 rqbp = hrq->rqbp; 6540 for (i = 0; i < count; i++) { 6541 /* IF RQ is already full, don't bother */ 6542 if (rqbp->buffer_count + i >= rqbp->entry_count - 1) 6543 break; 6544 rqb_buffer = rqbp->rqb_alloc_buffer(phba); 6545 if (!rqb_buffer) 6546 break; 6547 rqb_buffer->hrq = hrq; 6548 rqb_buffer->drq = drq; 6549 rqb_buffer->idx = idx; 6550 list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list); 6551 } 6552 while (!list_empty(&rqb_buf_list)) { 6553 list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf, 6554 hbuf.list); 6555 6556 hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys); 6557 hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys); 6558 drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys); 6559 drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys); 6560 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe); 6561 if (rc < 0) { 6562 rqbp->rqb_free_buffer(phba, rqb_buffer); 6563 } else { 6564 list_add_tail(&rqb_buffer->hbuf.list, 6565 &rqbp->rqb_buffer_list); 6566 rqbp->buffer_count++; 6567 } 6568 } 6569 return 1; 6570 } 6571 6572 /** 6573 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function 6574 * @phba: Pointer to HBA context object. 6575 * 6576 * This function is the main SLI4 device initialization PCI function. This 6577 * function is called by the HBA initialization code, HBA reset code and 6578 * HBA error attention handler code. Caller is not required to hold any 6579 * locks. 
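 *
 * Returns 0 when the port has been brought up successfully. On failure the
 * error paths unwind whatever queues and resources this routine had already
 * set up, and a negative errno (or the nonzero status of the failing mailbox
 * command) is returned.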
6580 **/ 6581 int 6582 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6583 { 6584 int rc, i, cnt; 6585 LPFC_MBOXQ_t *mboxq; 6586 struct lpfc_mqe *mqe; 6587 uint8_t *vpd; 6588 uint32_t vpd_size; 6589 uint32_t ftr_rsp = 0; 6590 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 6591 struct lpfc_vport *vport = phba->pport; 6592 struct lpfc_dmabuf *mp; 6593 struct lpfc_rqb *rqbp; 6594 6595 /* Perform a PCI function reset to start from clean */ 6596 rc = lpfc_pci_function_reset(phba); 6597 if (unlikely(rc)) 6598 return -ENODEV; 6599 6600 /* Check the HBA Host Status Register for readyness */ 6601 rc = lpfc_sli4_post_status_check(phba); 6602 if (unlikely(rc)) 6603 return -ENODEV; 6604 else { 6605 spin_lock_irq(&phba->hbalock); 6606 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 6607 spin_unlock_irq(&phba->hbalock); 6608 } 6609 6610 /* 6611 * Allocate a single mailbox container for initializing the 6612 * port. 6613 */ 6614 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6615 if (!mboxq) 6616 return -ENOMEM; 6617 6618 /* Issue READ_REV to collect vpd and FW information. */ 6619 vpd_size = SLI4_PAGE_SIZE; 6620 vpd = kzalloc(vpd_size, GFP_KERNEL); 6621 if (!vpd) { 6622 rc = -ENOMEM; 6623 goto out_free_mbox; 6624 } 6625 6626 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 6627 if (unlikely(rc)) { 6628 kfree(vpd); 6629 goto out_free_mbox; 6630 } 6631 6632 mqe = &mboxq->u.mqe; 6633 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6634 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) { 6635 phba->hba_flag |= HBA_FCOE_MODE; 6636 phba->fcp_embed_io = 0; /* SLI4 FC support only */ 6637 } else { 6638 phba->hba_flag &= ~HBA_FCOE_MODE; 6639 } 6640 6641 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6642 LPFC_DCBX_CEE_MODE) 6643 phba->hba_flag |= HBA_FIP_SUPPORT; 6644 else 6645 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6646 6647 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 6648 6649 if (phba->sli_rev != LPFC_SLI_REV4) { 6650 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6651 "0376 READ_REV Error. SLI Level %d " 6652 "FCoE enabled %d\n", 6653 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 6654 rc = -EIO; 6655 kfree(vpd); 6656 goto out_free_mbox; 6657 } 6658 6659 /* 6660 * Continue initialization with default values even if driver failed 6661 * to read FCoE param config regions, only read parameters if the 6662 * board is FCoE 6663 */ 6664 if (phba->hba_flag & HBA_FCOE_MODE && 6665 lpfc_sli4_read_fcoe_params(phba)) 6666 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 6667 "2570 Failed to read FCoE parameters\n"); 6668 6669 /* 6670 * Retrieve sli4 device physical port name, failure of doing it 6671 * is considered as non-fatal. 6672 */ 6673 rc = lpfc_sli4_retrieve_pport_name(phba); 6674 if (!rc) 6675 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6676 "3080 Successful retrieving SLI4 device " 6677 "physical port name: %s.\n", phba->Port); 6678 6679 /* 6680 * Evaluate the read rev and vpd data. Populate the driver 6681 * state with the results. If this routine fails, the failure 6682 * is not fatal as the driver will use generic values. 6683 */ 6684 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6685 if (unlikely(!rc)) { 6686 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6687 "0377 Error %d parsing vpd. 
" 6688 "Using defaults.\n", rc); 6689 rc = 0; 6690 } 6691 kfree(vpd); 6692 6693 /* Save information as VPD data */ 6694 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6695 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6696 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6697 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6698 &mqe->un.read_rev); 6699 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6700 &mqe->un.read_rev); 6701 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6702 &mqe->un.read_rev); 6703 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6704 &mqe->un.read_rev); 6705 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6706 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6707 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6708 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6709 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6710 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6711 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6712 "(%d):0380 READ_REV Status x%x " 6713 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6714 mboxq->vport ? mboxq->vport->vpi : 0, 6715 bf_get(lpfc_mqe_status, mqe), 6716 phba->vpd.rev.opFwName, 6717 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6718 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6719 6720 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 6721 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 6722 if (phba->pport->cfg_lun_queue_depth > rc) { 6723 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6724 "3362 LUN queue depth changed from %d to %d\n", 6725 phba->pport->cfg_lun_queue_depth, rc); 6726 phba->pport->cfg_lun_queue_depth = rc; 6727 } 6728 6729 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 6730 LPFC_SLI_INTF_IF_TYPE_0) { 6731 lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY); 6732 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6733 if (rc == MBX_SUCCESS) { 6734 phba->hba_flag |= HBA_RECOVERABLE_UE; 6735 /* Set 1Sec interval to detect UE */ 6736 phba->eratt_poll_interval = 1; 6737 phba->sli4_hba.ue_to_sr = bf_get( 6738 lpfc_mbx_set_feature_UESR, 6739 &mboxq->u.mqe.un.set_feature); 6740 phba->sli4_hba.ue_to_rp = bf_get( 6741 lpfc_mbx_set_feature_UERP, 6742 &mboxq->u.mqe.un.set_feature); 6743 } 6744 } 6745 6746 if (phba->cfg_enable_mds_diags && phba->mds_diags_support) { 6747 /* Enable MDS Diagnostics only if the SLI Port supports it */ 6748 lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS); 6749 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6750 if (rc != MBX_SUCCESS) 6751 phba->mds_diags_support = 0; 6752 } 6753 6754 /* 6755 * Discover the port's supported feature set and match it against the 6756 * hosts requests. 6757 */ 6758 lpfc_request_features(phba, mboxq); 6759 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6760 if (unlikely(rc)) { 6761 rc = -EIO; 6762 goto out_free_mbox; 6763 } 6764 6765 /* 6766 * The port must support FCP initiator mode as this is the 6767 * only mode running in the host. 
6768 */ 6769 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6770 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6771 "0378 No support for fcpi mode.\n"); 6772 ftr_rsp++; 6773 } 6774 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6775 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6776 else 6777 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6778 /* 6779 * If the port cannot support the host's requested features 6780 * then turn off the global config parameters to disable the 6781 * feature in the driver. This is not a fatal error. 6782 */ 6783 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6784 if (phba->cfg_enable_bg) { 6785 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6786 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6787 else 6788 ftr_rsp++; 6789 } 6790 6791 if (phba->max_vpi && phba->cfg_enable_npiv && 6792 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6793 ftr_rsp++; 6794 6795 if (ftr_rsp) { 6796 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6797 "0379 Feature Mismatch Data: x%08x %08x " 6798 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6799 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6800 phba->cfg_enable_npiv, phba->max_vpi); 6801 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6802 phba->cfg_enable_bg = 0; 6803 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6804 phba->cfg_enable_npiv = 0; 6805 } 6806 6807 /* These SLI3 features are assumed in SLI4 */ 6808 spin_lock_irq(&phba->hbalock); 6809 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6810 spin_unlock_irq(&phba->hbalock); 6811 6812 /* 6813 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6814 * calls depends on these resources to complete port setup. 6815 */ 6816 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6817 if (rc) { 6818 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6819 "2920 Failed to alloc Resource IDs " 6820 "rc = x%x\n", rc); 6821 goto out_free_mbox; 6822 } 6823 6824 lpfc_set_host_data(phba, mboxq); 6825 6826 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6827 if (rc) { 6828 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6829 "2134 Failed to set host os driver version %x", 6830 rc); 6831 } 6832 6833 /* Read the port's service parameters. */ 6834 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6835 if (rc) { 6836 phba->link_state = LPFC_HBA_ERROR; 6837 rc = -ENOMEM; 6838 goto out_free_mbox; 6839 } 6840 6841 mboxq->vport = vport; 6842 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6843 mp = (struct lpfc_dmabuf *) mboxq->context1; 6844 if (rc == MBX_SUCCESS) { 6845 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6846 rc = 0; 6847 } 6848 6849 /* 6850 * This memory was allocated by the lpfc_read_sparam routine. Release 6851 * it to the mbuf pool. 6852 */ 6853 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6854 kfree(mp); 6855 mboxq->context1 = NULL; 6856 if (unlikely(rc)) { 6857 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6858 "0382 READ_SPARAM command failed " 6859 "status %d, mbxStatus x%x\n", 6860 rc, bf_get(lpfc_mqe_status, mqe)); 6861 phba->link_state = LPFC_HBA_ERROR; 6862 rc = -EIO; 6863 goto out_free_mbox; 6864 } 6865 6866 lpfc_update_vport_wwn(vport); 6867 6868 /* Update the fc_host data structures with new wwn. 
*/ 6869 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6870 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6871 6872 /* Create all the SLI4 queues */ 6873 rc = lpfc_sli4_queue_create(phba); 6874 if (rc) { 6875 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6876 "3089 Failed to allocate queues\n"); 6877 rc = -ENODEV; 6878 goto out_free_mbox; 6879 } 6880 /* Set up all the queues to the device */ 6881 rc = lpfc_sli4_queue_setup(phba); 6882 if (unlikely(rc)) { 6883 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6884 "0381 Error %d during queue setup.\n ", rc); 6885 goto out_stop_timers; 6886 } 6887 /* Initialize the driver internal SLI layer lists. */ 6888 lpfc_sli4_setup(phba); 6889 lpfc_sli4_queue_init(phba); 6890 6891 /* update host els xri-sgl sizes and mappings */ 6892 rc = lpfc_sli4_els_sgl_update(phba); 6893 if (unlikely(rc)) { 6894 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6895 "1400 Failed to update xri-sgl size and " 6896 "mapping: %d\n", rc); 6897 goto out_destroy_queue; 6898 } 6899 6900 /* register the els sgl pool to the port */ 6901 rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list, 6902 phba->sli4_hba.els_xri_cnt); 6903 if (unlikely(rc < 0)) { 6904 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6905 "0582 Error %d during els sgl post " 6906 "operation\n", rc); 6907 rc = -ENODEV; 6908 goto out_destroy_queue; 6909 } 6910 phba->sli4_hba.els_xri_cnt = rc; 6911 6912 if (phba->nvmet_support) { 6913 /* update host nvmet xri-sgl sizes and mappings */ 6914 rc = lpfc_sli4_nvmet_sgl_update(phba); 6915 if (unlikely(rc)) { 6916 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6917 "6308 Failed to update nvmet-sgl size " 6918 "and mapping: %d\n", rc); 6919 goto out_destroy_queue; 6920 } 6921 6922 /* register the nvmet sgl pool to the port */ 6923 rc = lpfc_sli4_repost_sgl_list( 6924 phba, 6925 &phba->sli4_hba.lpfc_nvmet_sgl_list, 6926 phba->sli4_hba.nvmet_xri_cnt); 6927 if (unlikely(rc < 0)) { 6928 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6929 "3117 Error %d during nvmet " 6930 "sgl post\n", rc); 6931 rc = -ENODEV; 6932 goto out_destroy_queue; 6933 } 6934 phba->sli4_hba.nvmet_xri_cnt = rc; 6935 6936 cnt = phba->cfg_iocb_cnt * 1024; 6937 /* We need 1 iocbq for every SGL, for IO processing */ 6938 cnt += phba->sli4_hba.nvmet_xri_cnt; 6939 } else { 6940 /* update host scsi xri-sgl sizes and mappings */ 6941 rc = lpfc_sli4_scsi_sgl_update(phba); 6942 if (unlikely(rc)) { 6943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6944 "6309 Failed to update scsi-sgl size " 6945 "and mapping: %d\n", rc); 6946 goto out_destroy_queue; 6947 } 6948 6949 /* update host nvme xri-sgl sizes and mappings */ 6950 rc = lpfc_sli4_nvme_sgl_update(phba); 6951 if (unlikely(rc)) { 6952 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6953 "6082 Failed to update nvme-sgl size " 6954 "and mapping: %d\n", rc); 6955 goto out_destroy_queue; 6956 } 6957 6958 cnt = phba->cfg_iocb_cnt * 1024; 6959 } 6960 6961 if (!phba->sli.iocbq_lookup) { 6962 /* Initialize and populate the iocb list per host */ 6963 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6964 "2821 initialize iocb list %d total %d\n", 6965 phba->cfg_iocb_cnt, cnt); 6966 rc = lpfc_init_iocb_list(phba, cnt); 6967 if (rc) { 6968 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6969 "1413 Failed to init iocb list.\n"); 6970 goto out_destroy_queue; 6971 } 6972 } 6973 6974 if (phba->nvmet_support) 6975 lpfc_nvmet_create_targetport(phba); 6976 6977 if (phba->nvmet_support && phba->cfg_nvmet_mrq) 
{ 6978 /* Post initial buffers to all RQs created */ 6979 for (i = 0; i < phba->cfg_nvmet_mrq; i++) { 6980 rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp; 6981 INIT_LIST_HEAD(&rqbp->rqb_buffer_list); 6982 rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc; 6983 rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free; 6984 rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT; 6985 rqbp->buffer_count = 0; 6986 6987 lpfc_post_rq_buffer( 6988 phba, phba->sli4_hba.nvmet_mrq_hdr[i], 6989 phba->sli4_hba.nvmet_mrq_data[i], 6990 LPFC_NVMET_RQE_DEF_COUNT, i); 6991 } 6992 } 6993 6994 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { 6995 /* register the allocated scsi sgl pool to the port */ 6996 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6997 if (unlikely(rc)) { 6998 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6999 "0383 Error %d during scsi sgl post " 7000 "operation\n", rc); 7001 /* Some Scsi buffers were moved to abort scsi list */ 7002 /* A pci function reset will repost them */ 7003 rc = -ENODEV; 7004 goto out_destroy_queue; 7005 } 7006 } 7007 7008 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && 7009 (phba->nvmet_support == 0)) { 7010 7011 /* register the allocated nvme sgl pool to the port */ 7012 rc = lpfc_repost_nvme_sgl_list(phba); 7013 if (unlikely(rc)) { 7014 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7015 "6116 Error %d during nvme sgl post " 7016 "operation\n", rc); 7017 /* Some NVME buffers were moved to abort nvme list */ 7018 /* A pci function reset will repost them */ 7019 rc = -ENODEV; 7020 goto out_destroy_queue; 7021 } 7022 } 7023 7024 /* Post the rpi header region to the device. */ 7025 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 7026 if (unlikely(rc)) { 7027 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7028 "0393 Error %d during rpi post operation\n", 7029 rc); 7030 rc = -ENODEV; 7031 goto out_destroy_queue; 7032 } 7033 lpfc_sli4_node_prep(phba); 7034 7035 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 7036 if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { 7037 /* 7038 * The FC Port needs to register FCFI (index 0) 7039 */ 7040 lpfc_reg_fcfi(phba, mboxq); 7041 mboxq->vport = phba->pport; 7042 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7043 if (rc != MBX_SUCCESS) 7044 goto out_unset_queue; 7045 rc = 0; 7046 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 7047 &mboxq->u.mqe.un.reg_fcfi); 7048 } else { 7049 /* We are a NVME Target mode with MRQ > 1 */ 7050 7051 /* First register the FCFI */ 7052 lpfc_reg_fcfi_mrq(phba, mboxq, 0); 7053 mboxq->vport = phba->pport; 7054 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7055 if (rc != MBX_SUCCESS) 7056 goto out_unset_queue; 7057 rc = 0; 7058 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi, 7059 &mboxq->u.mqe.un.reg_fcfi_mrq); 7060 7061 /* Next register the MRQs */ 7062 lpfc_reg_fcfi_mrq(phba, mboxq, 1); 7063 mboxq->vport = phba->pport; 7064 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7065 if (rc != MBX_SUCCESS) 7066 goto out_unset_queue; 7067 rc = 0; 7068 } 7069 /* Check if the port is configured to be disabled */ 7070 lpfc_sli_read_link_ste(phba); 7071 } 7072 7073 /* Arm the CQs and then EQs on device */ 7074 lpfc_sli4_arm_cqeq_intr(phba); 7075 7076 /* Indicate device interrupt mode */ 7077 phba->sli4_hba.intr_enable = 1; 7078 7079 /* Allow asynchronous mailbox command to go through */ 7080 spin_lock_irq(&phba->hbalock); 7081 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7082 spin_unlock_irq(&phba->hbalock); 7083 7084 /* Post receive buffers to the device */ 7085 lpfc_sli4_rb_setup(phba); 7086 7087 /* Reset HBA FCF 
states after HBA reset */ 7088 phba->fcf.fcf_flag = 0; 7089 phba->fcf.current_rec.flag = 0; 7090 7091 /* Start the ELS watchdog timer */ 7092 mod_timer(&vport->els_tmofunc, 7093 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 7094 7095 /* Start heart beat timer */ 7096 mod_timer(&phba->hb_tmofunc, 7097 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 7098 phba->hb_outstanding = 0; 7099 phba->last_completion_time = jiffies; 7100 7101 /* Start error attention (ERATT) polling timer */ 7102 mod_timer(&phba->eratt_poll, 7103 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval)); 7104 7105 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 7106 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 7107 rc = pci_enable_pcie_error_reporting(phba->pcidev); 7108 if (!rc) { 7109 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7110 "2829 This device supports " 7111 "Advanced Error Reporting (AER)\n"); 7112 spin_lock_irq(&phba->hbalock); 7113 phba->hba_flag |= HBA_AER_ENABLED; 7114 spin_unlock_irq(&phba->hbalock); 7115 } else { 7116 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 7117 "2830 This device does not support " 7118 "Advanced Error Reporting (AER)\n"); 7119 phba->cfg_aer_support = 0; 7120 } 7121 rc = 0; 7122 } 7123 7124 /* 7125 * The port is ready, set the host's link state to LINK_DOWN 7126 * in preparation for link interrupts. 7127 */ 7128 spin_lock_irq(&phba->hbalock); 7129 phba->link_state = LPFC_LINK_DOWN; 7130 spin_unlock_irq(&phba->hbalock); 7131 if (!(phba->hba_flag & HBA_FCOE_MODE) && 7132 (phba->hba_flag & LINK_DISABLED)) { 7133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7134 "3103 Adapter Link is disabled.\n"); 7135 lpfc_down_link(phba, mboxq); 7136 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7137 if (rc != MBX_SUCCESS) { 7138 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 7139 "3104 Adapter failed to issue " 7140 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 7141 goto out_unset_queue; 7142 } 7143 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 7144 /* don't perform init_link on SLI4 FC port loopback test */ 7145 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 7146 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 7147 if (rc) 7148 goto out_unset_queue; 7149 } 7150 } 7151 mempool_free(mboxq, phba->mbox_mem_pool); 7152 return rc; 7153 out_unset_queue: 7154 /* Unset all the queues set up in this routine when error out */ 7155 lpfc_sli4_queue_unset(phba); 7156 out_destroy_queue: 7157 lpfc_free_iocb_list(phba); 7158 lpfc_sli4_queue_destroy(phba); 7159 out_stop_timers: 7160 lpfc_stop_hba_timers(phba); 7161 out_free_mbox: 7162 mempool_free(mboxq, phba->mbox_mem_pool); 7163 return rc; 7164 } 7165 7166 /** 7167 * lpfc_mbox_timeout - Timeout call back function for mbox timer 7168 * @ptr: context object - pointer to hba structure. 7169 * 7170 * This is the callback function for mailbox timer. The mailbox 7171 * timer is armed when a new mailbox command is issued and the timer 7172 * is deleted when the mailbox complete. The function is called by 7173 * the kernel timer code when a mailbox does not complete within 7174 * expected time. This function wakes up the worker thread to 7175 * process the mailbox timeout and returns. All the processing is 7176 * done by the worker thread function lpfc_mbox_timeout_handler. 
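 *
 * A minimal sketch of how this callback is typically armed (assuming the
 * timer was registered elsewhere with timer_setup(&psli->mbox_tmo,
 * lpfc_mbox_timeout, 0), which the from_timer() call below implies):
 *
 *   timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *   mod_timer(&psli->mbox_tmo, jiffies + timeout);
 *
 * This mirrors the arming done in the mailbox issue paths later in this
 * file; the callback itself only posts WORKER_MBOX_TMO and wakes the
 * worker thread.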
7177 **/ 7178 void 7179 lpfc_mbox_timeout(struct timer_list *t) 7180 { 7181 struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); 7182 unsigned long iflag; 7183 uint32_t tmo_posted; 7184 7185 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 7186 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 7187 if (!tmo_posted) 7188 phba->pport->work_port_events |= WORKER_MBOX_TMO; 7189 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 7190 7191 if (!tmo_posted) 7192 lpfc_worker_wake_up(phba); 7193 return; 7194 } 7195 7196 /** 7197 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 7198 * are pending 7199 * @phba: Pointer to HBA context object. 7200 * 7201 * This function checks if any mailbox completions are present on the mailbox 7202 * completion queue. 7203 **/ 7204 static bool 7205 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 7206 { 7207 7208 uint32_t idx; 7209 struct lpfc_queue *mcq; 7210 struct lpfc_mcqe *mcqe; 7211 bool pending_completions = false; 7212 7213 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7214 return false; 7215 7216 /* Check for completions on mailbox completion queue */ 7217 7218 mcq = phba->sli4_hba.mbx_cq; 7219 idx = mcq->hba_index; 7220 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { 7221 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 7222 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 7223 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 7224 pending_completions = true; 7225 break; 7226 } 7227 idx = (idx + 1) % mcq->entry_count; 7228 if (mcq->hba_index == idx) 7229 break; 7230 } 7231 return pending_completions; 7232 7233 } 7234 7235 /** 7236 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 7237 * that were missed. 7238 * @phba: Pointer to HBA context object. 7239 * 7240 * For sli4, it is possible to miss an interrupt. As such, mbox completions 7241 * may be missed, causing erroneous mailbox timeouts to occur. This function 7242 * checks to see if mbox completions are on the mailbox completion queue 7243 * and will process all the completions associated with the eq for the 7244 * mailbox completion queue.
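 *
 * A sketch of the calling pattern (the mailbox timeout handler below uses
 * exactly this): if completions were merely missed, they are processed
 * here and the timeout can be treated as spurious:
 *
 *   if (lpfc_sli4_process_missed_mbox_completions(phba))
 *           return;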
7245 **/ 7246 bool 7247 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 7248 { 7249 7250 uint32_t eqidx; 7251 struct lpfc_queue *fpeq = NULL; 7252 struct lpfc_eqe *eqe; 7253 bool mbox_pending; 7254 7255 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 7256 return false; 7257 7258 /* Find the eq associated with the mcq */ 7259 7260 if (phba->sli4_hba.hba_eq) 7261 for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++) 7262 if (phba->sli4_hba.hba_eq[eqidx]->queue_id == 7263 phba->sli4_hba.mbx_cq->assoc_qid) { 7264 fpeq = phba->sli4_hba.hba_eq[eqidx]; 7265 break; 7266 } 7267 if (!fpeq) 7268 return false; 7269 7270 /* Turn off interrupts from this EQ */ 7271 7272 lpfc_sli4_eq_clr_intr(fpeq); 7273 7274 /* Check to see if a mbox completion is pending */ 7275 7276 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 7277 7278 /* 7279 * If a mbox completion is pending, process all the events on EQ 7280 * associated with the mbox completion queue (this could include 7281 * mailbox commands, async events, els commands, receive queue data 7282 * and fcp commands) 7283 */ 7284 7285 if (mbox_pending) 7286 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 7287 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 7288 fpeq->EQ_processed++; 7289 } 7290 7291 /* Always clear and re-arm the EQ */ 7292 7293 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 7294 7295 return mbox_pending; 7296 7297 } 7298 7299 /** 7300 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 7301 * @phba: Pointer to HBA context object. 7302 * 7303 * This function is called from worker thread when a mailbox command times out. 7304 * The caller is not required to hold any locks. This function will reset the 7305 * HBA and recover all the pending commands. 7306 **/ 7307 void 7308 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 7309 { 7310 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 7311 MAILBOX_t *mb = NULL; 7312 7313 struct lpfc_sli *psli = &phba->sli; 7314 7315 /* If the mailbox completed, process the completion and return */ 7316 if (lpfc_sli4_process_missed_mbox_completions(phba)) 7317 return; 7318 7319 if (pmbox != NULL) 7320 mb = &pmbox->u.mb; 7321 /* Check the pmbox pointer first. There is a race condition 7322 * between the mbox timeout handler getting executed in the 7323 * worklist and the mailbox actually completing. When this 7324 * race condition occurs, the mbox_active will be NULL. 7325 */ 7326 spin_lock_irq(&phba->hbalock); 7327 if (pmbox == NULL) { 7328 lpfc_printf_log(phba, KERN_WARNING, 7329 LOG_MBOX | LOG_SLI, 7330 "0353 Active Mailbox cleared - mailbox timeout " 7331 "exiting\n"); 7332 spin_unlock_irq(&phba->hbalock); 7333 return; 7334 } 7335 7336 /* Mbox cmd <mbxCommand> timeout */ 7337 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7338 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 7339 mb->mbxCommand, 7340 phba->pport->port_state, 7341 phba->sli.sli_flag, 7342 phba->sli.mbox_active); 7343 spin_unlock_irq(&phba->hbalock); 7344 7345 /* Setting state unknown so lpfc_sli_abort_iocb_ring 7346 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 7347 * it to fail all outstanding SCSI IO. 
7348 */ 7349 spin_lock_irq(&phba->pport->work_port_lock); 7350 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 7351 spin_unlock_irq(&phba->pport->work_port_lock); 7352 spin_lock_irq(&phba->hbalock); 7353 phba->link_state = LPFC_LINK_UNKNOWN; 7354 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 7355 spin_unlock_irq(&phba->hbalock); 7356 7357 lpfc_sli_abort_fcp_rings(phba); 7358 7359 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7360 "0345 Resetting board due to mailbox timeout\n"); 7361 7362 /* Reset the HBA device */ 7363 lpfc_reset_hba(phba); 7364 } 7365 7366 /** 7367 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 7368 * @phba: Pointer to HBA context object. 7369 * @pmbox: Pointer to mailbox object. 7370 * @flag: Flag indicating how the mailbox need to be processed. 7371 * 7372 * This function is called by discovery code and HBA management code 7373 * to submit a mailbox command to firmware with SLI-3 interface spec. This 7374 * function gets the hbalock to protect the data structures. 7375 * The mailbox command can be submitted in polling mode, in which case 7376 * this function will wait in a polling loop for the completion of the 7377 * mailbox. 7378 * If the mailbox is submitted in no_wait mode (not polling) the 7379 * function will submit the command and returns immediately without waiting 7380 * for the mailbox completion. The no_wait is supported only when HBA 7381 * is in SLI2/SLI3 mode - interrupts are enabled. 7382 * The SLI interface allows only one mailbox pending at a time. If the 7383 * mailbox is issued in polling mode and there is already a mailbox 7384 * pending, then the function will return an error. If the mailbox is issued 7385 * in NO_WAIT mode and there is a mailbox pending already, the function 7386 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 7387 * The sli layer owns the mailbox object until the completion of mailbox 7388 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 7389 * return codes the caller owns the mailbox command after the return of 7390 * the function. 7391 **/ 7392 static int 7393 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 7394 uint32_t flag) 7395 { 7396 MAILBOX_t *mbx; 7397 struct lpfc_sli *psli = &phba->sli; 7398 uint32_t status, evtctr; 7399 uint32_t ha_copy, hc_copy; 7400 int i; 7401 unsigned long timeout; 7402 unsigned long drvr_flag = 0; 7403 uint32_t word0, ldata; 7404 void __iomem *to_slim; 7405 int processing_queue = 0; 7406 7407 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7408 if (!pmbox) { 7409 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7410 /* processing mbox queue from intr_handler */ 7411 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7412 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7413 return MBX_SUCCESS; 7414 } 7415 processing_queue = 1; 7416 pmbox = lpfc_mbox_get(phba); 7417 if (!pmbox) { 7418 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7419 return MBX_SUCCESS; 7420 } 7421 } 7422 7423 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 7424 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 7425 if(!pmbox->vport) { 7426 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7427 lpfc_printf_log(phba, KERN_ERR, 7428 LOG_MBOX | LOG_VPORT, 7429 "1806 Mbox x%x failed. No vport\n", 7430 pmbox->u.mb.mbxCommand); 7431 dump_stack(); 7432 goto out_not_finished; 7433 } 7434 } 7435 7436 /* If the PCI channel is in offline state, do not post mbox. 
*/ 7437 if (unlikely(pci_channel_offline(phba->pcidev))) { 7438 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7439 goto out_not_finished; 7440 } 7441 7442 /* If HBA has a deferred error attention, fail the iocb. */ 7443 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 7444 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7445 goto out_not_finished; 7446 } 7447 7448 psli = &phba->sli; 7449 7450 mbx = &pmbox->u.mb; 7451 status = MBX_SUCCESS; 7452 7453 if (phba->link_state == LPFC_HBA_ERROR) { 7454 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7455 7456 /* Mbox command <mbxCommand> cannot issue */ 7457 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7458 "(%d):0311 Mailbox command x%x cannot " 7459 "issue Data: x%x x%x\n", 7460 pmbox->vport ? pmbox->vport->vpi : 0, 7461 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7462 goto out_not_finished; 7463 } 7464 7465 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 7466 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 7467 !(hc_copy & HC_MBINT_ENA)) { 7468 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7469 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7470 "(%d):2528 Mailbox command x%x cannot " 7471 "issue Data: x%x x%x\n", 7472 pmbox->vport ? pmbox->vport->vpi : 0, 7473 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 7474 goto out_not_finished; 7475 } 7476 } 7477 7478 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7479 /* Polling for a mbox command when another one is already active 7480 * is not allowed in SLI. Also, the driver must have established 7481 * SLI2 mode to queue and process multiple mbox commands. 7482 */ 7483 7484 if (flag & MBX_POLL) { 7485 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7486 7487 /* Mbox command <mbxCommand> cannot issue */ 7488 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7489 "(%d):2529 Mailbox command x%x " 7490 "cannot issue Data: x%x x%x\n", 7491 pmbox->vport ? pmbox->vport->vpi : 0, 7492 pmbox->u.mb.mbxCommand, 7493 psli->sli_flag, flag); 7494 goto out_not_finished; 7495 } 7496 7497 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 7498 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7499 /* Mbox command <mbxCommand> cannot issue */ 7500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7501 "(%d):2530 Mailbox command x%x " 7502 "cannot issue Data: x%x x%x\n", 7503 pmbox->vport ? pmbox->vport->vpi : 0, 7504 pmbox->u.mb.mbxCommand, 7505 psli->sli_flag, flag); 7506 goto out_not_finished; 7507 } 7508 7509 /* Another mailbox command is still being processed, queue this 7510 * command to be processed later. 7511 */ 7512 lpfc_mbox_put(phba, pmbox); 7513 7514 /* Mbox cmd issue - BUSY */ 7515 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7516 "(%d):0308 Mbox cmd issue - BUSY Data: " 7517 "x%x x%x x%x x%x\n", 7518 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 7519 mbx->mbxCommand, 7520 phba->pport ? 
phba->pport->port_state : 0xff, 7521 psli->sli_flag, flag); 7522 7523 psli->slistat.mbox_busy++; 7524 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7525 7526 if (pmbox->vport) { 7527 lpfc_debugfs_disc_trc(pmbox->vport, 7528 LPFC_DISC_TRC_MBOX_VPORT, 7529 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 7530 (uint32_t)mbx->mbxCommand, 7531 mbx->un.varWords[0], mbx->un.varWords[1]); 7532 } 7533 else { 7534 lpfc_debugfs_disc_trc(phba->pport, 7535 LPFC_DISC_TRC_MBOX, 7536 "MBOX Bsy: cmd:x%x mb:x%x x%x", 7537 (uint32_t)mbx->mbxCommand, 7538 mbx->un.varWords[0], mbx->un.varWords[1]); 7539 } 7540 7541 return MBX_BUSY; 7542 } 7543 7544 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7545 7546 /* If we are not polling, we MUST be in SLI2 mode */ 7547 if (flag != MBX_POLL) { 7548 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 7549 (mbx->mbxCommand != MBX_KILL_BOARD)) { 7550 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7551 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7552 /* Mbox command <mbxCommand> cannot issue */ 7553 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7554 "(%d):2531 Mailbox command x%x " 7555 "cannot issue Data: x%x x%x\n", 7556 pmbox->vport ? pmbox->vport->vpi : 0, 7557 pmbox->u.mb.mbxCommand, 7558 psli->sli_flag, flag); 7559 goto out_not_finished; 7560 } 7561 /* timeout active mbox command */ 7562 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7563 1000); 7564 mod_timer(&psli->mbox_tmo, jiffies + timeout); 7565 } 7566 7567 /* Mailbox cmd <cmd> issue */ 7568 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7569 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 7570 "x%x\n", 7571 pmbox->vport ? pmbox->vport->vpi : 0, 7572 mbx->mbxCommand, 7573 phba->pport ? phba->pport->port_state : 0xff, 7574 psli->sli_flag, flag); 7575 7576 if (mbx->mbxCommand != MBX_HEARTBEAT) { 7577 if (pmbox->vport) { 7578 lpfc_debugfs_disc_trc(pmbox->vport, 7579 LPFC_DISC_TRC_MBOX_VPORT, 7580 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7581 (uint32_t)mbx->mbxCommand, 7582 mbx->un.varWords[0], mbx->un.varWords[1]); 7583 } 7584 else { 7585 lpfc_debugfs_disc_trc(phba->pport, 7586 LPFC_DISC_TRC_MBOX, 7587 "MBOX Send: cmd:x%x mb:x%x x%x", 7588 (uint32_t)mbx->mbxCommand, 7589 mbx->un.varWords[0], mbx->un.varWords[1]); 7590 } 7591 } 7592 7593 psli->slistat.mbox_cmd++; 7594 evtctr = psli->slistat.mbox_event; 7595 7596 /* next set own bit for the adapter and copy over command word */ 7597 mbx->mbxOwner = OWN_CHIP; 7598 7599 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7600 /* Populate mbox extension offset word. */ 7601 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 7602 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7603 = (uint8_t *)phba->mbox_ext 7604 - (uint8_t *)phba->mbox; 7605 } 7606 7607 /* Copy the mailbox extension data */ 7608 if (pmbox->in_ext_byte_len && pmbox->context2) { 7609 lpfc_sli_pcimem_bcopy(pmbox->context2, 7610 (uint8_t *)phba->mbox_ext, 7611 pmbox->in_ext_byte_len); 7612 } 7613 /* Copy command data to host SLIM area */ 7614 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7615 } else { 7616 /* Populate mbox extension offset word. 
*/ 7617 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 7618 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7619 = MAILBOX_HBA_EXT_OFFSET; 7620 7621 /* Copy the mailbox extension data */ 7622 if (pmbox->in_ext_byte_len && pmbox->context2) 7623 lpfc_memcpy_to_slim(phba->MBslimaddr + 7624 MAILBOX_HBA_EXT_OFFSET, 7625 pmbox->context2, pmbox->in_ext_byte_len); 7626 7627 if (mbx->mbxCommand == MBX_CONFIG_PORT) 7628 /* copy command data into host mbox for cmpl */ 7629 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, 7630 MAILBOX_CMD_SIZE); 7631 7632 /* First copy mbox command data to HBA SLIM, skip past first 7633 word */ 7634 to_slim = phba->MBslimaddr + sizeof (uint32_t); 7635 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 7636 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 7637 7638 /* Next copy over first word, with mbxOwner set */ 7639 ldata = *((uint32_t *)mbx); 7640 to_slim = phba->MBslimaddr; 7641 writel(ldata, to_slim); 7642 readl(to_slim); /* flush */ 7643 7644 if (mbx->mbxCommand == MBX_CONFIG_PORT) 7645 /* switch over to host mailbox */ 7646 psli->sli_flag |= LPFC_SLI_ACTIVE; 7647 } 7648 7649 wmb(); 7650 7651 switch (flag) { 7652 case MBX_NOWAIT: 7653 /* Set up reference to mailbox command */ 7654 psli->mbox_active = pmbox; 7655 /* Interrupt board to do it */ 7656 writel(CA_MBATT, phba->CAregaddr); 7657 readl(phba->CAregaddr); /* flush */ 7658 /* Don't wait for it to finish, just return */ 7659 break; 7660 7661 case MBX_POLL: 7662 /* Set up null reference to mailbox command */ 7663 psli->mbox_active = NULL; 7664 /* Interrupt board to do it */ 7665 writel(CA_MBATT, phba->CAregaddr); 7666 readl(phba->CAregaddr); /* flush */ 7667 7668 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7669 /* First read mbox status word */ 7670 word0 = *((uint32_t *)phba->mbox); 7671 word0 = le32_to_cpu(word0); 7672 } else { 7673 /* First read mbox status word */ 7674 if (lpfc_readl(phba->MBslimaddr, &word0)) { 7675 spin_unlock_irqrestore(&phba->hbalock, 7676 drvr_flag); 7677 goto out_not_finished; 7678 } 7679 } 7680 7681 /* Read the HBA Host Attention Register */ 7682 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7683 spin_unlock_irqrestore(&phba->hbalock, 7684 drvr_flag); 7685 goto out_not_finished; 7686 } 7687 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7688 1000) + jiffies; 7689 i = 0; 7690 /* Wait for command to complete */ 7691 while (((word0 & OWN_CHIP) == OWN_CHIP) || 7692 (!(ha_copy & HA_MBATT) && 7693 (phba->link_state > LPFC_WARM_START))) { 7694 if (time_after(jiffies, timeout)) { 7695 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7696 spin_unlock_irqrestore(&phba->hbalock, 7697 drvr_flag); 7698 goto out_not_finished; 7699 } 7700 7701 /* Check if we took a mbox interrupt while we were 7702 polling */ 7703 if (((word0 & OWN_CHIP) != OWN_CHIP) 7704 && (evtctr != psli->slistat.mbox_event)) 7705 break; 7706 7707 if (i++ > 10) { 7708 spin_unlock_irqrestore(&phba->hbalock, 7709 drvr_flag); 7710 msleep(1); 7711 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7712 } 7713 7714 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7715 /* First copy command data */ 7716 word0 = *((uint32_t *)phba->mbox); 7717 word0 = le32_to_cpu(word0); 7718 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7719 MAILBOX_t *slimmb; 7720 uint32_t slimword0; 7721 /* Check real SLIM for any errors */ 7722 slimword0 = readl(phba->MBslimaddr); 7723 slimmb = (MAILBOX_t *) & slimword0; 7724 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 7725 && slimmb->mbxStatus) { 7726 psli->sli_flag &= 7727 ~LPFC_SLI_ACTIVE; 7728 word0 = slimword0; 7729 } 7730 } 7731 } else { 
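				/*
				 * Host-memory (SLI2) mailbox is not active, so poll the
				 * mailbox status word directly from the adapter SLIM
				 * instead of the host mailbox copy.
				 */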
7732 /* First copy command data */ 7733 word0 = readl(phba->MBslimaddr); 7734 } 7735 /* Read the HBA Host Attention Register */ 7736 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7737 spin_unlock_irqrestore(&phba->hbalock, 7738 drvr_flag); 7739 goto out_not_finished; 7740 } 7741 } 7742 7743 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7744 /* copy results back to user */ 7745 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, 7746 MAILBOX_CMD_SIZE); 7747 /* Copy the mailbox extension data */ 7748 if (pmbox->out_ext_byte_len && pmbox->context2) { 7749 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 7750 pmbox->context2, 7751 pmbox->out_ext_byte_len); 7752 } 7753 } else { 7754 /* First copy command data */ 7755 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 7756 MAILBOX_CMD_SIZE); 7757 /* Copy the mailbox extension data */ 7758 if (pmbox->out_ext_byte_len && pmbox->context2) { 7759 lpfc_memcpy_from_slim(pmbox->context2, 7760 phba->MBslimaddr + 7761 MAILBOX_HBA_EXT_OFFSET, 7762 pmbox->out_ext_byte_len); 7763 } 7764 } 7765 7766 writel(HA_MBATT, phba->HAregaddr); 7767 readl(phba->HAregaddr); /* flush */ 7768 7769 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7770 status = mbx->mbxStatus; 7771 } 7772 7773 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7774 return status; 7775 7776 out_not_finished: 7777 if (processing_queue) { 7778 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 7779 lpfc_mbox_cmpl_put(phba, pmbox); 7780 } 7781 return MBX_NOT_FINISHED; 7782 } 7783 7784 /** 7785 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 7786 * @phba: Pointer to HBA context object. 7787 * 7788 * The function blocks the posting of SLI4 asynchronous mailbox commands from 7789 * the driver internal pending mailbox queue. It will then try to wait out the 7790 * possible outstanding mailbox command before return. 7791 * 7792 * Returns: 7793 * 0 - the outstanding mailbox command completed; otherwise, the wait for 7794 * the outstanding mailbox command timed out. 7795 **/ 7796 static int 7797 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 7798 { 7799 struct lpfc_sli *psli = &phba->sli; 7800 int rc = 0; 7801 unsigned long timeout = 0; 7802 7803 /* Mark the asynchronous mailbox command posting as blocked */ 7804 spin_lock_irq(&phba->hbalock); 7805 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7806 /* Determine how long we might wait for the active mailbox 7807 * command to be gracefully completed by firmware. 7808 */ 7809 if (phba->sli.mbox_active) 7810 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 7811 phba->sli.mbox_active) * 7812 1000) + jiffies; 7813 spin_unlock_irq(&phba->hbalock); 7814 7815 /* Make sure the mailbox is really active */ 7816 if (timeout) 7817 lpfc_sli4_process_missed_mbox_completions(phba); 7818 7819 /* Wait for the outstnading mailbox command to complete */ 7820 while (phba->sli.mbox_active) { 7821 /* Check active mailbox complete status every 2ms */ 7822 msleep(2); 7823 if (time_after(jiffies, timeout)) { 7824 /* Timeout, marked the outstanding cmd not complete */ 7825 rc = 1; 7826 break; 7827 } 7828 } 7829 7830 /* Can not cleanly block async mailbox command, fails it */ 7831 if (rc) { 7832 spin_lock_irq(&phba->hbalock); 7833 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7834 spin_unlock_irq(&phba->hbalock); 7835 } 7836 return rc; 7837 } 7838 7839 /** 7840 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command 7841 * @phba: Pointer to HBA context object. 
7842 * 7843 * The function unblocks and resume posting of SLI4 asynchronous mailbox 7844 * commands from the driver internal pending mailbox queue. It makes sure 7845 * that there is no outstanding mailbox command before resuming posting 7846 * asynchronous mailbox commands. If, for any reason, there is outstanding 7847 * mailbox command, it will try to wait it out before resuming asynchronous 7848 * mailbox command posting. 7849 **/ 7850 static void 7851 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 7852 { 7853 struct lpfc_sli *psli = &phba->sli; 7854 7855 spin_lock_irq(&phba->hbalock); 7856 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7857 /* Asynchronous mailbox posting is not blocked, do nothing */ 7858 spin_unlock_irq(&phba->hbalock); 7859 return; 7860 } 7861 7862 /* Outstanding synchronous mailbox command is guaranteed to be done, 7863 * successful or timeout, after timing-out the outstanding mailbox 7864 * command shall always be removed, so just unblock posting async 7865 * mailbox command and resume 7866 */ 7867 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7868 spin_unlock_irq(&phba->hbalock); 7869 7870 /* wake up worker thread to post asynchronlous mailbox command */ 7871 lpfc_worker_wake_up(phba); 7872 } 7873 7874 /** 7875 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 7876 * @phba: Pointer to HBA context object. 7877 * @mboxq: Pointer to mailbox object. 7878 * 7879 * The function waits for the bootstrap mailbox register ready bit from 7880 * port for twice the regular mailbox command timeout value. 7881 * 7882 * 0 - no timeout on waiting for bootstrap mailbox register ready. 7883 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 7884 **/ 7885 static int 7886 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7887 { 7888 uint32_t db_ready; 7889 unsigned long timeout; 7890 struct lpfc_register bmbx_reg; 7891 7892 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7893 * 1000) + jiffies; 7894 7895 do { 7896 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7897 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 7898 if (!db_ready) 7899 msleep(2); 7900 7901 if (time_after(jiffies, timeout)) 7902 return MBXERR_ERROR; 7903 } while (!db_ready); 7904 7905 return 0; 7906 } 7907 7908 /** 7909 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 7910 * @phba: Pointer to HBA context object. 7911 * @mboxq: Pointer to mailbox object. 7912 * 7913 * The function posts a mailbox to the port. The mailbox is expected 7914 * to be comletely filled in and ready for the port to operate on it. 7915 * This routine executes a synchronous completion operation on the 7916 * mailbox by polling for its completion. 7917 * 7918 * The caller must not be holding any locks when calling this routine. 7919 * 7920 * Returns: 7921 * MBX_SUCCESS - mailbox posted successfully 7922 * Any of the MBX error values. 7923 **/ 7924 static int 7925 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7926 { 7927 int rc = MBX_SUCCESS; 7928 unsigned long iflag; 7929 uint32_t mcqe_status; 7930 uint32_t mbx_cmnd; 7931 struct lpfc_sli *psli = &phba->sli; 7932 struct lpfc_mqe *mb = &mboxq->u.mqe; 7933 struct lpfc_bmbx_create *mbox_rgn; 7934 struct dma_address *dma_address; 7935 7936 /* 7937 * Only one mailbox can be active to the bootstrap mailbox region 7938 * at a time and there is no queueing provided. 
7939 */ 7940 spin_lock_irqsave(&phba->hbalock, iflag); 7941 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7942 spin_unlock_irqrestore(&phba->hbalock, iflag); 7943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7944 "(%d):2532 Mailbox command x%x (x%x/x%x) " 7945 "cannot issue Data: x%x x%x\n", 7946 mboxq->vport ? mboxq->vport->vpi : 0, 7947 mboxq->u.mb.mbxCommand, 7948 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7949 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7950 psli->sli_flag, MBX_POLL); 7951 return MBXERR_ERROR; 7952 } 7953 /* The server grabs the token and owns it until release */ 7954 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7955 phba->sli.mbox_active = mboxq; 7956 spin_unlock_irqrestore(&phba->hbalock, iflag); 7957 7958 /* wait for bootstrap mbox register for readyness */ 7959 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7960 if (rc) 7961 goto exit; 7962 7963 /* 7964 * Initialize the bootstrap memory region to avoid stale data areas 7965 * in the mailbox post. Then copy the caller's mailbox contents to 7966 * the bmbx mailbox region. 7967 */ 7968 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 7969 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 7970 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 7971 sizeof(struct lpfc_mqe)); 7972 7973 /* Post the high mailbox dma address to the port and wait for ready. */ 7974 dma_address = &phba->sli4_hba.bmbx.dma_address; 7975 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7976 7977 /* wait for bootstrap mbox register for hi-address write done */ 7978 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7979 if (rc) 7980 goto exit; 7981 7982 /* Post the low mailbox dma address to the port. */ 7983 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7984 7985 /* wait for bootstrap mbox register for low address write done */ 7986 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7987 if (rc) 7988 goto exit; 7989 7990 /* 7991 * Read the CQ to ensure the mailbox has completed. 7992 * If so, update the mailbox status so that the upper layers 7993 * can complete the request normally. 7994 */ 7995 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 7996 sizeof(struct lpfc_mqe)); 7997 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 7998 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 7999 sizeof(struct lpfc_mcqe)); 8000 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 8001 /* 8002 * When the CQE status indicates a failure and the mailbox status 8003 * indicates success then copy the CQE status into the mailbox status 8004 * (and prefix it with x4000). 8005 */ 8006 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 8007 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 8008 bf_set(lpfc_mqe_status, mb, 8009 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 8010 rc = MBXERR_ERROR; 8011 } else 8012 lpfc_sli4_swap_str(phba, mboxq); 8013 8014 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8015 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 8016 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 8017 " x%x x%x CQ: x%x x%x x%x x%x\n", 8018 mboxq->vport ? 
mboxq->vport->vpi : 0, mbx_cmnd, 8019 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8020 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8021 bf_get(lpfc_mqe_status, mb), 8022 mb->un.mb_words[0], mb->un.mb_words[1], 8023 mb->un.mb_words[2], mb->un.mb_words[3], 8024 mb->un.mb_words[4], mb->un.mb_words[5], 8025 mb->un.mb_words[6], mb->un.mb_words[7], 8026 mb->un.mb_words[8], mb->un.mb_words[9], 8027 mb->un.mb_words[10], mb->un.mb_words[11], 8028 mb->un.mb_words[12], mboxq->mcqe.word0, 8029 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 8030 mboxq->mcqe.trailer); 8031 exit: 8032 /* We are holding the token, no needed for lock when release */ 8033 spin_lock_irqsave(&phba->hbalock, iflag); 8034 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8035 phba->sli.mbox_active = NULL; 8036 spin_unlock_irqrestore(&phba->hbalock, iflag); 8037 return rc; 8038 } 8039 8040 /** 8041 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 8042 * @phba: Pointer to HBA context object. 8043 * @pmbox: Pointer to mailbox object. 8044 * @flag: Flag indicating how the mailbox need to be processed. 8045 * 8046 * This function is called by discovery code and HBA management code to submit 8047 * a mailbox command to firmware with SLI-4 interface spec. 8048 * 8049 * Return codes the caller owns the mailbox command after the return of the 8050 * function. 8051 **/ 8052 static int 8053 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 8054 uint32_t flag) 8055 { 8056 struct lpfc_sli *psli = &phba->sli; 8057 unsigned long iflags; 8058 int rc; 8059 8060 /* dump from issue mailbox command if setup */ 8061 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 8062 8063 rc = lpfc_mbox_dev_check(phba); 8064 if (unlikely(rc)) { 8065 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8066 "(%d):2544 Mailbox command x%x (x%x/x%x) " 8067 "cannot issue Data: x%x x%x\n", 8068 mboxq->vport ? mboxq->vport->vpi : 0, 8069 mboxq->u.mb.mbxCommand, 8070 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8071 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8072 psli->sli_flag, flag); 8073 goto out_not_finished; 8074 } 8075 8076 /* Detect polling mode and jump to a handler */ 8077 if (!phba->sli4_hba.intr_enable) { 8078 if (flag == MBX_POLL) 8079 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8080 else 8081 rc = -EIO; 8082 if (rc != MBX_SUCCESS) 8083 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8084 "(%d):2541 Mailbox command x%x " 8085 "(x%x/x%x) failure: " 8086 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8087 "Data: x%x x%x\n,", 8088 mboxq->vport ? mboxq->vport->vpi : 0, 8089 mboxq->u.mb.mbxCommand, 8090 lpfc_sli_config_mbox_subsys_get(phba, 8091 mboxq), 8092 lpfc_sli_config_mbox_opcode_get(phba, 8093 mboxq), 8094 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8095 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8096 bf_get(lpfc_mcqe_ext_status, 8097 &mboxq->mcqe), 8098 psli->sli_flag, flag); 8099 return rc; 8100 } else if (flag == MBX_POLL) { 8101 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 8102 "(%d):2542 Try to issue mailbox command " 8103 "x%x (x%x/x%x) synchronously ahead of async" 8104 "mailbox command queue: x%x x%x\n", 8105 mboxq->vport ? 
mboxq->vport->vpi : 0, 8106 mboxq->u.mb.mbxCommand, 8107 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8108 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8109 psli->sli_flag, flag); 8110 /* Try to block the asynchronous mailbox posting */ 8111 rc = lpfc_sli4_async_mbox_block(phba); 8112 if (!rc) { 8113 /* Successfully blocked, now issue sync mbox cmd */ 8114 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 8115 if (rc != MBX_SUCCESS) 8116 lpfc_printf_log(phba, KERN_WARNING, 8117 LOG_MBOX | LOG_SLI, 8118 "(%d):2597 Sync Mailbox command " 8119 "x%x (x%x/x%x) failure: " 8120 "mqe_sta: x%x mcqe_sta: x%x/x%x " 8121 "Data: x%x x%x\n,", 8122 mboxq->vport ? mboxq->vport->vpi : 0, 8123 mboxq->u.mb.mbxCommand, 8124 lpfc_sli_config_mbox_subsys_get(phba, 8125 mboxq), 8126 lpfc_sli_config_mbox_opcode_get(phba, 8127 mboxq), 8128 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 8129 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 8130 bf_get(lpfc_mcqe_ext_status, 8131 &mboxq->mcqe), 8132 psli->sli_flag, flag); 8133 /* Unblock the async mailbox posting afterward */ 8134 lpfc_sli4_async_mbox_unblock(phba); 8135 } 8136 return rc; 8137 } 8138 8139 /* Now, interrupt mode asynchrous mailbox command */ 8140 rc = lpfc_mbox_cmd_check(phba, mboxq); 8141 if (rc) { 8142 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8143 "(%d):2543 Mailbox command x%x (x%x/x%x) " 8144 "cannot issue Data: x%x x%x\n", 8145 mboxq->vport ? mboxq->vport->vpi : 0, 8146 mboxq->u.mb.mbxCommand, 8147 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8148 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8149 psli->sli_flag, flag); 8150 goto out_not_finished; 8151 } 8152 8153 /* Put the mailbox command to the driver internal FIFO */ 8154 psli->slistat.mbox_busy++; 8155 spin_lock_irqsave(&phba->hbalock, iflags); 8156 lpfc_mbox_put(phba, mboxq); 8157 spin_unlock_irqrestore(&phba->hbalock, iflags); 8158 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8159 "(%d):0354 Mbox cmd issue - Enqueue Data: " 8160 "x%x (x%x/x%x) x%x x%x x%x\n", 8161 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 8162 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 8163 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8164 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8165 phba->pport->port_state, 8166 psli->sli_flag, MBX_NOWAIT); 8167 /* Wake up worker thread to transport mailbox command from head */ 8168 lpfc_worker_wake_up(phba); 8169 8170 return MBX_BUSY; 8171 8172 out_not_finished: 8173 return MBX_NOT_FINISHED; 8174 } 8175 8176 /** 8177 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 8178 * @phba: Pointer to HBA context object. 8179 * 8180 * This function is called by worker thread to send a mailbox command to 8181 * SLI4 HBA firmware. 
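 * The next command is taken from the head of the driver's internal mailbox
 * queue and posted to the port's mailbox work queue with lpfc_sli4_mq_put().
 * MBX_SUCCESS is returned when there is nothing to post or the command was
 * handed to the port; MBX_NOT_FINISHED is returned when interrupts are
 * disabled, posting is blocked, another mailbox command is still active,
 * or the post itself fails.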
8182 * 8183 **/ 8184 int 8185 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 8186 { 8187 struct lpfc_sli *psli = &phba->sli; 8188 LPFC_MBOXQ_t *mboxq; 8189 int rc = MBX_SUCCESS; 8190 unsigned long iflags; 8191 struct lpfc_mqe *mqe; 8192 uint32_t mbx_cmnd; 8193 8194 /* Check interrupt mode before post async mailbox command */ 8195 if (unlikely(!phba->sli4_hba.intr_enable)) 8196 return MBX_NOT_FINISHED; 8197 8198 /* Check for mailbox command service token */ 8199 spin_lock_irqsave(&phba->hbalock, iflags); 8200 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 8201 spin_unlock_irqrestore(&phba->hbalock, iflags); 8202 return MBX_NOT_FINISHED; 8203 } 8204 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 8205 spin_unlock_irqrestore(&phba->hbalock, iflags); 8206 return MBX_NOT_FINISHED; 8207 } 8208 if (unlikely(phba->sli.mbox_active)) { 8209 spin_unlock_irqrestore(&phba->hbalock, iflags); 8210 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8211 "0384 There is pending active mailbox cmd\n"); 8212 return MBX_NOT_FINISHED; 8213 } 8214 /* Take the mailbox command service token */ 8215 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 8216 8217 /* Get the next mailbox command from head of queue */ 8218 mboxq = lpfc_mbox_get(phba); 8219 8220 /* If no more mailbox command waiting for post, we're done */ 8221 if (!mboxq) { 8222 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8223 spin_unlock_irqrestore(&phba->hbalock, iflags); 8224 return MBX_SUCCESS; 8225 } 8226 phba->sli.mbox_active = mboxq; 8227 spin_unlock_irqrestore(&phba->hbalock, iflags); 8228 8229 /* Check device readiness for posting mailbox command */ 8230 rc = lpfc_mbox_dev_check(phba); 8231 if (unlikely(rc)) 8232 /* Driver clean routine will clean up pending mailbox */ 8233 goto out_not_finished; 8234 8235 /* Prepare the mbox command to be posted */ 8236 mqe = &mboxq->u.mqe; 8237 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 8238 8239 /* Start timer for the mbox_tmo and log some mailbox post messages */ 8240 mod_timer(&psli->mbox_tmo, (jiffies + 8241 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 8242 8243 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 8244 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 8245 "x%x x%x\n", 8246 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 8247 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8248 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8249 phba->pport->port_state, psli->sli_flag); 8250 8251 if (mbx_cmnd != MBX_HEARTBEAT) { 8252 if (mboxq->vport) { 8253 lpfc_debugfs_disc_trc(mboxq->vport, 8254 LPFC_DISC_TRC_MBOX_VPORT, 8255 "MBOX Send vport: cmd:x%x mb:x%x x%x", 8256 mbx_cmnd, mqe->un.mb_words[0], 8257 mqe->un.mb_words[1]); 8258 } else { 8259 lpfc_debugfs_disc_trc(phba->pport, 8260 LPFC_DISC_TRC_MBOX, 8261 "MBOX Send: cmd:x%x mb:x%x x%x", 8262 mbx_cmnd, mqe->un.mb_words[0], 8263 mqe->un.mb_words[1]); 8264 } 8265 } 8266 psli->slistat.mbox_cmd++; 8267 8268 /* Post the mailbox command to the port */ 8269 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 8270 if (rc != MBX_SUCCESS) { 8271 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 8272 "(%d):2533 Mailbox command x%x (x%x/x%x) " 8273 "cannot issue Data: x%x x%x\n", 8274 mboxq->vport ? 
mboxq->vport->vpi : 0, 8275 mboxq->u.mb.mbxCommand, 8276 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 8277 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 8278 psli->sli_flag, MBX_NOWAIT); 8279 goto out_not_finished; 8280 } 8281 8282 return rc; 8283 8284 out_not_finished: 8285 spin_lock_irqsave(&phba->hbalock, iflags); 8286 if (phba->sli.mbox_active) { 8287 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 8288 __lpfc_mbox_cmpl_put(phba, mboxq); 8289 /* Release the token */ 8290 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 8291 phba->sli.mbox_active = NULL; 8292 } 8293 spin_unlock_irqrestore(&phba->hbalock, iflags); 8294 8295 return MBX_NOT_FINISHED; 8296 } 8297 8298 /** 8299 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 8300 * @phba: Pointer to HBA context object. 8301 * @pmbox: Pointer to mailbox object. 8302 * @flag: Flag indicating how the mailbox need to be processed. 8303 * 8304 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 8305 * the API jump table function pointer from the lpfc_hba struct. 8306 * 8307 * Return codes the caller owns the mailbox command after the return of the 8308 * function. 8309 **/ 8310 int 8311 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 8312 { 8313 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 8314 } 8315 8316 /** 8317 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 8318 * @phba: The hba struct for which this call is being executed. 8319 * @dev_grp: The HBA PCI-Device group number. 8320 * 8321 * This routine sets up the mbox interface API function jump table in @phba 8322 * struct. 8323 * Returns: 0 - success, -ENODEV - failure. 8324 **/ 8325 int 8326 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8327 { 8328 8329 switch (dev_grp) { 8330 case LPFC_PCI_DEV_LP: 8331 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 8332 phba->lpfc_sli_handle_slow_ring_event = 8333 lpfc_sli_handle_slow_ring_event_s3; 8334 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 8335 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 8336 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 8337 break; 8338 case LPFC_PCI_DEV_OC: 8339 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 8340 phba->lpfc_sli_handle_slow_ring_event = 8341 lpfc_sli_handle_slow_ring_event_s4; 8342 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 8343 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 8344 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 8345 break; 8346 default: 8347 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8348 "1420 Invalid HBA PCI-device group: 0x%x\n", 8349 dev_grp); 8350 return -ENODEV; 8351 break; 8352 } 8353 return 0; 8354 } 8355 8356 /** 8357 * __lpfc_sli_ringtx_put - Add an iocb to the txq 8358 * @phba: Pointer to HBA context object. 8359 * @pring: Pointer to driver SLI ring object. 8360 * @piocb: Pointer to address of newly added command iocb. 8361 * 8362 * This function is called with hbalock held to add a command 8363 * iocb to the txq when SLI layer cannot submit the command iocb 8364 * to the ring. 8365 **/ 8366 void 8367 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8368 struct lpfc_iocbq *piocb) 8369 { 8370 lockdep_assert_held(&phba->hbalock); 8371 /* Insert the caller's iocb in the txq tail for later processing. */ 8372 list_add_tail(&piocb->list, &pring->txq); 8373 } 8374 8375 /** 8376 * lpfc_sli_next_iocb - Get the next iocb in the txq 8377 * @phba: Pointer to HBA context object. 
8378 * @pring: Pointer to driver SLI ring object. 8379 * @piocb: Pointer to address of newly added command iocb. 8380 * 8381 * This function is called with hbalock held before a new 8382 * iocb is submitted to the firmware. This function checks 8383 * txq to flush the iocbs in txq to Firmware before 8384 * submitting new iocbs to the Firmware. 8385 * If there are iocbs in the txq which need to be submitted 8386 * to firmware, lpfc_sli_next_iocb returns the first element 8387 * of the txq after dequeuing it from txq. 8388 * If there is no iocb in the txq then the function will return 8389 * *piocb and *piocb is set to NULL. Caller needs to check 8390 * *piocb to find if there are more commands in the txq. 8391 **/ 8392 static struct lpfc_iocbq * 8393 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 8394 struct lpfc_iocbq **piocb) 8395 { 8396 struct lpfc_iocbq * nextiocb; 8397 8398 lockdep_assert_held(&phba->hbalock); 8399 8400 nextiocb = lpfc_sli_ringtx_get(phba, pring); 8401 if (!nextiocb) { 8402 nextiocb = *piocb; 8403 *piocb = NULL; 8404 } 8405 8406 return nextiocb; 8407 } 8408 8409 /** 8410 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 8411 * @phba: Pointer to HBA context object. 8412 * @ring_number: SLI ring number to issue iocb on. 8413 * @piocb: Pointer to command iocb. 8414 * @flag: Flag indicating if this command can be put into txq. 8415 * 8416 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 8417 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 8418 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 8419 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 8420 * this function allows only iocbs for posting buffers. This function finds 8421 * next available slot in the command ring and posts the command to the 8422 * available slot and writes the port attention register to request HBA start 8423 * processing new iocb. If there is no slot available in the ring and 8424 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 8425 * the function returns IOCB_BUSY. 8426 * 8427 * This function is called with hbalock held. The function will return success 8428 * after it successfully submit the iocb to firmware or after adding to the 8429 * txq. 8430 **/ 8431 static int 8432 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 8433 struct lpfc_iocbq *piocb, uint32_t flag) 8434 { 8435 struct lpfc_iocbq *nextiocb; 8436 IOCB_t *iocb; 8437 struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number]; 8438 8439 lockdep_assert_held(&phba->hbalock); 8440 8441 if (piocb->iocb_cmpl && (!piocb->vport) && 8442 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 8443 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 8444 lpfc_printf_log(phba, KERN_ERR, 8445 LOG_SLI | LOG_VPORT, 8446 "1807 IOCB x%x failed. No vport\n", 8447 piocb->iocb.ulpCommand); 8448 dump_stack(); 8449 return IOCB_ERROR; 8450 } 8451 8452 8453 /* If the PCI channel is in offline state, do not post iocbs. */ 8454 if (unlikely(pci_channel_offline(phba->pcidev))) 8455 return IOCB_ERROR; 8456 8457 /* If HBA has a deferred error attention, fail the iocb. 
*/ 8458 if (unlikely(phba->hba_flag & DEFER_ERATT)) 8459 return IOCB_ERROR; 8460 8461 /* 8462 * We should never get an IOCB if we are in a < LINK_DOWN state 8463 */ 8464 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 8465 return IOCB_ERROR; 8466 8467 /* 8468 * Check to see if we are blocking IOCB processing because of a 8469 * outstanding event. 8470 */ 8471 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 8472 goto iocb_busy; 8473 8474 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 8475 /* 8476 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 8477 * can be issued if the link is not up. 8478 */ 8479 switch (piocb->iocb.ulpCommand) { 8480 case CMD_GEN_REQUEST64_CR: 8481 case CMD_GEN_REQUEST64_CX: 8482 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 8483 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 8484 FC_RCTL_DD_UNSOL_CMD) || 8485 (piocb->iocb.un.genreq64.w5.hcsw.Type != 8486 MENLO_TRANSPORT_TYPE)) 8487 8488 goto iocb_busy; 8489 break; 8490 case CMD_QUE_RING_BUF_CN: 8491 case CMD_QUE_RING_BUF64_CN: 8492 /* 8493 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 8494 * completion, iocb_cmpl MUST be 0. 8495 */ 8496 if (piocb->iocb_cmpl) 8497 piocb->iocb_cmpl = NULL; 8498 /*FALLTHROUGH*/ 8499 case CMD_CREATE_XRI_CR: 8500 case CMD_CLOSE_XRI_CN: 8501 case CMD_CLOSE_XRI_CX: 8502 break; 8503 default: 8504 goto iocb_busy; 8505 } 8506 8507 /* 8508 * For FCP commands, we must be in a state where we can process link 8509 * attention events. 8510 */ 8511 } else if (unlikely(pring->ringno == LPFC_FCP_RING && 8512 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 8513 goto iocb_busy; 8514 } 8515 8516 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 8517 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 8518 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 8519 8520 if (iocb) 8521 lpfc_sli_update_ring(phba, pring); 8522 else 8523 lpfc_sli_update_full_ring(phba, pring); 8524 8525 if (!piocb) 8526 return IOCB_SUCCESS; 8527 8528 goto out_busy; 8529 8530 iocb_busy: 8531 pring->stats.iocb_cmd_delay++; 8532 8533 out_busy: 8534 8535 if (!(flag & SLI_IOCB_RET_IOCB)) { 8536 __lpfc_sli_ringtx_put(phba, pring, piocb); 8537 return IOCB_SUCCESS; 8538 } 8539 8540 return IOCB_BUSY; 8541 } 8542 8543 /** 8544 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 8545 * @phba: Pointer to HBA context object. 8546 * @piocb: Pointer to command iocb. 8547 * @sglq: Pointer to the scatter gather queue object. 8548 * 8549 * This routine converts the bpl or bde that is in the IOCB 8550 * to a sgl list for the sli4 hardware. The physical address 8551 * of the bpl/bde is converted back to a virtual address. 8552 * If the IOCB contains a BPL then the list of BDE's is 8553 * converted to sli4_sge's. If the IOCB contains a single 8554 * BDE then it is converted to a single sli_sge. 8555 * The IOCB is still in cpu endianess so the contents of 8556 * the bpl can be used without byte swapping. 8557 * 8558 * Returns valid XRI = Success, NO_XRI = Failure. 
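 *
 * A rough sketch of the per-BDE conversion performed by the loop below
 * (field and helper names as used in this routine):
 *
 *   sgl->addr_hi = bpl->addrHigh;
 *   sgl->addr_lo = bpl->addrLow;
 *   bde.tus.w    = le32_to_cpu(bpl->tus.w);
 *   sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
 *   bf_set(lpfc_sli4_sge_last, sgl, ((i + 1) == numBdes) ? 1 : 0);
 *   sgl++; bpl++;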
8559 **/ 8560 static uint16_t 8561 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 8562 struct lpfc_sglq *sglq) 8563 { 8564 uint16_t xritag = NO_XRI; 8565 struct ulp_bde64 *bpl = NULL; 8566 struct ulp_bde64 bde; 8567 struct sli4_sge *sgl = NULL; 8568 struct lpfc_dmabuf *dmabuf; 8569 IOCB_t *icmd; 8570 int numBdes = 0; 8571 int i = 0; 8572 uint32_t offset = 0; /* accumulated offset in the sg request list */ 8573 int inbound = 0; /* number of sg reply entries inbound from firmware */ 8574 8575 if (!piocbq || !sglq) 8576 return xritag; 8577 8578 sgl = (struct sli4_sge *)sglq->sgl; 8579 icmd = &piocbq->iocb; 8580 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 8581 return sglq->sli4_xritag; 8582 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8583 numBdes = icmd->un.genreq64.bdl.bdeSize / 8584 sizeof(struct ulp_bde64); 8585 /* The addrHigh and addrLow fields within the IOCB 8586 * have not been byteswapped yet so there is no 8587 * need to swap them back. 8588 */ 8589 if (piocbq->context3) 8590 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 8591 else 8592 return xritag; 8593 8594 bpl = (struct ulp_bde64 *)dmabuf->virt; 8595 if (!bpl) 8596 return xritag; 8597 8598 for (i = 0; i < numBdes; i++) { 8599 /* Should already be byte swapped. */ 8600 sgl->addr_hi = bpl->addrHigh; 8601 sgl->addr_lo = bpl->addrLow; 8602 8603 sgl->word2 = le32_to_cpu(sgl->word2); 8604 if ((i+1) == numBdes) 8605 bf_set(lpfc_sli4_sge_last, sgl, 1); 8606 else 8607 bf_set(lpfc_sli4_sge_last, sgl, 0); 8608 /* swap the size field back to the cpu so we 8609 * can assign it to the sgl. 8610 */ 8611 bde.tus.w = le32_to_cpu(bpl->tus.w); 8612 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 8613 /* The offsets in the sgl need to be accumulated 8614 * separately for the request and reply lists. 8615 * The request is always first, the reply follows. 8616 */ 8617 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 8618 /* add up the reply sg entries */ 8619 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 8620 inbound++; 8621 /* first inbound? reset the offset */ 8622 if (inbound == 1) 8623 offset = 0; 8624 bf_set(lpfc_sli4_sge_offset, sgl, offset); 8625 bf_set(lpfc_sli4_sge_type, sgl, 8626 LPFC_SGE_TYPE_DATA); 8627 offset += bde.tus.f.bdeSize; 8628 } 8629 sgl->word2 = cpu_to_le32(sgl->word2); 8630 bpl++; 8631 sgl++; 8632 } 8633 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 8634 /* The addrHigh and addrLow fields of the BDE have not 8635 * been byteswapped yet so they need to be swapped 8636 * before putting them in the sgl. 8637 */ 8638 sgl->addr_hi = 8639 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 8640 sgl->addr_lo = 8641 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 8642 sgl->word2 = le32_to_cpu(sgl->word2); 8643 bf_set(lpfc_sli4_sge_last, sgl, 1); 8644 sgl->word2 = cpu_to_le32(sgl->word2); 8645 sgl->sge_len = 8646 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 8647 } 8648 return sglq->sli4_xritag; 8649 } 8650 8651 /** 8652 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 8653 * @phba: Pointer to HBA context object. 8654 * @piocb: Pointer to command iocb. 8655 * @wqe: Pointer to the work queue entry. 8656 * 8657 * This routine converts the iocb command to its Work Queue Entry 8658 * equivalent. The wqe pointer should not have any fields set when 8659 * this routine is called because it will memcpy over them. 8660 * This routine does not set the CQ_ID or the WQEC bits in the 8661 * wqe. 8662 * 8663 * Returns: 0 = Success, IOCB_ERROR = Failure. 
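 *
 * The IOCB's ulpCommand selects which WQE form the switch below builds;
 * for example CMD_ELS_REQUEST64_CR is rendered into wqe->els_req,
 * CMD_FCP_IWRITE64_CR and CMD_FCP_IREAD64_CR into the fcp_iwrite and
 * fcp_iread forms, and CMD_XMIT_SEQUENCE64_CX into xmit_sequence, with
 * the shared wqe_com fields (context tag, PU, IOD, LENLOC) taken from
 * the corresponding IOCB words.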
8664 **/ 8665 static int 8666 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 8667 union lpfc_wqe *wqe) 8668 { 8669 uint32_t xmit_len = 0, total_len = 0; 8670 uint8_t ct = 0; 8671 uint32_t fip; 8672 uint32_t abort_tag; 8673 uint8_t command_type = ELS_COMMAND_NON_FIP; 8674 uint8_t cmnd; 8675 uint16_t xritag; 8676 uint16_t abrt_iotag; 8677 struct lpfc_iocbq *abrtiocbq; 8678 struct ulp_bde64 *bpl = NULL; 8679 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 8680 int numBdes, i; 8681 struct ulp_bde64 bde; 8682 struct lpfc_nodelist *ndlp; 8683 uint32_t *pcmd; 8684 uint32_t if_type; 8685 8686 fip = phba->hba_flag & HBA_FIP_SUPPORT; 8687 /* The fcp commands will set command type */ 8688 if (iocbq->iocb_flag & LPFC_IO_FCP) 8689 command_type = FCP_COMMAND; 8690 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 8691 command_type = ELS_COMMAND_FIP; 8692 else 8693 command_type = ELS_COMMAND_NON_FIP; 8694 8695 if (phba->fcp_embed_io) 8696 memset(wqe, 0, sizeof(union lpfc_wqe128)); 8697 /* Some of the fields are in the right position already */ 8698 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8699 if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { 8700 /* The ct field has moved so reset */ 8701 wqe->generic.wqe_com.word7 = 0; 8702 wqe->generic.wqe_com.word10 = 0; 8703 } 8704 8705 abort_tag = (uint32_t) iocbq->iotag; 8706 xritag = iocbq->sli4_xritag; 8707 /* words0-2 bpl convert bde */ 8708 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8709 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8710 sizeof(struct ulp_bde64); 8711 bpl = (struct ulp_bde64 *) 8712 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 8713 if (!bpl) 8714 return IOCB_ERROR; 8715 8716 /* Should already be byte swapped. */ 8717 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 8718 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 8719 /* swap the size field back to the cpu so we 8720 * can assign it to the sgl. 
8721 */ 8722 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 8723 xmit_len = wqe->generic.bde.tus.f.bdeSize; 8724 total_len = 0; 8725 for (i = 0; i < numBdes; i++) { 8726 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8727 total_len += bde.tus.f.bdeSize; 8728 } 8729 } else 8730 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 8731 8732 iocbq->iocb.ulpIoTag = iocbq->iotag; 8733 cmnd = iocbq->iocb.ulpCommand; 8734 8735 switch (iocbq->iocb.ulpCommand) { 8736 case CMD_ELS_REQUEST64_CR: 8737 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 8738 ndlp = iocbq->context_un.ndlp; 8739 else 8740 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8741 if (!iocbq->iocb.ulpLe) { 8742 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8743 "2007 Only Limited Edition cmd Format" 8744 " supported 0x%x\n", 8745 iocbq->iocb.ulpCommand); 8746 return IOCB_ERROR; 8747 } 8748 8749 wqe->els_req.payload_len = xmit_len; 8750 /* Els_reguest64 has a TMO */ 8751 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 8752 iocbq->iocb.ulpTimeout); 8753 /* Need a VF for word 4 set the vf bit*/ 8754 bf_set(els_req64_vf, &wqe->els_req, 0); 8755 /* And a VFID for word 12 */ 8756 bf_set(els_req64_vfid, &wqe->els_req, 0); 8757 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8758 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8759 iocbq->iocb.ulpContext); 8760 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 8761 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 8762 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 8763 if (command_type == ELS_COMMAND_FIP) 8764 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 8765 >> LPFC_FIP_ELS_ID_SHIFT); 8766 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8767 iocbq->context2)->virt); 8768 if_type = bf_get(lpfc_sli_intf_if_type, 8769 &phba->sli4_hba.sli_intf); 8770 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8771 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 8772 *pcmd == ELS_CMD_SCR || 8773 *pcmd == ELS_CMD_FDISC || 8774 *pcmd == ELS_CMD_LOGO || 8775 *pcmd == ELS_CMD_PLOGI)) { 8776 bf_set(els_req64_sp, &wqe->els_req, 1); 8777 bf_set(els_req64_sid, &wqe->els_req, 8778 iocbq->vport->fc_myDID); 8779 if ((*pcmd == ELS_CMD_FLOGI) && 8780 !(phba->fc_topology == 8781 LPFC_TOPOLOGY_LOOP)) 8782 bf_set(els_req64_sid, &wqe->els_req, 0); 8783 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 8784 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8785 phba->vpi_ids[iocbq->vport->vpi]); 8786 } else if (pcmd && iocbq->context1) { 8787 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 8788 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8789 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8790 } 8791 } 8792 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 8793 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8794 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 8795 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 8796 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 8797 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 8798 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8799 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 8800 wqe->els_req.max_response_payload_len = total_len - xmit_len; 8801 break; 8802 case CMD_XMIT_SEQUENCE64_CX: 8803 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 8804 iocbq->iocb.un.ulpWord[3]); 8805 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 8806 iocbq->iocb.unsli3.rcvsli3.ox_id); 8807 /* The entire sequence is transmitted for this IOCB */ 8808 xmit_len = total_len; 8809 cmnd = CMD_XMIT_SEQUENCE64_CR; 8810 if (phba->link_flag & LS_LOOPBACK_MODE) 8811 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 8812 case CMD_XMIT_SEQUENCE64_CR: 8813 /* word3 iocb=io_tag32 
wqe=reserved */ 8814 wqe->xmit_sequence.rsvd3 = 0; 8815 /* word4 relative_offset memcpy */ 8816 /* word5 r_ctl/df_ctl memcpy */ 8817 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 8818 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 8819 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 8820 LPFC_WQE_IOD_WRITE); 8821 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 8822 LPFC_WQE_LENLOC_WORD12); 8823 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 8824 wqe->xmit_sequence.xmit_len = xmit_len; 8825 command_type = OTHER_COMMAND; 8826 break; 8827 case CMD_XMIT_BCAST64_CN: 8828 /* word3 iocb=iotag32 wqe=seq_payload_len */ 8829 wqe->xmit_bcast64.seq_payload_len = xmit_len; 8830 /* word4 iocb=rsvd wqe=rsvd */ 8831 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 8832 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 8833 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 8834 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8835 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 8836 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 8837 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 8838 LPFC_WQE_LENLOC_WORD3); 8839 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 8840 break; 8841 case CMD_FCP_IWRITE64_CR: 8842 command_type = FCP_COMMAND_DATA_OUT; 8843 /* word3 iocb=iotag wqe=payload_offset_len */ 8844 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8845 bf_set(payload_offset_len, &wqe->fcp_iwrite, 8846 xmit_len + sizeof(struct fcp_rsp)); 8847 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 8848 0); 8849 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8850 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8851 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 8852 iocbq->iocb.ulpFCP2Rcvy); 8853 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 8854 /* Always open the exchange */ 8855 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 8856 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 8857 LPFC_WQE_LENLOC_WORD4); 8858 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8859 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8860 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8861 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 8862 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 8863 if (iocbq->priority) { 8864 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8865 (iocbq->priority << 1)); 8866 } else { 8867 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8868 (phba->cfg_XLanePriority << 1)); 8869 } 8870 } 8871 /* Note, word 10 is already initialized to 0 */ 8872 8873 if (phba->fcp_embed_io) { 8874 struct lpfc_scsi_buf *lpfc_cmd; 8875 struct sli4_sge *sgl; 8876 union lpfc_wqe128 *wqe128; 8877 struct fcp_cmnd *fcp_cmnd; 8878 uint32_t *ptr; 8879 8880 /* 128 byte wqe support here */ 8881 wqe128 = (union lpfc_wqe128 *)wqe; 8882 8883 lpfc_cmd = iocbq->context1; 8884 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8885 fcp_cmnd = lpfc_cmd->fcp_cmnd; 8886 8887 /* Word 0-2 - FCP_CMND */ 8888 wqe128->generic.bde.tus.f.bdeFlags = 8889 BUFF_TYPE_BDE_IMMED; 8890 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8891 wqe128->generic.bde.addrHigh = 0; 8892 wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8893 8894 bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1); 8895 8896 /* Word 22-29 FCP CMND Payload */ 8897 ptr = &wqe128->words[22]; 8898 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8899 } 8900 break; 8901 case CMD_FCP_IREAD64_CR: 8902 /* word3 iocb=iotag wqe=payload_offset_len */ 8903 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8904 
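		/*
		 * The IREAD WQE is built much like the IWRITE WQE above;
		 * the notable difference is the I/O direction (wqe_iod),
		 * which is set to READ here.
		 */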
bf_set(payload_offset_len, &wqe->fcp_iread, 8905 xmit_len + sizeof(struct fcp_rsp)); 8906 bf_set(cmd_buff_len, &wqe->fcp_iread, 8907 0); 8908 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8909 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8910 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 8911 iocbq->iocb.ulpFCP2Rcvy); 8912 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8913 /* Always open the exchange */ 8914 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8915 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8916 LPFC_WQE_LENLOC_WORD4); 8917 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8918 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8919 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8920 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 8921 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 8922 if (iocbq->priority) { 8923 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8924 (iocbq->priority << 1)); 8925 } else { 8926 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8927 (phba->cfg_XLanePriority << 1)); 8928 } 8929 } 8930 /* Note, word 10 is already initialized to 0 */ 8931 8932 if (phba->fcp_embed_io) { 8933 struct lpfc_scsi_buf *lpfc_cmd; 8934 struct sli4_sge *sgl; 8935 union lpfc_wqe128 *wqe128; 8936 struct fcp_cmnd *fcp_cmnd; 8937 uint32_t *ptr; 8938 8939 /* 128 byte wqe support here */ 8940 wqe128 = (union lpfc_wqe128 *)wqe; 8941 8942 lpfc_cmd = iocbq->context1; 8943 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 8944 fcp_cmnd = lpfc_cmd->fcp_cmnd; 8945 8946 /* Word 0-2 - FCP_CMND */ 8947 wqe128->generic.bde.tus.f.bdeFlags = 8948 BUFF_TYPE_BDE_IMMED; 8949 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 8950 wqe128->generic.bde.addrHigh = 0; 8951 wqe128->generic.bde.addrLow = 88; /* Word 22 */ 8952 8953 bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1); 8954 8955 /* Word 22-29 FCP CMND Payload */ 8956 ptr = &wqe128->words[22]; 8957 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 8958 } 8959 break; 8960 case CMD_FCP_ICMND64_CR: 8961 /* word3 iocb=iotag wqe=payload_offset_len */ 8962 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8963 bf_set(payload_offset_len, &wqe->fcp_icmd, 8964 xmit_len + sizeof(struct fcp_rsp)); 8965 bf_set(cmd_buff_len, &wqe->fcp_icmd, 8966 0); 8967 /* word3 iocb=IO_TAG wqe=reserved */ 8968 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8969 /* Always open the exchange */ 8970 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 8971 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8972 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8973 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8974 LPFC_WQE_LENLOC_NONE); 8975 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8976 iocbq->iocb.ulpFCP2Rcvy); 8977 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8978 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 8979 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 8980 if (iocbq->priority) { 8981 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8982 (iocbq->priority << 1)); 8983 } else { 8984 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8985 (phba->cfg_XLanePriority << 1)); 8986 } 8987 } 8988 /* Note, word 10 is already initialized to 0 */ 8989 8990 if (phba->fcp_embed_io) { 8991 struct lpfc_scsi_buf *lpfc_cmd; 8992 struct sli4_sge *sgl; 8993 union lpfc_wqe128 *wqe128; 8994 struct fcp_cmnd *fcp_cmnd; 8995 uint32_t *ptr; 8996 8997 /* 128 byte wqe support here */ 8998 wqe128 = (union lpfc_wqe128 *)wqe; 8999 9000 lpfc_cmd = iocbq->context1; 9001 sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; 9002 fcp_cmnd = lpfc_cmd->fcp_cmnd; 9003 9004 /* Word 0-2 - FCP_CMND */ 9005 
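			/*
			 * With fcp_embed_io the BDE in words 0-2 is an
			 * immediate descriptor: addrLow carries the byte
			 * offset (88) of WQE word 22, where the FCP_CMND
			 * payload is copied below.
			 */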
wqe128->generic.bde.tus.f.bdeFlags = 9006 BUFF_TYPE_BDE_IMMED; 9007 wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len; 9008 wqe128->generic.bde.addrHigh = 0; 9009 wqe128->generic.bde.addrLow = 88; /* Word 22 */ 9010 9011 bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1); 9012 9013 /* Word 22-29 FCP CMND Payload */ 9014 ptr = &wqe128->words[22]; 9015 memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd)); 9016 } 9017 break; 9018 case CMD_GEN_REQUEST64_CR: 9019 /* For this command calculate the xmit length of the 9020 * request bde. 9021 */ 9022 xmit_len = 0; 9023 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 9024 sizeof(struct ulp_bde64); 9025 for (i = 0; i < numBdes; i++) { 9026 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 9027 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 9028 break; 9029 xmit_len += bde.tus.f.bdeSize; 9030 } 9031 /* word3 iocb=IO_TAG wqe=request_payload_len */ 9032 wqe->gen_req.request_payload_len = xmit_len; 9033 /* word4 iocb=parameter wqe=relative_offset memcpy */ 9034 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 9035 /* word6 context tag copied in memcpy */ 9036 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 9037 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 9038 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9039 "2015 Invalid CT %x command 0x%x\n", 9040 ct, iocbq->iocb.ulpCommand); 9041 return IOCB_ERROR; 9042 } 9043 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 9044 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 9045 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 9046 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 9047 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 9048 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 9049 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 9050 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 9051 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 9052 command_type = OTHER_COMMAND; 9053 break; 9054 case CMD_XMIT_ELS_RSP64_CX: 9055 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9056 /* words0-2 BDE memcpy */ 9057 /* word3 iocb=iotag32 wqe=response_payload_len */ 9058 wqe->xmit_els_rsp.response_payload_len = xmit_len; 9059 /* word4 */ 9060 wqe->xmit_els_rsp.word4 = 0; 9061 /* word5 iocb=rsvd wge=did */ 9062 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 9063 iocbq->iocb.un.xseq64.xmit_els_remoteID); 9064 9065 if_type = bf_get(lpfc_sli_intf_if_type, 9066 &phba->sli4_hba.sli_intf); 9067 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 9068 if (iocbq->vport->fc_flag & FC_PT2PT) { 9069 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9070 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9071 iocbq->vport->fc_myDID); 9072 if (iocbq->vport->fc_myDID == Fabric_DID) { 9073 bf_set(wqe_els_did, 9074 &wqe->xmit_els_rsp.wqe_dest, 0); 9075 } 9076 } 9077 } 9078 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 9079 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9080 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 9081 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 9082 iocbq->iocb.unsli3.rcvsli3.ox_id); 9083 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 9084 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9085 phba->vpi_ids[iocbq->vport->vpi]); 9086 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 9087 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 9088 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 9089 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 9090 LPFC_WQE_LENLOC_WORD3); 9091 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 9092 bf_set(wqe_rsp_temp_rpi, 
&wqe->xmit_els_rsp, 9093 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 9094 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 9095 iocbq->context2)->virt); 9096 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 9097 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 9098 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 9099 iocbq->vport->fc_myDID); 9100 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 9101 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 9102 phba->vpi_ids[phba->pport->vpi]); 9103 } 9104 command_type = OTHER_COMMAND; 9105 break; 9106 case CMD_CLOSE_XRI_CN: 9107 case CMD_ABORT_XRI_CN: 9108 case CMD_ABORT_XRI_CX: 9109 /* words 0-2 memcpy should be 0 rserved */ 9110 /* port will send abts */ 9111 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 9112 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 9113 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 9114 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 9115 } else 9116 fip = 0; 9117 9118 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 9119 /* 9120 * The link is down, or the command was ELS_FIP 9121 * so the fw does not need to send abts 9122 * on the wire. 9123 */ 9124 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 9125 else 9126 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 9127 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 9128 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 9129 wqe->abort_cmd.rsrvd5 = 0; 9130 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 9131 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 9132 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 9133 /* 9134 * The abort handler will send us CMD_ABORT_XRI_CN or 9135 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 9136 */ 9137 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 9138 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 9139 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 9140 LPFC_WQE_LENLOC_NONE); 9141 cmnd = CMD_ABORT_XRI_CX; 9142 command_type = OTHER_COMMAND; 9143 xritag = 0; 9144 break; 9145 case CMD_XMIT_BLS_RSP64_CX: 9146 ndlp = (struct lpfc_nodelist *)iocbq->context1; 9147 /* As BLS ABTS RSP WQE is very different from other WQEs, 9148 * we re-construct this WQE here based on information in 9149 * iocbq from scratch. 9150 */ 9151 memset(wqe, 0, sizeof(union lpfc_wqe)); 9152 /* OX_ID is invariable to who sent ABTS to CT exchange */ 9153 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 9154 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 9155 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 9156 LPFC_ABTS_UNSOL_INT) { 9157 /* ABTS sent by initiator to CT exchange, the 9158 * RX_ID field will be filled with the newly 9159 * allocated responder XRI. 9160 */ 9161 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9162 iocbq->sli4_xritag); 9163 } else { 9164 /* ABTS sent by responder to CT exchange, the 9165 * RX_ID field will be filled with the responder 9166 * RX_ID from ABTS. 
9167 */ 9168 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 9169 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 9170 } 9171 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 9172 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 9173 9174 /* Use CT=VPI */ 9175 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 9176 ndlp->nlp_DID); 9177 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 9178 iocbq->iocb.ulpContext); 9179 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 9180 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 9181 phba->vpi_ids[phba->pport->vpi]); 9182 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 9183 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 9184 LPFC_WQE_LENLOC_NONE); 9185 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 9186 command_type = OTHER_COMMAND; 9187 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 9188 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 9189 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 9190 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 9191 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 9192 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 9193 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 9194 } 9195 9196 break; 9197 case CMD_SEND_FRAME: 9198 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9199 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9200 return 0; 9201 case CMD_XRI_ABORTED_CX: 9202 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 9203 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 9204 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 9205 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 9206 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 9207 default: 9208 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9209 "2014 Invalid command 0x%x\n", 9210 iocbq->iocb.ulpCommand); 9211 return IOCB_ERROR; 9212 break; 9213 } 9214 9215 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 9216 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 9217 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 9218 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 9219 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 9220 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 9221 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 9222 LPFC_IO_DIF_INSERT); 9223 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 9224 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 9225 wqe->generic.wqe_com.abort_tag = abort_tag; 9226 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 9227 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 9228 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 9229 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 9230 return 0; 9231 } 9232 9233 /** 9234 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 9235 * @phba: Pointer to HBA context object. 9236 * @ring_number: SLI ring number to issue iocb on. 9237 * @piocb: Pointer to command iocb. 9238 * @flag: Flag indicating if this command can be put into txq. 9239 * 9240 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 9241 * an iocb command to an HBA with SLI-4 interface spec. 9242 * 9243 * This function is called with hbalock held. The function will return success 9244 * after it successfully submit the iocb to firmware or after adding to the 9245 * txq. 
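 *
 * Return codes:
 *	IOCB_SUCCESS - iocb submitted to the work queue or queued to the txq
 *	IOCB_BUSY - iocb could not be submitted or queued
 *	IOCB_ERROR - failure building or posting the WQE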
9246 **/ 9247 static int 9248 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 9249 struct lpfc_iocbq *piocb, uint32_t flag) 9250 { 9251 struct lpfc_sglq *sglq; 9252 union lpfc_wqe *wqe; 9253 union lpfc_wqe128 wqe128; 9254 struct lpfc_queue *wq; 9255 struct lpfc_sli_ring *pring; 9256 9257 /* Get the WQ */ 9258 if ((piocb->iocb_flag & LPFC_IO_FCP) || 9259 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9260 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) 9261 wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx]; 9262 else 9263 wq = phba->sli4_hba.oas_wq; 9264 } else { 9265 wq = phba->sli4_hba.els_wq; 9266 } 9267 9268 /* Get corresponding ring */ 9269 pring = wq->pring; 9270 9271 /* 9272 * The WQE can be either 64 or 128 bytes, 9273 * so allocate space on the stack assuming the largest. 9274 */ 9275 wqe = (union lpfc_wqe *)&wqe128; 9276 9277 lockdep_assert_held(&phba->hbalock); 9278 9279 if (piocb->sli4_xritag == NO_XRI) { 9280 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 9281 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 9282 sglq = NULL; 9283 else { 9284 if (!list_empty(&pring->txq)) { 9285 if (!(flag & SLI_IOCB_RET_IOCB)) { 9286 __lpfc_sli_ringtx_put(phba, 9287 pring, piocb); 9288 return IOCB_SUCCESS; 9289 } else { 9290 return IOCB_BUSY; 9291 } 9292 } else { 9293 sglq = __lpfc_sli_get_els_sglq(phba, piocb); 9294 if (!sglq) { 9295 if (!(flag & SLI_IOCB_RET_IOCB)) { 9296 __lpfc_sli_ringtx_put(phba, 9297 pring, 9298 piocb); 9299 return IOCB_SUCCESS; 9300 } else 9301 return IOCB_BUSY; 9302 } 9303 } 9304 } 9305 } else if (piocb->iocb_flag & LPFC_IO_FCP) 9306 /* These IO's already have an XRI and a mapped sgl. */ 9307 sglq = NULL; 9308 else { 9309 /* 9310 * This is a continuation of a commandi,(CX) so this 9311 * sglq is on the active list 9312 */ 9313 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 9314 if (!sglq) 9315 return IOCB_ERROR; 9316 } 9317 9318 if (sglq) { 9319 piocb->sli4_lxritag = sglq->sli4_lxritag; 9320 piocb->sli4_xritag = sglq->sli4_xritag; 9321 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 9322 return IOCB_ERROR; 9323 } 9324 9325 if (lpfc_sli4_iocb2wqe(phba, piocb, wqe)) 9326 return IOCB_ERROR; 9327 9328 if (lpfc_sli4_wq_put(wq, wqe)) 9329 return IOCB_ERROR; 9330 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 9331 9332 return 0; 9333 } 9334 9335 /** 9336 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 9337 * 9338 * This routine wraps the actual lockless version for issusing IOCB function 9339 * pointer from the lpfc_hba struct. 9340 * 9341 * Return codes: 9342 * IOCB_ERROR - Error 9343 * IOCB_SUCCESS - Success 9344 * IOCB_BUSY - Busy 9345 **/ 9346 int 9347 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9348 struct lpfc_iocbq *piocb, uint32_t flag) 9349 { 9350 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9351 } 9352 9353 /** 9354 * lpfc_sli_api_table_setup - Set up sli api function jump table 9355 * @phba: The hba struct for which this call is being executed. 9356 * @dev_grp: The HBA PCI-Device group number. 9357 * 9358 * This routine sets up the SLI interface API function jump table in @phba 9359 * struct. 9360 * Returns: 0 - success, -ENODEV - failure. 
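 *
 * LPFC_PCI_DEV_LP wires the SLI-3 (_s3) issue/release routines into the
 * table and LPFC_PCI_DEV_OC the SLI-4 (_s4) routines; any other device
 * group is rejected.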
9361 **/ 9362 int 9363 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 9364 { 9365 9366 switch (dev_grp) { 9367 case LPFC_PCI_DEV_LP: 9368 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 9369 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 9370 break; 9371 case LPFC_PCI_DEV_OC: 9372 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 9373 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 9374 break; 9375 default: 9376 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9377 "1419 Invalid HBA PCI-device group: 0x%x\n", 9378 dev_grp); 9379 return -ENODEV; 9380 break; 9381 } 9382 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 9383 return 0; 9384 } 9385 9386 /** 9387 * lpfc_sli4_calc_ring - Calculates which ring to use 9388 * @phba: Pointer to HBA context object. 9389 * @piocb: Pointer to command iocb. 9390 * 9391 * For SLI4 only, FCP IO can deferred to one fo many WQs, based on 9392 * hba_wqidx, thus we need to calculate the corresponding ring. 9393 * Since ABORTS must go on the same WQ of the command they are 9394 * aborting, we use command's hba_wqidx. 9395 */ 9396 struct lpfc_sli_ring * 9397 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) 9398 { 9399 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) { 9400 if (!(phba->cfg_fof) || 9401 (!(piocb->iocb_flag & LPFC_IO_FOF))) { 9402 if (unlikely(!phba->sli4_hba.fcp_wq)) 9403 return NULL; 9404 /* 9405 * for abort iocb hba_wqidx should already 9406 * be setup based on what work queue we used. 9407 */ 9408 if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 9409 piocb->hba_wqidx = 9410 lpfc_sli4_scmd_to_wqidx_distr(phba, 9411 piocb->context1); 9412 piocb->hba_wqidx = piocb->hba_wqidx % 9413 phba->cfg_fcp_io_channel; 9414 } 9415 return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring; 9416 } else { 9417 if (unlikely(!phba->sli4_hba.oas_wq)) 9418 return NULL; 9419 piocb->hba_wqidx = 0; 9420 return phba->sli4_hba.oas_wq->pring; 9421 } 9422 } else { 9423 if (unlikely(!phba->sli4_hba.els_wq)) 9424 return NULL; 9425 piocb->hba_wqidx = 0; 9426 return phba->sli4_hba.els_wq->pring; 9427 } 9428 } 9429 9430 /** 9431 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 9432 * @phba: Pointer to HBA context object. 9433 * @pring: Pointer to driver SLI ring object. 9434 * @piocb: Pointer to command iocb. 9435 * @flag: Flag indicating if this command can be put into txq. 9436 * 9437 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 9438 * function. This function gets the hbalock and calls 9439 * __lpfc_sli_issue_iocb function and will return the error returned 9440 * by __lpfc_sli_issue_iocb function. This wrapper is used by 9441 * functions which do not hold hbalock. 
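 *
 * Example (illustrative only, not part of the driver): a caller that does
 * not hold hbalock could submit an ELS command iocb as
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_els_free_iocb(phba, elsiocb);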
9442 **/ 9443 int 9444 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 9445 struct lpfc_iocbq *piocb, uint32_t flag) 9446 { 9447 struct lpfc_hba_eq_hdl *hba_eq_hdl; 9448 struct lpfc_sli_ring *pring; 9449 struct lpfc_queue *fpeq; 9450 struct lpfc_eqe *eqe; 9451 unsigned long iflags; 9452 int rc, idx; 9453 9454 if (phba->sli_rev == LPFC_SLI_REV4) { 9455 pring = lpfc_sli4_calc_ring(phba, piocb); 9456 if (unlikely(pring == NULL)) 9457 return IOCB_ERROR; 9458 9459 spin_lock_irqsave(&pring->ring_lock, iflags); 9460 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9461 spin_unlock_irqrestore(&pring->ring_lock, iflags); 9462 9463 if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) { 9464 idx = piocb->hba_wqidx; 9465 hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx]; 9466 9467 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) { 9468 9469 /* Get associated EQ with this index */ 9470 fpeq = phba->sli4_hba.hba_eq[idx]; 9471 9472 /* Turn off interrupts from this EQ */ 9473 lpfc_sli4_eq_clr_intr(fpeq); 9474 9475 /* 9476 * Process all the events on FCP EQ 9477 */ 9478 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 9479 lpfc_sli4_hba_handle_eqe(phba, 9480 eqe, idx); 9481 fpeq->EQ_processed++; 9482 } 9483 9484 /* Always clear and re-arm the EQ */ 9485 lpfc_sli4_eq_release(fpeq, 9486 LPFC_QUEUE_REARM); 9487 } 9488 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 9489 } 9490 } else { 9491 /* For now, SLI2/3 will still use hbalock */ 9492 spin_lock_irqsave(&phba->hbalock, iflags); 9493 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 9494 spin_unlock_irqrestore(&phba->hbalock, iflags); 9495 } 9496 return rc; 9497 } 9498 9499 /** 9500 * lpfc_extra_ring_setup - Extra ring setup function 9501 * @phba: Pointer to HBA context object. 9502 * 9503 * This function is called while driver attaches with the 9504 * HBA to setup the extra ring. The extra ring is used 9505 * only when driver needs to support target mode functionality 9506 * or IP over FC functionalities. 9507 * 9508 * This function is called with no lock held. SLI3 only. 9509 **/ 9510 static int 9511 lpfc_extra_ring_setup( struct lpfc_hba *phba) 9512 { 9513 struct lpfc_sli *psli; 9514 struct lpfc_sli_ring *pring; 9515 9516 psli = &phba->sli; 9517 9518 /* Adjust cmd/rsp ring iocb entries more evenly */ 9519 9520 /* Take some away from the FCP ring */ 9521 pring = &psli->sli3_ring[LPFC_FCP_RING]; 9522 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9523 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9524 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9525 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9526 9527 /* and give them to the extra ring */ 9528 pring = &psli->sli3_ring[LPFC_EXTRA_RING]; 9529 9530 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9531 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9532 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9533 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9534 9535 /* Setup default profile for this ring */ 9536 pring->iotag_max = 4096; 9537 pring->num_mask = 1; 9538 pring->prt[0].profile = 0; /* Mask 0 */ 9539 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 9540 pring->prt[0].type = phba->cfg_multi_ring_type; 9541 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 9542 return 0; 9543 } 9544 9545 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 9546 * @phba: Pointer to HBA context object. 9547 * @iocbq: Pointer to iocb object. 
9548 * 9549 * The async_event handler calls this routine when it receives 9550 * an ASYNC_STATUS_CN event from the port. The port generates 9551 * this event when an Abort Sequence request to an rport fails 9552 * twice in succession. The abort could be originated by the 9553 * driver or by the port. The ABTS could have been for an ELS 9554 * or FCP IO. The port only generates this event when an ABTS 9555 * fails to complete after one retry. 9556 */ 9557 static void 9558 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 9559 struct lpfc_iocbq *iocbq) 9560 { 9561 struct lpfc_nodelist *ndlp = NULL; 9562 uint16_t rpi = 0, vpi = 0; 9563 struct lpfc_vport *vport = NULL; 9564 9565 /* The rpi in the ulpContext is vport-sensitive. */ 9566 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 9567 rpi = iocbq->iocb.ulpContext; 9568 9569 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9570 "3092 Port generated ABTS async event " 9571 "on vpi %d rpi %d status 0x%x\n", 9572 vpi, rpi, iocbq->iocb.ulpStatus); 9573 9574 vport = lpfc_find_vport_by_vpid(phba, vpi); 9575 if (!vport) 9576 goto err_exit; 9577 ndlp = lpfc_findnode_rpi(vport, rpi); 9578 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 9579 goto err_exit; 9580 9581 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 9582 lpfc_sli_abts_recover_port(vport, ndlp); 9583 return; 9584 9585 err_exit: 9586 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9587 "3095 Event Context not found, no " 9588 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 9589 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 9590 vpi, rpi); 9591 } 9592 9593 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 9594 * @phba: pointer to HBA context object. 9595 * @ndlp: nodelist pointer for the impacted rport. 9596 * @axri: pointer to the wcqe containing the failed exchange. 9597 * 9598 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 9599 * port. The port generates this event when an abort exchange request to an 9600 * rport fails twice in succession with no reply. The abort could be originated 9601 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 9602 */ 9603 void 9604 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 9605 struct lpfc_nodelist *ndlp, 9606 struct sli4_wcqe_xri_aborted *axri) 9607 { 9608 struct lpfc_vport *vport; 9609 uint32_t ext_status = 0; 9610 9611 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 9612 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 9613 "3115 Node Context not found, driver " 9614 "ignoring abts err event\n"); 9615 return; 9616 } 9617 9618 vport = ndlp->vport; 9619 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 9620 "3116 Port generated FCP XRI ABORT event on " 9621 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 9622 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 9623 bf_get(lpfc_wcqe_xa_xri, axri), 9624 bf_get(lpfc_wcqe_xa_status, axri), 9625 axri->parameter); 9626 9627 /* 9628 * Catch the ABTS protocol failure case. Older OCe FW releases returned 9629 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 9630 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 9631 */ 9632 ext_status = axri->parameter & IOERR_PARAM_MASK; 9633 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 9634 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 9635 lpfc_sli_abts_recover_port(vport, ndlp); 9636 } 9637 9638 /** 9639 * lpfc_sli_async_event_handler - ASYNC iocb handler function 9640 * @phba: Pointer to HBA context object. 
9641 * @pring: Pointer to driver SLI ring object. 9642 * @iocbq: Pointer to iocb object. 9643 * 9644 * This function is called by the slow ring event handler 9645 * function when there is an ASYNC event iocb in the ring. 9646 * This function is called with no lock held. 9647 * Currently this function handles only temperature related 9648 * ASYNC events. The function decodes the temperature sensor 9649 * event message and posts events for the management applications. 9650 **/ 9651 static void 9652 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 9653 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 9654 { 9655 IOCB_t *icmd; 9656 uint16_t evt_code; 9657 struct temp_event temp_event_data; 9658 struct Scsi_Host *shost; 9659 uint32_t *iocb_w; 9660 9661 icmd = &iocbq->iocb; 9662 evt_code = icmd->un.asyncstat.evt_code; 9663 9664 switch (evt_code) { 9665 case ASYNC_TEMP_WARN: 9666 case ASYNC_TEMP_SAFE: 9667 temp_event_data.data = (uint32_t) icmd->ulpContext; 9668 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 9669 if (evt_code == ASYNC_TEMP_WARN) { 9670 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 9671 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9672 "0347 Adapter is very hot, please take " 9673 "corrective action. temperature : %d Celsius\n", 9674 (uint32_t) icmd->ulpContext); 9675 } else { 9676 temp_event_data.event_code = LPFC_NORMAL_TEMP; 9677 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9678 "0340 Adapter temperature is OK now. " 9679 "temperature : %d Celsius\n", 9680 (uint32_t) icmd->ulpContext); 9681 } 9682 9683 /* Send temperature change event to applications */ 9684 shost = lpfc_shost_from_vport(phba->pport); 9685 fc_host_post_vendor_event(shost, fc_get_event_number(), 9686 sizeof(temp_event_data), (char *) &temp_event_data, 9687 LPFC_NL_VENDOR_ID); 9688 break; 9689 case ASYNC_STATUS_CN: 9690 lpfc_sli_abts_err_handler(phba, iocbq); 9691 break; 9692 default: 9693 iocb_w = (uint32_t *) icmd; 9694 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9695 "0346 Ring %d handler: unexpected ASYNC_STATUS" 9696 " evt_code 0x%x\n" 9697 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 9698 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 9699 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 9700 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 9701 pring->ringno, icmd->un.asyncstat.evt_code, 9702 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 9703 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 9704 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 9705 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 9706 9707 break; 9708 } 9709 } 9710 9711 9712 /** 9713 * lpfc_sli4_setup - SLI ring setup function 9714 * @phba: Pointer to HBA context object. 9715 * 9716 * lpfc_sli_setup sets up rings of the SLI interface with 9717 * number of iocbs per ring and iotags. This function is 9718 * called while driver attach to the HBA and before the 9719 * interrupts are enabled. So there is no need for locking. 9720 * 9721 * This function always returns 0. 
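 *
 * Only the ELS work queue pring is given unsolicited receive masks here:
 * ELS requests and replies, and CT (NameServer) requests and responses.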
9722 **/ 9723 int 9724 lpfc_sli4_setup(struct lpfc_hba *phba) 9725 { 9726 struct lpfc_sli_ring *pring; 9727 9728 pring = phba->sli4_hba.els_wq->pring; 9729 pring->num_mask = LPFC_MAX_RING_MASK; 9730 pring->prt[0].profile = 0; /* Mask 0 */ 9731 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9732 pring->prt[0].type = FC_TYPE_ELS; 9733 pring->prt[0].lpfc_sli_rcv_unsol_event = 9734 lpfc_els_unsol_event; 9735 pring->prt[1].profile = 0; /* Mask 1 */ 9736 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9737 pring->prt[1].type = FC_TYPE_ELS; 9738 pring->prt[1].lpfc_sli_rcv_unsol_event = 9739 lpfc_els_unsol_event; 9740 pring->prt[2].profile = 0; /* Mask 2 */ 9741 /* NameServer Inquiry */ 9742 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9743 /* NameServer */ 9744 pring->prt[2].type = FC_TYPE_CT; 9745 pring->prt[2].lpfc_sli_rcv_unsol_event = 9746 lpfc_ct_unsol_event; 9747 pring->prt[3].profile = 0; /* Mask 3 */ 9748 /* NameServer response */ 9749 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9750 /* NameServer */ 9751 pring->prt[3].type = FC_TYPE_CT; 9752 pring->prt[3].lpfc_sli_rcv_unsol_event = 9753 lpfc_ct_unsol_event; 9754 return 0; 9755 } 9756 9757 /** 9758 * lpfc_sli_setup - SLI ring setup function 9759 * @phba: Pointer to HBA context object. 9760 * 9761 * lpfc_sli_setup sets up rings of the SLI interface with 9762 * number of iocbs per ring and iotags. This function is 9763 * called while driver attach to the HBA and before the 9764 * interrupts are enabled. So there is no need for locking. 9765 * 9766 * This function always returns 0. SLI3 only. 9767 **/ 9768 int 9769 lpfc_sli_setup(struct lpfc_hba *phba) 9770 { 9771 int i, totiocbsize = 0; 9772 struct lpfc_sli *psli = &phba->sli; 9773 struct lpfc_sli_ring *pring; 9774 9775 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 9776 psli->sli_flag = 0; 9777 9778 psli->iocbq_lookup = NULL; 9779 psli->iocbq_lookup_len = 0; 9780 psli->last_iotag = 0; 9781 9782 for (i = 0; i < psli->num_rings; i++) { 9783 pring = &psli->sli3_ring[i]; 9784 switch (i) { 9785 case LPFC_FCP_RING: /* ring 0 - FCP */ 9786 /* numCiocb and numRiocb are used in config_port */ 9787 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 9788 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 9789 pring->sli.sli3.numCiocb += 9790 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9791 pring->sli.sli3.numRiocb += 9792 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9793 pring->sli.sli3.numCiocb += 9794 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9795 pring->sli.sli3.numRiocb += 9796 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9797 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9798 SLI3_IOCB_CMD_SIZE : 9799 SLI2_IOCB_CMD_SIZE; 9800 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9801 SLI3_IOCB_RSP_SIZE : 9802 SLI2_IOCB_RSP_SIZE; 9803 pring->iotag_ctr = 0; 9804 pring->iotag_max = 9805 (phba->cfg_hba_queue_depth * 2); 9806 pring->fast_iotag = pring->iotag_max; 9807 pring->num_mask = 0; 9808 break; 9809 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 9810 /* numCiocb and numRiocb are used in config_port */ 9811 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 9812 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 9813 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9814 SLI3_IOCB_CMD_SIZE : 9815 SLI2_IOCB_CMD_SIZE; 9816 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 
9817 SLI3_IOCB_RSP_SIZE : 9818 SLI2_IOCB_RSP_SIZE; 9819 pring->iotag_max = phba->cfg_hba_queue_depth; 9820 pring->num_mask = 0; 9821 break; 9822 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 9823 /* numCiocb and numRiocb are used in config_port */ 9824 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 9825 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 9826 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9827 SLI3_IOCB_CMD_SIZE : 9828 SLI2_IOCB_CMD_SIZE; 9829 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9830 SLI3_IOCB_RSP_SIZE : 9831 SLI2_IOCB_RSP_SIZE; 9832 pring->fast_iotag = 0; 9833 pring->iotag_ctr = 0; 9834 pring->iotag_max = 4096; 9835 pring->lpfc_sli_rcv_async_status = 9836 lpfc_sli_async_event_handler; 9837 pring->num_mask = LPFC_MAX_RING_MASK; 9838 pring->prt[0].profile = 0; /* Mask 0 */ 9839 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9840 pring->prt[0].type = FC_TYPE_ELS; 9841 pring->prt[0].lpfc_sli_rcv_unsol_event = 9842 lpfc_els_unsol_event; 9843 pring->prt[1].profile = 0; /* Mask 1 */ 9844 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9845 pring->prt[1].type = FC_TYPE_ELS; 9846 pring->prt[1].lpfc_sli_rcv_unsol_event = 9847 lpfc_els_unsol_event; 9848 pring->prt[2].profile = 0; /* Mask 2 */ 9849 /* NameServer Inquiry */ 9850 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9851 /* NameServer */ 9852 pring->prt[2].type = FC_TYPE_CT; 9853 pring->prt[2].lpfc_sli_rcv_unsol_event = 9854 lpfc_ct_unsol_event; 9855 pring->prt[3].profile = 0; /* Mask 3 */ 9856 /* NameServer response */ 9857 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9858 /* NameServer */ 9859 pring->prt[3].type = FC_TYPE_CT; 9860 pring->prt[3].lpfc_sli_rcv_unsol_event = 9861 lpfc_ct_unsol_event; 9862 break; 9863 } 9864 totiocbsize += (pring->sli.sli3.numCiocb * 9865 pring->sli.sli3.sizeCiocb) + 9866 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 9867 } 9868 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 9869 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 9870 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 9871 "SLI2 SLIM Data: x%x x%lx\n", 9872 phba->brd_no, totiocbsize, 9873 (unsigned long) MAX_SLIM_IOCB_SIZE); 9874 } 9875 if (phba->cfg_multi_ring_support == 2) 9876 lpfc_extra_ring_setup(phba); 9877 9878 return 0; 9879 } 9880 9881 /** 9882 * lpfc_sli4_queue_init - Queue initialization function 9883 * @phba: Pointer to HBA context object. 9884 * 9885 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each 9886 * ring. This function also initializes ring indices of each ring. 9887 * This function is called during the initialization of the SLI 9888 * interface of an HBA. 9889 * This function is called with no lock held and always returns 9890 * 1. 
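 *
 * The txq, txcmplq and iocb_continueq list heads and the per-pring
 * ring_lock are initialized for every FCP, NVME and ELS work queue and,
 * when configured, for the NVME LS and OAS work queues as well.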
9891 **/ 9892 void 9893 lpfc_sli4_queue_init(struct lpfc_hba *phba) 9894 { 9895 struct lpfc_sli *psli; 9896 struct lpfc_sli_ring *pring; 9897 int i; 9898 9899 psli = &phba->sli; 9900 spin_lock_irq(&phba->hbalock); 9901 INIT_LIST_HEAD(&psli->mboxq); 9902 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9903 /* Initialize list headers for txq and txcmplq as double linked lists */ 9904 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 9905 pring = phba->sli4_hba.fcp_wq[i]->pring; 9906 pring->flag = 0; 9907 pring->ringno = LPFC_FCP_RING; 9908 INIT_LIST_HEAD(&pring->txq); 9909 INIT_LIST_HEAD(&pring->txcmplq); 9910 INIT_LIST_HEAD(&pring->iocb_continueq); 9911 spin_lock_init(&pring->ring_lock); 9912 } 9913 for (i = 0; i < phba->cfg_nvme_io_channel; i++) { 9914 pring = phba->sli4_hba.nvme_wq[i]->pring; 9915 pring->flag = 0; 9916 pring->ringno = LPFC_FCP_RING; 9917 INIT_LIST_HEAD(&pring->txq); 9918 INIT_LIST_HEAD(&pring->txcmplq); 9919 INIT_LIST_HEAD(&pring->iocb_continueq); 9920 spin_lock_init(&pring->ring_lock); 9921 } 9922 pring = phba->sli4_hba.els_wq->pring; 9923 pring->flag = 0; 9924 pring->ringno = LPFC_ELS_RING; 9925 INIT_LIST_HEAD(&pring->txq); 9926 INIT_LIST_HEAD(&pring->txcmplq); 9927 INIT_LIST_HEAD(&pring->iocb_continueq); 9928 spin_lock_init(&pring->ring_lock); 9929 9930 if (phba->cfg_nvme_io_channel) { 9931 pring = phba->sli4_hba.nvmels_wq->pring; 9932 pring->flag = 0; 9933 pring->ringno = LPFC_ELS_RING; 9934 INIT_LIST_HEAD(&pring->txq); 9935 INIT_LIST_HEAD(&pring->txcmplq); 9936 INIT_LIST_HEAD(&pring->iocb_continueq); 9937 spin_lock_init(&pring->ring_lock); 9938 } 9939 9940 if (phba->cfg_fof) { 9941 pring = phba->sli4_hba.oas_wq->pring; 9942 pring->flag = 0; 9943 pring->ringno = LPFC_FCP_RING; 9944 INIT_LIST_HEAD(&pring->txq); 9945 INIT_LIST_HEAD(&pring->txcmplq); 9946 INIT_LIST_HEAD(&pring->iocb_continueq); 9947 spin_lock_init(&pring->ring_lock); 9948 } 9949 9950 spin_unlock_irq(&phba->hbalock); 9951 } 9952 9953 /** 9954 * lpfc_sli_queue_init - Queue initialization function 9955 * @phba: Pointer to HBA context object. 9956 * 9957 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each 9958 * ring. This function also initializes ring indices of each ring. 9959 * This function is called during the initialization of the SLI 9960 * interface of an HBA. 9961 * This function is called with no lock held and always returns 9962 * 1. 9963 **/ 9964 void 9965 lpfc_sli_queue_init(struct lpfc_hba *phba) 9966 { 9967 struct lpfc_sli *psli; 9968 struct lpfc_sli_ring *pring; 9969 int i; 9970 9971 psli = &phba->sli; 9972 spin_lock_irq(&phba->hbalock); 9973 INIT_LIST_HEAD(&psli->mboxq); 9974 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9975 /* Initialize list headers for txq and txcmplq as double linked lists */ 9976 for (i = 0; i < psli->num_rings; i++) { 9977 pring = &psli->sli3_ring[i]; 9978 pring->ringno = i; 9979 pring->sli.sli3.next_cmdidx = 0; 9980 pring->sli.sli3.local_getidx = 0; 9981 pring->sli.sli3.cmdidx = 0; 9982 INIT_LIST_HEAD(&pring->iocb_continueq); 9983 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 9984 INIT_LIST_HEAD(&pring->postbufq); 9985 pring->flag = 0; 9986 INIT_LIST_HEAD(&pring->txq); 9987 INIT_LIST_HEAD(&pring->txcmplq); 9988 spin_lock_init(&pring->ring_lock); 9989 } 9990 spin_unlock_irq(&phba->hbalock); 9991 } 9992 9993 /** 9994 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 9995 * @phba: Pointer to HBA context object. 9996 * 9997 * This routine flushes the mailbox command subsystem. 
It will unconditionally 9998 * flush all the mailbox commands in the three possible stages in the mailbox 9999 * command sub-system: pending mailbox command queue; the outstanding mailbox 10000 * command; and completed mailbox command queue. It is caller's responsibility 10001 * to make sure that the driver is in the proper state to flush the mailbox 10002 * command sub-system. Namely, the posting of mailbox commands into the 10003 * pending mailbox command queue from the various clients must be stopped; 10004 * either the HBA is in a state that it will never works on the outstanding 10005 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 10006 * mailbox command has been completed. 10007 **/ 10008 static void 10009 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 10010 { 10011 LIST_HEAD(completions); 10012 struct lpfc_sli *psli = &phba->sli; 10013 LPFC_MBOXQ_t *pmb; 10014 unsigned long iflag; 10015 10016 /* Flush all the mailbox commands in the mbox system */ 10017 spin_lock_irqsave(&phba->hbalock, iflag); 10018 /* The pending mailbox command queue */ 10019 list_splice_init(&phba->sli.mboxq, &completions); 10020 /* The outstanding active mailbox command */ 10021 if (psli->mbox_active) { 10022 list_add_tail(&psli->mbox_active->list, &completions); 10023 psli->mbox_active = NULL; 10024 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 10025 } 10026 /* The completed mailbox command queue */ 10027 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 10028 spin_unlock_irqrestore(&phba->hbalock, iflag); 10029 10030 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 10031 while (!list_empty(&completions)) { 10032 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 10033 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 10034 if (pmb->mbox_cmpl) 10035 pmb->mbox_cmpl(phba, pmb); 10036 } 10037 } 10038 10039 /** 10040 * lpfc_sli_host_down - Vport cleanup function 10041 * @vport: Pointer to virtual port object. 10042 * 10043 * lpfc_sli_host_down is called to clean up the resources 10044 * associated with a vport before destroying virtual 10045 * port data structures. 10046 * This function does following operations: 10047 * - Free discovery resources associated with this virtual 10048 * port. 10049 * - Free iocbs associated with this virtual port in 10050 * the txq. 10051 * - Send abort for all iocb commands associated with this 10052 * vport in txcmplq. 10053 * 10054 * This function is called with no lock held and always returns 1. 10055 **/ 10056 int 10057 lpfc_sli_host_down(struct lpfc_vport *vport) 10058 { 10059 LIST_HEAD(completions); 10060 struct lpfc_hba *phba = vport->phba; 10061 struct lpfc_sli *psli = &phba->sli; 10062 struct lpfc_queue *qp = NULL; 10063 struct lpfc_sli_ring *pring; 10064 struct lpfc_iocbq *iocb, *next_iocb; 10065 int i; 10066 unsigned long flags = 0; 10067 uint16_t prev_pring_flag; 10068 10069 lpfc_cleanup_discovery_resources(vport); 10070 10071 spin_lock_irqsave(&phba->hbalock, flags); 10072 10073 /* 10074 * Error everything on the txq since these iocbs 10075 * have not been given to the FW yet. 
10076 * Also issue ABTS for everything on the txcmplq 10077 */ 10078 if (phba->sli_rev != LPFC_SLI_REV4) { 10079 for (i = 0; i < psli->num_rings; i++) { 10080 pring = &psli->sli3_ring[i]; 10081 prev_pring_flag = pring->flag; 10082 /* Only slow rings */ 10083 if (pring->ringno == LPFC_ELS_RING) { 10084 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10085 /* Set the lpfc data pending flag */ 10086 set_bit(LPFC_DATA_READY, &phba->data_flags); 10087 } 10088 list_for_each_entry_safe(iocb, next_iocb, 10089 &pring->txq, list) { 10090 if (iocb->vport != vport) 10091 continue; 10092 list_move_tail(&iocb->list, &completions); 10093 } 10094 list_for_each_entry_safe(iocb, next_iocb, 10095 &pring->txcmplq, list) { 10096 if (iocb->vport != vport) 10097 continue; 10098 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10099 } 10100 pring->flag = prev_pring_flag; 10101 } 10102 } else { 10103 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10104 pring = qp->pring; 10105 if (!pring) 10106 continue; 10107 if (pring == phba->sli4_hba.els_wq->pring) { 10108 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10109 /* Set the lpfc data pending flag */ 10110 set_bit(LPFC_DATA_READY, &phba->data_flags); 10111 } 10112 prev_pring_flag = pring->flag; 10113 spin_lock_irq(&pring->ring_lock); 10114 list_for_each_entry_safe(iocb, next_iocb, 10115 &pring->txq, list) { 10116 if (iocb->vport != vport) 10117 continue; 10118 list_move_tail(&iocb->list, &completions); 10119 } 10120 spin_unlock_irq(&pring->ring_lock); 10121 list_for_each_entry_safe(iocb, next_iocb, 10122 &pring->txcmplq, list) { 10123 if (iocb->vport != vport) 10124 continue; 10125 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 10126 } 10127 pring->flag = prev_pring_flag; 10128 } 10129 } 10130 spin_unlock_irqrestore(&phba->hbalock, flags); 10131 10132 /* Cancel all the IOCBs from the completions list */ 10133 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10134 IOERR_SLI_DOWN); 10135 return 1; 10136 } 10137 10138 /** 10139 * lpfc_sli_hba_down - Resource cleanup function for the HBA 10140 * @phba: Pointer to HBA context object. 10141 * 10142 * This function cleans up all iocb, buffers, mailbox commands 10143 * while shutting down the HBA. This function is called with no 10144 * lock held and always returns 1. 10145 * This function does the following to cleanup driver resources: 10146 * - Free discovery resources for each virtual port 10147 * - Cleanup any pending fabric iocbs 10148 * - Iterate through the iocb txq and free each entry 10149 * in the list. 10150 * - Free up any buffer posted to the HBA 10151 * - Free mailbox commands in the mailbox queue. 10152 **/ 10153 int 10154 lpfc_sli_hba_down(struct lpfc_hba *phba) 10155 { 10156 LIST_HEAD(completions); 10157 struct lpfc_sli *psli = &phba->sli; 10158 struct lpfc_queue *qp = NULL; 10159 struct lpfc_sli_ring *pring; 10160 struct lpfc_dmabuf *buf_ptr; 10161 unsigned long flags = 0; 10162 int i; 10163 10164 /* Shutdown the mailbox command sub-system */ 10165 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 10166 10167 lpfc_hba_down_prep(phba); 10168 10169 lpfc_fabric_abort_hba(phba); 10170 10171 spin_lock_irqsave(&phba->hbalock, flags); 10172 10173 /* 10174 * Error everything on the txq since these iocbs 10175 * have not been given to the FW yet. 
10176 */ 10177 if (phba->sli_rev != LPFC_SLI_REV4) { 10178 for (i = 0; i < psli->num_rings; i++) { 10179 pring = &psli->sli3_ring[i]; 10180 /* Only slow rings */ 10181 if (pring->ringno == LPFC_ELS_RING) { 10182 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10183 /* Set the lpfc data pending flag */ 10184 set_bit(LPFC_DATA_READY, &phba->data_flags); 10185 } 10186 list_splice_init(&pring->txq, &completions); 10187 } 10188 } else { 10189 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10190 pring = qp->pring; 10191 if (!pring) 10192 continue; 10193 spin_lock_irq(&pring->ring_lock); 10194 list_splice_init(&pring->txq, &completions); 10195 spin_unlock_irq(&pring->ring_lock); 10196 if (pring == phba->sli4_hba.els_wq->pring) { 10197 pring->flag |= LPFC_DEFERRED_RING_EVENT; 10198 /* Set the lpfc data pending flag */ 10199 set_bit(LPFC_DATA_READY, &phba->data_flags); 10200 } 10201 } 10202 } 10203 spin_unlock_irqrestore(&phba->hbalock, flags); 10204 10205 /* Cancel all the IOCBs from the completions list */ 10206 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 10207 IOERR_SLI_DOWN); 10208 10209 spin_lock_irqsave(&phba->hbalock, flags); 10210 list_splice_init(&phba->elsbuf, &completions); 10211 phba->elsbuf_cnt = 0; 10212 phba->elsbuf_prev_cnt = 0; 10213 spin_unlock_irqrestore(&phba->hbalock, flags); 10214 10215 while (!list_empty(&completions)) { 10216 list_remove_head(&completions, buf_ptr, 10217 struct lpfc_dmabuf, list); 10218 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 10219 kfree(buf_ptr); 10220 } 10221 10222 /* Return any active mbox cmds */ 10223 del_timer_sync(&psli->mbox_tmo); 10224 10225 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 10226 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 10227 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 10228 10229 return 1; 10230 } 10231 10232 /** 10233 * lpfc_sli_pcimem_bcopy - SLI memory copy function 10234 * @srcp: Source memory pointer. 10235 * @destp: Destination memory pointer. 10236 * @cnt: Number of words required to be copied. 10237 * 10238 * This function is used for copying data between driver memory 10239 * and the SLI memory. This function also changes the endianness 10240 * of each word if native endianness is different from SLI 10241 * endianness. This function can be called with or without 10242 * lock. 10243 **/ 10244 void 10245 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 10246 { 10247 uint32_t *src = srcp; 10248 uint32_t *dest = destp; 10249 uint32_t ldata; 10250 int i; 10251 10252 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 10253 ldata = *src; 10254 ldata = le32_to_cpu(ldata); 10255 *dest = ldata; 10256 src++; 10257 dest++; 10258 } 10259 } 10260 10261 10262 /** 10263 * lpfc_sli_bemem_bcopy - SLI memory copy function 10264 * @srcp: Source memory pointer. 10265 * @destp: Destination memory pointer. 10266 * @cnt: Number of words required to be copied. 10267 * 10268 * This function is used for copying data between a data structure 10269 * with big endian representation to local endianness. 10270 * This function can be called with or without lock. 
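 *
 * Note that @cnt is interpreted as a byte count; the copy advances one
 * 32-bit word at a time, byte swapping each word from big endian.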
10271 **/ 10272 void 10273 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 10274 { 10275 uint32_t *src = srcp; 10276 uint32_t *dest = destp; 10277 uint32_t ldata; 10278 int i; 10279 10280 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 10281 ldata = *src; 10282 ldata = be32_to_cpu(ldata); 10283 *dest = ldata; 10284 src++; 10285 dest++; 10286 } 10287 } 10288 10289 /** 10290 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 10291 * @phba: Pointer to HBA context object. 10292 * @pring: Pointer to driver SLI ring object. 10293 * @mp: Pointer to driver buffer object. 10294 * 10295 * This function is called with no lock held. 10296 * It always return zero after adding the buffer to the postbufq 10297 * buffer list. 10298 **/ 10299 int 10300 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10301 struct lpfc_dmabuf *mp) 10302 { 10303 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 10304 later */ 10305 spin_lock_irq(&phba->hbalock); 10306 list_add_tail(&mp->list, &pring->postbufq); 10307 pring->postbufq_cnt++; 10308 spin_unlock_irq(&phba->hbalock); 10309 return 0; 10310 } 10311 10312 /** 10313 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 10314 * @phba: Pointer to HBA context object. 10315 * 10316 * When HBQ is enabled, buffers are searched based on tags. This function 10317 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 10318 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 10319 * does not conflict with tags of buffer posted for unsolicited events. 10320 * The function returns the allocated tag. The function is called with 10321 * no locks held. 10322 **/ 10323 uint32_t 10324 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 10325 { 10326 spin_lock_irq(&phba->hbalock); 10327 phba->buffer_tag_count++; 10328 /* 10329 * Always set the QUE_BUFTAG_BIT to distiguish between 10330 * a tag assigned by HBQ. 10331 */ 10332 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 10333 spin_unlock_irq(&phba->hbalock); 10334 return phba->buffer_tag_count; 10335 } 10336 10337 /** 10338 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 10339 * @phba: Pointer to HBA context object. 10340 * @pring: Pointer to driver SLI ring object. 10341 * @tag: Buffer tag. 10342 * 10343 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 10344 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 10345 * iocb is posted to the response ring with the tag of the buffer. 10346 * This function searches the pring->postbufq list using the tag 10347 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 10348 * iocb. If the buffer is found then lpfc_dmabuf object of the 10349 * buffer is returned to the caller else NULL is returned. 10350 * This function is called with no lock held. 
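 *
 * The tags matched here are typically the QUE_BUFTAG_BIT values handed
 * out by lpfc_sli_get_buffer_tag() when the buffer was posted.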
10351 **/ 10352 struct lpfc_dmabuf * 10353 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10354 uint32_t tag) 10355 { 10356 struct lpfc_dmabuf *mp, *next_mp; 10357 struct list_head *slp = &pring->postbufq; 10358 10359 /* Search postbufq, from the beginning, looking for a match on tag */ 10360 spin_lock_irq(&phba->hbalock); 10361 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10362 if (mp->buffer_tag == tag) { 10363 list_del_init(&mp->list); 10364 pring->postbufq_cnt--; 10365 spin_unlock_irq(&phba->hbalock); 10366 return mp; 10367 } 10368 } 10369 10370 spin_unlock_irq(&phba->hbalock); 10371 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10372 "0402 Cannot find virtual addr for buffer tag on " 10373 "ring %d Data x%lx x%p x%p x%x\n", 10374 pring->ringno, (unsigned long) tag, 10375 slp->next, slp->prev, pring->postbufq_cnt); 10376 10377 return NULL; 10378 } 10379 10380 /** 10381 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 10382 * @phba: Pointer to HBA context object. 10383 * @pring: Pointer to driver SLI ring object. 10384 * @phys: DMA address of the buffer. 10385 * 10386 * This function searches the buffer list using the dma_address 10387 * of unsolicited event to find the driver's lpfc_dmabuf object 10388 * corresponding to the dma_address. The function returns the 10389 * lpfc_dmabuf object if a buffer is found else it returns NULL. 10390 * This function is called by the ct and els unsolicited event 10391 * handlers to get the buffer associated with the unsolicited 10392 * event. 10393 * 10394 * This function is called with no lock held. 10395 **/ 10396 struct lpfc_dmabuf * 10397 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10398 dma_addr_t phys) 10399 { 10400 struct lpfc_dmabuf *mp, *next_mp; 10401 struct list_head *slp = &pring->postbufq; 10402 10403 /* Search postbufq, from the beginning, looking for a match on phys */ 10404 spin_lock_irq(&phba->hbalock); 10405 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 10406 if (mp->phys == phys) { 10407 list_del_init(&mp->list); 10408 pring->postbufq_cnt--; 10409 spin_unlock_irq(&phba->hbalock); 10410 return mp; 10411 } 10412 } 10413 10414 spin_unlock_irq(&phba->hbalock); 10415 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10416 "0410 Cannot find virtual addr for mapped buf on " 10417 "ring %d Data x%llx x%p x%p x%x\n", 10418 pring->ringno, (unsigned long long)phys, 10419 slp->next, slp->prev, pring->postbufq_cnt); 10420 return NULL; 10421 } 10422 10423 /** 10424 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 10425 * @phba: Pointer to HBA context object. 10426 * @cmdiocb: Pointer to driver command iocb object. 10427 * @rspiocb: Pointer to driver response iocb object. 10428 * 10429 * This function is the completion handler for the abort iocbs for 10430 * ELS commands. This function is called from the ELS ring event 10431 * handler with no lock held. This function frees memory resources 10432 * associated with the abort iocb. 10433 **/ 10434 static void 10435 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10436 struct lpfc_iocbq *rspiocb) 10437 { 10438 IOCB_t *irsp = &rspiocb->iocb; 10439 uint16_t abort_iotag, abort_context; 10440 struct lpfc_iocbq *abort_iocb = NULL; 10441 10442 if (irsp->ulpStatus) { 10443 10444 /* 10445 * Assume that the port already completed and returned, or 10446 * will return the iocb. Just Log the message. 
10447 */ 10448 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 10449 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 10450 10451 spin_lock_irq(&phba->hbalock); 10452 if (phba->sli_rev < LPFC_SLI_REV4) { 10453 if (abort_iotag != 0 && 10454 abort_iotag <= phba->sli.last_iotag) 10455 abort_iocb = 10456 phba->sli.iocbq_lookup[abort_iotag]; 10457 } else 10458 /* For sli4 the abort_tag is the XRI, 10459 * so the abort routine puts the iotag of the iocb 10460 * being aborted in the context field of the abort 10461 * IOCB. 10462 */ 10463 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 10464 10465 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 10466 "0327 Cannot abort els iocb %p " 10467 "with tag %x context %x, abort status %x, " 10468 "abort code %x\n", 10469 abort_iocb, abort_iotag, abort_context, 10470 irsp->ulpStatus, irsp->un.ulpWord[4]); 10471 10472 spin_unlock_irq(&phba->hbalock); 10473 } 10474 lpfc_sli_release_iocbq(phba, cmdiocb); 10475 return; 10476 } 10477 10478 /** 10479 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 10480 * @phba: Pointer to HBA context object. 10481 * @cmdiocb: Pointer to driver command iocb object. 10482 * @rspiocb: Pointer to driver response iocb object. 10483 * 10484 * The function is called from SLI ring event handler with no 10485 * lock held. This function is the completion handler for ELS commands 10486 * which are aborted. The function frees memory resources used for 10487 * the aborted ELS commands. 10488 **/ 10489 static void 10490 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10491 struct lpfc_iocbq *rspiocb) 10492 { 10493 IOCB_t *irsp = &rspiocb->iocb; 10494 10495 /* ELS cmd tag <ulpIoTag> completes */ 10496 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 10497 "0139 Ignoring ELS cmd tag x%x completion Data: " 10498 "x%x x%x x%x\n", 10499 irsp->ulpIoTag, irsp->ulpStatus, 10500 irsp->un.ulpWord[4], irsp->ulpTimeout); 10501 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 10502 lpfc_ct_free_iocb(phba, cmdiocb); 10503 else 10504 lpfc_els_free_iocb(phba, cmdiocb); 10505 return; 10506 } 10507 10508 /** 10509 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 10510 * @phba: Pointer to HBA context object. 10511 * @pring: Pointer to driver SLI ring object. 10512 * @cmdiocb: Pointer to driver command iocb object. 10513 * 10514 * This function issues an abort iocb for the provided command iocb down to 10515 * the port. Other than the case the outstanding command iocb is an abort 10516 * request, this function issues abort out unconditionally. This function is 10517 * called with hbalock held. The function returns 0 when it fails due to 10518 * memory allocation failure or when the command iocb is an abort request. 10519 **/ 10520 static int 10521 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10522 struct lpfc_iocbq *cmdiocb) 10523 { 10524 struct lpfc_vport *vport = cmdiocb->vport; 10525 struct lpfc_iocbq *abtsiocbp; 10526 IOCB_t *icmd = NULL; 10527 IOCB_t *iabt = NULL; 10528 int retval; 10529 unsigned long iflags; 10530 10531 lockdep_assert_held(&phba->hbalock); 10532 10533 /* 10534 * There are certain command types we don't want to abort. And we 10535 * don't want to abort commands that are already in the process of 10536 * being aborted. 
10537 */
10538 icmd = &cmdiocb->iocb;
10539 if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10540 icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10541 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10542 return 0;
10543
10544 /* issue ABTS for this IOCB based on iotag */
10545 abtsiocbp = __lpfc_sli_get_iocbq(phba);
10546 if (abtsiocbp == NULL)
10547 return 0;
10548
10549 /* This signals the response path to set the correct status
10550 * before calling the completion handler
10551 */
10552 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10553
10554 iabt = &abtsiocbp->iocb;
10555 iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10556 iabt->un.acxri.abortContextTag = icmd->ulpContext;
10557 if (phba->sli_rev == LPFC_SLI_REV4) {
10558 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10559 iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10560 }
10561 else
10562 iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10563 iabt->ulpLe = 1;
10564 iabt->ulpClass = icmd->ulpClass;
10565
10566 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
10567 abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10568 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10569 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10570 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10571 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10572
10573 if (phba->link_state >= LPFC_LINK_UP)
10574 iabt->ulpCommand = CMD_ABORT_XRI_CN;
10575 else
10576 iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10577
10578 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10579 abtsiocbp->vport = vport;
10580
10581 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10582 "0339 Abort xri x%x, original iotag x%x, "
10583 "abort cmd iotag x%x\n",
10584 iabt->un.acxri.abortIoTag,
10585 iabt->un.acxri.abortContextTag,
10586 abtsiocbp->iotag);
10587
10588 if (phba->sli_rev == LPFC_SLI_REV4) {
10589 pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10590 if (unlikely(pring == NULL))
10591 return 0;
10592 /* Note: both hbalock and ring_lock need to be held here */
10593 spin_lock_irqsave(&pring->ring_lock, iflags);
10594 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10595 abtsiocbp, 0);
10596 spin_unlock_irqrestore(&pring->ring_lock, iflags);
10597 } else {
10598 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10599 abtsiocbp, 0);
10600 }
10601
10602 if (retval)
10603 __lpfc_sli_release_iocbq(phba, abtsiocbp);
10604
10605 /*
10606 * The caller of this routine should check for IOCB_ERROR
10607 * and handle it properly. This routine no longer removes the
10608 * iocb from the txcmplq or calls compl in case of IOCB_ERROR.
10609 */
10610 return retval;
10611 }
10612
10613 /**
10614 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10615 * @phba: Pointer to HBA context object.
10616 * @pring: Pointer to driver SLI ring object.
10617 * @cmdiocb: Pointer to driver command iocb object.
10618 *
10619 * This function issues an abort iocb for the provided command iocb. In case
10620 * of unloading, the abort iocb will not be issued to commands on the ELS
10621 * ring. Instead, the callback function is changed for those commands
10622 * so that nothing happens when they finish. This function is called with
10623 * hbalock held. The function returns 0 when the command iocb is an abort
10624 * request.
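*
* Typical call pattern (illustrative only; the locking shown is the
* caller's responsibility as noted above, and the recovery step is an
* assumption, not code from this driver):
*
*   spin_lock_irqsave(&phba->hbalock, flags);
*   ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
*   spin_unlock_irqrestore(&phba->hbalock, flags);
*   if (ret == IOCB_ERROR)
*           the abort was not queued; the caller decides how to recover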
10625 **/ 10626 int 10627 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10628 struct lpfc_iocbq *cmdiocb) 10629 { 10630 struct lpfc_vport *vport = cmdiocb->vport; 10631 int retval = IOCB_ERROR; 10632 IOCB_t *icmd = NULL; 10633 10634 lockdep_assert_held(&phba->hbalock); 10635 10636 /* 10637 * There are certain command types we don't want to abort. And we 10638 * don't want to abort commands that are already in the process of 10639 * being aborted. 10640 */ 10641 icmd = &cmdiocb->iocb; 10642 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 10643 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 10644 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10645 return 0; 10646 10647 if (!pring) { 10648 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 10649 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 10650 else 10651 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 10652 goto abort_iotag_exit; 10653 } 10654 10655 /* 10656 * If we're unloading, don't abort iocb on the ELS ring, but change 10657 * the callback so that nothing happens when it finishes. 10658 */ 10659 if ((vport->load_flag & FC_UNLOADING) && 10660 (pring->ringno == LPFC_ELS_RING)) { 10661 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 10662 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 10663 else 10664 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 10665 goto abort_iotag_exit; 10666 } 10667 10668 /* Now, we try to issue the abort to the cmdiocb out */ 10669 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 10670 10671 abort_iotag_exit: 10672 /* 10673 * Caller to this routine should check for IOCB_ERROR 10674 * and handle it properly. This routine no longer removes 10675 * iocb off txcmplq and call compl in case of IOCB_ERROR. 10676 */ 10677 return retval; 10678 } 10679 10680 /** 10681 * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb 10682 * @phba: Pointer to HBA context object. 10683 * @pring: Pointer to driver SLI ring object. 10684 * @cmdiocb: Pointer to driver command iocb object. 10685 * 10686 * This function issues an abort iocb for the provided command iocb down to 10687 * the port. Other than the case the outstanding command iocb is an abort 10688 * request, this function issues abort out unconditionally. This function is 10689 * called with hbalock held. The function returns 0 when it fails due to 10690 * memory allocation failure or when the command iocb is an abort request. 10691 **/ 10692 static int 10693 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 10694 struct lpfc_iocbq *cmdiocb) 10695 { 10696 struct lpfc_vport *vport = cmdiocb->vport; 10697 struct lpfc_iocbq *abtsiocbp; 10698 union lpfc_wqe *abts_wqe; 10699 int retval; 10700 10701 /* 10702 * There are certain command types we don't want to abort. And we 10703 * don't want to abort commands that are already in the process of 10704 * being aborted. 10705 */ 10706 if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 10707 cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN || 10708 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 10709 return 0; 10710 10711 /* issue ABTS for this io based on iotag */ 10712 abtsiocbp = __lpfc_sli_get_iocbq(phba); 10713 if (abtsiocbp == NULL) 10714 return 0; 10715 10716 /* This signals the response to set the correct status 10717 * before calling the completion handler 10718 */ 10719 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 10720 10721 /* Complete prepping the abort wqe and issue to the FW. 
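* The WQE assembled below is a CMD_ABORT_XRI_CX request: word 8 carries
* the XRI of the outstanding exchange to be aborted and word 9 carries
* the iotag that identifies this abort's own completion.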
*/ 10722 abts_wqe = &abtsiocbp->wqe; 10723 bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0); 10724 bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); 10725 10726 /* Explicitly set reserved fields to zero.*/ 10727 abts_wqe->abort_cmd.rsrvd4 = 0; 10728 abts_wqe->abort_cmd.rsrvd5 = 0; 10729 10730 /* WQE Common - word 6. Context is XRI tag. Set 0. */ 10731 bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0); 10732 bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0); 10733 10734 /* word 7 */ 10735 bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); 10736 bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 10737 bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, 10738 cmdiocb->iocb.ulpClass); 10739 10740 /* word 8 - tell the FW to abort the IO associated with this 10741 * outstanding exchange ID. 10742 */ 10743 abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag; 10744 10745 /* word 9 - this is the iotag for the abts_wqe completion. */ 10746 bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, 10747 abtsiocbp->iotag); 10748 10749 /* word 10 */ 10750 bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx); 10751 bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); 10752 bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); 10753 10754 /* word 11 */ 10755 bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); 10756 bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); 10757 bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 10758 10759 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10760 abtsiocbp->iocb_flag |= LPFC_IO_NVME; 10761 abtsiocbp->vport = vport; 10762 abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl; 10763 retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp); 10764 if (retval) { 10765 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 10766 "6147 Failed abts issue_wqe with status x%x " 10767 "for oxid x%x\n", 10768 retval, cmdiocb->sli4_xritag); 10769 lpfc_sli_release_iocbq(phba, abtsiocbp); 10770 return retval; 10771 } 10772 10773 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME, 10774 "6148 Drv Abort NVME Request Issued for " 10775 "ox_id x%x on reqtag x%x\n", 10776 cmdiocb->sli4_xritag, 10777 abtsiocbp->iotag); 10778 10779 return retval; 10780 } 10781 10782 /** 10783 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 10784 * @phba: pointer to lpfc HBA data structure. 10785 * 10786 * This routine will abort all pending and outstanding iocbs to an HBA. 10787 **/ 10788 void 10789 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 10790 { 10791 struct lpfc_sli *psli = &phba->sli; 10792 struct lpfc_sli_ring *pring; 10793 struct lpfc_queue *qp = NULL; 10794 int i; 10795 10796 if (phba->sli_rev != LPFC_SLI_REV4) { 10797 for (i = 0; i < psli->num_rings; i++) { 10798 pring = &psli->sli3_ring[i]; 10799 lpfc_sli_abort_iocb_ring(phba, pring); 10800 } 10801 return; 10802 } 10803 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) { 10804 pring = qp->pring; 10805 if (!pring) 10806 continue; 10807 lpfc_sli_abort_iocb_ring(phba, pring); 10808 } 10809 } 10810 10811 /** 10812 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 10813 * @iocbq: Pointer to driver iocb object. 10814 * @vport: Pointer to driver virtual port object. 10815 * @tgt_id: SCSI ID of the target. 10816 * @lun_id: LUN ID of the scsi device. 
10817 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 10818 * 10819 * This function acts as an iocb filter for functions which abort or count 10820 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 10821 * 0 if the filtering criteria is met for the given iocb and will return 10822 * 1 if the filtering criteria is not met. 10823 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 10824 * given iocb is for the SCSI device specified by vport, tgt_id and 10825 * lun_id parameter. 10826 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 10827 * given iocb is for the SCSI target specified by vport and tgt_id 10828 * parameters. 10829 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 10830 * given iocb is for the SCSI host associated with the given vport. 10831 * This function is called with no locks held. 10832 **/ 10833 static int 10834 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 10835 uint16_t tgt_id, uint64_t lun_id, 10836 lpfc_ctx_cmd ctx_cmd) 10837 { 10838 struct lpfc_scsi_buf *lpfc_cmd; 10839 int rc = 1; 10840 10841 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 10842 return rc; 10843 10844 if (iocbq->vport != vport) 10845 return rc; 10846 10847 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 10848 10849 if (lpfc_cmd->pCmd == NULL) 10850 return rc; 10851 10852 switch (ctx_cmd) { 10853 case LPFC_CTX_LUN: 10854 if ((lpfc_cmd->rdata->pnode) && 10855 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 10856 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 10857 rc = 0; 10858 break; 10859 case LPFC_CTX_TGT: 10860 if ((lpfc_cmd->rdata->pnode) && 10861 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 10862 rc = 0; 10863 break; 10864 case LPFC_CTX_HOST: 10865 rc = 0; 10866 break; 10867 default: 10868 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 10869 __func__, ctx_cmd); 10870 break; 10871 } 10872 10873 return rc; 10874 } 10875 10876 /** 10877 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 10878 * @vport: Pointer to virtual port. 10879 * @tgt_id: SCSI ID of the target. 10880 * @lun_id: LUN ID of the scsi device. 10881 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10882 * 10883 * This function returns number of FCP commands pending for the vport. 10884 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 10885 * commands pending on the vport associated with SCSI device specified 10886 * by tgt_id and lun_id parameters. 10887 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 10888 * commands pending on the vport associated with SCSI target specified 10889 * by tgt_id parameter. 10890 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 10891 * commands pending on the vport. 10892 * This function returns the number of iocbs which satisfy the filter. 10893 * This function is called without any lock held. 
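*
* Illustrative use from a reset path, waiting for outstanding I/O on a
* LUN to drain (a sketch; the deadline handling is an assumption, not
* code from this driver):
*
*   while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
*          time_before(jiffies, deadline))
*           schedule_timeout_uninterruptible(msecs_to_jiffies(20));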
10894 **/ 10895 int 10896 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 10897 lpfc_ctx_cmd ctx_cmd) 10898 { 10899 struct lpfc_hba *phba = vport->phba; 10900 struct lpfc_iocbq *iocbq; 10901 int sum, i; 10902 10903 spin_lock_irq(&phba->hbalock); 10904 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 10905 iocbq = phba->sli.iocbq_lookup[i]; 10906 10907 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 10908 ctx_cmd) == 0) 10909 sum++; 10910 } 10911 spin_unlock_irq(&phba->hbalock); 10912 10913 return sum; 10914 } 10915 10916 /** 10917 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 10918 * @phba: Pointer to HBA context object 10919 * @cmdiocb: Pointer to command iocb object. 10920 * @rspiocb: Pointer to response iocb object. 10921 * 10922 * This function is called when an aborted FCP iocb completes. This 10923 * function is called by the ring event handler with no lock held. 10924 * This function frees the iocb. 10925 **/ 10926 void 10927 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10928 struct lpfc_iocbq *rspiocb) 10929 { 10930 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10931 "3096 ABORT_XRI_CN completing on rpi x%x " 10932 "original iotag x%x, abort cmd iotag x%x " 10933 "status 0x%x, reason 0x%x\n", 10934 cmdiocb->iocb.un.acxri.abortContextTag, 10935 cmdiocb->iocb.un.acxri.abortIoTag, 10936 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 10937 rspiocb->iocb.un.ulpWord[4]); 10938 lpfc_sli_release_iocbq(phba, cmdiocb); 10939 return; 10940 } 10941 10942 /** 10943 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 10944 * @vport: Pointer to virtual port. 10945 * @pring: Pointer to driver SLI ring object. 10946 * @tgt_id: SCSI ID of the target. 10947 * @lun_id: LUN ID of the scsi device. 10948 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10949 * 10950 * This function sends an abort command for every SCSI command 10951 * associated with the given virtual port pending on the ring 10952 * filtered by lpfc_sli_validate_fcp_iocb function. 10953 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 10954 * FCP iocbs associated with lun specified by tgt_id and lun_id 10955 * parameters 10956 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 10957 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 10958 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 10959 * FCP iocbs associated with virtual port. 10960 * This function returns number of iocbs it failed to abort. 10961 * This function is called with no locks held. 10962 **/ 10963 int 10964 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 10965 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 10966 { 10967 struct lpfc_hba *phba = vport->phba; 10968 struct lpfc_iocbq *iocbq; 10969 struct lpfc_iocbq *abtsiocb; 10970 struct lpfc_sli_ring *pring_s4; 10971 IOCB_t *cmd = NULL; 10972 int errcnt = 0, ret_val = 0; 10973 int i; 10974 10975 for (i = 1; i <= phba->sli.last_iotag; i++) { 10976 iocbq = phba->sli.iocbq_lookup[i]; 10977 10978 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 10979 abort_cmd) != 0) 10980 continue; 10981 10982 /* 10983 * If the iocbq is already being aborted, don't take a second 10984 * action, but do count it. 
10985 */ 10986 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 10987 continue; 10988 10989 /* issue ABTS for this IOCB based on iotag */ 10990 abtsiocb = lpfc_sli_get_iocbq(phba); 10991 if (abtsiocb == NULL) { 10992 errcnt++; 10993 continue; 10994 } 10995 10996 /* indicate the IO is being aborted by the driver. */ 10997 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 10998 10999 cmd = &iocbq->iocb; 11000 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11001 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 11002 if (phba->sli_rev == LPFC_SLI_REV4) 11003 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 11004 else 11005 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 11006 abtsiocb->iocb.ulpLe = 1; 11007 abtsiocb->iocb.ulpClass = cmd->ulpClass; 11008 abtsiocb->vport = vport; 11009 11010 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11011 abtsiocb->hba_wqidx = iocbq->hba_wqidx; 11012 if (iocbq->iocb_flag & LPFC_IO_FCP) 11013 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 11014 if (iocbq->iocb_flag & LPFC_IO_FOF) 11015 abtsiocb->iocb_flag |= LPFC_IO_FOF; 11016 11017 if (lpfc_is_link_up(phba)) 11018 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11019 else 11020 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11021 11022 /* Setup callback routine and issue the command. */ 11023 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11024 if (phba->sli_rev == LPFC_SLI_REV4) { 11025 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11026 if (!pring_s4) 11027 continue; 11028 ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11029 abtsiocb, 0); 11030 } else 11031 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 11032 abtsiocb, 0); 11033 if (ret_val == IOCB_ERROR) { 11034 lpfc_sli_release_iocbq(phba, abtsiocb); 11035 errcnt++; 11036 continue; 11037 } 11038 } 11039 11040 return errcnt; 11041 } 11042 11043 /** 11044 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 11045 * @vport: Pointer to virtual port. 11046 * @pring: Pointer to driver SLI ring object. 11047 * @tgt_id: SCSI ID of the target. 11048 * @lun_id: LUN ID of the scsi device. 11049 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 11050 * 11051 * This function sends an abort command for every SCSI command 11052 * associated with the given virtual port pending on the ring 11053 * filtered by lpfc_sli_validate_fcp_iocb function. 11054 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 11055 * FCP iocbs associated with lun specified by tgt_id and lun_id 11056 * parameters 11057 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 11058 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 11059 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 11060 * FCP iocbs associated with virtual port. 11061 * This function returns number of iocbs it aborted . 11062 * This function is called with no locks held right after a taskmgmt 11063 * command is sent. 
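*
* Illustrative use after a LUN reset task management command has been
* sent (a sketch; the names are assumptions, not code from this driver):
*
*   aborted = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
*                                     LPFC_CTX_LUN);
*   the caller would then typically poll lpfc_sli_sum_iocb() until the
*   aborted I/Os have drained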
11064 **/ 11065 int 11066 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 11067 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 11068 { 11069 struct lpfc_hba *phba = vport->phba; 11070 struct lpfc_scsi_buf *lpfc_cmd; 11071 struct lpfc_iocbq *abtsiocbq; 11072 struct lpfc_nodelist *ndlp; 11073 struct lpfc_iocbq *iocbq; 11074 IOCB_t *icmd; 11075 int sum, i, ret_val; 11076 unsigned long iflags; 11077 struct lpfc_sli_ring *pring_s4; 11078 11079 spin_lock_irq(&phba->hbalock); 11080 11081 /* all I/Os are in process of being flushed */ 11082 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 11083 spin_unlock_irq(&phba->hbalock); 11084 return 0; 11085 } 11086 sum = 0; 11087 11088 for (i = 1; i <= phba->sli.last_iotag; i++) { 11089 iocbq = phba->sli.iocbq_lookup[i]; 11090 11091 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 11092 cmd) != 0) 11093 continue; 11094 11095 /* 11096 * If the iocbq is already being aborted, don't take a second 11097 * action, but do count it. 11098 */ 11099 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 11100 continue; 11101 11102 /* issue ABTS for this IOCB based on iotag */ 11103 abtsiocbq = __lpfc_sli_get_iocbq(phba); 11104 if (abtsiocbq == NULL) 11105 continue; 11106 11107 icmd = &iocbq->iocb; 11108 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 11109 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 11110 if (phba->sli_rev == LPFC_SLI_REV4) 11111 abtsiocbq->iocb.un.acxri.abortIoTag = 11112 iocbq->sli4_xritag; 11113 else 11114 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 11115 abtsiocbq->iocb.ulpLe = 1; 11116 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 11117 abtsiocbq->vport = vport; 11118 11119 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 11120 abtsiocbq->hba_wqidx = iocbq->hba_wqidx; 11121 if (iocbq->iocb_flag & LPFC_IO_FCP) 11122 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 11123 if (iocbq->iocb_flag & LPFC_IO_FOF) 11124 abtsiocbq->iocb_flag |= LPFC_IO_FOF; 11125 11126 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 11127 ndlp = lpfc_cmd->rdata->pnode; 11128 11129 if (lpfc_is_link_up(phba) && 11130 (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE)) 11131 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 11132 else 11133 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 11134 11135 /* Setup callback routine and issue the command. */ 11136 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 11137 11138 /* 11139 * Indicate the IO is being aborted by the driver and set 11140 * the caller's flag into the aborted IO. 11141 */ 11142 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 11143 11144 if (phba->sli_rev == LPFC_SLI_REV4) { 11145 pring_s4 = lpfc_sli4_calc_ring(phba, iocbq); 11146 if (pring_s4 == NULL) 11147 continue; 11148 /* Note: both hbalock and ring_lock must be set here */ 11149 spin_lock_irqsave(&pring_s4->ring_lock, iflags); 11150 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 11151 abtsiocbq, 0); 11152 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 11153 } else { 11154 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 11155 abtsiocbq, 0); 11156 } 11157 11158 11159 if (ret_val == IOCB_ERROR) 11160 __lpfc_sli_release_iocbq(phba, abtsiocbq); 11161 else 11162 sum++; 11163 } 11164 spin_unlock_irq(&phba->hbalock); 11165 return sum; 11166 } 11167 11168 /** 11169 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 11170 * @phba: Pointer to HBA context object. 11171 * @cmdiocbq: Pointer to command iocb. 11172 * @rspiocbq: Pointer to response iocb. 
11173 *
11174 * This function is the completion handler for iocbs issued using the
11175 * lpfc_sli_issue_iocb_wait function. This function is called by the
11176 * ring event handler function without any lock held. This function
11177 * can be called from both worker thread context and interrupt
11178 * context. This function can also be called from another thread which
11179 * cleans up the SLI layer objects.
11180 * This function copies the contents of the response iocb to the
11181 * response iocb memory object provided by the caller of
11182 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11183 * sleeps for the iocb completion.
11184 **/
11185 static void
11186 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11187 struct lpfc_iocbq *cmdiocbq,
11188 struct lpfc_iocbq *rspiocbq)
11189 {
11190 wait_queue_head_t *pdone_q;
11191 unsigned long iflags;
11192 struct lpfc_scsi_buf *lpfc_cmd;
11193
11194 spin_lock_irqsave(&phba->hbalock, iflags);
11195 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11196
11197 /*
11198 * A time out has occurred for the iocb. If a time out
11199 * completion handler has been supplied, call it. Otherwise,
11200 * just free the iocbq.
11201 */
11202
11203 spin_unlock_irqrestore(&phba->hbalock, iflags);
11204 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11205 cmdiocbq->wait_iocb_cmpl = NULL;
11206 if (cmdiocbq->iocb_cmpl)
11207 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11208 else
11209 lpfc_sli_release_iocbq(phba, cmdiocbq);
11210 return;
11211 }
11212
11213 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11214 if (cmdiocbq->context2 && rspiocbq)
11215 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11216 &rspiocbq->iocb, sizeof(IOCB_t));
11217
11218 /* Set the exchange busy flag for task management commands */
11219 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11220 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11221 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11222 cur_iocbq);
11223 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11224 }
11225
11226 pdone_q = cmdiocbq->context_un.wait_queue;
11227 if (pdone_q)
11228 wake_up(pdone_q);
11229 spin_unlock_irqrestore(&phba->hbalock, iflags);
11230 return;
11231 }
11232
11233 /**
11234 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11235 * @phba: Pointer to HBA context object.
11236 * @piocbq: Pointer to command iocb.
11237 * @flag: Flag to test.
11238 *
11239 * This routine grabs the hbalock and then tests the iocb_flag to
11240 * see if the passed in flag is set.
11241 * Returns:
11242 * 1 if flag is set.
11243 * 0 if flag is not set.
11244 **/
11245 static int
11246 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11247 struct lpfc_iocbq *piocbq, uint32_t flag)
11248 {
11249 unsigned long iflags;
11250 int ret;
11251
11252 spin_lock_irqsave(&phba->hbalock, iflags);
11253 ret = piocbq->iocb_flag & flag;
11254 spin_unlock_irqrestore(&phba->hbalock, iflags);
11255 return ret;
11256
11257 }
11258
11259 /**
11260 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11261 * @phba: Pointer to HBA context object.
11262 * @ring_number: SLI ring number on which to issue the iocb.
11263 * @piocb: Pointer to command iocb.
11264 * @prspiocbq: Pointer to response iocb.
11265 * @timeout: Timeout in number of seconds.
11266 *
11267 * This function issues the iocb to firmware and waits for the
11268 * iocb to complete. The iocb_cmpl field of the iocb shall be used
11269 * to handle iocbs which time out. If the field is NULL, the
11270 * function shall free the iocbq structure.
If more clean up is
11271 * needed, the caller is expected to provide a completion function
11272 * that will provide the needed clean up. If the iocb command is
11273 * not completed within timeout seconds, the function will either
11274 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11275 * completion function set in the iocb_cmpl field and then return
11276 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
11277 * resources if this function returns IOCB_TIMEDOUT.
11278 * The function waits for the iocb completion using a
11279 * non-interruptible wait.
11280 * This function will sleep while waiting for iocb completion.
11281 * So, this function should not be called from any context which
11282 * does not allow sleeping. For the same reason, this function
11283 * cannot be called with interrupts disabled.
11284 * This function assumes that the iocb completions occur while
11285 * this function sleeps. So, this function cannot be called from
11286 * the thread which processes iocb completions for this ring.
11287 * This function clears the iocb_flag of the iocb object before
11288 * issuing the iocb and the iocb completion handler sets this
11289 * flag and wakes this thread when the iocb completes.
11290 * The contents of the response iocb will be copied to prspiocbq
11291 * by the completion handler when the command completes.
11292 * This function returns IOCB_SUCCESS on success.
11293 * This function is called with no lock held.
11294 **/
11295 int
11296 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11297 uint32_t ring_number,
11298 struct lpfc_iocbq *piocb,
11299 struct lpfc_iocbq *prspiocbq,
11300 uint32_t timeout)
11301 {
11302 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11303 long timeleft, timeout_req = 0;
11304 int retval = IOCB_SUCCESS;
11305 uint32_t creg_val;
11306 struct lpfc_iocbq *iocb;
11307 int txq_cnt = 0;
11308 int txcmplq_cnt = 0;
11309 struct lpfc_sli_ring *pring;
11310 unsigned long iflags;
11311 bool iocb_completed = true;
11312
11313 if (phba->sli_rev >= LPFC_SLI_REV4)
11314 pring = lpfc_sli4_calc_ring(phba, piocb);
11315 else
11316 pring = &phba->sli.sli3_ring[ring_number];
11317 /*
11318 * If the caller has provided a response iocbq buffer, then context2
11319 * must be NULL; if it is not, that is an error.
11320 */
11321 if (prspiocbq) {
11322 if (piocb->context2)
11323 return IOCB_ERROR;
11324 piocb->context2 = prspiocbq;
11325 }
11326
11327 piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11328 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11329 piocb->context_un.wait_queue = &done_q;
11330 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11331
11332 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11333 if (lpfc_readl(phba->HCregaddr, &creg_val))
11334 return IOCB_ERROR;
11335 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11336 writel(creg_val, phba->HCregaddr);
11337 readl(phba->HCregaddr); /* flush */
11338 }
11339
11340 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11341 SLI_IOCB_RET_IOCB);
11342 if (retval == IOCB_SUCCESS) {
11343 timeout_req = msecs_to_jiffies(timeout * 1000);
11344 timeleft = wait_event_timeout(done_q,
11345 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11346 timeout_req);
11347 spin_lock_irqsave(&phba->hbalock, iflags);
11348 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11349
11350 /*
11351 * IOCB timed out.
Inform the wake iocb wait 11352 * completion function and set local status 11353 */ 11354 11355 iocb_completed = false; 11356 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 11357 } 11358 spin_unlock_irqrestore(&phba->hbalock, iflags); 11359 if (iocb_completed) { 11360 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11361 "0331 IOCB wake signaled\n"); 11362 /* Note: we are not indicating if the IOCB has a success 11363 * status or not - that's for the caller to check. 11364 * IOCB_SUCCESS means just that the command was sent and 11365 * completed. Not that it completed successfully. 11366 * */ 11367 } else if (timeleft == 0) { 11368 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11369 "0338 IOCB wait timeout error - no " 11370 "wake response Data x%x\n", timeout); 11371 retval = IOCB_TIMEDOUT; 11372 } else { 11373 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11374 "0330 IOCB wake NOT set, " 11375 "Data x%x x%lx\n", 11376 timeout, (timeleft / jiffies)); 11377 retval = IOCB_TIMEDOUT; 11378 } 11379 } else if (retval == IOCB_BUSY) { 11380 if (phba->cfg_log_verbose & LOG_SLI) { 11381 list_for_each_entry(iocb, &pring->txq, list) { 11382 txq_cnt++; 11383 } 11384 list_for_each_entry(iocb, &pring->txcmplq, list) { 11385 txcmplq_cnt++; 11386 } 11387 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11388 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 11389 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 11390 } 11391 return retval; 11392 } else { 11393 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11394 "0332 IOCB wait issue failed, Data x%x\n", 11395 retval); 11396 retval = IOCB_ERROR; 11397 } 11398 11399 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 11400 if (lpfc_readl(phba->HCregaddr, &creg_val)) 11401 return IOCB_ERROR; 11402 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 11403 writel(creg_val, phba->HCregaddr); 11404 readl(phba->HCregaddr); /* flush */ 11405 } 11406 11407 if (prspiocbq) 11408 piocb->context2 = NULL; 11409 11410 piocb->context_un.wait_queue = NULL; 11411 piocb->iocb_cmpl = NULL; 11412 return retval; 11413 } 11414 11415 /** 11416 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 11417 * @phba: Pointer to HBA context object. 11418 * @pmboxq: Pointer to driver mailbox object. 11419 * @timeout: Timeout in number of seconds. 11420 * 11421 * This function issues the mailbox to firmware and waits for the 11422 * mailbox command to complete. If the mailbox command is not 11423 * completed within timeout seconds, it returns MBX_TIMEOUT. 11424 * The function waits for the mailbox completion using an 11425 * interruptible wait. If the thread is woken up due to a 11426 * signal, MBX_TIMEOUT error is returned to the caller. Caller 11427 * should not free the mailbox resources, if this function returns 11428 * MBX_TIMEOUT. 11429 * This function will sleep while waiting for mailbox completion. 11430 * So, this function should not be called from any context which 11431 * does not allow sleeping. Due to the same reason, this function 11432 * cannot be called with interrupt disabled. 11433 * This function assumes that the mailbox completion occurs while 11434 * this function sleep. So, this function cannot be called from 11435 * the worker thread which processes mailbox completion. 11436 * This function is called in the context of HBA management 11437 * applications. 11438 * This function returns MBX_SUCCESS when successful. 11439 * This function is called with no lock held. 
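*
* Illustrative call pattern (a sketch, not code from this file; the
* choice of mailbox command and the error handling are assumptions):
*
*   LPFC_MBOXQ_t *mboxq;
*
*   mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
*   if (!mboxq)
*           return -ENOMEM;
*   lpfc_read_rev(phba, mboxq);
*   rc = lpfc_sli_issue_mbox_wait(phba, mboxq, LPFC_MBOX_TMO);
*   if (rc != MBX_TIMEOUT)
*           mempool_free(mboxq, phba->mbox_mem_pool);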
11440 **/ 11441 int 11442 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 11443 uint32_t timeout) 11444 { 11445 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 11446 MAILBOX_t *mb = NULL; 11447 int retval; 11448 unsigned long flag; 11449 11450 /* The caller might set context1 for extended buffer */ 11451 if (pmboxq->context1) 11452 mb = (MAILBOX_t *)pmboxq->context1; 11453 11454 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 11455 /* setup wake call as IOCB callback */ 11456 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 11457 /* setup context field to pass wait_queue pointer to wake function */ 11458 pmboxq->context1 = &done_q; 11459 11460 /* now issue the command */ 11461 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 11462 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 11463 wait_event_interruptible_timeout(done_q, 11464 pmboxq->mbox_flag & LPFC_MBX_WAKE, 11465 msecs_to_jiffies(timeout * 1000)); 11466 11467 spin_lock_irqsave(&phba->hbalock, flag); 11468 /* restore the possible extended buffer for free resource */ 11469 pmboxq->context1 = (uint8_t *)mb; 11470 /* 11471 * if LPFC_MBX_WAKE flag is set the mailbox is completed 11472 * else do not free the resources. 11473 */ 11474 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 11475 retval = MBX_SUCCESS; 11476 } else { 11477 retval = MBX_TIMEOUT; 11478 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 11479 } 11480 spin_unlock_irqrestore(&phba->hbalock, flag); 11481 } else { 11482 /* restore the possible extended buffer for free resource */ 11483 pmboxq->context1 = (uint8_t *)mb; 11484 } 11485 11486 return retval; 11487 } 11488 11489 /** 11490 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 11491 * @phba: Pointer to HBA context. 11492 * 11493 * This function is called to shutdown the driver's mailbox sub-system. 11494 * It first marks the mailbox sub-system is in a block state to prevent 11495 * the asynchronous mailbox command from issued off the pending mailbox 11496 * command queue. If the mailbox command sub-system shutdown is due to 11497 * HBA error conditions such as EEH or ERATT, this routine shall invoke 11498 * the mailbox sub-system flush routine to forcefully bring down the 11499 * mailbox sub-system. Otherwise, if it is due to normal condition (such 11500 * as with offline or HBA function reset), this routine will wait for the 11501 * outstanding mailbox command to complete before invoking the mailbox 11502 * sub-system flush routine to gracefully bring down mailbox sub-system. 11503 **/ 11504 void 11505 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 11506 { 11507 struct lpfc_sli *psli = &phba->sli; 11508 unsigned long timeout; 11509 11510 if (mbx_action == LPFC_MBX_NO_WAIT) { 11511 /* delay 100ms for port state */ 11512 msleep(100); 11513 lpfc_sli_mbox_sys_flush(phba); 11514 return; 11515 } 11516 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 11517 11518 spin_lock_irq(&phba->hbalock); 11519 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 11520 11521 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 11522 /* Determine how long we might wait for the active mailbox 11523 * command to be gracefully completed by firmware. 
11524 */ 11525 if (phba->sli.mbox_active) 11526 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 11527 phba->sli.mbox_active) * 11528 1000) + jiffies; 11529 spin_unlock_irq(&phba->hbalock); 11530 11531 while (phba->sli.mbox_active) { 11532 /* Check active mailbox complete status every 2ms */ 11533 msleep(2); 11534 if (time_after(jiffies, timeout)) 11535 /* Timeout, let the mailbox flush routine to 11536 * forcefully release active mailbox command 11537 */ 11538 break; 11539 } 11540 } else 11541 spin_unlock_irq(&phba->hbalock); 11542 11543 lpfc_sli_mbox_sys_flush(phba); 11544 } 11545 11546 /** 11547 * lpfc_sli_eratt_read - read sli-3 error attention events 11548 * @phba: Pointer to HBA context. 11549 * 11550 * This function is called to read the SLI3 device error attention registers 11551 * for possible error attention events. The caller must hold the hostlock 11552 * with spin_lock_irq(). 11553 * 11554 * This function returns 1 when there is Error Attention in the Host Attention 11555 * Register and returns 0 otherwise. 11556 **/ 11557 static int 11558 lpfc_sli_eratt_read(struct lpfc_hba *phba) 11559 { 11560 uint32_t ha_copy; 11561 11562 /* Read chip Host Attention (HA) register */ 11563 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11564 goto unplug_err; 11565 11566 if (ha_copy & HA_ERATT) { 11567 /* Read host status register to retrieve error event */ 11568 if (lpfc_sli_read_hs(phba)) 11569 goto unplug_err; 11570 11571 /* Check if there is a deferred error condition is active */ 11572 if ((HS_FFER1 & phba->work_hs) && 11573 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 11574 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 11575 phba->hba_flag |= DEFER_ERATT; 11576 /* Clear all interrupt enable conditions */ 11577 writel(0, phba->HCregaddr); 11578 readl(phba->HCregaddr); 11579 } 11580 11581 /* Set the driver HA work bitmap */ 11582 phba->work_ha |= HA_ERATT; 11583 /* Indicate polling handles this ERATT */ 11584 phba->hba_flag |= HBA_ERATT_HANDLED; 11585 return 1; 11586 } 11587 return 0; 11588 11589 unplug_err: 11590 /* Set the driver HS work bitmap */ 11591 phba->work_hs |= UNPLUG_ERR; 11592 /* Set the driver HA work bitmap */ 11593 phba->work_ha |= HA_ERATT; 11594 /* Indicate polling handles this ERATT */ 11595 phba->hba_flag |= HBA_ERATT_HANDLED; 11596 return 1; 11597 } 11598 11599 /** 11600 * lpfc_sli4_eratt_read - read sli-4 error attention events 11601 * @phba: Pointer to HBA context. 11602 * 11603 * This function is called to read the SLI4 device error attention registers 11604 * for possible error attention events. The caller must hold the hostlock 11605 * with spin_lock_irq(). 11606 * 11607 * This function returns 1 when there is Error Attention in the Host Attention 11608 * Register and returns 0 otherwise. 11609 **/ 11610 static int 11611 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 11612 { 11613 uint32_t uerr_sta_hi, uerr_sta_lo; 11614 uint32_t if_type, portsmphr; 11615 struct lpfc_register portstat_reg; 11616 11617 /* 11618 * For now, use the SLI4 device internal unrecoverable error 11619 * registers for error attention. This can be changed later. 
11620 */ 11621 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 11622 switch (if_type) { 11623 case LPFC_SLI_INTF_IF_TYPE_0: 11624 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 11625 &uerr_sta_lo) || 11626 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 11627 &uerr_sta_hi)) { 11628 phba->work_hs |= UNPLUG_ERR; 11629 phba->work_ha |= HA_ERATT; 11630 phba->hba_flag |= HBA_ERATT_HANDLED; 11631 return 1; 11632 } 11633 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 11634 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 11635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11636 "1423 HBA Unrecoverable error: " 11637 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 11638 "ue_mask_lo_reg=0x%x, " 11639 "ue_mask_hi_reg=0x%x\n", 11640 uerr_sta_lo, uerr_sta_hi, 11641 phba->sli4_hba.ue_mask_lo, 11642 phba->sli4_hba.ue_mask_hi); 11643 phba->work_status[0] = uerr_sta_lo; 11644 phba->work_status[1] = uerr_sta_hi; 11645 phba->work_ha |= HA_ERATT; 11646 phba->hba_flag |= HBA_ERATT_HANDLED; 11647 return 1; 11648 } 11649 break; 11650 case LPFC_SLI_INTF_IF_TYPE_2: 11651 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 11652 &portstat_reg.word0) || 11653 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 11654 &portsmphr)){ 11655 phba->work_hs |= UNPLUG_ERR; 11656 phba->work_ha |= HA_ERATT; 11657 phba->hba_flag |= HBA_ERATT_HANDLED; 11658 return 1; 11659 } 11660 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 11661 phba->work_status[0] = 11662 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 11663 phba->work_status[1] = 11664 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 11665 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11666 "2885 Port Status Event: " 11667 "port status reg 0x%x, " 11668 "port smphr reg 0x%x, " 11669 "error 1=0x%x, error 2=0x%x\n", 11670 portstat_reg.word0, 11671 portsmphr, 11672 phba->work_status[0], 11673 phba->work_status[1]); 11674 phba->work_ha |= HA_ERATT; 11675 phba->hba_flag |= HBA_ERATT_HANDLED; 11676 return 1; 11677 } 11678 break; 11679 case LPFC_SLI_INTF_IF_TYPE_1: 11680 default: 11681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 11682 "2886 HBA Error Attention on unsupported " 11683 "if type %d.", if_type); 11684 return 1; 11685 } 11686 11687 return 0; 11688 } 11689 11690 /** 11691 * lpfc_sli_check_eratt - check error attention events 11692 * @phba: Pointer to HBA context. 11693 * 11694 * This function is called from timer soft interrupt context to check HBA's 11695 * error attention register bit for error attention events. 11696 * 11697 * This function returns 1 when there is Error Attention in the Host Attention 11698 * Register and returns 0 otherwise. 11699 **/ 11700 int 11701 lpfc_sli_check_eratt(struct lpfc_hba *phba) 11702 { 11703 uint32_t ha_copy; 11704 11705 /* If somebody is waiting to handle an eratt, don't process it 11706 * here. The brdkill function will do this. 
11707 */
11708 if (phba->link_flag & LS_IGNORE_ERATT)
11709 return 0;
11710
11711 /* Check if the interrupt handler handles this ERATT */
11712 spin_lock_irq(&phba->hbalock);
11713 if (phba->hba_flag & HBA_ERATT_HANDLED) {
11714 /* Interrupt handler has handled ERATT */
11715 spin_unlock_irq(&phba->hbalock);
11716 return 0;
11717 }
11718
11719 /*
11720 * If there is deferred error attention, do not check for error
11721 * attention
11722 */
11723 if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11724 spin_unlock_irq(&phba->hbalock);
11725 return 0;
11726 }
11727
11728 /* If the PCI channel is offline, don't process it */
11729 if (unlikely(pci_channel_offline(phba->pcidev))) {
11730 spin_unlock_irq(&phba->hbalock);
11731 return 0;
11732 }
11733
11734 switch (phba->sli_rev) {
11735 case LPFC_SLI_REV2:
11736 case LPFC_SLI_REV3:
11737 /* Read chip Host Attention (HA) register */
11738 ha_copy = lpfc_sli_eratt_read(phba);
11739 break;
11740 case LPFC_SLI_REV4:
11741 /* Read device Unrecoverable Error (UERR) registers */
11742 ha_copy = lpfc_sli4_eratt_read(phba);
11743 break;
11744 default:
11745 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11746 "0299 Invalid SLI revision (%d)\n",
11747 phba->sli_rev);
11748 ha_copy = 0;
11749 break;
11750 }
11751 spin_unlock_irq(&phba->hbalock);
11752
11753 return ha_copy;
11754 }
11755
11756 /**
11757 * lpfc_intr_state_check - Check device state for interrupt handling
11758 * @phba: Pointer to HBA context.
11759 *
11760 * This inline routine checks whether the device or its PCI slot is in a
11761 * state in which the interrupt should be handled.
11762 *
11763 * This function returns 0 if the device or the PCI slot is in a state in
11764 * which the interrupt should be handled, otherwise -EIO.
11765 */
11766 static inline int
11767 lpfc_intr_state_check(struct lpfc_hba *phba)
11768 {
11769 /* If the pci channel is offline, ignore all the interrupts */
11770 if (unlikely(pci_channel_offline(phba->pcidev)))
11771 return -EIO;
11772
11773 /* Update device level interrupt statistics */
11774 phba->sli.slistat.sli_intr++;
11775
11776 /* Ignore all interrupts during initialization. */
11777 if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11778 return -EIO;
11779
11780 return 0;
11781 }
11782
11783 /**
11784 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11785 * @irq: Interrupt number.
11786 * @dev_id: The device context pointer.
11787 *
11788 * This function is directly called from the PCI layer as an interrupt
11789 * service routine when a device with the SLI-3 interface spec is enabled
11790 * with MSI-X multi-message interrupt mode and there are slow-path events
11791 * in the HBA. However, when the device is enabled with either MSI or
11792 * Pin-IRQ interrupt mode, this function is called as part of the
11793 * device-level interrupt handler. When the PCI slot is in error recovery
11794 * or the HBA is undergoing initialization, the interrupt handler will not
11795 * process the interrupt. The link attention and ELS ring attention events
11796 * are handled by the worker thread. The interrupt handler signals the
11797 * worker thread and returns for these events. This function is called
11798 * without any lock held. It gets the hbalock to access and update SLI
11799 * data structures.
11800 *
11801 * This function returns IRQ_HANDLED when the interrupt is handled, else
11802 * it returns IRQ_NONE.
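*
* Registration sketch (illustrative only; the vector variable and the
* handler name string are assumptions, not code from this function):
*
*   rc = request_irq(vector, lpfc_sli_sp_intr_handler, 0,
*                    "lpfc-sp", phba);
*
* Note that @dev_id must be the phba pointer, since this handler casts it
* back to struct lpfc_hba.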
11803 **/ 11804 irqreturn_t 11805 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 11806 { 11807 struct lpfc_hba *phba; 11808 uint32_t ha_copy, hc_copy; 11809 uint32_t work_ha_copy; 11810 unsigned long status; 11811 unsigned long iflag; 11812 uint32_t control; 11813 11814 MAILBOX_t *mbox, *pmbox; 11815 struct lpfc_vport *vport; 11816 struct lpfc_nodelist *ndlp; 11817 struct lpfc_dmabuf *mp; 11818 LPFC_MBOXQ_t *pmb; 11819 int rc; 11820 11821 /* 11822 * Get the driver's phba structure from the dev_id and 11823 * assume the HBA is not interrupting. 11824 */ 11825 phba = (struct lpfc_hba *)dev_id; 11826 11827 if (unlikely(!phba)) 11828 return IRQ_NONE; 11829 11830 /* 11831 * Stuff needs to be attented to when this function is invoked as an 11832 * individual interrupt handler in MSI-X multi-message interrupt mode 11833 */ 11834 if (phba->intr_type == MSIX) { 11835 /* Check device state for handling interrupt */ 11836 if (lpfc_intr_state_check(phba)) 11837 return IRQ_NONE; 11838 /* Need to read HA REG for slow-path events */ 11839 spin_lock_irqsave(&phba->hbalock, iflag); 11840 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11841 goto unplug_error; 11842 /* If somebody is waiting to handle an eratt don't process it 11843 * here. The brdkill function will do this. 11844 */ 11845 if (phba->link_flag & LS_IGNORE_ERATT) 11846 ha_copy &= ~HA_ERATT; 11847 /* Check the need for handling ERATT in interrupt handler */ 11848 if (ha_copy & HA_ERATT) { 11849 if (phba->hba_flag & HBA_ERATT_HANDLED) 11850 /* ERATT polling has handled ERATT */ 11851 ha_copy &= ~HA_ERATT; 11852 else 11853 /* Indicate interrupt handler handles ERATT */ 11854 phba->hba_flag |= HBA_ERATT_HANDLED; 11855 } 11856 11857 /* 11858 * If there is deferred error attention, do not check for any 11859 * interrupt. 11860 */ 11861 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11862 spin_unlock_irqrestore(&phba->hbalock, iflag); 11863 return IRQ_NONE; 11864 } 11865 11866 /* Clear up only attention source related to slow-path */ 11867 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 11868 goto unplug_error; 11869 11870 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 11871 HC_LAINT_ENA | HC_ERINT_ENA), 11872 phba->HCregaddr); 11873 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 11874 phba->HAregaddr); 11875 writel(hc_copy, phba->HCregaddr); 11876 readl(phba->HAregaddr); /* flush */ 11877 spin_unlock_irqrestore(&phba->hbalock, iflag); 11878 } else 11879 ha_copy = phba->ha_copy; 11880 11881 work_ha_copy = ha_copy & phba->work_ha_mask; 11882 11883 if (work_ha_copy) { 11884 if (work_ha_copy & HA_LATT) { 11885 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 11886 /* 11887 * Turn off Link Attention interrupts 11888 * until CLEAR_LA done 11889 */ 11890 spin_lock_irqsave(&phba->hbalock, iflag); 11891 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 11892 if (lpfc_readl(phba->HCregaddr, &control)) 11893 goto unplug_error; 11894 control &= ~HC_LAINT_ENA; 11895 writel(control, phba->HCregaddr); 11896 readl(phba->HCregaddr); /* flush */ 11897 spin_unlock_irqrestore(&phba->hbalock, iflag); 11898 } 11899 else 11900 work_ha_copy &= ~HA_LATT; 11901 } 11902 11903 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 11904 /* 11905 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 11906 * the only slow ring. 
11907 */ 11908 status = (work_ha_copy & 11909 (HA_RXMASK << (4*LPFC_ELS_RING))); 11910 status >>= (4*LPFC_ELS_RING); 11911 if (status & HA_RXMASK) { 11912 spin_lock_irqsave(&phba->hbalock, iflag); 11913 if (lpfc_readl(phba->HCregaddr, &control)) 11914 goto unplug_error; 11915 11916 lpfc_debugfs_slow_ring_trc(phba, 11917 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 11918 control, status, 11919 (uint32_t)phba->sli.slistat.sli_intr); 11920 11921 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 11922 lpfc_debugfs_slow_ring_trc(phba, 11923 "ISR Disable ring:" 11924 "pwork:x%x hawork:x%x wait:x%x", 11925 phba->work_ha, work_ha_copy, 11926 (uint32_t)((unsigned long) 11927 &phba->work_waitq)); 11928 11929 control &= 11930 ~(HC_R0INT_ENA << LPFC_ELS_RING); 11931 writel(control, phba->HCregaddr); 11932 readl(phba->HCregaddr); /* flush */ 11933 } 11934 else { 11935 lpfc_debugfs_slow_ring_trc(phba, 11936 "ISR slow ring: pwork:" 11937 "x%x hawork:x%x wait:x%x", 11938 phba->work_ha, work_ha_copy, 11939 (uint32_t)((unsigned long) 11940 &phba->work_waitq)); 11941 } 11942 spin_unlock_irqrestore(&phba->hbalock, iflag); 11943 } 11944 } 11945 spin_lock_irqsave(&phba->hbalock, iflag); 11946 if (work_ha_copy & HA_ERATT) { 11947 if (lpfc_sli_read_hs(phba)) 11948 goto unplug_error; 11949 /* 11950 * Check if there is a deferred error condition 11951 * is active 11952 */ 11953 if ((HS_FFER1 & phba->work_hs) && 11954 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 11955 HS_FFER6 | HS_FFER7 | HS_FFER8) & 11956 phba->work_hs)) { 11957 phba->hba_flag |= DEFER_ERATT; 11958 /* Clear all interrupt enable conditions */ 11959 writel(0, phba->HCregaddr); 11960 readl(phba->HCregaddr); 11961 } 11962 } 11963 11964 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 11965 pmb = phba->sli.mbox_active; 11966 pmbox = &pmb->u.mb; 11967 mbox = phba->mbox; 11968 vport = pmb->vport; 11969 11970 /* First check out the status word */ 11971 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 11972 if (pmbox->mbxOwner != OWN_HOST) { 11973 spin_unlock_irqrestore(&phba->hbalock, iflag); 11974 /* 11975 * Stray Mailbox Interrupt, mbxCommand <cmd> 11976 * mbxStatus <status> 11977 */ 11978 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11979 LOG_SLI, 11980 "(%d):0304 Stray Mailbox " 11981 "Interrupt mbxCommand x%x " 11982 "mbxStatus x%x\n", 11983 (vport ? vport->vpi : 0), 11984 pmbox->mbxCommand, 11985 pmbox->mbxStatus); 11986 /* clear mailbox attention bit */ 11987 work_ha_copy &= ~HA_MBATT; 11988 } else { 11989 phba->sli.mbox_active = NULL; 11990 spin_unlock_irqrestore(&phba->hbalock, iflag); 11991 phba->last_completion_time = jiffies; 11992 del_timer(&phba->sli.mbox_tmo); 11993 if (pmb->mbox_cmpl) { 11994 lpfc_sli_pcimem_bcopy(mbox, pmbox, 11995 MAILBOX_CMD_SIZE); 11996 if (pmb->out_ext_byte_len && 11997 pmb->context2) 11998 lpfc_sli_pcimem_bcopy( 11999 phba->mbox_ext, 12000 pmb->context2, 12001 pmb->out_ext_byte_len); 12002 } 12003 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12004 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12005 12006 lpfc_debugfs_disc_trc(vport, 12007 LPFC_DISC_TRC_MBOX_VPORT, 12008 "MBOX dflt rpi: : " 12009 "status:x%x rpi:x%x", 12010 (uint32_t)pmbox->mbxStatus, 12011 pmbox->un.varWords[0], 0); 12012 12013 if (!pmbox->mbxStatus) { 12014 mp = (struct lpfc_dmabuf *) 12015 (pmb->context1); 12016 ndlp = (struct lpfc_nodelist *) 12017 pmb->context2; 12018 12019 /* Reg_LOGIN of dflt RPI was 12020 * successful. new lets get 12021 * rid of the RPI using the 12022 * same mbox buffer. 
12023 */ 12024 lpfc_unreg_login(phba, 12025 vport->vpi, 12026 pmbox->un.varWords[0], 12027 pmb); 12028 pmb->mbox_cmpl = 12029 lpfc_mbx_cmpl_dflt_rpi; 12030 pmb->context1 = mp; 12031 pmb->context2 = ndlp; 12032 pmb->vport = vport; 12033 rc = lpfc_sli_issue_mbox(phba, 12034 pmb, 12035 MBX_NOWAIT); 12036 if (rc != MBX_BUSY) 12037 lpfc_printf_log(phba, 12038 KERN_ERR, 12039 LOG_MBOX | LOG_SLI, 12040 "0350 rc should have" 12041 "been MBX_BUSY\n"); 12042 if (rc != MBX_NOT_FINISHED) 12043 goto send_current_mbox; 12044 } 12045 } 12046 spin_lock_irqsave( 12047 &phba->pport->work_port_lock, 12048 iflag); 12049 phba->pport->work_port_events &= 12050 ~WORKER_MBOX_TMO; 12051 spin_unlock_irqrestore( 12052 &phba->pport->work_port_lock, 12053 iflag); 12054 lpfc_mbox_cmpl_put(phba, pmb); 12055 } 12056 } else 12057 spin_unlock_irqrestore(&phba->hbalock, iflag); 12058 12059 if ((work_ha_copy & HA_MBATT) && 12060 (phba->sli.mbox_active == NULL)) { 12061 send_current_mbox: 12062 /* Process next mailbox command if there is one */ 12063 do { 12064 rc = lpfc_sli_issue_mbox(phba, NULL, 12065 MBX_NOWAIT); 12066 } while (rc == MBX_NOT_FINISHED); 12067 if (rc != MBX_SUCCESS) 12068 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12069 LOG_SLI, "0349 rc should be " 12070 "MBX_SUCCESS\n"); 12071 } 12072 12073 spin_lock_irqsave(&phba->hbalock, iflag); 12074 phba->work_ha |= work_ha_copy; 12075 spin_unlock_irqrestore(&phba->hbalock, iflag); 12076 lpfc_worker_wake_up(phba); 12077 } 12078 return IRQ_HANDLED; 12079 unplug_error: 12080 spin_unlock_irqrestore(&phba->hbalock, iflag); 12081 return IRQ_HANDLED; 12082 12083 } /* lpfc_sli_sp_intr_handler */ 12084 12085 /** 12086 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 12087 * @irq: Interrupt number. 12088 * @dev_id: The device context pointer. 12089 * 12090 * This function is directly called from the PCI layer as an interrupt 12091 * service routine when device with SLI-3 interface spec is enabled with 12092 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12093 * ring event in the HBA. However, when the device is enabled with either 12094 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12095 * device-level interrupt handler. When the PCI slot is in error recovery 12096 * or the HBA is undergoing initialization, the interrupt handler will not 12097 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12098 * the intrrupt context. This function is called without any lock held. 12099 * It gets the hbalock to access and update SLI data structures. 12100 * 12101 * This function returns IRQ_HANDLED when interrupt is handled else it 12102 * returns IRQ_NONE. 12103 **/ 12104 irqreturn_t 12105 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 12106 { 12107 struct lpfc_hba *phba; 12108 uint32_t ha_copy; 12109 unsigned long status; 12110 unsigned long iflag; 12111 struct lpfc_sli_ring *pring; 12112 12113 /* Get the driver's phba structure from the dev_id and 12114 * assume the HBA is not interrupting. 
12115 */ 12116 phba = (struct lpfc_hba *) dev_id; 12117 12118 if (unlikely(!phba)) 12119 return IRQ_NONE; 12120 12121 /* 12122 * Stuff needs to be attented to when this function is invoked as an 12123 * individual interrupt handler in MSI-X multi-message interrupt mode 12124 */ 12125 if (phba->intr_type == MSIX) { 12126 /* Check device state for handling interrupt */ 12127 if (lpfc_intr_state_check(phba)) 12128 return IRQ_NONE; 12129 /* Need to read HA REG for FCP ring and other ring events */ 12130 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 12131 return IRQ_HANDLED; 12132 /* Clear up only attention source related to fast-path */ 12133 spin_lock_irqsave(&phba->hbalock, iflag); 12134 /* 12135 * If there is deferred error attention, do not check for 12136 * any interrupt. 12137 */ 12138 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12139 spin_unlock_irqrestore(&phba->hbalock, iflag); 12140 return IRQ_NONE; 12141 } 12142 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 12143 phba->HAregaddr); 12144 readl(phba->HAregaddr); /* flush */ 12145 spin_unlock_irqrestore(&phba->hbalock, iflag); 12146 } else 12147 ha_copy = phba->ha_copy; 12148 12149 /* 12150 * Process all events on FCP ring. Take the optimized path for FCP IO. 12151 */ 12152 ha_copy &= ~(phba->work_ha_mask); 12153 12154 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12155 status >>= (4*LPFC_FCP_RING); 12156 pring = &phba->sli.sli3_ring[LPFC_FCP_RING]; 12157 if (status & HA_RXMASK) 12158 lpfc_sli_handle_fast_ring_event(phba, pring, status); 12159 12160 if (phba->cfg_multi_ring_support == 2) { 12161 /* 12162 * Process all events on extra ring. Take the optimized path 12163 * for extra ring IO. 12164 */ 12165 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12166 status >>= (4*LPFC_EXTRA_RING); 12167 if (status & HA_RXMASK) { 12168 lpfc_sli_handle_fast_ring_event(phba, 12169 &phba->sli.sli3_ring[LPFC_EXTRA_RING], 12170 status); 12171 } 12172 } 12173 return IRQ_HANDLED; 12174 } /* lpfc_sli_fp_intr_handler */ 12175 12176 /** 12177 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 12178 * @irq: Interrupt number. 12179 * @dev_id: The device context pointer. 12180 * 12181 * This function is the HBA device-level interrupt handler to device with 12182 * SLI-3 interface spec, called from the PCI layer when either MSI or 12183 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 12184 * requires driver attention. This function invokes the slow-path interrupt 12185 * attention handling function and fast-path interrupt attention handling 12186 * function in turn to process the relevant HBA attention events. This 12187 * function is called without any lock held. It gets the hbalock to access 12188 * and update SLI data structures. 12189 * 12190 * This function returns IRQ_HANDLED when interrupt is handled, else it 12191 * returns IRQ_NONE. 12192 **/ 12193 irqreturn_t 12194 lpfc_sli_intr_handler(int irq, void *dev_id) 12195 { 12196 struct lpfc_hba *phba; 12197 irqreturn_t sp_irq_rc, fp_irq_rc; 12198 unsigned long status1, status2; 12199 uint32_t hc_copy; 12200 12201 /* 12202 * Get the driver's phba structure from the dev_id and 12203 * assume the HBA is not interrupting. 
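	 *
	 * The dev_id here is the phba pointer itself because, in MSI or
	 * INTx (Pin-IRQ) mode, this combined handler is what the driver
	 * registers with the kernel. A minimal sketch of that registration,
	 * assuming the conventional request_irq() usage (the real code lives
	 * in lpfc_init.c; the flags and name shown are illustrative only):
	 *
	 *        rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
	 *                         IRQF_SHARED, "lpfc", phba);
	 *
	 * which is why the handler recovers the adapter context with a plain
	 * cast of dev_id below.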
12204 */ 12205 phba = (struct lpfc_hba *) dev_id; 12206 12207 if (unlikely(!phba)) 12208 return IRQ_NONE; 12209 12210 /* Check device state for handling interrupt */ 12211 if (lpfc_intr_state_check(phba)) 12212 return IRQ_NONE; 12213 12214 spin_lock(&phba->hbalock); 12215 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 12216 spin_unlock(&phba->hbalock); 12217 return IRQ_HANDLED; 12218 } 12219 12220 if (unlikely(!phba->ha_copy)) { 12221 spin_unlock(&phba->hbalock); 12222 return IRQ_NONE; 12223 } else if (phba->ha_copy & HA_ERATT) { 12224 if (phba->hba_flag & HBA_ERATT_HANDLED) 12225 /* ERATT polling has handled ERATT */ 12226 phba->ha_copy &= ~HA_ERATT; 12227 else 12228 /* Indicate interrupt handler handles ERATT */ 12229 phba->hba_flag |= HBA_ERATT_HANDLED; 12230 } 12231 12232 /* 12233 * If there is deferred error attention, do not check for any interrupt. 12234 */ 12235 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 12236 spin_unlock(&phba->hbalock); 12237 return IRQ_NONE; 12238 } 12239 12240 /* Clear attention sources except link and error attentions */ 12241 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 12242 spin_unlock(&phba->hbalock); 12243 return IRQ_HANDLED; 12244 } 12245 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 12246 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 12247 phba->HCregaddr); 12248 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 12249 writel(hc_copy, phba->HCregaddr); 12250 readl(phba->HAregaddr); /* flush */ 12251 spin_unlock(&phba->hbalock); 12252 12253 /* 12254 * Invokes slow-path host attention interrupt handling as appropriate. 12255 */ 12256 12257 /* status of events with mailbox and link attention */ 12258 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 12259 12260 /* status of events with ELS ring */ 12261 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 12262 status2 >>= (4*LPFC_ELS_RING); 12263 12264 if (status1 || (status2 & HA_RXMASK)) 12265 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 12266 else 12267 sp_irq_rc = IRQ_NONE; 12268 12269 /* 12270 * Invoke fast-path host attention interrupt handling as appropriate. 12271 */ 12272 12273 /* status of events with FCP ring */ 12274 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 12275 status1 >>= (4*LPFC_FCP_RING); 12276 12277 /* status of events with extra ring */ 12278 if (phba->cfg_multi_ring_support == 2) { 12279 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 12280 status2 >>= (4*LPFC_EXTRA_RING); 12281 } else 12282 status2 = 0; 12283 12284 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 12285 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 12286 else 12287 fp_irq_rc = IRQ_NONE; 12288 12289 /* Return device-level interrupt handling status */ 12290 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 12291 } /* lpfc_sli_intr_handler */ 12292 12293 /** 12294 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 12295 * @phba: pointer to lpfc hba data structure. 12296 * 12297 * This routine is invoked by the worker thread to process all the pending 12298 * SLI4 FCP abort XRI events. 
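 *
 * The events drained here are produced by lpfc_sli4_sp_handle_abort_xri_wcqe()
 * (later in this file), which queues a copy of the aborted-XRI CQE on
 * sp_fcp_xri_aborted_work_queue, sets FCP_XRI_ABORT_EVENT in hba_flag and
 * wakes the worker thread. A hedged sketch of the dispatch the worker is
 * assumed to perform (the actual check lives in the worker path, e.g.
 * lpfc_work_done(), and may differ in detail):
 *
 *        if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 *                lpfc_sli4_fcp_xri_abort_event_proc(phba);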
12299 **/ 12300 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 12301 { 12302 struct lpfc_cq_event *cq_event; 12303 12304 /* First, declare the fcp xri abort event has been handled */ 12305 spin_lock_irq(&phba->hbalock); 12306 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 12307 spin_unlock_irq(&phba->hbalock); 12308 /* Now, handle all the fcp xri abort events */ 12309 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 12310 /* Get the first event from the head of the event queue */ 12311 spin_lock_irq(&phba->hbalock); 12312 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 12313 cq_event, struct lpfc_cq_event, list); 12314 spin_unlock_irq(&phba->hbalock); 12315 /* Notify aborted XRI for FCP work queue */ 12316 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12317 /* Free the event processed back to the free pool */ 12318 lpfc_sli4_cq_event_release(phba, cq_event); 12319 } 12320 } 12321 12322 /** 12323 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 12324 * @phba: pointer to lpfc hba data structure. 12325 * 12326 * This routine is invoked by the worker thread to process all the pending 12327 * SLI4 els abort xri events. 12328 **/ 12329 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 12330 { 12331 struct lpfc_cq_event *cq_event; 12332 12333 /* First, declare the els xri abort event has been handled */ 12334 spin_lock_irq(&phba->hbalock); 12335 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 12336 spin_unlock_irq(&phba->hbalock); 12337 /* Now, handle all the els xri abort events */ 12338 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 12339 /* Get the first event from the head of the event queue */ 12340 spin_lock_irq(&phba->hbalock); 12341 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 12342 cq_event, struct lpfc_cq_event, list); 12343 spin_unlock_irq(&phba->hbalock); 12344 /* Notify aborted XRI for ELS work queue */ 12345 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 12346 /* Free the event processed back to the free pool */ 12347 lpfc_sli4_cq_event_release(phba, cq_event); 12348 } 12349 } 12350 12351 /** 12352 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 12353 * @phba: pointer to lpfc hba data structure 12354 * @pIocbIn: pointer to the rspiocbq 12355 * @pIocbOut: pointer to the cmdiocbq 12356 * @wcqe: pointer to the complete wcqe 12357 * 12358 * This routine transfers the fields of a command iocbq to a response iocbq 12359 * by copying all the IOCB fields from command iocbq and transferring the 12360 * completion status information from the complete wcqe. 
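 *
 * Only the tail of the structure, beginning at the embedded IOCB, is copied,
 * so the fields that precede the IOCB (such as the response iocbq's own list
 * linkage) are left untouched. The copy performed below has the form:
 *
 *        size_t offset = offsetof(struct lpfc_iocbq, iocb);
 *
 *        memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 *               sizeof(struct lpfc_iocbq) - offset);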
12361 **/ 12362 static void 12363 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 12364 struct lpfc_iocbq *pIocbIn, 12365 struct lpfc_iocbq *pIocbOut, 12366 struct lpfc_wcqe_complete *wcqe) 12367 { 12368 int numBdes, i; 12369 unsigned long iflags; 12370 uint32_t status, max_response; 12371 struct lpfc_dmabuf *dmabuf; 12372 struct ulp_bde64 *bpl, bde; 12373 size_t offset = offsetof(struct lpfc_iocbq, iocb); 12374 12375 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 12376 sizeof(struct lpfc_iocbq) - offset); 12377 /* Map WCQE parameters into irspiocb parameters */ 12378 status = bf_get(lpfc_wcqe_c_status, wcqe); 12379 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 12380 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 12381 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 12382 pIocbIn->iocb.un.fcpi.fcpi_parm = 12383 pIocbOut->iocb.un.fcpi.fcpi_parm - 12384 wcqe->total_data_placed; 12385 else 12386 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12387 else { 12388 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 12389 switch (pIocbOut->iocb.ulpCommand) { 12390 case CMD_ELS_REQUEST64_CR: 12391 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12392 bpl = (struct ulp_bde64 *)dmabuf->virt; 12393 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 12394 max_response = bde.tus.f.bdeSize; 12395 break; 12396 case CMD_GEN_REQUEST64_CR: 12397 max_response = 0; 12398 if (!pIocbOut->context3) 12399 break; 12400 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 12401 sizeof(struct ulp_bde64); 12402 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 12403 bpl = (struct ulp_bde64 *)dmabuf->virt; 12404 for (i = 0; i < numBdes; i++) { 12405 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 12406 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 12407 max_response += bde.tus.f.bdeSize; 12408 } 12409 break; 12410 default: 12411 max_response = wcqe->total_data_placed; 12412 break; 12413 } 12414 if (max_response < wcqe->total_data_placed) 12415 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 12416 else 12417 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 12418 wcqe->total_data_placed; 12419 } 12420 12421 /* Convert BG errors for completion status */ 12422 if (status == CQE_STATUS_DI_ERROR) { 12423 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 12424 12425 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 12426 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 12427 else 12428 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 12429 12430 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 12431 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 12432 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12433 BGS_GUARD_ERR_MASK; 12434 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 12435 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12436 BGS_APPTAG_ERR_MASK; 12437 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 12438 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12439 BGS_REFTAG_ERR_MASK; 12440 12441 /* Check to see if there was any good data before the error */ 12442 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 12443 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12444 BGS_HI_WATER_MARK_PRESENT_MASK; 12445 pIocbIn->iocb.unsli3.sli3_bg.bghm = 12446 wcqe->total_data_placed; 12447 } 12448 12449 /* 12450 * Set ALL the error bits to indicate we don't know what 12451 * type of error it is. 
12452 */ 12453 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 12454 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 12455 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 12456 BGS_GUARD_ERR_MASK); 12457 } 12458 12459 /* Pick up HBA exchange busy condition */ 12460 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 12461 spin_lock_irqsave(&phba->hbalock, iflags); 12462 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 12463 spin_unlock_irqrestore(&phba->hbalock, iflags); 12464 } 12465 } 12466 12467 /** 12468 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 12469 * @phba: Pointer to HBA context object. 12470 * @wcqe: Pointer to work-queue completion queue entry. 12471 * 12472 * This routine handles an ELS work-queue completion event and construct 12473 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 12474 * discovery engine to handle. 12475 * 12476 * Return: Pointer to the receive IOCBQ, NULL otherwise. 12477 **/ 12478 static struct lpfc_iocbq * 12479 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 12480 struct lpfc_iocbq *irspiocbq) 12481 { 12482 struct lpfc_sli_ring *pring; 12483 struct lpfc_iocbq *cmdiocbq; 12484 struct lpfc_wcqe_complete *wcqe; 12485 unsigned long iflags; 12486 12487 pring = lpfc_phba_elsring(phba); 12488 if (unlikely(!pring)) 12489 return NULL; 12490 12491 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 12492 spin_lock_irqsave(&pring->ring_lock, iflags); 12493 pring->stats.iocb_event++; 12494 /* Look up the ELS command IOCB and create pseudo response IOCB */ 12495 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12496 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12497 if (unlikely(!cmdiocbq)) { 12498 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12499 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12500 "0386 ELS complete with no corresponding " 12501 "cmdiocb: 0x%x 0x%x 0x%x 0x%x\n", 12502 wcqe->word0, wcqe->total_data_placed, 12503 wcqe->parameter, wcqe->word3); 12504 lpfc_sli_release_iocbq(phba, irspiocbq); 12505 return NULL; 12506 } 12507 12508 /* Put the iocb back on the txcmplq */ 12509 lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq); 12510 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12511 12512 /* Fake the irspiocbq and copy necessary response information */ 12513 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 12514 12515 return irspiocbq; 12516 } 12517 12518 inline struct lpfc_cq_event * 12519 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) 12520 { 12521 struct lpfc_cq_event *cq_event; 12522 12523 /* Allocate a new internal CQ_EVENT entry */ 12524 cq_event = lpfc_sli4_cq_event_alloc(phba); 12525 if (!cq_event) { 12526 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12527 "0602 Failed to alloc CQ_EVENT entry\n"); 12528 return NULL; 12529 } 12530 12531 /* Move the CQE into the event */ 12532 memcpy(&cq_event->cqe, entry, size); 12533 return cq_event; 12534 } 12535 12536 /** 12537 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 12538 * @phba: Pointer to HBA context object. 12539 * @cqe: Pointer to mailbox completion queue entry. 12540 * 12541 * This routine process a mailbox completion queue entry with asynchrous 12542 * event. 12543 * 12544 * Return: true if work posted to worker thread, otherwise false. 
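 *
 * This follows the common slow-path handoff pattern used throughout this
 * file: the MCQE is copied into a driver-owned struct lpfc_cq_event via
 * lpfc_cq_event_setup(), the event is queued on sp_asynce_work_queue under
 * hbalock, ASYNC_EVENT is set in hba_flag, and the "true" return value tells
 * the caller to wake the worker thread, which later drains
 * sp_asynce_work_queue.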
12545 **/ 12546 static bool 12547 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 12548 { 12549 struct lpfc_cq_event *cq_event; 12550 unsigned long iflags; 12551 12552 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 12553 "0392 Async Event: word0:x%x, word1:x%x, " 12554 "word2:x%x, word3:x%x\n", mcqe->word0, 12555 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 12556 12557 cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe)); 12558 if (!cq_event) 12559 return false; 12560 spin_lock_irqsave(&phba->hbalock, iflags); 12561 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 12562 /* Set the async event flag */ 12563 phba->hba_flag |= ASYNC_EVENT; 12564 spin_unlock_irqrestore(&phba->hbalock, iflags); 12565 12566 return true; 12567 } 12568 12569 /** 12570 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 12571 * @phba: Pointer to HBA context object. 12572 * @cqe: Pointer to mailbox completion queue entry. 12573 * 12574 * This routine process a mailbox completion queue entry with mailbox 12575 * completion event. 12576 * 12577 * Return: true if work posted to worker thread, otherwise false. 12578 **/ 12579 static bool 12580 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 12581 { 12582 uint32_t mcqe_status; 12583 MAILBOX_t *mbox, *pmbox; 12584 struct lpfc_mqe *mqe; 12585 struct lpfc_vport *vport; 12586 struct lpfc_nodelist *ndlp; 12587 struct lpfc_dmabuf *mp; 12588 unsigned long iflags; 12589 LPFC_MBOXQ_t *pmb; 12590 bool workposted = false; 12591 int rc; 12592 12593 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 12594 if (!bf_get(lpfc_trailer_completed, mcqe)) 12595 goto out_no_mqe_complete; 12596 12597 /* Get the reference to the active mbox command */ 12598 spin_lock_irqsave(&phba->hbalock, iflags); 12599 pmb = phba->sli.mbox_active; 12600 if (unlikely(!pmb)) { 12601 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 12602 "1832 No pending MBOX command to handle\n"); 12603 spin_unlock_irqrestore(&phba->hbalock, iflags); 12604 goto out_no_mqe_complete; 12605 } 12606 spin_unlock_irqrestore(&phba->hbalock, iflags); 12607 mqe = &pmb->u.mqe; 12608 pmbox = (MAILBOX_t *)&pmb->u.mqe; 12609 mbox = phba->mbox; 12610 vport = pmb->vport; 12611 12612 /* Reset heartbeat timer */ 12613 phba->last_completion_time = jiffies; 12614 del_timer(&phba->sli.mbox_tmo); 12615 12616 /* Move mbox data to caller's mailbox region, do endian swapping */ 12617 if (pmb->mbox_cmpl && mbox) 12618 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 12619 12620 /* 12621 * For mcqe errors, conditionally move a modified error code to 12622 * the mbox so that the error will not be missed. 12623 */ 12624 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 12625 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 12626 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 12627 bf_set(lpfc_mqe_status, mqe, 12628 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 12629 } 12630 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 12631 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 12632 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 12633 "MBOX dflt rpi: status:x%x rpi:x%x", 12634 mcqe_status, 12635 pmbox->un.varWords[0], 0); 12636 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 12637 mp = (struct lpfc_dmabuf *)(pmb->context1); 12638 ndlp = (struct lpfc_nodelist *)pmb->context2; 12639 /* Reg_LOGIN of dflt RPI was successful. Now lets get 12640 * RID of the PPI using the same mbox buffer. 
12641 */ 12642 lpfc_unreg_login(phba, vport->vpi, 12643 pmbox->un.varWords[0], pmb); 12644 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 12645 pmb->context1 = mp; 12646 pmb->context2 = ndlp; 12647 pmb->vport = vport; 12648 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 12649 if (rc != MBX_BUSY) 12650 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 12651 LOG_SLI, "0385 rc should " 12652 "have been MBX_BUSY\n"); 12653 if (rc != MBX_NOT_FINISHED) 12654 goto send_current_mbox; 12655 } 12656 } 12657 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 12658 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 12659 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 12660 12661 /* There is mailbox completion work to do */ 12662 spin_lock_irqsave(&phba->hbalock, iflags); 12663 __lpfc_mbox_cmpl_put(phba, pmb); 12664 phba->work_ha |= HA_MBATT; 12665 spin_unlock_irqrestore(&phba->hbalock, iflags); 12666 workposted = true; 12667 12668 send_current_mbox: 12669 spin_lock_irqsave(&phba->hbalock, iflags); 12670 /* Release the mailbox command posting token */ 12671 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 12672 /* Setting active mailbox pointer need to be in sync to flag clear */ 12673 phba->sli.mbox_active = NULL; 12674 spin_unlock_irqrestore(&phba->hbalock, iflags); 12675 /* Wake up worker thread to post the next pending mailbox command */ 12676 lpfc_worker_wake_up(phba); 12677 out_no_mqe_complete: 12678 if (bf_get(lpfc_trailer_consumed, mcqe)) 12679 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 12680 return workposted; 12681 } 12682 12683 /** 12684 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 12685 * @phba: Pointer to HBA context object. 12686 * @cqe: Pointer to mailbox completion queue entry. 12687 * 12688 * This routine process a mailbox completion queue entry, it invokes the 12689 * proper mailbox complete handling or asynchrous event handling routine 12690 * according to the MCQE's async bit. 12691 * 12692 * Return: true if work posted to worker thread, otherwise false. 12693 **/ 12694 static bool 12695 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 12696 { 12697 struct lpfc_mcqe mcqe; 12698 bool workposted; 12699 12700 /* Copy the mailbox MCQE and convert endian order as needed */ 12701 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 12702 12703 /* Invoke the proper event handling routine */ 12704 if (!bf_get(lpfc_trailer_async, &mcqe)) 12705 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 12706 else 12707 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 12708 return workposted; 12709 } 12710 12711 /** 12712 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 12713 * @phba: Pointer to HBA context object. 12714 * @cq: Pointer to associated CQ 12715 * @wcqe: Pointer to work-queue completion queue entry. 12716 * 12717 * This routine handles an ELS work-queue completion event. 12718 * 12719 * Return: true if work posted to worker thread, otherwise false. 
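 *
 * The WCQE itself is not processed here; it is copied into a spare iocbq
 * obtained from lpfc_sli_get_iocbq() and queued on sp_queue_event with
 * HBA_SP_QUEUE_EVT set, so the worker thread can later convert it into a
 * pseudo response IOCB (see lpfc_sli4_els_wcqe_to_rspiocbq() above) and run
 * the normal ELS completion path.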
12720 **/ 12721 static bool 12722 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12723 struct lpfc_wcqe_complete *wcqe) 12724 { 12725 struct lpfc_iocbq *irspiocbq; 12726 unsigned long iflags; 12727 struct lpfc_sli_ring *pring = cq->pring; 12728 int txq_cnt = 0; 12729 int txcmplq_cnt = 0; 12730 int fcp_txcmplq_cnt = 0; 12731 12732 /* Get an irspiocbq for later ELS response processing use */ 12733 irspiocbq = lpfc_sli_get_iocbq(phba); 12734 if (!irspiocbq) { 12735 if (!list_empty(&pring->txq)) 12736 txq_cnt++; 12737 if (!list_empty(&pring->txcmplq)) 12738 txcmplq_cnt++; 12739 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12740 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 12741 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 12742 txq_cnt, phba->iocb_cnt, 12743 fcp_txcmplq_cnt, 12744 txcmplq_cnt); 12745 return false; 12746 } 12747 12748 /* Save off the slow-path queue event for work thread to process */ 12749 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 12750 spin_lock_irqsave(&phba->hbalock, iflags); 12751 list_add_tail(&irspiocbq->cq_event.list, 12752 &phba->sli4_hba.sp_queue_event); 12753 phba->hba_flag |= HBA_SP_QUEUE_EVT; 12754 spin_unlock_irqrestore(&phba->hbalock, iflags); 12755 12756 return true; 12757 } 12758 12759 /** 12760 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 12761 * @phba: Pointer to HBA context object. 12762 * @wcqe: Pointer to work-queue completion queue entry. 12763 * 12764 * This routine handles slow-path WQ entry consumed event by invoking the 12765 * proper WQ release routine to the slow-path WQ. 12766 **/ 12767 static void 12768 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 12769 struct lpfc_wcqe_release *wcqe) 12770 { 12771 /* sanity check on queue memory */ 12772 if (unlikely(!phba->sli4_hba.els_wq)) 12773 return; 12774 /* Check for the slow-path ELS work queue */ 12775 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 12776 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 12777 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 12778 else 12779 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12780 "2579 Slow-path wqe consume event carries " 12781 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 12782 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 12783 phba->sli4_hba.els_wq->queue_id); 12784 } 12785 12786 /** 12787 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 12788 * @phba: Pointer to HBA context object. 12789 * @cq: Pointer to a WQ completion queue. 12790 * @wcqe: Pointer to work-queue completion queue entry. 12791 * 12792 * This routine handles an XRI abort event. 12793 * 12794 * Return: true if work posted to worker thread, otherwise false. 
12795 **/ 12796 static bool 12797 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 12798 struct lpfc_queue *cq, 12799 struct sli4_wcqe_xri_aborted *wcqe) 12800 { 12801 bool workposted = false; 12802 struct lpfc_cq_event *cq_event; 12803 unsigned long iflags; 12804 12805 switch (cq->subtype) { 12806 case LPFC_FCP: 12807 cq_event = lpfc_cq_event_setup( 12808 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 12809 if (!cq_event) 12810 return false; 12811 spin_lock_irqsave(&phba->hbalock, iflags); 12812 list_add_tail(&cq_event->list, 12813 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 12814 /* Set the fcp xri abort event flag */ 12815 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 12816 spin_unlock_irqrestore(&phba->hbalock, iflags); 12817 workposted = true; 12818 break; 12819 case LPFC_NVME_LS: /* NVME LS uses ELS resources */ 12820 case LPFC_ELS: 12821 cq_event = lpfc_cq_event_setup( 12822 phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 12823 if (!cq_event) 12824 return false; 12825 spin_lock_irqsave(&phba->hbalock, iflags); 12826 list_add_tail(&cq_event->list, 12827 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 12828 /* Set the els xri abort event flag */ 12829 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 12830 spin_unlock_irqrestore(&phba->hbalock, iflags); 12831 workposted = true; 12832 break; 12833 case LPFC_NVME: 12834 /* Notify aborted XRI for NVME work queue */ 12835 if (phba->nvmet_support) 12836 lpfc_sli4_nvmet_xri_aborted(phba, wcqe); 12837 else 12838 lpfc_sli4_nvme_xri_aborted(phba, wcqe); 12839 12840 workposted = false; 12841 break; 12842 default: 12843 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12844 "0603 Invalid CQ subtype %d: " 12845 "%08x %08x %08x %08x\n", 12846 cq->subtype, wcqe->word0, wcqe->parameter, 12847 wcqe->word2, wcqe->word3); 12848 workposted = false; 12849 break; 12850 } 12851 return workposted; 12852 } 12853 12854 /** 12855 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 12856 * @phba: Pointer to HBA context object. 12857 * @rcqe: Pointer to receive-queue completion queue entry. 12858 * 12859 * This routine process a receive-queue completion queue entry. 12860 * 12861 * Return: true if work posted to worker thread, otherwise false. 
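 *
 * Receives are posted on a header/data RQ pair (hdr_rq/dat_rq). On
 * FC_STATUS_RQ_SUCCESS one posted buffer pair is consumed, accounted for in
 * the RQ counters, and the frame is queued on sp_queue_event for the worker
 * thread. On the insufficient-buffer statuses the handler only sets
 * HBA_POST_RECEIVE_BUFFER so the worker thread reposts buffers later.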
12862 **/ 12863 static bool 12864 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 12865 { 12866 bool workposted = false; 12867 struct fc_frame_header *fc_hdr; 12868 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 12869 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 12870 struct lpfc_nvmet_tgtport *tgtp; 12871 struct hbq_dmabuf *dma_buf; 12872 uint32_t status, rq_id; 12873 unsigned long iflags; 12874 12875 /* sanity check on queue memory */ 12876 if (unlikely(!hrq) || unlikely(!drq)) 12877 return workposted; 12878 12879 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 12880 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 12881 else 12882 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 12883 if (rq_id != hrq->queue_id) 12884 goto out; 12885 12886 status = bf_get(lpfc_rcqe_status, rcqe); 12887 switch (status) { 12888 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 12889 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12890 "2537 Receive Frame Truncated!!\n"); 12891 case FC_STATUS_RQ_SUCCESS: 12892 spin_lock_irqsave(&phba->hbalock, iflags); 12893 lpfc_sli4_rq_release(hrq, drq); 12894 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 12895 if (!dma_buf) { 12896 hrq->RQ_no_buf_found++; 12897 spin_unlock_irqrestore(&phba->hbalock, iflags); 12898 goto out; 12899 } 12900 hrq->RQ_rcv_buf++; 12901 hrq->RQ_buf_posted--; 12902 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 12903 12904 /* If a NVME LS event (type 0x28), treat it as Fast path */ 12905 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 12906 12907 /* save off the frame for the word thread to process */ 12908 list_add_tail(&dma_buf->cq_event.list, 12909 &phba->sli4_hba.sp_queue_event); 12910 /* Frame received */ 12911 phba->hba_flag |= HBA_SP_QUEUE_EVT; 12912 spin_unlock_irqrestore(&phba->hbalock, iflags); 12913 workposted = true; 12914 break; 12915 case FC_STATUS_INSUFF_BUF_FRM_DISC: 12916 if (phba->nvmet_support) { 12917 tgtp = phba->targetport->private; 12918 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 12919 "6402 RQE Error x%x, posted %d err_cnt " 12920 "%d: %x %x %x\n", 12921 status, hrq->RQ_buf_posted, 12922 hrq->RQ_no_posted_buf, 12923 atomic_read(&tgtp->rcv_fcp_cmd_in), 12924 atomic_read(&tgtp->rcv_fcp_cmd_out), 12925 atomic_read(&tgtp->xmt_fcp_release)); 12926 } 12927 /* fallthrough */ 12928 12929 case FC_STATUS_INSUFF_BUF_NEED_BUF: 12930 hrq->RQ_no_posted_buf++; 12931 /* Post more buffers if possible */ 12932 spin_lock_irqsave(&phba->hbalock, iflags); 12933 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 12934 spin_unlock_irqrestore(&phba->hbalock, iflags); 12935 workposted = true; 12936 break; 12937 } 12938 out: 12939 return workposted; 12940 } 12941 12942 /** 12943 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 12944 * @phba: Pointer to HBA context object. 12945 * @cq: Pointer to the completion queue. 12946 * @wcqe: Pointer to a completion queue entry. 12947 * 12948 * This routine process a slow-path work-queue or receive queue completion queue 12949 * entry. 12950 * 12951 * Return: true if work posted to worker thread, otherwise false. 
12952 **/ 12953 static bool 12954 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12955 struct lpfc_cqe *cqe) 12956 { 12957 struct lpfc_cqe cqevt; 12958 bool workposted = false; 12959 12960 /* Copy the work queue CQE and convert endian order if needed */ 12961 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 12962 12963 /* Check and process for different type of WCQE and dispatch */ 12964 switch (bf_get(lpfc_cqe_code, &cqevt)) { 12965 case CQE_CODE_COMPL_WQE: 12966 /* Process the WQ/RQ complete event */ 12967 phba->last_completion_time = jiffies; 12968 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 12969 (struct lpfc_wcqe_complete *)&cqevt); 12970 break; 12971 case CQE_CODE_RELEASE_WQE: 12972 /* Process the WQ release event */ 12973 lpfc_sli4_sp_handle_rel_wcqe(phba, 12974 (struct lpfc_wcqe_release *)&cqevt); 12975 break; 12976 case CQE_CODE_XRI_ABORTED: 12977 /* Process the WQ XRI abort event */ 12978 phba->last_completion_time = jiffies; 12979 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 12980 (struct sli4_wcqe_xri_aborted *)&cqevt); 12981 break; 12982 case CQE_CODE_RECEIVE: 12983 case CQE_CODE_RECEIVE_V1: 12984 /* Process the RQ event */ 12985 phba->last_completion_time = jiffies; 12986 workposted = lpfc_sli4_sp_handle_rcqe(phba, 12987 (struct lpfc_rcqe *)&cqevt); 12988 break; 12989 default: 12990 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12991 "0388 Not a valid WCQE code: x%x\n", 12992 bf_get(lpfc_cqe_code, &cqevt)); 12993 break; 12994 } 12995 return workposted; 12996 } 12997 12998 /** 12999 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 13000 * @phba: Pointer to HBA context object. 13001 * @eqe: Pointer to fast-path event queue entry. 13002 * 13003 * This routine process a event queue entry from the slow-path event queue. 13004 * It will check the MajorCode and MinorCode to determine this is for a 13005 * completion event on a completion queue, if not, an error shall be logged 13006 * and just return. Otherwise, it will get to the corresponding completion 13007 * queue and process all the entries on that completion queue, rearm the 13008 * completion queue, and then return. 13009 * 13010 **/ 13011 static void 13012 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13013 struct lpfc_queue *speq) 13014 { 13015 struct lpfc_queue *cq = NULL, *childq; 13016 uint16_t cqid; 13017 13018 /* Get the reference to the corresponding CQ */ 13019 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13020 13021 list_for_each_entry(childq, &speq->child_list, list) { 13022 if (childq->queue_id == cqid) { 13023 cq = childq; 13024 break; 13025 } 13026 } 13027 if (unlikely(!cq)) { 13028 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 13029 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13030 "0365 Slow-path CQ identifier " 13031 "(%d) does not exist\n", cqid); 13032 return; 13033 } 13034 13035 /* Save EQ associated with this CQ */ 13036 cq->assoc_qp = speq; 13037 13038 if (!queue_work(phba->wq, &cq->spwork)) 13039 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13040 "0390 Cannot schedule soft IRQ " 13041 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13042 cqid, cq->queue_id, smp_processor_id()); 13043 } 13044 13045 /** 13046 * lpfc_sli4_sp_process_cq - Process a slow-path event queue entry 13047 * @phba: Pointer to HBA context object. 13048 * 13049 * This routine process a event queue entry from the slow-path event queue. 
13050 * It will check the MajorCode and MinorCode to determine this is for a 13051 * completion event on a completion queue, if not, an error shall be logged 13052 * and just return. Otherwise, it will get to the corresponding completion 13053 * queue and process all the entries on that completion queue, rearm the 13054 * completion queue, and then return. 13055 * 13056 **/ 13057 static void 13058 lpfc_sli4_sp_process_cq(struct work_struct *work) 13059 { 13060 struct lpfc_queue *cq = 13061 container_of(work, struct lpfc_queue, spwork); 13062 struct lpfc_hba *phba = cq->phba; 13063 struct lpfc_cqe *cqe; 13064 bool workposted = false; 13065 int ccount = 0; 13066 13067 /* Process all the entries to the CQ */ 13068 switch (cq->type) { 13069 case LPFC_MCQ: 13070 while ((cqe = lpfc_sli4_cq_get(cq))) { 13071 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 13072 if (!(++ccount % cq->entry_repost)) 13073 break; 13074 cq->CQ_mbox++; 13075 } 13076 break; 13077 case LPFC_WCQ: 13078 while ((cqe = lpfc_sli4_cq_get(cq))) { 13079 if (cq->subtype == LPFC_FCP || 13080 cq->subtype == LPFC_NVME) { 13081 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13082 if (phba->ktime_on) 13083 cq->isr_timestamp = ktime_get_ns(); 13084 else 13085 cq->isr_timestamp = 0; 13086 #endif 13087 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, 13088 cqe); 13089 } else { 13090 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 13091 cqe); 13092 } 13093 if (!(++ccount % cq->entry_repost)) 13094 break; 13095 } 13096 13097 /* Track the max number of CQEs processed in 1 EQ */ 13098 if (ccount > cq->CQ_max_cqe) 13099 cq->CQ_max_cqe = ccount; 13100 break; 13101 default: 13102 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13103 "0370 Invalid completion queue type (%d)\n", 13104 cq->type); 13105 return; 13106 } 13107 13108 /* Catch the no cq entry condition, log an error */ 13109 if (unlikely(ccount == 0)) 13110 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13111 "0371 No entry from the CQ: identifier " 13112 "(x%x), type (%d)\n", cq->queue_id, cq->type); 13113 13114 /* In any case, flash and re-arm the RCQ */ 13115 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 13116 13117 /* wake up worker thread if there are works to be done */ 13118 if (workposted) 13119 lpfc_worker_wake_up(phba); 13120 } 13121 13122 /** 13123 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 13124 * @phba: Pointer to HBA context object. 13125 * @cq: Pointer to associated CQ 13126 * @wcqe: Pointer to work-queue completion queue entry. 13127 * 13128 * This routine process a fast-path work queue completion entry from fast-path 13129 * event queue for FCP command response completion. 13130 **/ 13131 static void 13132 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13133 struct lpfc_wcqe_complete *wcqe) 13134 { 13135 struct lpfc_sli_ring *pring = cq->pring; 13136 struct lpfc_iocbq *cmdiocbq; 13137 struct lpfc_iocbq irspiocbq; 13138 unsigned long iflags; 13139 13140 /* Check for response status */ 13141 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 13142 /* If resource errors reported from HBA, reduce queue 13143 * depth of the SCSI device. 
13144 */ 13145 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 13146 IOSTAT_LOCAL_REJECT)) && 13147 ((wcqe->parameter & IOERR_PARAM_MASK) == 13148 IOERR_NO_RESOURCES)) 13149 phba->lpfc_rampdown_queue_depth(phba); 13150 13151 /* Log the error status */ 13152 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13153 "0373 FCP complete error: status=x%x, " 13154 "hw_status=x%x, total_data_specified=%d, " 13155 "parameter=x%x, word3=x%x\n", 13156 bf_get(lpfc_wcqe_c_status, wcqe), 13157 bf_get(lpfc_wcqe_c_hw_status, wcqe), 13158 wcqe->total_data_placed, wcqe->parameter, 13159 wcqe->word3); 13160 } 13161 13162 /* Look up the FCP command IOCB and create pseudo response IOCB */ 13163 spin_lock_irqsave(&pring->ring_lock, iflags); 13164 pring->stats.iocb_event++; 13165 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 13166 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13167 spin_unlock_irqrestore(&pring->ring_lock, iflags); 13168 if (unlikely(!cmdiocbq)) { 13169 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13170 "0374 FCP complete with no corresponding " 13171 "cmdiocb: iotag (%d)\n", 13172 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13173 return; 13174 } 13175 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS 13176 cmdiocbq->isr_timestamp = cq->isr_timestamp; 13177 #endif 13178 if (cmdiocbq->iocb_cmpl == NULL) { 13179 if (cmdiocbq->wqe_cmpl) { 13180 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13181 spin_lock_irqsave(&phba->hbalock, iflags); 13182 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13183 spin_unlock_irqrestore(&phba->hbalock, iflags); 13184 } 13185 13186 /* Pass the cmd_iocb and the wcqe to the upper layer */ 13187 (cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe); 13188 return; 13189 } 13190 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13191 "0375 FCP cmdiocb not callback function " 13192 "iotag: (%d)\n", 13193 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 13194 return; 13195 } 13196 13197 /* Fake the irspiocb and copy necessary response information */ 13198 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 13199 13200 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 13201 spin_lock_irqsave(&phba->hbalock, iflags); 13202 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 13203 spin_unlock_irqrestore(&phba->hbalock, iflags); 13204 } 13205 13206 /* Pass the cmd_iocb and the rsp state to the upper layer */ 13207 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 13208 } 13209 13210 /** 13211 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 13212 * @phba: Pointer to HBA context object. 13213 * @cq: Pointer to completion queue. 13214 * @wcqe: Pointer to work-queue completion queue entry. 13215 * 13216 * This routine handles an fast-path WQ entry consumed event by invoking the 13217 * proper WQ release routine to the slow-path WQ. 
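 *
 * The WQ id carried in the release WCQE is matched against the work queues on
 * this CQ's child list; on a match, lpfc_sli4_wq_release() advances the hba
 * index of that fast-path work queue so the consumed entries can be reused.
 * A warning is logged if no child WQ matches.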
13218 **/ 13219 static void 13220 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13221 struct lpfc_wcqe_release *wcqe) 13222 { 13223 struct lpfc_queue *childwq; 13224 bool wqid_matched = false; 13225 uint16_t hba_wqid; 13226 13227 /* Check for fast-path FCP work queue release */ 13228 hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 13229 list_for_each_entry(childwq, &cq->child_list, list) { 13230 if (childwq->queue_id == hba_wqid) { 13231 lpfc_sli4_wq_release(childwq, 13232 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 13233 wqid_matched = true; 13234 break; 13235 } 13236 } 13237 /* Report warning log message if no match found */ 13238 if (wqid_matched != true) 13239 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13240 "2580 Fast-path wqe consume event carries " 13241 "miss-matched qid: wcqe-qid=x%x\n", hba_wqid); 13242 } 13243 13244 /** 13245 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry 13246 * @phba: Pointer to HBA context object. 13247 * @rcqe: Pointer to receive-queue completion queue entry. 13248 * 13249 * This routine process a receive-queue completion queue entry. 13250 * 13251 * Return: true if work posted to worker thread, otherwise false. 13252 **/ 13253 static bool 13254 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13255 struct lpfc_rcqe *rcqe) 13256 { 13257 bool workposted = false; 13258 struct lpfc_queue *hrq; 13259 struct lpfc_queue *drq; 13260 struct rqb_dmabuf *dma_buf; 13261 struct fc_frame_header *fc_hdr; 13262 struct lpfc_nvmet_tgtport *tgtp; 13263 uint32_t status, rq_id; 13264 unsigned long iflags; 13265 uint32_t fctl, idx; 13266 13267 if ((phba->nvmet_support == 0) || 13268 (phba->sli4_hba.nvmet_cqset == NULL)) 13269 return workposted; 13270 13271 idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id; 13272 hrq = phba->sli4_hba.nvmet_mrq_hdr[idx]; 13273 drq = phba->sli4_hba.nvmet_mrq_data[idx]; 13274 13275 /* sanity check on queue memory */ 13276 if (unlikely(!hrq) || unlikely(!drq)) 13277 return workposted; 13278 13279 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 13280 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 13281 else 13282 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 13283 13284 if ((phba->nvmet_support == 0) || 13285 (rq_id != hrq->queue_id)) 13286 return workposted; 13287 13288 status = bf_get(lpfc_rcqe_status, rcqe); 13289 switch (status) { 13290 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 13291 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13292 "6126 Receive Frame Truncated!!\n"); 13293 /* Drop thru */ 13294 case FC_STATUS_RQ_SUCCESS: 13295 spin_lock_irqsave(&phba->hbalock, iflags); 13296 lpfc_sli4_rq_release(hrq, drq); 13297 dma_buf = lpfc_sli_rqbuf_get(phba, hrq); 13298 if (!dma_buf) { 13299 hrq->RQ_no_buf_found++; 13300 spin_unlock_irqrestore(&phba->hbalock, iflags); 13301 goto out; 13302 } 13303 spin_unlock_irqrestore(&phba->hbalock, iflags); 13304 hrq->RQ_rcv_buf++; 13305 hrq->RQ_buf_posted--; 13306 fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt; 13307 13308 /* Just some basic sanity checks on FCP Command frame */ 13309 fctl = (fc_hdr->fh_f_ctl[0] << 16 | 13310 fc_hdr->fh_f_ctl[1] << 8 | 13311 fc_hdr->fh_f_ctl[2]); 13312 if (((fctl & 13313 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) != 13314 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) || 13315 (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */ 13316 goto drop; 13317 13318 if (fc_hdr->fh_type == FC_TYPE_FCP) { 13319 dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe); 13320 
lpfc_nvmet_unsol_fcp_event( 13321 phba, idx, dma_buf, 13322 cq->isr_timestamp); 13323 return false; 13324 } 13325 drop: 13326 lpfc_in_buf_free(phba, &dma_buf->dbuf); 13327 break; 13328 case FC_STATUS_INSUFF_BUF_FRM_DISC: 13329 if (phba->nvmet_support) { 13330 tgtp = phba->targetport->private; 13331 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME, 13332 "6401 RQE Error x%x, posted %d err_cnt " 13333 "%d: %x %x %x\n", 13334 status, hrq->RQ_buf_posted, 13335 hrq->RQ_no_posted_buf, 13336 atomic_read(&tgtp->rcv_fcp_cmd_in), 13337 atomic_read(&tgtp->rcv_fcp_cmd_out), 13338 atomic_read(&tgtp->xmt_fcp_release)); 13339 } 13340 /* fallthrough */ 13341 13342 case FC_STATUS_INSUFF_BUF_NEED_BUF: 13343 hrq->RQ_no_posted_buf++; 13344 /* Post more buffers if possible */ 13345 break; 13346 } 13347 out: 13348 return workposted; 13349 } 13350 13351 /** 13352 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry 13353 * @cq: Pointer to the completion queue. 13354 * @eqe: Pointer to fast-path completion queue entry. 13355 * 13356 * This routine process a fast-path work queue completion entry from fast-path 13357 * event queue for FCP command response completion. 13358 **/ 13359 static int 13360 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 13361 struct lpfc_cqe *cqe) 13362 { 13363 struct lpfc_wcqe_release wcqe; 13364 bool workposted = false; 13365 13366 /* Copy the work queue CQE and convert endian order if needed */ 13367 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 13368 13369 /* Check and process for different type of WCQE and dispatch */ 13370 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 13371 case CQE_CODE_COMPL_WQE: 13372 case CQE_CODE_NVME_ERSP: 13373 cq->CQ_wq++; 13374 /* Process the WQ complete event */ 13375 phba->last_completion_time = jiffies; 13376 if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) 13377 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13378 (struct lpfc_wcqe_complete *)&wcqe); 13379 if (cq->subtype == LPFC_NVME_LS) 13380 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 13381 (struct lpfc_wcqe_complete *)&wcqe); 13382 break; 13383 case CQE_CODE_RELEASE_WQE: 13384 cq->CQ_release_wqe++; 13385 /* Process the WQ release event */ 13386 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 13387 (struct lpfc_wcqe_release *)&wcqe); 13388 break; 13389 case CQE_CODE_XRI_ABORTED: 13390 cq->CQ_xri_aborted++; 13391 /* Process the WQ XRI abort event */ 13392 phba->last_completion_time = jiffies; 13393 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 13394 (struct sli4_wcqe_xri_aborted *)&wcqe); 13395 break; 13396 case CQE_CODE_RECEIVE_V1: 13397 case CQE_CODE_RECEIVE: 13398 phba->last_completion_time = jiffies; 13399 if (cq->subtype == LPFC_NVMET) { 13400 workposted = lpfc_sli4_nvmet_handle_rcqe( 13401 phba, cq, (struct lpfc_rcqe *)&wcqe); 13402 } 13403 break; 13404 default: 13405 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13406 "0144 Not a valid CQE code: x%x\n", 13407 bf_get(lpfc_wcqe_c_code, &wcqe)); 13408 break; 13409 } 13410 return workposted; 13411 } 13412 13413 /** 13414 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 13415 * @phba: Pointer to HBA context object. 13416 * @eqe: Pointer to fast-path event queue entry. 13417 * 13418 * This routine process a event queue entry from the fast-path event queue. 13419 * It will check the MajorCode and MinorCode to determine this is for a 13420 * completion event on a completion queue, if not, an error shall be logged 13421 * and just return. 
Otherwise, it will get to the corresponding completion 13422 * queue and process all the entries on the completion queue, rearm the 13423 * completion queue, and then return. 13424 **/ 13425 static void 13426 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 13427 uint32_t qidx) 13428 { 13429 struct lpfc_queue *cq = NULL; 13430 uint16_t cqid, id; 13431 13432 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 13433 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13434 "0366 Not a valid completion " 13435 "event: majorcode=x%x, minorcode=x%x\n", 13436 bf_get_le32(lpfc_eqe_major_code, eqe), 13437 bf_get_le32(lpfc_eqe_minor_code, eqe)); 13438 return; 13439 } 13440 13441 /* Get the reference to the corresponding CQ */ 13442 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 13443 13444 if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) { 13445 id = phba->sli4_hba.nvmet_cqset[0]->queue_id; 13446 if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) { 13447 /* Process NVMET unsol rcv */ 13448 cq = phba->sli4_hba.nvmet_cqset[cqid - id]; 13449 goto process_cq; 13450 } 13451 } 13452 13453 if (phba->sli4_hba.nvme_cq_map && 13454 (cqid == phba->sli4_hba.nvme_cq_map[qidx])) { 13455 /* Process NVME / NVMET command completion */ 13456 cq = phba->sli4_hba.nvme_cq[qidx]; 13457 goto process_cq; 13458 } 13459 13460 if (phba->sli4_hba.fcp_cq_map && 13461 (cqid == phba->sli4_hba.fcp_cq_map[qidx])) { 13462 /* Process FCP command completion */ 13463 cq = phba->sli4_hba.fcp_cq[qidx]; 13464 goto process_cq; 13465 } 13466 13467 if (phba->sli4_hba.nvmels_cq && 13468 (cqid == phba->sli4_hba.nvmels_cq->queue_id)) { 13469 /* Process NVME unsol rcv */ 13470 cq = phba->sli4_hba.nvmels_cq; 13471 } 13472 13473 /* Otherwise this is a Slow path event */ 13474 if (cq == NULL) { 13475 lpfc_sli4_sp_handle_eqe(phba, eqe, phba->sli4_hba.hba_eq[qidx]); 13476 return; 13477 } 13478 13479 process_cq: 13480 if (unlikely(cqid != cq->queue_id)) { 13481 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13482 "0368 Miss-matched fast-path completion " 13483 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 13484 cqid, cq->queue_id); 13485 return; 13486 } 13487 13488 /* Save EQ associated with this CQ */ 13489 cq->assoc_qp = phba->sli4_hba.hba_eq[qidx]; 13490 13491 if (!queue_work(phba->wq, &cq->irqwork)) 13492 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13493 "0363 Cannot schedule soft IRQ " 13494 "for CQ eqcqid=%d, cqid=%d on CPU %d\n", 13495 cqid, cq->queue_id, smp_processor_id()); 13496 } 13497 13498 /** 13499 * lpfc_sli4_hba_process_cq - Process a fast-path event queue entry 13500 * @phba: Pointer to HBA context object. 13501 * @eqe: Pointer to fast-path event queue entry. 13502 * 13503 * This routine process a event queue entry from the fast-path event queue. 13504 * It will check the MajorCode and MinorCode to determine this is for a 13505 * completion event on a completion queue, if not, an error shall be logged 13506 * and just return. Otherwise, it will get to the corresponding completion 13507 * queue and process all the entries on the completion queue, rearm the 13508 * completion queue, and then return. 
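 *
 * Note: this routine runs from workqueue context rather than from the EQ
 * interrupt handler itself; lpfc_sli4_hba_handle_eqe() defers to it with
 * queue_work(phba->wq, &cq->irqwork). The work item is assumed to be bound
 * when the queue is set up, along the lines of (illustrative sketch, not a
 * quote of the driver code):
 *
 *        INIT_WORK(&cq->irqwork, lpfc_sli4_hba_process_cq);
 *
 * which is why the function below recovers its CQ with container_of(work,
 * struct lpfc_queue, irqwork).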
13509 **/
13510 static void
13511 lpfc_sli4_hba_process_cq(struct work_struct *work)
13512 {
13513         struct lpfc_queue *cq =
13514                 container_of(work, struct lpfc_queue, irqwork);
13515         struct lpfc_hba *phba = cq->phba;
13516         struct lpfc_cqe *cqe;
13517         bool workposted = false;
13518         int ccount = 0;
13519 
13520         /* Process all the entries to the CQ */
13521         while ((cqe = lpfc_sli4_cq_get(cq))) {
13522 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13523                 if (phba->ktime_on)
13524                         cq->isr_timestamp = ktime_get_ns();
13525                 else
13526                         cq->isr_timestamp = 0;
13527 #endif
13528                 workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13529                 if (!(++ccount % cq->entry_repost))
13530                         break;
13531         }
13532 
13533         /* Track the max number of CQEs processed in 1 EQ */
13534         if (ccount > cq->CQ_max_cqe)
13535                 cq->CQ_max_cqe = ccount;
13536         cq->assoc_qp->EQ_cqe_cnt += ccount;
13537 
13538         /* Catch the no cq entry condition */
13539         if (unlikely(ccount == 0))
13540                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13541                                 "0369 No entry from fast-path completion "
13542                                 "queue fcpcqid=%d\n", cq->queue_id);
13543 
13544         /* In any case, flush and re-arm the CQ */
13545         lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13546 
13547         /* wake up worker thread if there is work to be done */
13548         if (workposted)
13549                 lpfc_worker_wake_up(phba);
13550 }
13551 
13552 static void
13553 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13554 {
13555         struct lpfc_eqe *eqe;
13556 
13557         /* walk all the EQ entries and drop on the floor */
13558         while ((eqe = lpfc_sli4_eq_get(eq)))
13559                 ;
13560 
13561         /* Clear and re-arm the EQ */
13562         lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13563 }
13564 
13565 
13566 /**
13567  * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13568  * entry
13569  * @phba: Pointer to HBA context object.
13570  * @eqe: Pointer to fast-path event queue entry.
13571  *
13572  * This routine processes an event queue entry from the Flash Optimized Fabric
13573  * event queue. It checks the MajorCode and MinorCode to determine whether this
13574  * is for a completion event on a completion queue; if not, an error is
13575  * logged and the routine simply returns. Otherwise, it will get to the
13576  * corresponding completion queue, process all the entries on that completion
13577  * queue, rearm the completion queue, and then return.
13578 **/
13579 static void
13580 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13581 {
13582         struct lpfc_queue *cq;
13583         uint16_t cqid;
13584 
13585         if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13586                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13587                                 "9147 Not a valid completion "
13588                                 "event: majorcode=x%x, minorcode=x%x\n",
13589                                 bf_get_le32(lpfc_eqe_major_code, eqe),
13590                                 bf_get_le32(lpfc_eqe_minor_code, eqe));
13591                 return;
13592         }
13593 
13594         /* Get the reference to the corresponding CQ */
13595         cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13596 
13597         /* Next check for OAS */
13598         cq = phba->sli4_hba.oas_cq;
13599         if (unlikely(!cq)) {
13600                 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13601                         lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13602                                         "9148 OAS completion queue "
13603                                         "does not exist\n");
13604                 return;
13605         }
13606 
13607         if (unlikely(cqid != cq->queue_id)) {
13608                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13609                                 "9149 Miss-matched fast-path compl "
13610                                 "queue id: eqcqid=%d, fcpcqid=%d\n",
13611                                 cqid, cq->queue_id);
13612                 return;
13613         }
13614 
13615         /* Save EQ associated with this CQ */
13616         cq->assoc_qp = phba->sli4_hba.fof_eq;
13617 
13618         /* CQ work will be processed on CPU affinitized to this IRQ */
13619         if (!queue_work(phba->wq, &cq->irqwork))
13620                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13621                                 "0367 Cannot schedule soft IRQ "
13622                                 "for CQ eqcqid=%d, cqid=%d on CPU %d\n",
13623                                 cqid, cq->queue_id, smp_processor_id());
13624 }
13625 
13626 /**
13627  * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13628  * @irq: Interrupt number.
13629  * @dev_id: The device context pointer.
13630  *
13631  * This function is directly called from the PCI layer as an interrupt
13632  * service routine when a device with the SLI-4 interface spec is enabled with
13633  * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13634  * IOCB ring event in the HBA. However, when the device is enabled with either
13635  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13636  * device-level interrupt handler. When the PCI slot is in error recovery
13637  * or the HBA is undergoing initialization, the interrupt handler will not
13638  * process the interrupt. Flash Optimized Fabric ring events are handled in
13639  * the interrupt context. This function is called without any lock held.
13640  * It gets the hbalock to access and update SLI data structures. Note that
13641  * the EQ-to-CQ mapping is one-to-one, so the EQ index is
13642  * equal to the CQ index.
13643  *
13644  * This function returns IRQ_HANDLED when the interrupt is handled; otherwise
13645  * it returns IRQ_NONE.
13646 **/ 13647 irqreturn_t 13648 lpfc_sli4_fof_intr_handler(int irq, void *dev_id) 13649 { 13650 struct lpfc_hba *phba; 13651 struct lpfc_hba_eq_hdl *hba_eq_hdl; 13652 struct lpfc_queue *eq; 13653 struct lpfc_eqe *eqe; 13654 unsigned long iflag; 13655 int ecount = 0; 13656 13657 /* Get the driver's phba structure from the dev_id */ 13658 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 13659 phba = hba_eq_hdl->phba; 13660 13661 if (unlikely(!phba)) 13662 return IRQ_NONE; 13663 13664 /* Get to the EQ struct associated with this vector */ 13665 eq = phba->sli4_hba.fof_eq; 13666 if (unlikely(!eq)) 13667 return IRQ_NONE; 13668 13669 /* Check device state for handling interrupt */ 13670 if (unlikely(lpfc_intr_state_check(phba))) { 13671 /* Check again for link_state with lock held */ 13672 spin_lock_irqsave(&phba->hbalock, iflag); 13673 if (phba->link_state < LPFC_LINK_DOWN) 13674 /* Flush, clear interrupt, and rearm the EQ */ 13675 lpfc_sli4_eq_flush(phba, eq); 13676 spin_unlock_irqrestore(&phba->hbalock, iflag); 13677 return IRQ_NONE; 13678 } 13679 13680 /* 13681 * Process all the event on FCP fast-path EQ 13682 */ 13683 while ((eqe = lpfc_sli4_eq_get(eq))) { 13684 lpfc_sli4_fof_handle_eqe(phba, eqe); 13685 if (!(++ecount % eq->entry_repost)) 13686 break; 13687 eq->EQ_processed++; 13688 } 13689 13690 /* Track the max number of EQEs processed in 1 intr */ 13691 if (ecount > eq->EQ_max_eqe) 13692 eq->EQ_max_eqe = ecount; 13693 13694 13695 if (unlikely(ecount == 0)) { 13696 eq->EQ_no_entry++; 13697 13698 if (phba->intr_type == MSIX) 13699 /* MSI-X treated interrupt served as no EQ share INT */ 13700 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13701 "9145 MSI-X interrupt with no EQE\n"); 13702 else { 13703 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13704 "9146 ISR interrupt with no EQE\n"); 13705 /* Non MSI-X treated on interrupt as EQ share INT */ 13706 return IRQ_NONE; 13707 } 13708 } 13709 /* Always clear and re-arm the fast-path EQ */ 13710 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 13711 return IRQ_HANDLED; 13712 } 13713 13714 /** 13715 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 13716 * @irq: Interrupt number. 13717 * @dev_id: The device context pointer. 13718 * 13719 * This function is directly called from the PCI layer as an interrupt 13720 * service routine when device with SLI-4 interface spec is enabled with 13721 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 13722 * ring event in the HBA. However, when the device is enabled with either 13723 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 13724 * device-level interrupt handler. When the PCI slot is in error recovery 13725 * or the HBA is undergoing initialization, the interrupt handler will not 13726 * process the interrupt. The SCSI FCP fast-path ring event are handled in 13727 * the intrrupt context. This function is called without any lock held. 13728 * It gets the hbalock to access and update SLI data structures. Note that, 13729 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 13730 * equal to that of FCP CQ index. 13731 * 13732 * The link attention and ELS ring attention events are handled 13733 * by the worker thread. The interrupt handler signals the worker thread 13734 * and returns for these events. This function is called without any lock 13735 * held. It gets the hbalock to access and update SLI data structures. 13736 * 13737 * This function returns IRQ_HANDLED when interrupt is handled else it 13738 * returns IRQ_NONE. 
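 *
 * Each MSI-X vector is registered with a per-vector struct lpfc_hba_eq_hdl as
 * its dev_id, carrying the adapter pointer and the EQ index. A hedged sketch
 * of that registration, assuming the usual request_irq()/pci_irq_vector()
 * pairing (the real code is in lpfc_init.c; names and flags are illustrative):
 *
 *        for (idx = 0; idx < phba->io_channel_irqs; idx++) {
 *                phba->sli4_hba.hba_eq_hdl[idx].idx = idx;
 *                phba->sli4_hba.hba_eq_hdl[idx].phba = phba;
 *                rc = request_irq(pci_irq_vector(phba->pcidev, idx),
 *                                 lpfc_sli4_hba_intr_handler, 0,
 *                                 "lpfc-hba-eq",
 *                                 &phba->sli4_hba.hba_eq_hdl[idx]);
 *        }
 *
 * The handler below simply reads back hba_eq_hdl->idx to find its EQ.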
13739 **/ 13740 irqreturn_t 13741 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 13742 { 13743 struct lpfc_hba *phba; 13744 struct lpfc_hba_eq_hdl *hba_eq_hdl; 13745 struct lpfc_queue *fpeq; 13746 struct lpfc_eqe *eqe; 13747 unsigned long iflag; 13748 int ecount = 0; 13749 int hba_eqidx; 13750 13751 /* Get the driver's phba structure from the dev_id */ 13752 hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id; 13753 phba = hba_eq_hdl->phba; 13754 hba_eqidx = hba_eq_hdl->idx; 13755 13756 if (unlikely(!phba)) 13757 return IRQ_NONE; 13758 if (unlikely(!phba->sli4_hba.hba_eq)) 13759 return IRQ_NONE; 13760 13761 /* Get to the EQ struct associated with this vector */ 13762 fpeq = phba->sli4_hba.hba_eq[hba_eqidx]; 13763 if (unlikely(!fpeq)) 13764 return IRQ_NONE; 13765 13766 if (lpfc_fcp_look_ahead) { 13767 if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) 13768 lpfc_sli4_eq_clr_intr(fpeq); 13769 else { 13770 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13771 return IRQ_NONE; 13772 } 13773 } 13774 13775 /* Check device state for handling interrupt */ 13776 if (unlikely(lpfc_intr_state_check(phba))) { 13777 /* Check again for link_state with lock held */ 13778 spin_lock_irqsave(&phba->hbalock, iflag); 13779 if (phba->link_state < LPFC_LINK_DOWN) 13780 /* Flush, clear interrupt, and rearm the EQ */ 13781 lpfc_sli4_eq_flush(phba, fpeq); 13782 spin_unlock_irqrestore(&phba->hbalock, iflag); 13783 if (lpfc_fcp_look_ahead) 13784 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13785 return IRQ_NONE; 13786 } 13787 13788 /* 13789 * Process all the event on FCP fast-path EQ 13790 */ 13791 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 13792 lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx); 13793 if (!(++ecount % fpeq->entry_repost)) 13794 break; 13795 fpeq->EQ_processed++; 13796 } 13797 13798 /* Track the max number of EQEs processed in 1 intr */ 13799 if (ecount > fpeq->EQ_max_eqe) 13800 fpeq->EQ_max_eqe = ecount; 13801 13802 /* Always clear and re-arm the fast-path EQ */ 13803 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 13804 13805 if (unlikely(ecount == 0)) { 13806 fpeq->EQ_no_entry++; 13807 13808 if (lpfc_fcp_look_ahead) { 13809 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13810 return IRQ_NONE; 13811 } 13812 13813 if (phba->intr_type == MSIX) 13814 /* MSI-X treated interrupt served as no EQ share INT */ 13815 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 13816 "0358 MSI-X interrupt with no EQE\n"); 13817 else 13818 /* Non MSI-X treated on interrupt as EQ share INT */ 13819 return IRQ_NONE; 13820 } 13821 13822 if (lpfc_fcp_look_ahead) 13823 atomic_inc(&hba_eq_hdl->hba_eq_in_use); 13824 13825 return IRQ_HANDLED; 13826 } /* lpfc_sli4_fp_intr_handler */ 13827 13828 /** 13829 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 13830 * @irq: Interrupt number. 13831 * @dev_id: The device context pointer. 13832 * 13833 * This function is the device-level interrupt handler to device with SLI-4 13834 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 13835 * interrupt mode is enabled and there is an event in the HBA which requires 13836 * driver attention. This function invokes the slow-path interrupt attention 13837 * handling function and fast-path interrupt attention handling function in 13838 * turn to process the relevant HBA attention events. This function is called 13839 * without any lock held. It gets the hbalock to access and update SLI data 13840 * structures. 13841 * 13842 * This function returns IRQ_HANDLED when interrupt is handled, else it 13843 * returns IRQ_NONE. 
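 *
 * Unlike the per-vector fast-path handler, @dev_id here is the phba itself;
 * the routine simply walks every io_channel EQ handle (plus the FOF handle
 * when Flash Optimized Fabric is configured) and ORs the per-handle results
 * into a single return value.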
13844 **/ 13845 irqreturn_t 13846 lpfc_sli4_intr_handler(int irq, void *dev_id) 13847 { 13848 struct lpfc_hba *phba; 13849 irqreturn_t hba_irq_rc; 13850 bool hba_handled = false; 13851 int qidx; 13852 13853 /* Get the driver's phba structure from the dev_id */ 13854 phba = (struct lpfc_hba *)dev_id; 13855 13856 if (unlikely(!phba)) 13857 return IRQ_NONE; 13858 13859 /* 13860 * Invoke fast-path host attention interrupt handling as appropriate. 13861 */ 13862 for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) { 13863 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 13864 &phba->sli4_hba.hba_eq_hdl[qidx]); 13865 if (hba_irq_rc == IRQ_HANDLED) 13866 hba_handled |= true; 13867 } 13868 13869 if (phba->cfg_fof) { 13870 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, 13871 &phba->sli4_hba.hba_eq_hdl[qidx]); 13872 if (hba_irq_rc == IRQ_HANDLED) 13873 hba_handled |= true; 13874 } 13875 13876 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 13877 } /* lpfc_sli4_intr_handler */ 13878 13879 /** 13880 * lpfc_sli4_queue_free - free a queue structure and associated memory 13881 * @queue: The queue structure to free. 13882 * 13883 * This function frees a queue structure and the DMAable memory used for 13884 * the host resident queue. This function must be called after destroying the 13885 * queue on the HBA. 13886 **/ 13887 void 13888 lpfc_sli4_queue_free(struct lpfc_queue *queue) 13889 { 13890 struct lpfc_dmabuf *dmabuf; 13891 13892 if (!queue) 13893 return; 13894 13895 while (!list_empty(&queue->page_list)) { 13896 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 13897 list); 13898 dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size, 13899 dmabuf->virt, dmabuf->phys); 13900 kfree(dmabuf); 13901 } 13902 if (queue->rqbp) { 13903 lpfc_free_rq_buffer(queue->phba, queue); 13904 kfree(queue->rqbp); 13905 } 13906 13907 if (!list_empty(&queue->wq_list)) 13908 list_del(&queue->wq_list); 13909 13910 kfree(queue); 13911 return; 13912 } 13913 13914 /** 13915 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 13916 * @phba: The HBA that this queue is being created on. 13917 * @page_size: The size of a queue page 13918 * @entry_size: The size of each queue entry for this queue. 13919 * @entry count: The number of entries that this queue will handle. 13920 * 13921 * This function allocates a queue structure and the DMAable memory used for 13922 * the host resident queue. This function must be called before creating the 13923 * queue on the HBA. 
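 *
 * A minimal usage sketch (sizes are illustrative; real callers take them
 * from pc_sli4_params or the module configuration):
 *
 *	eq = lpfc_sli4_queue_alloc(phba, page_size, entry_size, entry_count);
 *	if (eq && lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
 *		lpfc_sli4_queue_free(eq);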
13924 **/ 13925 struct lpfc_queue * 13926 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, 13927 uint32_t entry_size, uint32_t entry_count) 13928 { 13929 struct lpfc_queue *queue; 13930 struct lpfc_dmabuf *dmabuf; 13931 int x, total_qe_count; 13932 void *dma_pointer; 13933 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13934 13935 if (!phba->sli4_hba.pc_sli4_params.supported) 13936 hw_page_size = page_size; 13937 13938 queue = kzalloc(sizeof(struct lpfc_queue) + 13939 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 13940 if (!queue) 13941 return NULL; 13942 queue->page_count = (ALIGN(entry_size * entry_count, 13943 hw_page_size))/hw_page_size; 13944 13945 /* If needed, Adjust page count to match the max the adapter supports */ 13946 if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt) 13947 queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt; 13948 13949 INIT_LIST_HEAD(&queue->list); 13950 INIT_LIST_HEAD(&queue->wq_list); 13951 INIT_LIST_HEAD(&queue->page_list); 13952 INIT_LIST_HEAD(&queue->child_list); 13953 13954 /* Set queue parameters now. If the system cannot provide memory 13955 * resources, the free routine needs to know what was allocated. 13956 */ 13957 queue->entry_size = entry_size; 13958 queue->entry_count = entry_count; 13959 queue->page_size = hw_page_size; 13960 queue->phba = phba; 13961 13962 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 13963 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 13964 if (!dmabuf) 13965 goto out_fail; 13966 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, 13967 hw_page_size, &dmabuf->phys, 13968 GFP_KERNEL); 13969 if (!dmabuf->virt) { 13970 kfree(dmabuf); 13971 goto out_fail; 13972 } 13973 dmabuf->buffer_tag = x; 13974 list_add_tail(&dmabuf->list, &queue->page_list); 13975 /* initialize queue's entry array */ 13976 dma_pointer = dmabuf->virt; 13977 for (; total_qe_count < entry_count && 13978 dma_pointer < (hw_page_size + dmabuf->virt); 13979 total_qe_count++, dma_pointer += entry_size) { 13980 queue->qe[total_qe_count].address = dma_pointer; 13981 } 13982 } 13983 INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq); 13984 INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq); 13985 13986 /* entry_repost will be set during q creation */ 13987 13988 return queue; 13989 out_fail: 13990 lpfc_sli4_queue_free(queue); 13991 return NULL; 13992 } 13993 13994 /** 13995 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 13996 * @phba: HBA structure that indicates port to create a queue on. 13997 * @pci_barset: PCI BAR set flag. 13998 * 13999 * This function shall perform iomap of the specified PCI BAR address to host 14000 * memory address if not already done so and return it. The returned host 14001 * memory address can be NULL. 14002 */ 14003 static void __iomem * 14004 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 14005 { 14006 if (!phba->pcidev) 14007 return NULL; 14008 14009 switch (pci_barset) { 14010 case WQ_PCI_BAR_0_AND_1: 14011 return phba->pci_bar0_memmap_p; 14012 case WQ_PCI_BAR_2_AND_3: 14013 return phba->pci_bar2_memmap_p; 14014 case WQ_PCI_BAR_4_AND_5: 14015 return phba->pci_bar4_memmap_p; 14016 default: 14017 break; 14018 } 14019 return NULL; 14020 } 14021 14022 /** 14023 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs 14024 * @phba: HBA structure that indicates port to create a queue on. 
14025 * @startq: The starting FCP EQ to modify 14026 * 14027 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 14028 * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be 14029 * updated in one mailbox command. 14030 * 14031 * The @phba struct is used to send mailbox command to HBA. The @startq 14032 * is used to get the starting FCP EQ to change. 14033 * This function is asynchronous and will wait for the mailbox 14034 * command to finish before continuing. 14035 * 14036 * On success this function will return a zero. If unable to allocate enough 14037 * memory this function will return -ENOMEM. If the queue create mailbox command 14038 * fails this function will return -ENXIO. 14039 **/ 14040 int 14041 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq, 14042 uint32_t numq, uint32_t imax) 14043 { 14044 struct lpfc_mbx_modify_eq_delay *eq_delay; 14045 LPFC_MBOXQ_t *mbox; 14046 struct lpfc_queue *eq; 14047 int cnt, rc, length, status = 0; 14048 uint32_t shdr_status, shdr_add_status; 14049 uint32_t result, val; 14050 int qidx; 14051 union lpfc_sli4_cfg_shdr *shdr; 14052 uint16_t dmult; 14053 14054 if (startq >= phba->io_channel_irqs) 14055 return 0; 14056 14057 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14058 if (!mbox) 14059 return -ENOMEM; 14060 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 14061 sizeof(struct lpfc_sli4_cfg_mhdr)); 14062 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14063 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 14064 length, LPFC_SLI4_MBX_EMBED); 14065 eq_delay = &mbox->u.mqe.un.eq_delay; 14066 14067 /* Calculate delay multiper from maximum interrupt per second */ 14068 result = imax / phba->io_channel_irqs; 14069 if (result > LPFC_DMULT_CONST || result == 0) 14070 dmult = 0; 14071 else 14072 dmult = LPFC_DMULT_CONST/result - 1; 14073 if (dmult > LPFC_DMULT_MAX) 14074 dmult = LPFC_DMULT_MAX; 14075 14076 cnt = 0; 14077 for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) { 14078 eq = phba->sli4_hba.hba_eq[qidx]; 14079 if (!eq) 14080 continue; 14081 eq->q_mode = imax; 14082 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 14083 eq_delay->u.request.eq[cnt].phase = 0; 14084 eq_delay->u.request.eq[cnt].delay_multi = dmult; 14085 cnt++; 14086 14087 /* q_mode is only used for auto_imax */ 14088 if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) { 14089 /* Use EQ Delay Register method for q_mode */ 14090 14091 /* Convert for EQ Delay register */ 14092 val = phba->cfg_fcp_imax; 14093 if (val) { 14094 /* First, interrupts per sec per EQ */ 14095 val = phba->cfg_fcp_imax / 14096 phba->io_channel_irqs; 14097 14098 /* us delay between each interrupt */ 14099 val = LPFC_SEC_TO_USEC / val; 14100 } 14101 eq->q_mode = val; 14102 } else { 14103 eq->q_mode = imax; 14104 } 14105 14106 if (cnt >= numq) 14107 break; 14108 } 14109 eq_delay->u.request.num_eq = cnt; 14110 14111 mbox->vport = phba->pport; 14112 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14113 mbox->context1 = NULL; 14114 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14115 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 14116 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14117 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14118 if (shdr_status || shdr_add_status || rc) { 14119 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14120 "2512 MODIFY_EQ_DELAY mailbox failed with " 14121 "status x%x add_status x%x, mbx status x%x\n", 14122 shdr_status, shdr_add_status, rc); 14123 status = -ENXIO; 14124 } 14125 
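	/*
	 * The mailbox was issued with MBX_POLL, so ownership stays with this
	 * routine; return it to the pool whether or not MODIFY_EQ_DELAY
	 * succeeded.
	 */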
mempool_free(mbox, phba->mbox_mem_pool); 14126 return status; 14127 } 14128 14129 /** 14130 * lpfc_eq_create - Create an Event Queue on the HBA 14131 * @phba: HBA structure that indicates port to create a queue on. 14132 * @eq: The queue structure to use to create the event queue. 14133 * @imax: The maximum interrupt per second limit. 14134 * 14135 * This function creates an event queue, as detailed in @eq, on a port, 14136 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 14137 * 14138 * The @phba struct is used to send mailbox command to HBA. The @eq struct 14139 * is used to get the entry count and entry size that are necessary to 14140 * determine the number of pages to allocate and use for this queue. This 14141 * function will send the EQ_CREATE mailbox command to the HBA to setup the 14142 * event queue. This function is asynchronous and will wait for the mailbox 14143 * command to finish before continuing. 14144 * 14145 * On success this function will return a zero. If unable to allocate enough 14146 * memory this function will return -ENOMEM. If the queue create mailbox command 14147 * fails this function will return -ENXIO. 14148 **/ 14149 int 14150 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 14151 { 14152 struct lpfc_mbx_eq_create *eq_create; 14153 LPFC_MBOXQ_t *mbox; 14154 int rc, length, status = 0; 14155 struct lpfc_dmabuf *dmabuf; 14156 uint32_t shdr_status, shdr_add_status; 14157 union lpfc_sli4_cfg_shdr *shdr; 14158 uint16_t dmult; 14159 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14160 14161 /* sanity check on queue memory */ 14162 if (!eq) 14163 return -ENODEV; 14164 if (!phba->sli4_hba.pc_sli4_params.supported) 14165 hw_page_size = SLI4_PAGE_SIZE; 14166 14167 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14168 if (!mbox) 14169 return -ENOMEM; 14170 length = (sizeof(struct lpfc_mbx_eq_create) - 14171 sizeof(struct lpfc_sli4_cfg_mhdr)); 14172 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14173 LPFC_MBOX_OPCODE_EQ_CREATE, 14174 length, LPFC_SLI4_MBX_EMBED); 14175 eq_create = &mbox->u.mqe.un.eq_create; 14176 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 14177 eq->page_count); 14178 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 14179 LPFC_EQE_SIZE); 14180 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 14181 /* don't setup delay multiplier using EQ_CREATE */ 14182 dmult = 0; 14183 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 14184 dmult); 14185 switch (eq->entry_count) { 14186 default: 14187 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14188 "0360 Unsupported EQ count. 
(%d)\n", 14189 eq->entry_count); 14190 if (eq->entry_count < 256) 14191 return -EINVAL; 14192 /* otherwise default to smallest count (drop through) */ 14193 case 256: 14194 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14195 LPFC_EQ_CNT_256); 14196 break; 14197 case 512: 14198 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14199 LPFC_EQ_CNT_512); 14200 break; 14201 case 1024: 14202 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14203 LPFC_EQ_CNT_1024); 14204 break; 14205 case 2048: 14206 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14207 LPFC_EQ_CNT_2048); 14208 break; 14209 case 4096: 14210 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 14211 LPFC_EQ_CNT_4096); 14212 break; 14213 } 14214 list_for_each_entry(dmabuf, &eq->page_list, list) { 14215 memset(dmabuf->virt, 0, hw_page_size); 14216 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14217 putPaddrLow(dmabuf->phys); 14218 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14219 putPaddrHigh(dmabuf->phys); 14220 } 14221 mbox->vport = phba->pport; 14222 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14223 mbox->context1 = NULL; 14224 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14225 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 14226 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14227 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14228 if (shdr_status || shdr_add_status || rc) { 14229 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14230 "2500 EQ_CREATE mailbox failed with " 14231 "status x%x add_status x%x, mbx status x%x\n", 14232 shdr_status, shdr_add_status, rc); 14233 status = -ENXIO; 14234 } 14235 eq->type = LPFC_EQ; 14236 eq->subtype = LPFC_NONE; 14237 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 14238 if (eq->queue_id == 0xFFFF) 14239 status = -ENXIO; 14240 eq->host_index = 0; 14241 eq->hba_index = 0; 14242 eq->entry_repost = LPFC_EQ_REPOST; 14243 14244 mempool_free(mbox, phba->mbox_mem_pool); 14245 return status; 14246 } 14247 14248 /** 14249 * lpfc_cq_create - Create a Completion Queue on the HBA 14250 * @phba: HBA structure that indicates port to create a queue on. 14251 * @cq: The queue structure to use to create the completion queue. 14252 * @eq: The event queue to bind this completion queue to. 14253 * 14254 * This function creates a completion queue, as detailed in @wq, on a port, 14255 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 14256 * 14257 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14258 * is used to get the entry count and entry size that are necessary to 14259 * determine the number of pages to allocate and use for this queue. The @eq 14260 * is used to indicate which event queue to bind this completion queue to. This 14261 * function will send the CQ_CREATE mailbox command to the HBA to setup the 14262 * completion queue. This function is asynchronous and will wait for the mailbox 14263 * command to finish before continuing. 14264 * 14265 * On success this function will return a zero. If unable to allocate enough 14266 * memory this function will return -ENOMEM. If the queue create mailbox command 14267 * fails this function will return -ENXIO. 
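 *
 * A minimal usage sketch (queue sizes illustrative; type/subtype shown as
 * they are used for an FCP work completion queue elsewhere in the driver):
 *
 *	cq = lpfc_sli4_queue_alloc(phba, page_size, entry_size, entry_count);
 *	if (cq && lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP))
 *		lpfc_sli4_queue_free(cq);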
14268 **/ 14269 int 14270 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 14271 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 14272 { 14273 struct lpfc_mbx_cq_create *cq_create; 14274 struct lpfc_dmabuf *dmabuf; 14275 LPFC_MBOXQ_t *mbox; 14276 int rc, length, status = 0; 14277 uint32_t shdr_status, shdr_add_status; 14278 union lpfc_sli4_cfg_shdr *shdr; 14279 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14280 14281 /* sanity check on queue memory */ 14282 if (!cq || !eq) 14283 return -ENODEV; 14284 if (!phba->sli4_hba.pc_sli4_params.supported) 14285 hw_page_size = cq->page_size; 14286 14287 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14288 if (!mbox) 14289 return -ENOMEM; 14290 length = (sizeof(struct lpfc_mbx_cq_create) - 14291 sizeof(struct lpfc_sli4_cfg_mhdr)); 14292 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14293 LPFC_MBOX_OPCODE_CQ_CREATE, 14294 length, LPFC_SLI4_MBX_EMBED); 14295 cq_create = &mbox->u.mqe.un.cq_create; 14296 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 14297 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 14298 cq->page_count); 14299 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 14300 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 14301 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14302 phba->sli4_hba.pc_sli4_params.cqv); 14303 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 14304 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 14305 (cq->page_size / SLI4_PAGE_SIZE)); 14306 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 14307 eq->queue_id); 14308 } else { 14309 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 14310 eq->queue_id); 14311 } 14312 switch (cq->entry_count) { 14313 case 2048: 14314 case 4096: 14315 if (phba->sli4_hba.pc_sli4_params.cqv == 14316 LPFC_Q_CREATE_VERSION_2) { 14317 cq_create->u.request.context.lpfc_cq_context_count = 14318 cq->entry_count; 14319 bf_set(lpfc_cq_context_count, 14320 &cq_create->u.request.context, 14321 LPFC_CQ_CNT_WORD7); 14322 break; 14323 } 14324 /* Fall Thru */ 14325 default: 14326 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14327 "0361 Unsupported CQ count: " 14328 "entry cnt %d sz %d pg cnt %d\n", 14329 cq->entry_count, cq->entry_size, 14330 cq->page_count); 14331 if (cq->entry_count < 256) { 14332 status = -EINVAL; 14333 goto out; 14334 } 14335 /* otherwise default to smallest count (drop through) */ 14336 case 256: 14337 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14338 LPFC_CQ_CNT_256); 14339 break; 14340 case 512: 14341 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14342 LPFC_CQ_CNT_512); 14343 break; 14344 case 1024: 14345 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 14346 LPFC_CQ_CNT_1024); 14347 break; 14348 } 14349 list_for_each_entry(dmabuf, &cq->page_list, list) { 14350 memset(dmabuf->virt, 0, cq->page_size); 14351 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14352 putPaddrLow(dmabuf->phys); 14353 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14354 putPaddrHigh(dmabuf->phys); 14355 } 14356 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14357 14358 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 14359 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14360 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14361 if (shdr_status || shdr_add_status || rc) { 14362 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14363 "2501 CQ_CREATE mailbox failed with " 14364 "status x%x add_status x%x, mbx status x%x\n", 14365 shdr_status, shdr_add_status, rc); 14366 status = -ENXIO; 14367 goto out; 14368 } 14369 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14370 if (cq->queue_id == 0xFFFF) { 14371 status = -ENXIO; 14372 goto out; 14373 } 14374 /* link the cq onto the parent eq child list */ 14375 list_add_tail(&cq->list, &eq->child_list); 14376 /* Set up completion queue's type and subtype */ 14377 cq->type = type; 14378 cq->subtype = subtype; 14379 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 14380 cq->assoc_qid = eq->queue_id; 14381 cq->host_index = 0; 14382 cq->hba_index = 0; 14383 cq->entry_repost = LPFC_CQ_REPOST; 14384 14385 out: 14386 mempool_free(mbox, phba->mbox_mem_pool); 14387 return status; 14388 } 14389 14390 /** 14391 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ 14392 * @phba: HBA structure that indicates port to create a queue on. 14393 * @cqp: The queue structure array to use to create the completion queues. 14394 * @eqp: The event queue array to bind these completion queues to. 14395 * 14396 * This function creates a set of completion queue, s to support MRQ 14397 * as detailed in @cqp, on a port, 14398 * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA. 14399 * 14400 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14401 * is used to get the entry count and entry size that are necessary to 14402 * determine the number of pages to allocate and use for this queue. The @eq 14403 * is used to indicate which event queue to bind this completion queue to. This 14404 * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the 14405 * completion queue. This function is asynchronous and will wait for the mailbox 14406 * command to finish before continuing. 14407 * 14408 * On success this function will return a zero. If unable to allocate enough 14409 * memory this function will return -ENOMEM. If the queue create mailbox command 14410 * fails this function will return -ENXIO. 
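 *
 * A sketch of the NVMET MRQ pairing this routine serves (array names are
 * illustrative): one CQ per MRQ, each bound to its own EQ, is created in a
 * single CREATE_CQ_SET command, and the returned base queue id is consumed
 * sequentially by the CQs:
 *
 *	rc = lpfc_cq_create_set(phba, nvmet_cq_array, hba_eq_array,
 *				LPFC_WCQ, LPFC_NVMET);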
14411 **/ 14412 int 14413 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp, 14414 struct lpfc_queue **eqp, uint32_t type, uint32_t subtype) 14415 { 14416 struct lpfc_queue *cq; 14417 struct lpfc_queue *eq; 14418 struct lpfc_mbx_cq_create_set *cq_set; 14419 struct lpfc_dmabuf *dmabuf; 14420 LPFC_MBOXQ_t *mbox; 14421 int rc, length, alloclen, status = 0; 14422 int cnt, idx, numcq, page_idx = 0; 14423 uint32_t shdr_status, shdr_add_status; 14424 union lpfc_sli4_cfg_shdr *shdr; 14425 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14426 14427 /* sanity check on queue memory */ 14428 numcq = phba->cfg_nvmet_mrq; 14429 if (!cqp || !eqp || !numcq) 14430 return -ENODEV; 14431 14432 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14433 if (!mbox) 14434 return -ENOMEM; 14435 14436 length = sizeof(struct lpfc_mbx_cq_create_set); 14437 length += ((numcq * cqp[0]->page_count) * 14438 sizeof(struct dma_address)); 14439 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14440 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length, 14441 LPFC_SLI4_MBX_NEMBED); 14442 if (alloclen < length) { 14443 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14444 "3098 Allocated DMA memory size (%d) is " 14445 "less than the requested DMA memory size " 14446 "(%d)\n", alloclen, length); 14447 status = -ENOMEM; 14448 goto out; 14449 } 14450 cq_set = mbox->sge_array->addr[0]; 14451 shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr; 14452 bf_set(lpfc_mbox_hdr_version, &shdr->request, 0); 14453 14454 for (idx = 0; idx < numcq; idx++) { 14455 cq = cqp[idx]; 14456 eq = eqp[idx]; 14457 if (!cq || !eq) { 14458 status = -ENOMEM; 14459 goto out; 14460 } 14461 if (!phba->sli4_hba.pc_sli4_params.supported) 14462 hw_page_size = cq->page_size; 14463 14464 switch (idx) { 14465 case 0: 14466 bf_set(lpfc_mbx_cq_create_set_page_size, 14467 &cq_set->u.request, 14468 (hw_page_size / SLI4_PAGE_SIZE)); 14469 bf_set(lpfc_mbx_cq_create_set_num_pages, 14470 &cq_set->u.request, cq->page_count); 14471 bf_set(lpfc_mbx_cq_create_set_evt, 14472 &cq_set->u.request, 1); 14473 bf_set(lpfc_mbx_cq_create_set_valid, 14474 &cq_set->u.request, 1); 14475 bf_set(lpfc_mbx_cq_create_set_cqe_size, 14476 &cq_set->u.request, 0); 14477 bf_set(lpfc_mbx_cq_create_set_num_cq, 14478 &cq_set->u.request, numcq); 14479 switch (cq->entry_count) { 14480 case 2048: 14481 case 4096: 14482 if (phba->sli4_hba.pc_sli4_params.cqv == 14483 LPFC_Q_CREATE_VERSION_2) { 14484 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14485 &cq_set->u.request, 14486 cq->entry_count); 14487 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14488 &cq_set->u.request, 14489 LPFC_CQ_CNT_WORD7); 14490 break; 14491 } 14492 /* Fall Thru */ 14493 default: 14494 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14495 "3118 Bad CQ count. 
(%d)\n", 14496 cq->entry_count); 14497 if (cq->entry_count < 256) { 14498 status = -EINVAL; 14499 goto out; 14500 } 14501 /* otherwise default to smallest (drop thru) */ 14502 case 256: 14503 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14504 &cq_set->u.request, LPFC_CQ_CNT_256); 14505 break; 14506 case 512: 14507 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14508 &cq_set->u.request, LPFC_CQ_CNT_512); 14509 break; 14510 case 1024: 14511 bf_set(lpfc_mbx_cq_create_set_cqe_cnt, 14512 &cq_set->u.request, LPFC_CQ_CNT_1024); 14513 break; 14514 } 14515 bf_set(lpfc_mbx_cq_create_set_eq_id0, 14516 &cq_set->u.request, eq->queue_id); 14517 break; 14518 case 1: 14519 bf_set(lpfc_mbx_cq_create_set_eq_id1, 14520 &cq_set->u.request, eq->queue_id); 14521 break; 14522 case 2: 14523 bf_set(lpfc_mbx_cq_create_set_eq_id2, 14524 &cq_set->u.request, eq->queue_id); 14525 break; 14526 case 3: 14527 bf_set(lpfc_mbx_cq_create_set_eq_id3, 14528 &cq_set->u.request, eq->queue_id); 14529 break; 14530 case 4: 14531 bf_set(lpfc_mbx_cq_create_set_eq_id4, 14532 &cq_set->u.request, eq->queue_id); 14533 break; 14534 case 5: 14535 bf_set(lpfc_mbx_cq_create_set_eq_id5, 14536 &cq_set->u.request, eq->queue_id); 14537 break; 14538 case 6: 14539 bf_set(lpfc_mbx_cq_create_set_eq_id6, 14540 &cq_set->u.request, eq->queue_id); 14541 break; 14542 case 7: 14543 bf_set(lpfc_mbx_cq_create_set_eq_id7, 14544 &cq_set->u.request, eq->queue_id); 14545 break; 14546 case 8: 14547 bf_set(lpfc_mbx_cq_create_set_eq_id8, 14548 &cq_set->u.request, eq->queue_id); 14549 break; 14550 case 9: 14551 bf_set(lpfc_mbx_cq_create_set_eq_id9, 14552 &cq_set->u.request, eq->queue_id); 14553 break; 14554 case 10: 14555 bf_set(lpfc_mbx_cq_create_set_eq_id10, 14556 &cq_set->u.request, eq->queue_id); 14557 break; 14558 case 11: 14559 bf_set(lpfc_mbx_cq_create_set_eq_id11, 14560 &cq_set->u.request, eq->queue_id); 14561 break; 14562 case 12: 14563 bf_set(lpfc_mbx_cq_create_set_eq_id12, 14564 &cq_set->u.request, eq->queue_id); 14565 break; 14566 case 13: 14567 bf_set(lpfc_mbx_cq_create_set_eq_id13, 14568 &cq_set->u.request, eq->queue_id); 14569 break; 14570 case 14: 14571 bf_set(lpfc_mbx_cq_create_set_eq_id14, 14572 &cq_set->u.request, eq->queue_id); 14573 break; 14574 case 15: 14575 bf_set(lpfc_mbx_cq_create_set_eq_id15, 14576 &cq_set->u.request, eq->queue_id); 14577 break; 14578 } 14579 14580 /* link the cq onto the parent eq child list */ 14581 list_add_tail(&cq->list, &eq->child_list); 14582 /* Set up completion queue's type and subtype */ 14583 cq->type = type; 14584 cq->subtype = subtype; 14585 cq->assoc_qid = eq->queue_id; 14586 cq->host_index = 0; 14587 cq->hba_index = 0; 14588 cq->entry_repost = LPFC_CQ_REPOST; 14589 cq->chann = idx; 14590 14591 rc = 0; 14592 list_for_each_entry(dmabuf, &cq->page_list, list) { 14593 memset(dmabuf->virt, 0, hw_page_size); 14594 cnt = page_idx + dmabuf->buffer_tag; 14595 cq_set->u.request.page[cnt].addr_lo = 14596 putPaddrLow(dmabuf->phys); 14597 cq_set->u.request.page[cnt].addr_hi = 14598 putPaddrHigh(dmabuf->phys); 14599 rc++; 14600 } 14601 page_idx += rc; 14602 } 14603 14604 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14605 14606 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 14607 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14608 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14609 if (shdr_status || shdr_add_status || rc) { 14610 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14611 "3119 CQ_CREATE_SET mailbox failed with " 14612 "status x%x add_status x%x, mbx status x%x\n", 14613 shdr_status, shdr_add_status, rc); 14614 status = -ENXIO; 14615 goto out; 14616 } 14617 rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response); 14618 if (rc == 0xFFFF) { 14619 status = -ENXIO; 14620 goto out; 14621 } 14622 14623 for (idx = 0; idx < numcq; idx++) { 14624 cq = cqp[idx]; 14625 cq->queue_id = rc + idx; 14626 } 14627 14628 out: 14629 lpfc_sli4_mbox_cmd_free(phba, mbox); 14630 return status; 14631 } 14632 14633 /** 14634 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 14635 * @phba: HBA structure that indicates port to create a queue on. 14636 * @mq: The queue structure to use to create the mailbox queue. 14637 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 14638 * @cq: The completion queue to associate with this cq. 14639 * 14640 * This function provides failback (fb) functionality when the 14641 * mq_create_ext fails on older FW generations. It's purpose is identical 14642 * to mq_create_ext otherwise. 14643 * 14644 * This routine cannot fail as all attributes were previously accessed and 14645 * initialized in mq_create_ext. 14646 **/ 14647 static void 14648 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 14649 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 14650 { 14651 struct lpfc_mbx_mq_create *mq_create; 14652 struct lpfc_dmabuf *dmabuf; 14653 int length; 14654 14655 length = (sizeof(struct lpfc_mbx_mq_create) - 14656 sizeof(struct lpfc_sli4_cfg_mhdr)); 14657 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14658 LPFC_MBOX_OPCODE_MQ_CREATE, 14659 length, LPFC_SLI4_MBX_EMBED); 14660 mq_create = &mbox->u.mqe.un.mq_create; 14661 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 14662 mq->page_count); 14663 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 14664 cq->queue_id); 14665 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 14666 switch (mq->entry_count) { 14667 case 16: 14668 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14669 LPFC_MQ_RING_SIZE_16); 14670 break; 14671 case 32: 14672 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14673 LPFC_MQ_RING_SIZE_32); 14674 break; 14675 case 64: 14676 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14677 LPFC_MQ_RING_SIZE_64); 14678 break; 14679 case 128: 14680 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 14681 LPFC_MQ_RING_SIZE_128); 14682 break; 14683 } 14684 list_for_each_entry(dmabuf, &mq->page_list, list) { 14685 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 14686 putPaddrLow(dmabuf->phys); 14687 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 14688 putPaddrHigh(dmabuf->phys); 14689 } 14690 } 14691 14692 /** 14693 * lpfc_mq_create - Create a mailbox Queue on the HBA 14694 * @phba: HBA structure that indicates port to create a queue on. 14695 * @mq: The queue structure to use to create the mailbox queue. 14696 * @cq: The completion queue to associate with this cq. 14697 * @subtype: The queue's subtype. 14698 * 14699 * This function creates a mailbox queue, as detailed in @mq, on a port, 14700 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 
14701 * 14702 * The @phba struct is used to send mailbox command to HBA. The @cq struct 14703 * is used to get the entry count and entry size that are necessary to 14704 * determine the number of pages to allocate and use for this queue. This 14705 * function will send the MQ_CREATE mailbox command to the HBA to setup the 14706 * mailbox queue. This function is asynchronous and will wait for the mailbox 14707 * command to finish before continuing. 14708 * 14709 * On success this function will return a zero. If unable to allocate enough 14710 * memory this function will return -ENOMEM. If the queue create mailbox command 14711 * fails this function will return -ENXIO. 14712 **/ 14713 int32_t 14714 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 14715 struct lpfc_queue *cq, uint32_t subtype) 14716 { 14717 struct lpfc_mbx_mq_create *mq_create; 14718 struct lpfc_mbx_mq_create_ext *mq_create_ext; 14719 struct lpfc_dmabuf *dmabuf; 14720 LPFC_MBOXQ_t *mbox; 14721 int rc, length, status = 0; 14722 uint32_t shdr_status, shdr_add_status; 14723 union lpfc_sli4_cfg_shdr *shdr; 14724 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14725 14726 /* sanity check on queue memory */ 14727 if (!mq || !cq) 14728 return -ENODEV; 14729 if (!phba->sli4_hba.pc_sli4_params.supported) 14730 hw_page_size = SLI4_PAGE_SIZE; 14731 14732 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14733 if (!mbox) 14734 return -ENOMEM; 14735 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 14736 sizeof(struct lpfc_sli4_cfg_mhdr)); 14737 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14738 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 14739 length, LPFC_SLI4_MBX_EMBED); 14740 14741 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 14742 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 14743 bf_set(lpfc_mbx_mq_create_ext_num_pages, 14744 &mq_create_ext->u.request, mq->page_count); 14745 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 14746 &mq_create_ext->u.request, 1); 14747 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 14748 &mq_create_ext->u.request, 1); 14749 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 14750 &mq_create_ext->u.request, 1); 14751 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 14752 &mq_create_ext->u.request, 1); 14753 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 14754 &mq_create_ext->u.request, 1); 14755 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 14756 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14757 phba->sli4_hba.pc_sli4_params.mqv); 14758 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 14759 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 14760 cq->queue_id); 14761 else 14762 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 14763 cq->queue_id); 14764 switch (mq->entry_count) { 14765 default: 14766 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14767 "0362 Unsupported MQ count. 
(%d)\n", 14768 mq->entry_count); 14769 if (mq->entry_count < 16) { 14770 status = -EINVAL; 14771 goto out; 14772 } 14773 /* otherwise default to smallest count (drop through) */ 14774 case 16: 14775 bf_set(lpfc_mq_context_ring_size, 14776 &mq_create_ext->u.request.context, 14777 LPFC_MQ_RING_SIZE_16); 14778 break; 14779 case 32: 14780 bf_set(lpfc_mq_context_ring_size, 14781 &mq_create_ext->u.request.context, 14782 LPFC_MQ_RING_SIZE_32); 14783 break; 14784 case 64: 14785 bf_set(lpfc_mq_context_ring_size, 14786 &mq_create_ext->u.request.context, 14787 LPFC_MQ_RING_SIZE_64); 14788 break; 14789 case 128: 14790 bf_set(lpfc_mq_context_ring_size, 14791 &mq_create_ext->u.request.context, 14792 LPFC_MQ_RING_SIZE_128); 14793 break; 14794 } 14795 list_for_each_entry(dmabuf, &mq->page_list, list) { 14796 memset(dmabuf->virt, 0, hw_page_size); 14797 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 14798 putPaddrLow(dmabuf->phys); 14799 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 14800 putPaddrHigh(dmabuf->phys); 14801 } 14802 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14803 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 14804 &mq_create_ext->u.response); 14805 if (rc != MBX_SUCCESS) { 14806 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 14807 "2795 MQ_CREATE_EXT failed with " 14808 "status x%x. Failback to MQ_CREATE.\n", 14809 rc); 14810 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 14811 mq_create = &mbox->u.mqe.un.mq_create; 14812 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14813 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 14814 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 14815 &mq_create->u.response); 14816 } 14817 14818 /* The IOCTL status is embedded in the mailbox subheader. */ 14819 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14820 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14821 if (shdr_status || shdr_add_status || rc) { 14822 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14823 "2502 MQ_CREATE mailbox failed with " 14824 "status x%x add_status x%x, mbx status x%x\n", 14825 shdr_status, shdr_add_status, rc); 14826 status = -ENXIO; 14827 goto out; 14828 } 14829 if (mq->queue_id == 0xFFFF) { 14830 status = -ENXIO; 14831 goto out; 14832 } 14833 mq->type = LPFC_MQ; 14834 mq->assoc_qid = cq->queue_id; 14835 mq->subtype = subtype; 14836 mq->host_index = 0; 14837 mq->hba_index = 0; 14838 mq->entry_repost = LPFC_MQ_REPOST; 14839 14840 /* link the mq onto the parent cq child list */ 14841 list_add_tail(&mq->list, &cq->child_list); 14842 out: 14843 mempool_free(mbox, phba->mbox_mem_pool); 14844 return status; 14845 } 14846 14847 /** 14848 * lpfc_wq_create - Create a Work Queue on the HBA 14849 * @phba: HBA structure that indicates port to create a queue on. 14850 * @wq: The queue structure to use to create the work queue. 14851 * @cq: The completion queue to bind this work queue to. 14852 * @subtype: The subtype of the work queue indicating its functionality. 14853 * 14854 * This function creates a work queue, as detailed in @wq, on a port, described 14855 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 14856 * 14857 * The @phba struct is used to send mailbox command to HBA. The @wq struct 14858 * is used to get the entry count and entry size that are necessary to 14859 * determine the number of pages to allocate and use for this queue. The @cq 14860 * is used to indicate which completion queue to bind this work queue to. 
This 14861 * function will send the WQ_CREATE mailbox command to the HBA to setup the 14862 * work queue. This function is asynchronous and will wait for the mailbox 14863 * command to finish before continuing. 14864 * 14865 * On success this function will return a zero. If unable to allocate enough 14866 * memory this function will return -ENOMEM. If the queue create mailbox command 14867 * fails this function will return -ENXIO. 14868 **/ 14869 int 14870 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 14871 struct lpfc_queue *cq, uint32_t subtype) 14872 { 14873 struct lpfc_mbx_wq_create *wq_create; 14874 struct lpfc_dmabuf *dmabuf; 14875 LPFC_MBOXQ_t *mbox; 14876 int rc, length, status = 0; 14877 uint32_t shdr_status, shdr_add_status; 14878 union lpfc_sli4_cfg_shdr *shdr; 14879 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 14880 struct dma_address *page; 14881 void __iomem *bar_memmap_p; 14882 uint32_t db_offset; 14883 uint16_t pci_barset; 14884 uint8_t wq_create_version; 14885 14886 /* sanity check on queue memory */ 14887 if (!wq || !cq) 14888 return -ENODEV; 14889 if (!phba->sli4_hba.pc_sli4_params.supported) 14890 hw_page_size = wq->page_size; 14891 14892 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14893 if (!mbox) 14894 return -ENOMEM; 14895 length = (sizeof(struct lpfc_mbx_wq_create) - 14896 sizeof(struct lpfc_sli4_cfg_mhdr)); 14897 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14898 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 14899 length, LPFC_SLI4_MBX_EMBED); 14900 wq_create = &mbox->u.mqe.un.wq_create; 14901 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 14902 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 14903 wq->page_count); 14904 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 14905 cq->queue_id); 14906 14907 /* wqv is the earliest version supported, NOT the latest */ 14908 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14909 phba->sli4_hba.pc_sli4_params.wqv); 14910 14911 if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) 14912 wq_create_version = LPFC_Q_CREATE_VERSION_1; 14913 else 14914 wq_create_version = LPFC_Q_CREATE_VERSION_0; 14915 14916 switch (wq_create_version) { 14917 case LPFC_Q_CREATE_VERSION_0: 14918 switch (wq->entry_size) { 14919 default: 14920 case 64: 14921 /* Nothing to do, version 0 ONLY supports 64 byte */ 14922 page = wq_create->u.request.page; 14923 break; 14924 case 128: 14925 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 14926 LPFC_WQ_SZ128_SUPPORT)) { 14927 status = -ERANGE; 14928 goto out; 14929 } 14930 /* If we get here the HBA MUST also support V1 and 14931 * we MUST use it 14932 */ 14933 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14934 LPFC_Q_CREATE_VERSION_1); 14935 14936 bf_set(lpfc_mbx_wq_create_wqe_count, 14937 &wq_create->u.request_1, wq->entry_count); 14938 bf_set(lpfc_mbx_wq_create_wqe_size, 14939 &wq_create->u.request_1, 14940 LPFC_WQ_WQE_SIZE_128); 14941 bf_set(lpfc_mbx_wq_create_page_size, 14942 &wq_create->u.request_1, 14943 LPFC_WQ_PAGE_SIZE_4096); 14944 page = wq_create->u.request_1.page; 14945 break; 14946 } 14947 break; 14948 case LPFC_Q_CREATE_VERSION_1: 14949 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 14950 wq->entry_count); 14951 bf_set(lpfc_mbox_hdr_version, &shdr->request, 14952 LPFC_Q_CREATE_VERSION_1); 14953 14954 switch (wq->entry_size) { 14955 default: 14956 case 64: 14957 bf_set(lpfc_mbx_wq_create_wqe_size, 14958 &wq_create->u.request_1, 14959 LPFC_WQ_WQE_SIZE_64); 14960 break; 14961 case 128: 14962 if 
(!(phba->sli4_hba.pc_sli4_params.wqsize & 14963 LPFC_WQ_SZ128_SUPPORT)) { 14964 status = -ERANGE; 14965 goto out; 14966 } 14967 bf_set(lpfc_mbx_wq_create_wqe_size, 14968 &wq_create->u.request_1, 14969 LPFC_WQ_WQE_SIZE_128); 14970 break; 14971 } 14972 bf_set(lpfc_mbx_wq_create_page_size, 14973 &wq_create->u.request_1, 14974 (wq->page_size / SLI4_PAGE_SIZE)); 14975 page = wq_create->u.request_1.page; 14976 break; 14977 default: 14978 status = -ERANGE; 14979 goto out; 14980 } 14981 14982 list_for_each_entry(dmabuf, &wq->page_list, list) { 14983 memset(dmabuf->virt, 0, hw_page_size); 14984 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 14985 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 14986 } 14987 14988 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 14989 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 14990 14991 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14992 /* The IOCTL status is embedded in the mailbox subheader. */ 14993 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14994 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14995 if (shdr_status || shdr_add_status || rc) { 14996 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14997 "2503 WQ_CREATE mailbox failed with " 14998 "status x%x add_status x%x, mbx status x%x\n", 14999 shdr_status, shdr_add_status, rc); 15000 status = -ENXIO; 15001 goto out; 15002 } 15003 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 15004 if (wq->queue_id == 0xFFFF) { 15005 status = -ENXIO; 15006 goto out; 15007 } 15008 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15009 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 15010 &wq_create->u.response); 15011 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 15012 (wq->db_format != LPFC_DB_RING_FORMAT)) { 15013 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15014 "3265 WQ[%d] doorbell format not " 15015 "supported: x%x\n", wq->queue_id, 15016 wq->db_format); 15017 status = -EINVAL; 15018 goto out; 15019 } 15020 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 15021 &wq_create->u.response); 15022 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15023 if (!bar_memmap_p) { 15024 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15025 "3263 WQ[%d] failed to memmap pci " 15026 "barset:x%x\n", wq->queue_id, 15027 pci_barset); 15028 status = -ENOMEM; 15029 goto out; 15030 } 15031 db_offset = wq_create->u.response.doorbell_offset; 15032 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 15033 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 15034 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15035 "3252 WQ[%d] doorbell offset not " 15036 "supported: x%x\n", wq->queue_id, 15037 db_offset); 15038 status = -EINVAL; 15039 goto out; 15040 } 15041 wq->db_regaddr = bar_memmap_p + db_offset; 15042 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15043 "3264 WQ[%d]: barset:x%x, offset:x%x, " 15044 "format:x%x\n", wq->queue_id, pci_barset, 15045 db_offset, wq->db_format); 15046 } else { 15047 wq->db_format = LPFC_DB_LIST_FORMAT; 15048 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 15049 } 15050 wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); 15051 if (wq->pring == NULL) { 15052 status = -ENOMEM; 15053 goto out; 15054 } 15055 wq->type = LPFC_WQ; 15056 wq->assoc_qid = cq->queue_id; 15057 wq->subtype = subtype; 15058 wq->host_index = 0; 15059 wq->hba_index = 0; 15060 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 15061 15062 /* link the wq onto the parent cq child list */ 15063 list_add_tail(&wq->list, &cq->child_list); 15064 out: 
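	/* Single exit point: the MBX_POLL mailbox is always freed here. */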
15065 mempool_free(mbox, phba->mbox_mem_pool); 15066 return status; 15067 } 15068 15069 /** 15070 * lpfc_rq_create - Create a Receive Queue on the HBA 15071 * @phba: HBA structure that indicates port to create a queue on. 15072 * @hrq: The queue structure to use to create the header receive queue. 15073 * @drq: The queue structure to use to create the data receive queue. 15074 * @cq: The completion queue to bind this work queue to. 15075 * 15076 * This function creates a receive buffer queue pair , as detailed in @hrq and 15077 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 15078 * to the HBA. 15079 * 15080 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 15081 * struct is used to get the entry count that is necessary to determine the 15082 * number of pages to use for this queue. The @cq is used to indicate which 15083 * completion queue to bind received buffers that are posted to these queues to. 15084 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 15085 * receive queue pair. This function is asynchronous and will wait for the 15086 * mailbox command to finish before continuing. 15087 * 15088 * On success this function will return a zero. If unable to allocate enough 15089 * memory this function will return -ENOMEM. If the queue create mailbox command 15090 * fails this function will return -ENXIO. 15091 **/ 15092 int 15093 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15094 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 15095 { 15096 struct lpfc_mbx_rq_create *rq_create; 15097 struct lpfc_dmabuf *dmabuf; 15098 LPFC_MBOXQ_t *mbox; 15099 int rc, length, status = 0; 15100 uint32_t shdr_status, shdr_add_status; 15101 union lpfc_sli4_cfg_shdr *shdr; 15102 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15103 void __iomem *bar_memmap_p; 15104 uint32_t db_offset; 15105 uint16_t pci_barset; 15106 15107 /* sanity check on queue memory */ 15108 if (!hrq || !drq || !cq) 15109 return -ENODEV; 15110 if (!phba->sli4_hba.pc_sli4_params.supported) 15111 hw_page_size = SLI4_PAGE_SIZE; 15112 15113 if (hrq->entry_count != drq->entry_count) 15114 return -EINVAL; 15115 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15116 if (!mbox) 15117 return -ENOMEM; 15118 length = (sizeof(struct lpfc_mbx_rq_create) - 15119 sizeof(struct lpfc_sli4_cfg_mhdr)); 15120 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15121 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15122 length, LPFC_SLI4_MBX_EMBED); 15123 rq_create = &mbox->u.mqe.un.rq_create; 15124 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 15125 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15126 phba->sli4_hba.pc_sli4_params.rqv); 15127 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15128 bf_set(lpfc_rq_context_rqe_count_1, 15129 &rq_create->u.request.context, 15130 hrq->entry_count); 15131 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 15132 bf_set(lpfc_rq_context_rqe_size, 15133 &rq_create->u.request.context, 15134 LPFC_RQE_SIZE_8); 15135 bf_set(lpfc_rq_context_page_size, 15136 &rq_create->u.request.context, 15137 LPFC_RQ_PAGE_SIZE_4096); 15138 } else { 15139 switch (hrq->entry_count) { 15140 default: 15141 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15142 "2535 Unsupported RQ count. 
(%d)\n", 15143 hrq->entry_count); 15144 if (hrq->entry_count < 512) { 15145 status = -EINVAL; 15146 goto out; 15147 } 15148 /* otherwise default to smallest count (drop through) */ 15149 case 512: 15150 bf_set(lpfc_rq_context_rqe_count, 15151 &rq_create->u.request.context, 15152 LPFC_RQ_RING_SIZE_512); 15153 break; 15154 case 1024: 15155 bf_set(lpfc_rq_context_rqe_count, 15156 &rq_create->u.request.context, 15157 LPFC_RQ_RING_SIZE_1024); 15158 break; 15159 case 2048: 15160 bf_set(lpfc_rq_context_rqe_count, 15161 &rq_create->u.request.context, 15162 LPFC_RQ_RING_SIZE_2048); 15163 break; 15164 case 4096: 15165 bf_set(lpfc_rq_context_rqe_count, 15166 &rq_create->u.request.context, 15167 LPFC_RQ_RING_SIZE_4096); 15168 break; 15169 } 15170 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 15171 LPFC_HDR_BUF_SIZE); 15172 } 15173 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15174 cq->queue_id); 15175 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15176 hrq->page_count); 15177 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15178 memset(dmabuf->virt, 0, hw_page_size); 15179 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15180 putPaddrLow(dmabuf->phys); 15181 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15182 putPaddrHigh(dmabuf->phys); 15183 } 15184 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15185 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15186 15187 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15188 /* The IOCTL status is embedded in the mailbox subheader. */ 15189 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15190 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15191 if (shdr_status || shdr_add_status || rc) { 15192 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15193 "2504 RQ_CREATE mailbox failed with " 15194 "status x%x add_status x%x, mbx status x%x\n", 15195 shdr_status, shdr_add_status, rc); 15196 status = -ENXIO; 15197 goto out; 15198 } 15199 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15200 if (hrq->queue_id == 0xFFFF) { 15201 status = -ENXIO; 15202 goto out; 15203 } 15204 15205 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 15206 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 15207 &rq_create->u.response); 15208 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 15209 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 15210 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15211 "3262 RQ [%d] doorbell format not " 15212 "supported: x%x\n", hrq->queue_id, 15213 hrq->db_format); 15214 status = -EINVAL; 15215 goto out; 15216 } 15217 15218 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 15219 &rq_create->u.response); 15220 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 15221 if (!bar_memmap_p) { 15222 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15223 "3269 RQ[%d] failed to memmap pci " 15224 "barset:x%x\n", hrq->queue_id, 15225 pci_barset); 15226 status = -ENOMEM; 15227 goto out; 15228 } 15229 15230 db_offset = rq_create->u.response.doorbell_offset; 15231 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 15232 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 15233 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15234 "3270 RQ[%d] doorbell offset not " 15235 "supported: x%x\n", hrq->queue_id, 15236 db_offset); 15237 status = -EINVAL; 15238 goto out; 15239 } 15240 hrq->db_regaddr = bar_memmap_p + db_offset; 15241 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 15242 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 15243 "format:x%x\n", hrq->queue_id, pci_barset, 15244 
db_offset, hrq->db_format); 15245 } else { 15246 hrq->db_format = LPFC_DB_RING_FORMAT; 15247 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15248 } 15249 hrq->type = LPFC_HRQ; 15250 hrq->assoc_qid = cq->queue_id; 15251 hrq->subtype = subtype; 15252 hrq->host_index = 0; 15253 hrq->hba_index = 0; 15254 hrq->entry_repost = LPFC_RQ_REPOST; 15255 15256 /* now create the data queue */ 15257 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15258 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 15259 length, LPFC_SLI4_MBX_EMBED); 15260 bf_set(lpfc_mbox_hdr_version, &shdr->request, 15261 phba->sli4_hba.pc_sli4_params.rqv); 15262 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 15263 bf_set(lpfc_rq_context_rqe_count_1, 15264 &rq_create->u.request.context, hrq->entry_count); 15265 if (subtype == LPFC_NVMET) 15266 rq_create->u.request.context.buffer_size = 15267 LPFC_NVMET_DATA_BUF_SIZE; 15268 else 15269 rq_create->u.request.context.buffer_size = 15270 LPFC_DATA_BUF_SIZE; 15271 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 15272 LPFC_RQE_SIZE_8); 15273 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 15274 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15275 } else { 15276 switch (drq->entry_count) { 15277 default: 15278 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15279 "2536 Unsupported RQ count. (%d)\n", 15280 drq->entry_count); 15281 if (drq->entry_count < 512) { 15282 status = -EINVAL; 15283 goto out; 15284 } 15285 /* otherwise default to smallest count (drop through) */ 15286 case 512: 15287 bf_set(lpfc_rq_context_rqe_count, 15288 &rq_create->u.request.context, 15289 LPFC_RQ_RING_SIZE_512); 15290 break; 15291 case 1024: 15292 bf_set(lpfc_rq_context_rqe_count, 15293 &rq_create->u.request.context, 15294 LPFC_RQ_RING_SIZE_1024); 15295 break; 15296 case 2048: 15297 bf_set(lpfc_rq_context_rqe_count, 15298 &rq_create->u.request.context, 15299 LPFC_RQ_RING_SIZE_2048); 15300 break; 15301 case 4096: 15302 bf_set(lpfc_rq_context_rqe_count, 15303 &rq_create->u.request.context, 15304 LPFC_RQ_RING_SIZE_4096); 15305 break; 15306 } 15307 if (subtype == LPFC_NVMET) 15308 bf_set(lpfc_rq_context_buf_size, 15309 &rq_create->u.request.context, 15310 LPFC_NVMET_DATA_BUF_SIZE); 15311 else 15312 bf_set(lpfc_rq_context_buf_size, 15313 &rq_create->u.request.context, 15314 LPFC_DATA_BUF_SIZE); 15315 } 15316 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 15317 cq->queue_id); 15318 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 15319 drq->page_count); 15320 list_for_each_entry(dmabuf, &drq->page_list, list) { 15321 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 15322 putPaddrLow(dmabuf->phys); 15323 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 15324 putPaddrHigh(dmabuf->phys); 15325 } 15326 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 15327 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 15328 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15329 /* The IOCTL status is embedded in the mailbox subheader. 
 */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;
	drq->entry_repost = LPFC_RQ_REPOST;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The queue subtype to assign to the created receive queues.
 *
 * This function creates the receive buffer queue pairs described by @hrqp and
 * @drqp on the port described by @phba, by sending an RQ_CREATE mailbox
 * command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @hrqp
 * and @drqp structs are used to get the entry counts that are necessary to
 * determine the number of pages to use for each queue. The @cqp array
 * indicates which completion queue each receive queue pair is bound to.
 * This function will send the RQ_CREATE mailbox command to the HBA to set up
 * the receive queue pairs. This function is synchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
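 *
 * Minimal usage sketch (illustrative only; phba->cfg_nvmet_mrq appears in the
 * function body below, but the nvmet_mrq_hdr, nvmet_mrq_data and nvmet_cqset
 * members are assumptions standing in for wherever the caller keeps its
 * already-allocated queue arrays):
 *
 *	rc = lpfc_mrq_create(phba, phba->sli4_hba.nvmet_mrq_hdr,
 *			     phba->sli4_hba.nvmet_mrq_data,
 *			     phba->sli4_hba.nvmet_cqset, LPFC_NVMET);
 *	if (rc)
 *		goto out_destroy;	(unwind any queues created earlier)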
15380 **/ 15381 int 15382 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp, 15383 struct lpfc_queue **drqp, struct lpfc_queue **cqp, 15384 uint32_t subtype) 15385 { 15386 struct lpfc_queue *hrq, *drq, *cq; 15387 struct lpfc_mbx_rq_create_v2 *rq_create; 15388 struct lpfc_dmabuf *dmabuf; 15389 LPFC_MBOXQ_t *mbox; 15390 int rc, length, alloclen, status = 0; 15391 int cnt, idx, numrq, page_idx = 0; 15392 uint32_t shdr_status, shdr_add_status; 15393 union lpfc_sli4_cfg_shdr *shdr; 15394 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 15395 15396 numrq = phba->cfg_nvmet_mrq; 15397 /* sanity check on array memory */ 15398 if (!hrqp || !drqp || !cqp || !numrq) 15399 return -ENODEV; 15400 if (!phba->sli4_hba.pc_sli4_params.supported) 15401 hw_page_size = SLI4_PAGE_SIZE; 15402 15403 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15404 if (!mbox) 15405 return -ENOMEM; 15406 15407 length = sizeof(struct lpfc_mbx_rq_create_v2); 15408 length += ((2 * numrq * hrqp[0]->page_count) * 15409 sizeof(struct dma_address)); 15410 15411 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15412 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length, 15413 LPFC_SLI4_MBX_NEMBED); 15414 if (alloclen < length) { 15415 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15416 "3099 Allocated DMA memory size (%d) is " 15417 "less than the requested DMA memory size " 15418 "(%d)\n", alloclen, length); 15419 status = -ENOMEM; 15420 goto out; 15421 } 15422 15423 15424 15425 rq_create = mbox->sge_array->addr[0]; 15426 shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr; 15427 15428 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2); 15429 cnt = 0; 15430 15431 for (idx = 0; idx < numrq; idx++) { 15432 hrq = hrqp[idx]; 15433 drq = drqp[idx]; 15434 cq = cqp[idx]; 15435 15436 /* sanity check on queue memory */ 15437 if (!hrq || !drq || !cq) { 15438 status = -ENODEV; 15439 goto out; 15440 } 15441 15442 if (hrq->entry_count != drq->entry_count) { 15443 status = -EINVAL; 15444 goto out; 15445 } 15446 15447 if (idx == 0) { 15448 bf_set(lpfc_mbx_rq_create_num_pages, 15449 &rq_create->u.request, 15450 hrq->page_count); 15451 bf_set(lpfc_mbx_rq_create_rq_cnt, 15452 &rq_create->u.request, (numrq * 2)); 15453 bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request, 15454 1); 15455 bf_set(lpfc_rq_context_base_cq, 15456 &rq_create->u.request.context, 15457 cq->queue_id); 15458 bf_set(lpfc_rq_context_data_size, 15459 &rq_create->u.request.context, 15460 LPFC_NVMET_DATA_BUF_SIZE); 15461 bf_set(lpfc_rq_context_hdr_size, 15462 &rq_create->u.request.context, 15463 LPFC_HDR_BUF_SIZE); 15464 bf_set(lpfc_rq_context_rqe_count_1, 15465 &rq_create->u.request.context, 15466 hrq->entry_count); 15467 bf_set(lpfc_rq_context_rqe_size, 15468 &rq_create->u.request.context, 15469 LPFC_RQE_SIZE_8); 15470 bf_set(lpfc_rq_context_page_size, 15471 &rq_create->u.request.context, 15472 (PAGE_SIZE/SLI4_PAGE_SIZE)); 15473 } 15474 rc = 0; 15475 list_for_each_entry(dmabuf, &hrq->page_list, list) { 15476 memset(dmabuf->virt, 0, hw_page_size); 15477 cnt = page_idx + dmabuf->buffer_tag; 15478 rq_create->u.request.page[cnt].addr_lo = 15479 putPaddrLow(dmabuf->phys); 15480 rq_create->u.request.page[cnt].addr_hi = 15481 putPaddrHigh(dmabuf->phys); 15482 rc++; 15483 } 15484 page_idx += rc; 15485 15486 rc = 0; 15487 list_for_each_entry(dmabuf, &drq->page_list, list) { 15488 memset(dmabuf->virt, 0, hw_page_size); 15489 cnt = page_idx + dmabuf->buffer_tag; 15490 rq_create->u.request.page[cnt].addr_lo = 15491 
putPaddrLow(dmabuf->phys); 15492 rq_create->u.request.page[cnt].addr_hi = 15493 putPaddrHigh(dmabuf->phys); 15494 rc++; 15495 } 15496 page_idx += rc; 15497 15498 hrq->db_format = LPFC_DB_RING_FORMAT; 15499 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15500 hrq->type = LPFC_HRQ; 15501 hrq->assoc_qid = cq->queue_id; 15502 hrq->subtype = subtype; 15503 hrq->host_index = 0; 15504 hrq->hba_index = 0; 15505 hrq->entry_repost = LPFC_RQ_REPOST; 15506 15507 drq->db_format = LPFC_DB_RING_FORMAT; 15508 drq->db_regaddr = phba->sli4_hba.RQDBregaddr; 15509 drq->type = LPFC_DRQ; 15510 drq->assoc_qid = cq->queue_id; 15511 drq->subtype = subtype; 15512 drq->host_index = 0; 15513 drq->hba_index = 0; 15514 drq->entry_repost = LPFC_RQ_REPOST; 15515 15516 list_add_tail(&hrq->list, &cq->child_list); 15517 list_add_tail(&drq->list, &cq->child_list); 15518 } 15519 15520 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15521 /* The IOCTL status is embedded in the mailbox subheader. */ 15522 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15523 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15524 if (shdr_status || shdr_add_status || rc) { 15525 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15526 "3120 RQ_CREATE mailbox failed with " 15527 "status x%x add_status x%x, mbx status x%x\n", 15528 shdr_status, shdr_add_status, rc); 15529 status = -ENXIO; 15530 goto out; 15531 } 15532 rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 15533 if (rc == 0xFFFF) { 15534 status = -ENXIO; 15535 goto out; 15536 } 15537 15538 /* Initialize all RQs with associated queue id */ 15539 for (idx = 0; idx < numrq; idx++) { 15540 hrq = hrqp[idx]; 15541 hrq->queue_id = rc + (2 * idx); 15542 drq = drqp[idx]; 15543 drq->queue_id = rc + (2 * idx) + 1; 15544 } 15545 15546 out: 15547 lpfc_sli4_mbox_cmd_free(phba, mbox); 15548 return status; 15549 } 15550 15551 /** 15552 * lpfc_eq_destroy - Destroy an event Queue on the HBA 15553 * @eq: The queue structure associated with the queue to destroy. 15554 * 15555 * This function destroys a queue, as detailed in @eq by sending an mailbox 15556 * command, specific to the type of queue, to the HBA. 15557 * 15558 * The @eq struct is used to get the queue ID of the queue to destroy. 15559 * 15560 * On success this function will return a zero. If the queue destroy mailbox 15561 * command fails this function will return -ENXIO. 15562 **/ 15563 int 15564 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 15565 { 15566 LPFC_MBOXQ_t *mbox; 15567 int rc, length, status = 0; 15568 uint32_t shdr_status, shdr_add_status; 15569 union lpfc_sli4_cfg_shdr *shdr; 15570 15571 /* sanity check on queue memory */ 15572 if (!eq) 15573 return -ENODEV; 15574 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 15575 if (!mbox) 15576 return -ENOMEM; 15577 length = (sizeof(struct lpfc_mbx_eq_destroy) - 15578 sizeof(struct lpfc_sli4_cfg_mhdr)); 15579 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15580 LPFC_MBOX_OPCODE_EQ_DESTROY, 15581 length, LPFC_SLI4_MBX_EMBED); 15582 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 15583 eq->queue_id); 15584 mbox->vport = eq->phba->pport; 15585 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15586 15587 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 15588 /* The IOCTL status is embedded in the mailbox subheader. 
*/ 15589 shdr = (union lpfc_sli4_cfg_shdr *) 15590 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 15591 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15592 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15593 if (shdr_status || shdr_add_status || rc) { 15594 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15595 "2505 EQ_DESTROY mailbox failed with " 15596 "status x%x add_status x%x, mbx status x%x\n", 15597 shdr_status, shdr_add_status, rc); 15598 status = -ENXIO; 15599 } 15600 15601 /* Remove eq from any list */ 15602 list_del_init(&eq->list); 15603 mempool_free(mbox, eq->phba->mbox_mem_pool); 15604 return status; 15605 } 15606 15607 /** 15608 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 15609 * @cq: The queue structure associated with the queue to destroy. 15610 * 15611 * This function destroys a queue, as detailed in @cq by sending an mailbox 15612 * command, specific to the type of queue, to the HBA. 15613 * 15614 * The @cq struct is used to get the queue ID of the queue to destroy. 15615 * 15616 * On success this function will return a zero. If the queue destroy mailbox 15617 * command fails this function will return -ENXIO. 15618 **/ 15619 int 15620 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 15621 { 15622 LPFC_MBOXQ_t *mbox; 15623 int rc, length, status = 0; 15624 uint32_t shdr_status, shdr_add_status; 15625 union lpfc_sli4_cfg_shdr *shdr; 15626 15627 /* sanity check on queue memory */ 15628 if (!cq) 15629 return -ENODEV; 15630 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 15631 if (!mbox) 15632 return -ENOMEM; 15633 length = (sizeof(struct lpfc_mbx_cq_destroy) - 15634 sizeof(struct lpfc_sli4_cfg_mhdr)); 15635 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15636 LPFC_MBOX_OPCODE_CQ_DESTROY, 15637 length, LPFC_SLI4_MBX_EMBED); 15638 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 15639 cq->queue_id); 15640 mbox->vport = cq->phba->pport; 15641 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15642 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 15643 /* The IOCTL status is embedded in the mailbox subheader. */ 15644 shdr = (union lpfc_sli4_cfg_shdr *) 15645 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 15646 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15647 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15648 if (shdr_status || shdr_add_status || rc) { 15649 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15650 "2506 CQ_DESTROY mailbox failed with " 15651 "status x%x add_status x%x, mbx status x%x\n", 15652 shdr_status, shdr_add_status, rc); 15653 status = -ENXIO; 15654 } 15655 /* Remove cq from any list */ 15656 list_del_init(&cq->list); 15657 mempool_free(mbox, cq->phba->mbox_mem_pool); 15658 return status; 15659 } 15660 15661 /** 15662 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 15663 * @qm: The queue structure associated with the queue to destroy. 15664 * 15665 * This function destroys a queue, as detailed in @mq by sending an mailbox 15666 * command, specific to the type of queue, to the HBA. 15667 * 15668 * The @mq struct is used to get the queue ID of the queue to destroy. 15669 * 15670 * On success this function will return a zero. If the queue destroy mailbox 15671 * command fails this function will return -ENXIO. 
15672 **/ 15673 int 15674 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 15675 { 15676 LPFC_MBOXQ_t *mbox; 15677 int rc, length, status = 0; 15678 uint32_t shdr_status, shdr_add_status; 15679 union lpfc_sli4_cfg_shdr *shdr; 15680 15681 /* sanity check on queue memory */ 15682 if (!mq) 15683 return -ENODEV; 15684 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 15685 if (!mbox) 15686 return -ENOMEM; 15687 length = (sizeof(struct lpfc_mbx_mq_destroy) - 15688 sizeof(struct lpfc_sli4_cfg_mhdr)); 15689 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 15690 LPFC_MBOX_OPCODE_MQ_DESTROY, 15691 length, LPFC_SLI4_MBX_EMBED); 15692 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 15693 mq->queue_id); 15694 mbox->vport = mq->phba->pport; 15695 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15696 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 15697 /* The IOCTL status is embedded in the mailbox subheader. */ 15698 shdr = (union lpfc_sli4_cfg_shdr *) 15699 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 15700 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15701 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15702 if (shdr_status || shdr_add_status || rc) { 15703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15704 "2507 MQ_DESTROY mailbox failed with " 15705 "status x%x add_status x%x, mbx status x%x\n", 15706 shdr_status, shdr_add_status, rc); 15707 status = -ENXIO; 15708 } 15709 /* Remove mq from any list */ 15710 list_del_init(&mq->list); 15711 mempool_free(mbox, mq->phba->mbox_mem_pool); 15712 return status; 15713 } 15714 15715 /** 15716 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 15717 * @wq: The queue structure associated with the queue to destroy. 15718 * 15719 * This function destroys a queue, as detailed in @wq by sending an mailbox 15720 * command, specific to the type of queue, to the HBA. 15721 * 15722 * The @wq struct is used to get the queue ID of the queue to destroy. 15723 * 15724 * On success this function will return a zero. If the queue destroy mailbox 15725 * command fails this function will return -ENXIO. 
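 *
 * Teardown sketch (illustrative only; els_wq is just an example member, and
 * lpfc_sli4_queue_free() is assumed to be the matching cleanup for a queue
 * obtained from lpfc_sli4_queue_alloc()):
 *
 *	rc = lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *	lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
 *	phba->sli4_hba.els_wq = NULL;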
15726 **/ 15727 int 15728 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 15729 { 15730 LPFC_MBOXQ_t *mbox; 15731 int rc, length, status = 0; 15732 uint32_t shdr_status, shdr_add_status; 15733 union lpfc_sli4_cfg_shdr *shdr; 15734 15735 /* sanity check on queue memory */ 15736 if (!wq) 15737 return -ENODEV; 15738 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 15739 if (!mbox) 15740 return -ENOMEM; 15741 length = (sizeof(struct lpfc_mbx_wq_destroy) - 15742 sizeof(struct lpfc_sli4_cfg_mhdr)); 15743 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15744 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 15745 length, LPFC_SLI4_MBX_EMBED); 15746 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 15747 wq->queue_id); 15748 mbox->vport = wq->phba->pport; 15749 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15750 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 15751 shdr = (union lpfc_sli4_cfg_shdr *) 15752 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 15753 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15754 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15755 if (shdr_status || shdr_add_status || rc) { 15756 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15757 "2508 WQ_DESTROY mailbox failed with " 15758 "status x%x add_status x%x, mbx status x%x\n", 15759 shdr_status, shdr_add_status, rc); 15760 status = -ENXIO; 15761 } 15762 /* Remove wq from any list */ 15763 list_del_init(&wq->list); 15764 kfree(wq->pring); 15765 wq->pring = NULL; 15766 mempool_free(mbox, wq->phba->mbox_mem_pool); 15767 return status; 15768 } 15769 15770 /** 15771 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 15772 * @rq: The queue structure associated with the queue to destroy. 15773 * 15774 * This function destroys a queue, as detailed in @rq by sending an mailbox 15775 * command, specific to the type of queue, to the HBA. 15776 * 15777 * The @rq struct is used to get the queue ID of the queue to destroy. 15778 * 15779 * On success this function will return a zero. If the queue destroy mailbox 15780 * command fails this function will return -ENXIO. 15781 **/ 15782 int 15783 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 15784 struct lpfc_queue *drq) 15785 { 15786 LPFC_MBOXQ_t *mbox; 15787 int rc, length, status = 0; 15788 uint32_t shdr_status, shdr_add_status; 15789 union lpfc_sli4_cfg_shdr *shdr; 15790 15791 /* sanity check on queue memory */ 15792 if (!hrq || !drq) 15793 return -ENODEV; 15794 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 15795 if (!mbox) 15796 return -ENOMEM; 15797 length = (sizeof(struct lpfc_mbx_rq_destroy) - 15798 sizeof(struct lpfc_sli4_cfg_mhdr)); 15799 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15800 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 15801 length, LPFC_SLI4_MBX_EMBED); 15802 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 15803 hrq->queue_id); 15804 mbox->vport = hrq->phba->pport; 15805 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15806 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 15807 /* The IOCTL status is embedded in the mailbox subheader. 
 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGL pages are mapped then the first one must have 256 entries and
 * the second one can have between 1 and 256 entries.
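 *
 * Usage sketch (illustrative only, modeled on how a SCSI buffer's SGL page is
 * described elsewhere in this file; psb stands in for a struct lpfc_scsi_buf
 * the caller already owns, and the single-page case passes 0 for the second
 * address as described above):
 *
 *	rc = lpfc_sli4_post_sgl(phba, psb->dma_phys_bpl, 0,
 *				psb->cur_iocbq.sli4_xritag);
 *	if (rc)
 *		the SGL page was not registered with the port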
15858 * 15859 * Return codes: 15860 * 0 - Success 15861 * -ENXIO, -ENOMEM - Failure 15862 **/ 15863 int 15864 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 15865 dma_addr_t pdma_phys_addr0, 15866 dma_addr_t pdma_phys_addr1, 15867 uint16_t xritag) 15868 { 15869 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 15870 LPFC_MBOXQ_t *mbox; 15871 int rc; 15872 uint32_t shdr_status, shdr_add_status; 15873 uint32_t mbox_tmo; 15874 union lpfc_sli4_cfg_shdr *shdr; 15875 15876 if (xritag == NO_XRI) { 15877 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15878 "0364 Invalid param:\n"); 15879 return -EINVAL; 15880 } 15881 15882 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15883 if (!mbox) 15884 return -ENOMEM; 15885 15886 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 15887 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 15888 sizeof(struct lpfc_mbx_post_sgl_pages) - 15889 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 15890 15891 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 15892 &mbox->u.mqe.un.post_sgl_pages; 15893 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 15894 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 15895 15896 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 15897 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 15898 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 15899 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 15900 15901 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 15902 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 15903 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 15904 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 15905 if (!phba->sli4_hba.intr_enable) 15906 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15907 else { 15908 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 15909 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 15910 } 15911 /* The IOCTL status is embedded in the mailbox subheader. */ 15912 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 15913 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15914 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15915 if (rc != MBX_TIMEOUT) 15916 mempool_free(mbox, phba->mbox_mem_pool); 15917 if (shdr_status || shdr_add_status || rc) { 15918 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15919 "2511 POST_SGL mailbox failed with " 15920 "status x%x add_status x%x, mbx status x%x\n", 15921 shdr_status, shdr_add_status, rc); 15922 } 15923 return 0; 15924 } 15925 15926 /** 15927 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 15928 * @phba: pointer to lpfc hba data structure. 15929 * 15930 * This routine is invoked to post rpi header templates to the 15931 * HBA consistent with the SLI-4 interface spec. This routine 15932 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15933 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15934 * 15935 * Returns 15936 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 15937 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 15938 **/ 15939 static uint16_t 15940 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 15941 { 15942 unsigned long xri; 15943 15944 /* 15945 * Fetch the next logical xri. Because this index is logical, 15946 * the driver starts at 0 each time. 
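	 * find_next_zero_bit() below scans xri_bmask for the first logical
	 * XRI that is still clear; if every bit up to max_xri is already set
	 * it returns max_xri, which the range check below reports to the
	 * caller as NO_XRI.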
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}

/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available XRIs maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}

/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available XRIs maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff), which is not a valid xritag; otherwise it
 * returns the newly allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}

/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
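 *
 * Illustrative call (a sketch only; how the caller batches its sglq entries
 * is up to the caller, and the batch must be small enough that the request
 * fits in one SLI4_PAGE_SIZE mailbox, as checked below):
 *
 *	LIST_HEAD(blck_sgl_list);
 *	... move up to a mailbox page worth of sglq entries onto the list ...
 *	rc = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list, post_cnt);
 *	if (rc)
 *		the block of SGEs was not registered with the port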
16028 **/ 16029 static int 16030 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba, 16031 struct list_head *post_sgl_list, 16032 int post_cnt) 16033 { 16034 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 16035 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16036 struct sgl_page_pairs *sgl_pg_pairs; 16037 void *viraddr; 16038 LPFC_MBOXQ_t *mbox; 16039 uint32_t reqlen, alloclen, pg_pairs; 16040 uint32_t mbox_tmo; 16041 uint16_t xritag_start = 0; 16042 int rc = 0; 16043 uint32_t shdr_status, shdr_add_status; 16044 union lpfc_sli4_cfg_shdr *shdr; 16045 16046 reqlen = post_cnt * sizeof(struct sgl_page_pairs) + 16047 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16048 if (reqlen > SLI4_PAGE_SIZE) { 16049 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16050 "2559 Block sgl registration required DMA " 16051 "size (%d) great than a page\n", reqlen); 16052 return -ENOMEM; 16053 } 16054 16055 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16056 if (!mbox) 16057 return -ENOMEM; 16058 16059 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16060 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16061 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16062 LPFC_SLI4_MBX_NEMBED); 16063 16064 if (alloclen < reqlen) { 16065 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16066 "0285 Allocated DMA memory size (%d) is " 16067 "less than the requested DMA memory " 16068 "size (%d)\n", alloclen, reqlen); 16069 lpfc_sli4_mbox_cmd_free(phba, mbox); 16070 return -ENOMEM; 16071 } 16072 /* Set up the SGL pages in the non-embedded DMA pages */ 16073 viraddr = mbox->sge_array->addr[0]; 16074 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16075 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16076 16077 pg_pairs = 0; 16078 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 16079 /* Set up the sge entry */ 16080 sgl_pg_pairs->sgl_pg0_addr_lo = 16081 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 16082 sgl_pg_pairs->sgl_pg0_addr_hi = 16083 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 16084 sgl_pg_pairs->sgl_pg1_addr_lo = 16085 cpu_to_le32(putPaddrLow(0)); 16086 sgl_pg_pairs->sgl_pg1_addr_hi = 16087 cpu_to_le32(putPaddrHigh(0)); 16088 16089 /* Keep the first xritag on the list */ 16090 if (pg_pairs == 0) 16091 xritag_start = sglq_entry->sli4_xritag; 16092 sgl_pg_pairs++; 16093 pg_pairs++; 16094 } 16095 16096 /* Complete initialization and perform endian conversion. */ 16097 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16098 bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt); 16099 sgl->word0 = cpu_to_le32(sgl->word0); 16100 16101 if (!phba->sli4_hba.intr_enable) 16102 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16103 else { 16104 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16105 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16106 } 16107 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16108 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16109 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16110 if (rc != MBX_TIMEOUT) 16111 lpfc_sli4_mbox_cmd_free(phba, mbox); 16112 if (shdr_status || shdr_add_status || rc) { 16113 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16114 "2513 POST_SGL_BLOCK mailbox command failed " 16115 "status x%x add_status x%x mbx status x%x\n", 16116 shdr_status, shdr_add_status, rc); 16117 rc = -ENXIO; 16118 } 16119 return rc; 16120 } 16121 16122 /** 16123 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 16124 * @phba: pointer to lpfc hba data structure. 
16125 * @sblist: pointer to scsi buffer list. 16126 * @count: number of scsi buffers on the list. 16127 * 16128 * This routine is invoked to post a block of @count scsi sgl pages from a 16129 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 16130 * No Lock is held. 16131 * 16132 **/ 16133 int 16134 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 16135 struct list_head *sblist, 16136 int count) 16137 { 16138 struct lpfc_scsi_buf *psb; 16139 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 16140 struct sgl_page_pairs *sgl_pg_pairs; 16141 void *viraddr; 16142 LPFC_MBOXQ_t *mbox; 16143 uint32_t reqlen, alloclen, pg_pairs; 16144 uint32_t mbox_tmo; 16145 uint16_t xritag_start = 0; 16146 int rc = 0; 16147 uint32_t shdr_status, shdr_add_status; 16148 dma_addr_t pdma_phys_bpl1; 16149 union lpfc_sli4_cfg_shdr *shdr; 16150 16151 /* Calculate the requested length of the dma memory */ 16152 reqlen = count * sizeof(struct sgl_page_pairs) + 16153 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 16154 if (reqlen > SLI4_PAGE_SIZE) { 16155 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 16156 "0217 Block sgl registration required DMA " 16157 "size (%d) great than a page\n", reqlen); 16158 return -ENOMEM; 16159 } 16160 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16161 if (!mbox) { 16162 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16163 "0283 Failed to allocate mbox cmd memory\n"); 16164 return -ENOMEM; 16165 } 16166 16167 /* Allocate DMA memory and set up the non-embedded mailbox command */ 16168 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16169 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 16170 LPFC_SLI4_MBX_NEMBED); 16171 16172 if (alloclen < reqlen) { 16173 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16174 "2561 Allocated DMA memory size (%d) is " 16175 "less than the requested DMA memory " 16176 "size (%d)\n", alloclen, reqlen); 16177 lpfc_sli4_mbox_cmd_free(phba, mbox); 16178 return -ENOMEM; 16179 } 16180 16181 /* Get the first SGE entry from the non-embedded DMA memory */ 16182 viraddr = mbox->sge_array->addr[0]; 16183 16184 /* Set up the SGL pages in the non-embedded DMA pages */ 16185 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 16186 sgl_pg_pairs = &sgl->sgl_pg_pairs; 16187 16188 pg_pairs = 0; 16189 list_for_each_entry(psb, sblist, list) { 16190 /* Set up the sge entry */ 16191 sgl_pg_pairs->sgl_pg0_addr_lo = 16192 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 16193 sgl_pg_pairs->sgl_pg0_addr_hi = 16194 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 16195 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 16196 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 16197 else 16198 pdma_phys_bpl1 = 0; 16199 sgl_pg_pairs->sgl_pg1_addr_lo = 16200 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 16201 sgl_pg_pairs->sgl_pg1_addr_hi = 16202 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 16203 /* Keep the first xritag on the list */ 16204 if (pg_pairs == 0) 16205 xritag_start = psb->cur_iocbq.sli4_xritag; 16206 sgl_pg_pairs++; 16207 pg_pairs++; 16208 } 16209 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 16210 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 16211 /* Perform endian conversion if necessary */ 16212 sgl->word0 = cpu_to_le32(sgl->word0); 16213 16214 if (!phba->sli4_hba.intr_enable) 16215 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16216 else { 16217 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16218 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16219 } 16220 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 16221 shdr_status = 
bf_get(lpfc_mbox_hdr_status, &shdr->response); 16222 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16223 if (rc != MBX_TIMEOUT) 16224 lpfc_sli4_mbox_cmd_free(phba, mbox); 16225 if (shdr_status || shdr_add_status || rc) { 16226 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16227 "2564 POST_SGL_BLOCK mailbox command failed " 16228 "status x%x add_status x%x mbx status x%x\n", 16229 shdr_status, shdr_add_status, rc); 16230 rc = -ENXIO; 16231 } 16232 return rc; 16233 } 16234 16235 /** 16236 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 16237 * @phba: pointer to lpfc_hba struct that the frame was received on 16238 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16239 * 16240 * This function checks the fields in the @fc_hdr to see if the FC frame is a 16241 * valid type of frame that the LPFC driver will handle. This function will 16242 * return a zero if the frame is a valid frame or a non zero value when the 16243 * frame does not pass the check. 16244 **/ 16245 static int 16246 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 16247 { 16248 /* make rctl_names static to save stack space */ 16249 struct fc_vft_header *fc_vft_hdr; 16250 uint32_t *header = (uint32_t *) fc_hdr; 16251 16252 #define FC_RCTL_MDS_DIAGS 0xF4 16253 16254 switch (fc_hdr->fh_r_ctl) { 16255 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 16256 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 16257 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 16258 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 16259 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 16260 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 16261 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 16262 case FC_RCTL_DD_CMD_STATUS: /* command status */ 16263 case FC_RCTL_ELS_REQ: /* extended link services request */ 16264 case FC_RCTL_ELS_REP: /* extended link services reply */ 16265 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 16266 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 16267 case FC_RCTL_BA_NOP: /* basic link service NOP */ 16268 case FC_RCTL_BA_ABTS: /* basic link service abort */ 16269 case FC_RCTL_BA_RMC: /* remove connection */ 16270 case FC_RCTL_BA_ACC: /* basic accept */ 16271 case FC_RCTL_BA_RJT: /* basic reject */ 16272 case FC_RCTL_BA_PRMT: 16273 case FC_RCTL_ACK_1: /* acknowledge_1 */ 16274 case FC_RCTL_ACK_0: /* acknowledge_0 */ 16275 case FC_RCTL_P_RJT: /* port reject */ 16276 case FC_RCTL_F_RJT: /* fabric reject */ 16277 case FC_RCTL_P_BSY: /* port busy */ 16278 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 16279 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 16280 case FC_RCTL_LCR: /* link credit reset */ 16281 case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */ 16282 case FC_RCTL_END: /* end */ 16283 break; 16284 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 16285 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16286 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 16287 return lpfc_fc_frame_check(phba, fc_hdr); 16288 default: 16289 goto drop; 16290 } 16291 16292 #define FC_TYPE_VENDOR_UNIQUE 0xFF 16293 16294 switch (fc_hdr->fh_type) { 16295 case FC_TYPE_BLS: 16296 case FC_TYPE_ELS: 16297 case FC_TYPE_FCP: 16298 case FC_TYPE_CT: 16299 case FC_TYPE_NVME: 16300 case FC_TYPE_VENDOR_UNIQUE: 16301 break; 16302 case FC_TYPE_IP: 16303 case FC_TYPE_ILS: 16304 default: 16305 goto drop; 16306 } 16307 16308 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 16309 "2538 Received frame rctl:x%x, type:x%x, " 16310 "frame 
Data:%08x %08x %08x %08x %08x %08x %08x\n", 16311 fc_hdr->fh_r_ctl, fc_hdr->fh_type, 16312 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 16313 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 16314 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 16315 be32_to_cpu(header[6])); 16316 return 0; 16317 drop: 16318 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 16319 "2539 Dropped frame rctl:x%x type:x%x\n", 16320 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 16321 return 1; 16322 } 16323 16324 /** 16325 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 16326 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16327 * 16328 * This function processes the FC header to retrieve the VFI from the VF 16329 * header, if one exists. This function will return the VFI if one exists 16330 * or 0 if no VSAN Header exists. 16331 **/ 16332 static uint32_t 16333 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 16334 { 16335 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 16336 16337 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 16338 return 0; 16339 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 16340 } 16341 16342 /** 16343 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 16344 * @phba: Pointer to the HBA structure to search for the vport on 16345 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 16346 * @fcfi: The FC Fabric ID that the frame came from 16347 * 16348 * This function searches the @phba for a vport that matches the content of the 16349 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 16350 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 16351 * returns the matching vport pointer or NULL if unable to match frame to a 16352 * vport. 16353 **/ 16354 static struct lpfc_vport * 16355 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 16356 uint16_t fcfi, uint32_t did) 16357 { 16358 struct lpfc_vport **vports; 16359 struct lpfc_vport *vport = NULL; 16360 int i; 16361 16362 if (did == Fabric_DID) 16363 return phba->pport; 16364 if ((phba->pport->fc_flag & FC_PT2PT) && 16365 !(phba->link_state == LPFC_HBA_READY)) 16366 return phba->pport; 16367 16368 vports = lpfc_create_vport_work_array(phba); 16369 if (vports != NULL) { 16370 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 16371 if (phba->fcf.fcfi == fcfi && 16372 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 16373 vports[i]->fc_myDID == did) { 16374 vport = vports[i]; 16375 break; 16376 } 16377 } 16378 } 16379 lpfc_destroy_vport_work_array(phba, vports); 16380 return vport; 16381 } 16382 16383 /** 16384 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 16385 * @vport: The vport to work on. 16386 * 16387 * This function updates the receive sequence time stamp for this vport. The 16388 * receive sequence time stamp indicates the time that the last frame of the 16389 * the sequence that has been idle for the longest amount of time was received. 16390 * the driver uses this time stamp to indicate if any received sequences have 16391 * timed out. 
16392 **/ 16393 static void 16394 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 16395 { 16396 struct lpfc_dmabuf *h_buf; 16397 struct hbq_dmabuf *dmabuf = NULL; 16398 16399 /* get the oldest sequence on the rcv list */ 16400 h_buf = list_get_first(&vport->rcv_buffer_list, 16401 struct lpfc_dmabuf, list); 16402 if (!h_buf) 16403 return; 16404 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16405 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 16406 } 16407 16408 /** 16409 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 16410 * @vport: The vport that the received sequences were sent to. 16411 * 16412 * This function cleans up all outstanding received sequences. This is called 16413 * by the driver when a link event or user action invalidates all the received 16414 * sequences. 16415 **/ 16416 void 16417 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 16418 { 16419 struct lpfc_dmabuf *h_buf, *hnext; 16420 struct lpfc_dmabuf *d_buf, *dnext; 16421 struct hbq_dmabuf *dmabuf = NULL; 16422 16423 /* start with the oldest sequence on the rcv list */ 16424 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 16425 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16426 list_del_init(&dmabuf->hbuf.list); 16427 list_for_each_entry_safe(d_buf, dnext, 16428 &dmabuf->dbuf.list, list) { 16429 list_del_init(&d_buf->list); 16430 lpfc_in_buf_free(vport->phba, d_buf); 16431 } 16432 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 16433 } 16434 } 16435 16436 /** 16437 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 16438 * @vport: The vport that the received sequences were sent to. 16439 * 16440 * This function determines whether any received sequences have timed out by 16441 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 16442 * indicates that there is at least one timed out sequence this routine will 16443 * go through the received sequences one at a time from most inactive to most 16444 * active to determine which ones need to be cleaned up. Once it has determined 16445 * that a sequence needs to be cleaned up it will simply free up the resources 16446 * without sending an abort. 
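 *
 * The timeout test used below, sketched with an assumed E_D_TOV of 2000 ms
 * (the actual value comes from @vport->phba->fc_edtov):
 *
 *	timeout = msecs_to_jiffies(2000) + dmabuf->time_stamp;
 *	if (time_before(jiffies, timeout))
 *		the sequence is younger than E_D_TOV, keep it
 *	else
 *		free the header and data buffers for the sequence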
16447 **/ 16448 void 16449 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 16450 { 16451 struct lpfc_dmabuf *h_buf, *hnext; 16452 struct lpfc_dmabuf *d_buf, *dnext; 16453 struct hbq_dmabuf *dmabuf = NULL; 16454 unsigned long timeout; 16455 int abort_count = 0; 16456 16457 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 16458 vport->rcv_buffer_time_stamp); 16459 if (list_empty(&vport->rcv_buffer_list) || 16460 time_before(jiffies, timeout)) 16461 return; 16462 /* start with the oldest sequence on the rcv list */ 16463 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 16464 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16465 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 16466 dmabuf->time_stamp); 16467 if (time_before(jiffies, timeout)) 16468 break; 16469 abort_count++; 16470 list_del_init(&dmabuf->hbuf.list); 16471 list_for_each_entry_safe(d_buf, dnext, 16472 &dmabuf->dbuf.list, list) { 16473 list_del_init(&d_buf->list); 16474 lpfc_in_buf_free(vport->phba, d_buf); 16475 } 16476 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 16477 } 16478 if (abort_count) 16479 lpfc_update_rcv_time_stamp(vport); 16480 } 16481 16482 /** 16483 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 16484 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 16485 * 16486 * This function searches through the existing incomplete sequences that have 16487 * been sent to this @vport. If the frame matches one of the incomplete 16488 * sequences then the dbuf in the @dmabuf is added to the list of frames that 16489 * make up that sequence. If no sequence is found that matches this frame then 16490 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 16491 * This function returns a pointer to the first dmabuf in the sequence list that 16492 * the frame was linked to. 16493 **/ 16494 static struct hbq_dmabuf * 16495 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 16496 { 16497 struct fc_frame_header *new_hdr; 16498 struct fc_frame_header *temp_hdr; 16499 struct lpfc_dmabuf *d_buf; 16500 struct lpfc_dmabuf *h_buf; 16501 struct hbq_dmabuf *seq_dmabuf = NULL; 16502 struct hbq_dmabuf *temp_dmabuf = NULL; 16503 uint8_t found = 0; 16504 16505 INIT_LIST_HEAD(&dmabuf->dbuf.list); 16506 dmabuf->time_stamp = jiffies; 16507 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16508 16509 /* Use the hdr_buf to find the sequence that this frame belongs to */ 16510 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 16511 temp_hdr = (struct fc_frame_header *)h_buf->virt; 16512 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 16513 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 16514 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 16515 continue; 16516 /* found a pending sequence that matches this frame */ 16517 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16518 break; 16519 } 16520 if (!seq_dmabuf) { 16521 /* 16522 * This indicates first frame received for this sequence. 16523 * Queue the buffer on the vport's rcv_buffer_list. 
16524 */ 16525 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 16526 lpfc_update_rcv_time_stamp(vport); 16527 return dmabuf; 16528 } 16529 temp_hdr = seq_dmabuf->hbuf.virt; 16530 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 16531 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 16532 list_del_init(&seq_dmabuf->hbuf.list); 16533 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 16534 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 16535 lpfc_update_rcv_time_stamp(vport); 16536 return dmabuf; 16537 } 16538 /* move this sequence to the tail to indicate a young sequence */ 16539 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 16540 seq_dmabuf->time_stamp = jiffies; 16541 lpfc_update_rcv_time_stamp(vport); 16542 if (list_empty(&seq_dmabuf->dbuf.list)) { 16543 temp_hdr = dmabuf->hbuf.virt; 16544 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 16545 return seq_dmabuf; 16546 } 16547 /* find the correct place in the sequence to insert this frame */ 16548 d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list); 16549 while (!found) { 16550 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16551 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 16552 /* 16553 * If the frame's sequence count is greater than the frame on 16554 * the list then insert the frame right after this frame 16555 */ 16556 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 16557 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 16558 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 16559 found = 1; 16560 break; 16561 } 16562 16563 if (&d_buf->list == &seq_dmabuf->dbuf.list) 16564 break; 16565 d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list); 16566 } 16567 16568 if (found) 16569 return seq_dmabuf; 16570 return NULL; 16571 } 16572 16573 /** 16574 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 16575 * @vport: pointer to a vitural port 16576 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16577 * 16578 * This function tries to abort from the partially assembed sequence, described 16579 * by the information from basic abbort @dmabuf. It checks to see whether such 16580 * partially assembled sequence held by the driver. If so, it shall free up all 16581 * the frames from the partially assembled sequence. 
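 *
 * A pending sequence is considered a match for the ABTS only when all three
 * of these header fields agree (the same key lpfc_fc_frame_add() uses when
 * linking frames together):
 *
 *	new_hdr->fh_seq_id == temp_hdr->fh_seq_id
 *	new_hdr->fh_ox_id  == temp_hdr->fh_ox_id
 *	memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3) == 0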
16582 * 16583 * Return 16584 * true -- if there is matching partially assembled sequence present and all 16585 * the frames freed with the sequence; 16586 * false -- if there is no matching partially assembled sequence present so 16587 * nothing got aborted in the lower layer driver 16588 **/ 16589 static bool 16590 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 16591 struct hbq_dmabuf *dmabuf) 16592 { 16593 struct fc_frame_header *new_hdr; 16594 struct fc_frame_header *temp_hdr; 16595 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 16596 struct hbq_dmabuf *seq_dmabuf = NULL; 16597 16598 /* Use the hdr_buf to find the sequence that matches this frame */ 16599 INIT_LIST_HEAD(&dmabuf->dbuf.list); 16600 INIT_LIST_HEAD(&dmabuf->hbuf.list); 16601 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16602 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 16603 temp_hdr = (struct fc_frame_header *)h_buf->virt; 16604 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 16605 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 16606 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 16607 continue; 16608 /* found a pending sequence that matches this frame */ 16609 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 16610 break; 16611 } 16612 16613 /* Free up all the frames from the partially assembled sequence */ 16614 if (seq_dmabuf) { 16615 list_for_each_entry_safe(d_buf, n_buf, 16616 &seq_dmabuf->dbuf.list, list) { 16617 list_del_init(&d_buf->list); 16618 lpfc_in_buf_free(vport->phba, d_buf); 16619 } 16620 return true; 16621 } 16622 return false; 16623 } 16624 16625 /** 16626 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 16627 * @vport: pointer to a vitural port 16628 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16629 * 16630 * This function tries to abort from the assembed sequence from upper level 16631 * protocol, described by the information from basic abbort @dmabuf. It 16632 * checks to see whether such pending context exists at upper level protocol. 16633 * If so, it shall clean up the pending context. 16634 * 16635 * Return 16636 * true -- if there is matching pending context of the sequence cleaned 16637 * at ulp; 16638 * false -- if there is no matching pending context of the sequence present 16639 * at ulp. 16640 **/ 16641 static bool 16642 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 16643 { 16644 struct lpfc_hba *phba = vport->phba; 16645 int handled; 16646 16647 /* Accepting abort at ulp with SLI4 only */ 16648 if (phba->sli_rev < LPFC_SLI_REV4) 16649 return false; 16650 16651 /* Register all caring upper level protocols to attend abort */ 16652 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 16653 if (handled) 16654 return true; 16655 16656 return false; 16657 } 16658 16659 /** 16660 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 16661 * @phba: Pointer to HBA context object. 16662 * @cmd_iocbq: pointer to the command iocbq structure. 16663 * @rsp_iocbq: pointer to the response iocbq structure. 16664 * 16665 * This function handles the sequence abort response iocb command complete 16666 * event. It properly releases the memory allocated to the sequence abort 16667 * accept iocb. 
16668 **/ 16669 static void 16670 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 16671 struct lpfc_iocbq *cmd_iocbq, 16672 struct lpfc_iocbq *rsp_iocbq) 16673 { 16674 struct lpfc_nodelist *ndlp; 16675 16676 if (cmd_iocbq) { 16677 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 16678 lpfc_nlp_put(ndlp); 16679 lpfc_nlp_not_used(ndlp); 16680 lpfc_sli_release_iocbq(phba, cmd_iocbq); 16681 } 16682 16683 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 16684 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 16685 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16686 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 16687 rsp_iocbq->iocb.ulpStatus, 16688 rsp_iocbq->iocb.un.ulpWord[4]); 16689 } 16690 16691 /** 16692 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 16693 * @phba: Pointer to HBA context object. 16694 * @xri: xri id in transaction. 16695 * 16696 * This function validates the xri maps to the known range of XRIs allocated an 16697 * used by the driver. 16698 **/ 16699 uint16_t 16700 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 16701 uint16_t xri) 16702 { 16703 uint16_t i; 16704 16705 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 16706 if (xri == phba->sli4_hba.xri_ids[i]) 16707 return i; 16708 } 16709 return NO_XRI; 16710 } 16711 16712 /** 16713 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 16714 * @phba: Pointer to HBA context object. 16715 * @fc_hdr: pointer to a FC frame header. 16716 * 16717 * This function sends a basic response to a previous unsol sequence abort 16718 * event after aborting the sequence handling. 16719 **/ 16720 void 16721 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 16722 struct fc_frame_header *fc_hdr, bool aborted) 16723 { 16724 struct lpfc_hba *phba = vport->phba; 16725 struct lpfc_iocbq *ctiocb = NULL; 16726 struct lpfc_nodelist *ndlp; 16727 uint16_t oxid, rxid, xri, lxri; 16728 uint32_t sid, fctl; 16729 IOCB_t *icmd; 16730 int rc; 16731 16732 if (!lpfc_is_link_up(phba)) 16733 return; 16734 16735 sid = sli4_sid_from_fc_hdr(fc_hdr); 16736 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 16737 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 16738 16739 ndlp = lpfc_findnode_did(vport, sid); 16740 if (!ndlp) { 16741 ndlp = lpfc_nlp_init(vport, sid); 16742 if (!ndlp) { 16743 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 16744 "1268 Failed to allocate ndlp for " 16745 "oxid:x%x SID:x%x\n", oxid, sid); 16746 return; 16747 } 16748 /* Put ndlp onto pport node list */ 16749 lpfc_enqueue_node(vport, ndlp); 16750 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 16751 /* re-setup ndlp without removing from node list */ 16752 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 16753 if (!ndlp) { 16754 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 16755 "3275 Failed to active ndlp found " 16756 "for oxid:x%x SID:x%x\n", oxid, sid); 16757 return; 16758 } 16759 } 16760 16761 /* Allocate buffer for rsp iocb */ 16762 ctiocb = lpfc_sli_get_iocbq(phba); 16763 if (!ctiocb) 16764 return; 16765 16766 /* Extract the F_CTL field from FC_HDR */ 16767 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 16768 16769 icmd = &ctiocb->iocb; 16770 icmd->un.xseq64.bdl.bdeSize = 0; 16771 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 16772 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 16773 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 16774 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 16775 16776 /* Fill in the rest of iocb fields */ 16777 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 16778 icmd->ulpBdeCount = 0; 16779 icmd->ulpLe = 1; 16780 icmd->ulpClass = CLASS3; 16781 icmd->ulpContext = 
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 16782 ctiocb->context1 = lpfc_nlp_get(ndlp); 16783 16784 ctiocb->iocb_cmpl = NULL; 16785 ctiocb->vport = phba->pport; 16786 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 16787 ctiocb->sli4_lxritag = NO_XRI; 16788 ctiocb->sli4_xritag = NO_XRI; 16789 16790 if (fctl & FC_FC_EX_CTX) 16791 /* Exchange responder sent the abort so we 16792 * own the oxid. 16793 */ 16794 xri = oxid; 16795 else 16796 xri = rxid; 16797 lxri = lpfc_sli4_xri_inrange(phba, xri); 16798 if (lxri != NO_XRI) 16799 lpfc_set_rrq_active(phba, ndlp, lxri, 16800 (xri == oxid) ? rxid : oxid, 0); 16801 /* For BA_ABTS from exchange responder, if the logical xri with 16802 * the oxid maps to the FCP XRI range, the port no longer has 16803 * that exchange context, send a BLS_RJT. Override the IOCB for 16804 * a BA_RJT. 16805 */ 16806 if ((fctl & FC_FC_EX_CTX) && 16807 (lxri > lpfc_sli4_get_iocb_cnt(phba))) { 16808 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 16809 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 16810 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 16811 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 16812 } 16813 16814 /* If BA_ABTS failed to abort a partially assembled receive sequence, 16815 * the driver no longer has that exchange, send a BLS_RJT. Override 16816 * the IOCB for a BA_RJT. 16817 */ 16818 if (aborted == false) { 16819 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 16820 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 16821 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 16822 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 16823 } 16824 16825 if (fctl & FC_FC_EX_CTX) { 16826 /* ABTS sent by responder to CT exchange, construction 16827 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 16828 * field and RX_ID from ABTS for RX_ID field. 16829 */ 16830 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 16831 } else { 16832 /* ABTS sent by initiator to CT exchange, construction 16833 * of BA_ACC will need to allocate a new XRI as for the 16834 * XRI_TAG field. 16835 */ 16836 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 16837 } 16838 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 16839 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 16840 16841 /* Xmit CT abts response on exchange <xid> */ 16842 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 16843 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 16844 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 16845 16846 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 16847 if (rc == IOCB_ERROR) { 16848 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 16849 "2925 Failed to issue CT ABTS RSP x%x on " 16850 "xri x%x, Data x%x\n", 16851 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 16852 phba->link_state); 16853 lpfc_nlp_put(ndlp); 16854 ctiocb->context1 = NULL; 16855 lpfc_sli_release_iocbq(phba, ctiocb); 16856 } 16857 } 16858 16859 /** 16860 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 16861 * @vport: Pointer to the vport on which this sequence was received 16862 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16863 * 16864 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 16865 * receive sequence is only partially assembed by the driver, it shall abort 16866 * the partially assembled frames for the sequence. 
Otherwise, if the 16867 * unsolicited receive sequence has been completely assembled and passed to 16868 * the Upper Layer Protocol (ULP), it then marks the per-oxid status of the 16869 * unsolicited sequence as aborted. After that, it will issue a basic 16870 * accept (BA_ACC) for the abort. 16871 **/ 16872 static void 16873 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 16874 struct hbq_dmabuf *dmabuf) 16875 { 16876 struct lpfc_hba *phba = vport->phba; 16877 struct fc_frame_header fc_hdr; 16878 uint32_t fctl; 16879 bool aborted; 16880 16881 /* Make a copy of fc_hdr before the dmabuf being released */ 16882 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 16883 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 16884 16885 if (fctl & FC_FC_EX_CTX) { 16886 /* ABTS by responder to exchange, no cleanup needed */ 16887 aborted = true; 16888 } else { 16889 /* ABTS by initiator to exchange, need to do cleanup */ 16890 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 16891 if (aborted == false) 16892 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 16893 } 16894 lpfc_in_buf_free(phba, &dmabuf->dbuf); 16895 16896 if (phba->nvmet_support) { 16897 lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr); 16898 return; 16899 } 16900 16901 /* Respond with BA_ACC or BA_RJT accordingly */ 16902 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 16903 } 16904 16905 /** 16906 * lpfc_seq_complete - Indicates if a sequence is complete 16907 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16908 * 16909 * This function checks the sequence, starting with the frame described by 16910 * @dmabuf, to see if all the frames associated with this sequence are present. 16911 * The frames associated with this sequence are linked to the @dmabuf using the 16912 * dbuf list. This function looks for three things: 1) That the first frame 16913 * has a sequence count of zero. 2) That there is a frame with the last frame of the sequence 16914 * bit set. 3) That there are no holes in the sequence count. The function will 16915 * return 1 when the sequence is complete, otherwise it will return 0. 16916 **/ 16917 static int 16918 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 16919 { 16920 struct fc_frame_header *hdr; 16921 struct lpfc_dmabuf *d_buf; 16922 struct hbq_dmabuf *seq_dmabuf; 16923 uint32_t fctl; 16924 int seq_count = 0; 16925 16926 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 16927 /* make sure the first frame of the sequence has a sequence count of zero */ 16928 if (hdr->fh_seq_cnt != seq_count) 16929 return 0; 16930 fctl = (hdr->fh_f_ctl[0] << 16 | 16931 hdr->fh_f_ctl[1] << 8 | 16932 hdr->fh_f_ctl[2]); 16933 /* If last frame of sequence we can return success. */ 16934 if (fctl & FC_FC_END_SEQ) 16935 return 1; 16936 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 16937 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 16938 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 16939 /* If there is a hole in the sequence count then fail. */ 16940 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 16941 return 0; 16942 fctl = (hdr->fh_f_ctl[0] << 16 | 16943 hdr->fh_f_ctl[1] << 8 | 16944 hdr->fh_f_ctl[2]); 16945 /* If last frame of sequence we can return success.
*/ 16946 if (fctl & FC_FC_END_SEQ) 16947 return 1; 16948 } 16949 return 0; 16950 } 16951 16952 /** 16953 * lpfc_prep_seq - Prep sequence for ULP processing 16954 * @vport: Pointer to the vport on which this sequence was received 16955 * @dmabuf: pointer to a dmabuf that describes the FC sequence 16956 * 16957 * This function takes a sequence, described by a list of frames, and creates 16958 * a list of iocbq structures to describe the sequence. This iocbq list will be 16959 * used to issue to the generic unsolicited sequence handler. This routine 16960 * returns a pointer to the first iocbq in the list. If the function is unable 16961 * to allocate an iocbq then it throw out the received frames that were not 16962 * able to be described and return a pointer to the first iocbq. If unable to 16963 * allocate any iocbqs (including the first) this function will return NULL. 16964 **/ 16965 static struct lpfc_iocbq * 16966 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 16967 { 16968 struct hbq_dmabuf *hbq_buf; 16969 struct lpfc_dmabuf *d_buf, *n_buf; 16970 struct lpfc_iocbq *first_iocbq, *iocbq; 16971 struct fc_frame_header *fc_hdr; 16972 uint32_t sid; 16973 uint32_t len, tot_len; 16974 struct ulp_bde64 *pbde; 16975 16976 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 16977 /* remove from receive buffer list */ 16978 list_del_init(&seq_dmabuf->hbuf.list); 16979 lpfc_update_rcv_time_stamp(vport); 16980 /* get the Remote Port's SID */ 16981 sid = sli4_sid_from_fc_hdr(fc_hdr); 16982 tot_len = 0; 16983 /* Get an iocbq struct to fill in. */ 16984 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 16985 if (first_iocbq) { 16986 /* Initialize the first IOCB. */ 16987 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 16988 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 16989 first_iocbq->vport = vport; 16990 16991 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 16992 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 16993 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 16994 first_iocbq->iocb.un.rcvels.parmRo = 16995 sli4_did_from_fc_hdr(fc_hdr); 16996 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 16997 } else 16998 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 16999 first_iocbq->iocb.ulpContext = NO_XRI; 17000 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 17001 be16_to_cpu(fc_hdr->fh_ox_id); 17002 /* iocbq is prepped for internal consumption. Physical vpi. 
*/ 17003 first_iocbq->iocb.unsli3.rcvsli3.vpi = 17004 vport->phba->vpi_ids[vport->vpi]; 17005 /* put the first buffer into the first IOCBq */ 17006 tot_len = bf_get(lpfc_rcqe_length, 17007 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 17008 17009 first_iocbq->context2 = &seq_dmabuf->dbuf; 17010 first_iocbq->context3 = NULL; 17011 first_iocbq->iocb.ulpBdeCount = 1; 17012 if (tot_len > LPFC_DATA_BUF_SIZE) 17013 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17014 LPFC_DATA_BUF_SIZE; 17015 else 17016 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 17017 17018 first_iocbq->iocb.un.rcvels.remoteID = sid; 17019 17020 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17021 } 17022 iocbq = first_iocbq; 17023 /* 17024 * Each IOCBq can have two Buffers assigned, so go through the list 17025 * of buffers for this sequence and save two buffers in each IOCBq 17026 */ 17027 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 17028 if (!iocbq) { 17029 lpfc_in_buf_free(vport->phba, d_buf); 17030 continue; 17031 } 17032 if (!iocbq->context3) { 17033 iocbq->context3 = d_buf; 17034 iocbq->iocb.ulpBdeCount++; 17035 /* We need to get the size out of the right CQE */ 17036 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17037 len = bf_get(lpfc_rcqe_length, 17038 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17039 pbde = (struct ulp_bde64 *) 17040 &iocbq->iocb.unsli3.sli3Words[4]; 17041 if (len > LPFC_DATA_BUF_SIZE) 17042 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 17043 else 17044 pbde->tus.f.bdeSize = len; 17045 17046 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 17047 tot_len += len; 17048 } else { 17049 iocbq = lpfc_sli_get_iocbq(vport->phba); 17050 if (!iocbq) { 17051 if (first_iocbq) { 17052 first_iocbq->iocb.ulpStatus = 17053 IOSTAT_FCP_RSP_ERROR; 17054 first_iocbq->iocb.un.ulpWord[4] = 17055 IOERR_NO_RESOURCES; 17056 } 17057 lpfc_in_buf_free(vport->phba, d_buf); 17058 continue; 17059 } 17060 /* We need to get the size out of the right CQE */ 17061 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 17062 len = bf_get(lpfc_rcqe_length, 17063 &hbq_buf->cq_event.cqe.rcqe_cmpl); 17064 iocbq->context2 = d_buf; 17065 iocbq->context3 = NULL; 17066 iocbq->iocb.ulpBdeCount = 1; 17067 if (len > LPFC_DATA_BUF_SIZE) 17068 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 17069 LPFC_DATA_BUF_SIZE; 17070 else 17071 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 17072 17073 tot_len += len; 17074 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 17075 17076 iocbq->iocb.un.rcvels.remoteID = sid; 17077 list_add_tail(&iocbq->list, &first_iocbq->list); 17078 } 17079 } 17080 return first_iocbq; 17081 } 17082 17083 static void 17084 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 17085 struct hbq_dmabuf *seq_dmabuf) 17086 { 17087 struct fc_frame_header *fc_hdr; 17088 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 17089 struct lpfc_hba *phba = vport->phba; 17090 17091 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 17092 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 17093 if (!iocbq) { 17094 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17095 "2707 Ring %d handler: Failed to allocate " 17096 "iocb Rctl x%x Type x%x received\n", 17097 LPFC_ELS_RING, 17098 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 17099 return; 17100 } 17101 if (!lpfc_complete_unsol_iocb(phba, 17102 phba->sli4_hba.els_wq->pring, 17103 iocbq, fc_hdr->fh_r_ctl, 17104 fc_hdr->fh_type)) 17105 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17106 "2540 Ring %d handler: unexpected Rctl " 17107 "x%x Type x%x received\n", 17108 LPFC_ELS_RING, 17109 fc_hdr->fh_r_ctl, 
fc_hdr->fh_type); 17110 17111 /* Free iocb created in lpfc_prep_seq */ 17112 list_for_each_entry_safe(curr_iocb, next_iocb, 17113 &iocbq->list, list) { 17114 list_del_init(&curr_iocb->list); 17115 lpfc_sli_release_iocbq(phba, curr_iocb); 17116 } 17117 lpfc_sli_release_iocbq(phba, iocbq); 17118 } 17119 17120 static void 17121 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 17122 struct lpfc_iocbq *rspiocb) 17123 { 17124 struct lpfc_dmabuf *pcmd = cmdiocb->context2; 17125 17126 if (pcmd && pcmd->virt) 17127 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17128 kfree(pcmd); 17129 lpfc_sli_release_iocbq(phba, cmdiocb); 17130 } 17131 17132 static void 17133 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport, 17134 struct hbq_dmabuf *dmabuf) 17135 { 17136 struct fc_frame_header *fc_hdr; 17137 struct lpfc_hba *phba = vport->phba; 17138 struct lpfc_iocbq *iocbq = NULL; 17139 union lpfc_wqe *wqe; 17140 struct lpfc_dmabuf *pcmd = NULL; 17141 uint32_t frame_len; 17142 int rc; 17143 17144 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17145 frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl); 17146 17147 /* Send the received frame back */ 17148 iocbq = lpfc_sli_get_iocbq(phba); 17149 if (!iocbq) 17150 goto exit; 17151 17152 /* Allocate buffer for command payload */ 17153 pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 17154 if (pcmd) 17155 pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, 17156 &pcmd->phys); 17157 if (!pcmd || !pcmd->virt) 17158 goto exit; 17159 17160 INIT_LIST_HEAD(&pcmd->list); 17161 17162 /* copyin the payload */ 17163 memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len); 17164 17165 /* fill in BDE's for command */ 17166 iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys); 17167 iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys); 17168 iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64; 17169 iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len; 17170 17171 iocbq->context2 = pcmd; 17172 iocbq->vport = vport; 17173 iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK; 17174 iocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 17175 17176 /* 17177 * Setup rest of the iocb as though it were a WQE 17178 * Build the SEND_FRAME WQE 17179 */ 17180 wqe = (union lpfc_wqe *)&iocbq->iocb; 17181 17182 wqe->send_frame.frame_len = frame_len; 17183 wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr)); 17184 wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1)); 17185 wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2)); 17186 wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3)); 17187 wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4)); 17188 wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5)); 17189 17190 iocbq->iocb.ulpCommand = CMD_SEND_FRAME; 17191 iocbq->iocb.ulpLe = 1; 17192 iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl; 17193 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0); 17194 if (rc == IOCB_ERROR) 17195 goto exit; 17196 17197 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17198 return; 17199 17200 exit: 17201 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 17202 "2023 Unable to process MDS loopback frame\n"); 17203 if (pcmd && pcmd->virt) 17204 dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys); 17205 kfree(pcmd); 17206 if (iocbq) 17207 lpfc_sli_release_iocbq(phba, iocbq); 17208 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17209 } 17210 17211 /** 17212 * lpfc_sli4_handle_received_buffer - Handle received buffers from 
firmware 17213 * @phba: Pointer to HBA context object. 17214 * 17215 * This function is called with no lock held. This function processes all 17216 * the received buffers and gives it to upper layers when a received buffer 17217 * indicates that it is the final frame in the sequence. The interrupt 17218 * service routine processes received buffers at interrupt contexts. 17219 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 17220 * appropriate receive function when the final frame in a sequence is received. 17221 **/ 17222 void 17223 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 17224 struct hbq_dmabuf *dmabuf) 17225 { 17226 struct hbq_dmabuf *seq_dmabuf; 17227 struct fc_frame_header *fc_hdr; 17228 struct lpfc_vport *vport; 17229 uint32_t fcfi; 17230 uint32_t did; 17231 17232 /* Process each received buffer */ 17233 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 17234 17235 /* check to see if this a valid type of frame */ 17236 if (lpfc_fc_frame_check(phba, fc_hdr)) { 17237 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17238 return; 17239 } 17240 17241 if ((bf_get(lpfc_cqe_code, 17242 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 17243 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 17244 &dmabuf->cq_event.cqe.rcqe_cmpl); 17245 else 17246 fcfi = bf_get(lpfc_rcqe_fcf_id, 17247 &dmabuf->cq_event.cqe.rcqe_cmpl); 17248 17249 if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { 17250 vport = phba->pport; 17251 /* Handle MDS Loopback frames */ 17252 lpfc_sli4_handle_mds_loopback(vport, dmabuf); 17253 return; 17254 } 17255 17256 /* d_id this frame is directed to */ 17257 did = sli4_did_from_fc_hdr(fc_hdr); 17258 17259 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did); 17260 if (!vport) { 17261 /* throw out the frame */ 17262 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17263 return; 17264 } 17265 17266 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 17267 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 17268 (did != Fabric_DID)) { 17269 /* 17270 * Throw out the frame if we are not pt2pt. 17271 * The pt2pt protocol allows for discovery frames 17272 * to be received without a registered VPI. 17273 */ 17274 if (!(vport->fc_flag & FC_PT2PT) || 17275 (phba->link_state == LPFC_HBA_READY)) { 17276 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17277 return; 17278 } 17279 } 17280 17281 /* Handle the basic abort sequence (BA_ABTS) event */ 17282 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 17283 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 17284 return; 17285 } 17286 17287 /* Link this frame */ 17288 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 17289 if (!seq_dmabuf) { 17290 /* unable to add frame to vport - throw it out */ 17291 lpfc_in_buf_free(phba, &dmabuf->dbuf); 17292 return; 17293 } 17294 /* If not last frame in sequence continue processing frames. */ 17295 if (!lpfc_seq_complete(seq_dmabuf)) 17296 return; 17297 17298 /* Send the complete sequence to the upper layer protocol */ 17299 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 17300 } 17301 17302 /** 17303 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 17304 * @phba: pointer to lpfc hba data structure. 17305 * 17306 * This routine is invoked to post rpi header templates to the 17307 * HBA consistent with the SLI-4 interface spec. This routine 17308 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17309 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17310 * 17311 * This routine does not require any locks. 
It's usage is expected 17312 * to be driver load or reset recovery when the driver is 17313 * sequential. 17314 * 17315 * Return codes 17316 * 0 - successful 17317 * -EIO - The mailbox failed to complete successfully. 17318 * When this error occurs, the driver is not guaranteed 17319 * to have any rpi regions posted to the device and 17320 * must either attempt to repost the regions or take a 17321 * fatal error. 17322 **/ 17323 int 17324 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 17325 { 17326 struct lpfc_rpi_hdr *rpi_page; 17327 uint32_t rc = 0; 17328 uint16_t lrpi = 0; 17329 17330 /* SLI4 ports that support extents do not require RPI headers. */ 17331 if (!phba->sli4_hba.rpi_hdrs_in_use) 17332 goto exit; 17333 if (phba->sli4_hba.extents_in_use) 17334 return -EIO; 17335 17336 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 17337 /* 17338 * Assign the rpi headers a physical rpi only if the driver 17339 * has not initialized those resources. A port reset only 17340 * needs the headers posted. 17341 */ 17342 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 17343 LPFC_RPI_RSRC_RDY) 17344 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17345 17346 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 17347 if (rc != MBX_SUCCESS) { 17348 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17349 "2008 Error %d posting all rpi " 17350 "headers\n", rc); 17351 rc = -EIO; 17352 break; 17353 } 17354 } 17355 17356 exit: 17357 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 17358 LPFC_RPI_RSRC_RDY); 17359 return rc; 17360 } 17361 17362 /** 17363 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 17364 * @phba: pointer to lpfc hba data structure. 17365 * @rpi_page: pointer to the rpi memory region. 17366 * 17367 * This routine is invoked to post a single rpi header to the 17368 * HBA consistent with the SLI-4 interface spec. This memory region 17369 * maps up to 64 rpi context regions. 17370 * 17371 * Return codes 17372 * 0 - successful 17373 * -ENOMEM - No available memory 17374 * -EIO - The mailbox failed to complete successfully. 17375 **/ 17376 int 17377 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 17378 { 17379 LPFC_MBOXQ_t *mboxq; 17380 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 17381 uint32_t rc = 0; 17382 uint32_t shdr_status, shdr_add_status; 17383 union lpfc_sli4_cfg_shdr *shdr; 17384 17385 /* SLI4 ports that support extents do not require RPI headers. */ 17386 if (!phba->sli4_hba.rpi_hdrs_in_use) 17387 return rc; 17388 if (phba->sli4_hba.extents_in_use) 17389 return -EIO; 17390 17391 /* The port is notified of the header region via a mailbox command. */ 17392 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17393 if (!mboxq) { 17394 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17395 "2001 Unable to allocate memory for issuing " 17396 "SLI_CONFIG_SPECIAL mailbox command\n"); 17397 return -ENOMEM; 17398 } 17399 17400 /* Post all rpi memory regions to the port. */ 17401 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 17402 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 17403 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 17404 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 17405 sizeof(struct lpfc_sli4_cfg_mhdr), 17406 LPFC_SLI4_MBX_EMBED); 17407 17408 17409 /* Post the physical rpi to the port for this rpi header. 
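*/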
*/ 17410 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 17411 rpi_page->start_rpi); 17412 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 17413 hdr_tmpl, rpi_page->page_count); 17414 17415 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 17416 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 17417 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 17418 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 17419 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17420 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17421 if (rc != MBX_TIMEOUT) 17422 mempool_free(mboxq, phba->mbox_mem_pool); 17423 if (shdr_status || shdr_add_status || rc) { 17424 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17425 "2514 POST_RPI_HDR mailbox failed with " 17426 "status x%x add_status x%x, mbx status x%x\n", 17427 shdr_status, shdr_add_status, rc); 17428 rc = -ENXIO; 17429 } else { 17430 /* 17431 * The next_rpi stores the next logical module-64 rpi value used 17432 * to post physical rpis in subsequent rpi postings. 17433 */ 17434 spin_lock_irq(&phba->hbalock); 17435 phba->sli4_hba.next_rpi = rpi_page->next_rpi; 17436 spin_unlock_irq(&phba->hbalock); 17437 } 17438 return rc; 17439 } 17440 17441 /** 17442 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 17443 * @phba: pointer to lpfc hba data structure. 17444 * 17445 * This routine is invoked to post rpi header templates to the 17446 * HBA consistent with the SLI-4 interface spec. This routine 17447 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 17448 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 17449 * 17450 * Returns 17451 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 17452 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 17453 **/ 17454 int 17455 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 17456 { 17457 unsigned long rpi; 17458 uint16_t max_rpi, rpi_limit; 17459 uint16_t rpi_remaining, lrpi = 0; 17460 struct lpfc_rpi_hdr *rpi_hdr; 17461 unsigned long iflag; 17462 17463 /* 17464 * Fetch the next logical rpi. Because this index is logical, 17465 * the driver starts at 0 each time. 17466 */ 17467 spin_lock_irqsave(&phba->hbalock, iflag); 17468 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 17469 rpi_limit = phba->sli4_hba.next_rpi; 17470 17471 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 17472 if (rpi >= rpi_limit) 17473 rpi = LPFC_RPI_ALLOC_ERROR; 17474 else { 17475 set_bit(rpi, phba->sli4_hba.rpi_bmask); 17476 phba->sli4_hba.max_cfg_param.rpi_used++; 17477 phba->sli4_hba.rpi_count++; 17478 } 17479 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 17480 "0001 rpi:%x max:%x lim:%x\n", 17481 (int) rpi, max_rpi, rpi_limit); 17482 17483 /* 17484 * Don't try to allocate more rpi header regions if the device limit 17485 * has been exhausted. 17486 */ 17487 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 17488 (phba->sli4_hba.rpi_count >= max_rpi)) { 17489 spin_unlock_irqrestore(&phba->hbalock, iflag); 17490 return rpi; 17491 } 17492 17493 /* 17494 * RPI header postings are not required for SLI4 ports capable of 17495 * extents. 17496 */ 17497 if (!phba->sli4_hba.rpi_hdrs_in_use) { 17498 spin_unlock_irqrestore(&phba->hbalock, iflag); 17499 return rpi; 17500 } 17501 17502 /* 17503 * If the driver is running low on rpi resources, allocate another 17504 * page now. Note that the next_rpi value is used because 17505 * it represents how many are actually in use whereas max_rpi notes 17506 * how many are supported max by the device. 
17507 */ 17508 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 17509 spin_unlock_irqrestore(&phba->hbalock, iflag); 17510 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 17511 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 17512 if (!rpi_hdr) { 17513 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17514 "2002 Error Could not grow rpi " 17515 "count\n"); 17516 } else { 17517 lrpi = rpi_hdr->start_rpi; 17518 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 17519 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 17520 } 17521 } 17522 17523 return rpi; 17524 } 17525 17526 /** 17527 * __lpfc_sli4_free_rpi - Release an rpi for reuse. 17528 * @phba: pointer to lpfc hba data structure. * @rpi: rpi to release. 17529 * 17530 * This routine is invoked to release an rpi to the pool of 17531 * available rpis maintained by the driver. The caller is expected to hold the hbalock. 17532 **/ 17533 static void 17534 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 17535 { 17536 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 17537 phba->sli4_hba.rpi_count--; 17538 phba->sli4_hba.max_cfg_param.rpi_used--; 17539 } 17540 } 17541 17542 /** 17543 * lpfc_sli4_free_rpi - Release an rpi for reuse. 17544 * @phba: pointer to lpfc hba data structure. * @rpi: rpi to release. 17545 * 17546 * This routine is invoked to release an rpi to the pool of 17547 * available rpis maintained by the driver. 17548 **/ 17549 void 17550 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 17551 { 17552 spin_lock_irq(&phba->hbalock); 17553 __lpfc_sli4_free_rpi(phba, rpi); 17554 spin_unlock_irq(&phba->hbalock); 17555 } 17556 17557 /** 17558 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 17559 * @phba: pointer to lpfc hba data structure. 17560 * 17561 * This routine is invoked to remove the memory region that 17562 * provided rpi via a bitmask. 17563 **/ 17564 void 17565 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 17566 { 17567 kfree(phba->sli4_hba.rpi_bmask); 17568 kfree(phba->sli4_hba.rpi_ids); 17569 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 17570 } 17571 17572 /** 17573 * lpfc_sli4_resume_rpi - Resume the rpi assigned to a remote node 17574 * @ndlp: pointer to the remote node whose rpi is being resumed. * @cmpl: optional mailbox completion handler; the default handler is used when NULL. * @arg: argument passed to @cmpl through the mailbox context1 field. 17575 * 17576 * This routine is invoked to issue a RESUME_RPI mailbox command to the 17577 * port for the rpi associated with @ndlp. 17578 **/ 17579 int 17580 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 17581 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 17582 { 17583 LPFC_MBOXQ_t *mboxq; 17584 struct lpfc_hba *phba = ndlp->phba; 17585 int rc; 17586 17587 /* Allocate a mailbox to carry the RESUME_RPI command. */ 17588 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17589 if (!mboxq) 17590 return -ENOMEM; 17591 17592 /* Construct the RESUME_RPI mailbox command for this remote node. */ 17593 lpfc_resume_rpi(mboxq, ndlp); 17594 if (cmpl) { 17595 mboxq->mbox_cmpl = cmpl; 17596 mboxq->context1 = arg; 17597 mboxq->context2 = ndlp; 17598 } else 17599 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 17600 mboxq->vport = ndlp->vport; 17601 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17602 if (rc == MBX_NOT_FINISHED) { 17603 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17604 "2010 Resume RPI Mailbox failed " 17605 "status %d, mbxStatus x%x\n", rc, 17606 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 17607 mempool_free(mboxq, phba->mbox_mem_pool); 17608 return -EIO; 17609 } 17610 return 0; 17611 } 17612 17613 /** 17614 * lpfc_sli4_init_vpi - Initialize a vpi with the port 17615 * @vport: Pointer to the vport for which the vpi is being initialized 17616 * 17617 * This routine is invoked to activate a vpi with the port.
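* The INIT_VPI mailbox command is issued and waited on synchronously, bounded by the mailbox command timeout.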
17618 * 17619 * Returns: 17620 * 0 success 17621 * -Evalue otherwise 17622 **/ 17623 int 17624 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 17625 { 17626 LPFC_MBOXQ_t *mboxq; 17627 int rc = 0; 17628 int retval = MBX_SUCCESS; 17629 uint32_t mbox_tmo; 17630 struct lpfc_hba *phba = vport->phba; 17631 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17632 if (!mboxq) 17633 return -ENOMEM; 17634 lpfc_init_vpi(phba, mboxq, vport->vpi); 17635 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 17636 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 17637 if (rc != MBX_SUCCESS) { 17638 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 17639 "2022 INIT VPI Mailbox failed " 17640 "status %d, mbxStatus x%x\n", rc, 17641 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 17642 retval = -EIO; 17643 } 17644 if (rc != MBX_TIMEOUT) 17645 mempool_free(mboxq, vport->phba->mbox_mem_pool); 17646 17647 return retval; 17648 } 17649 17650 /** 17651 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 17652 * @phba: pointer to lpfc hba data structure. 17653 * @mboxq: Pointer to mailbox object. 17654 * 17655 * This routine is invoked to manually add a single FCF record. The caller 17656 * must pass a completely initialized FCF_Record. This routine takes 17657 * care of the nonembedded mailbox operations. 17658 **/ 17659 static void 17660 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 17661 { 17662 void *virt_addr; 17663 union lpfc_sli4_cfg_shdr *shdr; 17664 uint32_t shdr_status, shdr_add_status; 17665 17666 virt_addr = mboxq->sge_array->addr[0]; 17667 /* The IOCTL status is embedded in the mailbox subheader. */ 17668 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 17669 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 17670 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 17671 17672 if ((shdr_status || shdr_add_status) && 17673 (shdr_status != STATUS_FCF_IN_USE)) 17674 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17675 "2558 ADD_FCF_RECORD mailbox failed with " 17676 "status x%x add_status x%x\n", 17677 shdr_status, shdr_add_status); 17678 17679 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17680 } 17681 17682 /** 17683 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 17684 * @phba: pointer to lpfc hba data structure. 17685 * @fcf_record: pointer to the initialized fcf record to add. 17686 * 17687 * This routine is invoked to manually add a single FCF record. The caller 17688 * must pass a completely initialized FCF_Record. This routine takes 17689 * care of the nonembedded mailbox operations. 
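* Returns 0 on success, -ENOMEM if the mailbox or its non-embedded DMA memory cannot be allocated, or -EIO if the mailbox cannot be issued.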
17690 **/ 17691 int 17692 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 17693 { 17694 int rc = 0; 17695 LPFC_MBOXQ_t *mboxq; 17696 uint8_t *bytep; 17697 void *virt_addr; 17698 struct lpfc_mbx_sge sge; 17699 uint32_t alloc_len, req_len; 17700 uint32_t fcfindex; 17701 17702 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17703 if (!mboxq) { 17704 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17705 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 17706 return -ENOMEM; 17707 } 17708 17709 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 17710 sizeof(uint32_t); 17711 17712 /* Allocate DMA memory and set up the non-embedded mailbox command */ 17713 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 17714 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 17715 req_len, LPFC_SLI4_MBX_NEMBED); 17716 if (alloc_len < req_len) { 17717 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17718 "2523 Allocated DMA memory size (x%x) is " 17719 "less than the requested DMA memory " 17720 "size (x%x)\n", alloc_len, req_len); 17721 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17722 return -ENOMEM; 17723 } 17724 17725 /* 17726 * Get the first SGE entry from the non-embedded DMA memory. This 17727 * routine only uses a single SGE. 17728 */ 17729 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 17730 virt_addr = mboxq->sge_array->addr[0]; 17731 /* 17732 * Configure the FCF record for FCFI 0. This is the driver's 17733 * hardcoded default and gets used in nonFIP mode. 17734 */ 17735 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 17736 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 17737 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 17738 17739 /* 17740 * Copy the fcf_index and the FCF Record Data. The data starts after 17741 * the FCoE header plus word10. The data copy needs to be endian 17742 * correct. 17743 */ 17744 bytep += sizeof(uint32_t); 17745 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 17746 mboxq->vport = phba->pport; 17747 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 17748 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17749 if (rc == MBX_NOT_FINISHED) { 17750 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17751 "2515 ADD_FCF_RECORD mailbox failed with " 17752 "status 0x%x\n", rc); 17753 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17754 rc = -EIO; 17755 } else 17756 rc = 0; 17757 17758 return rc; 17759 } 17760 17761 /** 17762 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 17763 * @phba: pointer to lpfc hba data structure. 17764 * @fcf_record: pointer to the fcf record to write the default data. 17765 * @fcf_index: FCF table entry index. 17766 * 17767 * This routine is invoked to build the driver's default FCF record. The 17768 * values used are hardcoded. This routine handles memory initialization. 
17769 * 17770 **/ 17771 void 17772 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 17773 struct fcf_record *fcf_record, 17774 uint16_t fcf_index) 17775 { 17776 memset(fcf_record, 0, sizeof(struct fcf_record)); 17777 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 17778 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 17779 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 17780 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 17781 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 17782 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 17783 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 17784 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 17785 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 17786 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 17787 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 17788 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 17789 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 17790 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 17791 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 17792 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 17793 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 17794 /* Set the VLAN bit map */ 17795 if (phba->valid_vlan) { 17796 fcf_record->vlan_bitmap[phba->vlan_id / 8] 17797 = 1 << (phba->vlan_id % 8); 17798 } 17799 } 17800 17801 /** 17802 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 17803 * @phba: pointer to lpfc hba data structure. 17804 * @fcf_index: FCF table entry offset. 17805 * 17806 * This routine is invoked to scan the entire FCF table by reading FCF 17807 * record and processing it one at a time starting from the @fcf_index 17808 * for initial FCF discovery or fast FCF failover rediscovery. 17809 * 17810 * Return 0 if the mailbox command is submitted successfully, none 0 17811 * otherwise. 
17812 **/ 17813 int 17814 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 17815 { 17816 int rc = 0, error; 17817 LPFC_MBOXQ_t *mboxq; 17818 17819 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 17820 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 17821 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17822 if (!mboxq) { 17823 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 17824 "2000 Failed to allocate mbox for " 17825 "READ_FCF cmd\n"); 17826 error = -ENOMEM; 17827 goto fail_fcf_scan; 17828 } 17829 /* Construct the read FCF record mailbox command */ 17830 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 17831 if (rc) { 17832 error = -EINVAL; 17833 goto fail_fcf_scan; 17834 } 17835 /* Issue the mailbox command asynchronously */ 17836 mboxq->vport = phba->pport; 17837 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 17838 17839 spin_lock_irq(&phba->hbalock); 17840 phba->hba_flag |= FCF_TS_INPROG; 17841 spin_unlock_irq(&phba->hbalock); 17842 17843 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17844 if (rc == MBX_NOT_FINISHED) 17845 error = -EIO; 17846 else { 17847 /* Reset eligible FCF count for new scan */ 17848 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 17849 phba->fcf.eligible_fcf_cnt = 0; 17850 error = 0; 17851 } 17852 fail_fcf_scan: 17853 if (error) { 17854 if (mboxq) 17855 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17856 /* FCF scan failed, clear FCF_TS_INPROG flag */ 17857 spin_lock_irq(&phba->hbalock); 17858 phba->hba_flag &= ~FCF_TS_INPROG; 17859 spin_unlock_irq(&phba->hbalock); 17860 } 17861 return error; 17862 } 17863 17864 /** 17865 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 17866 * @phba: pointer to lpfc hba data structure. 17867 * @fcf_index: FCF table entry offset. 17868 * 17869 * This routine is invoked to read an FCF record indicated by @fcf_index 17870 * and to use it for FLOGI roundrobin FCF failover. 17871 * 17872 * Return 0 if the mailbox command is submitted successfully, none 0 17873 * otherwise. 17874 **/ 17875 int 17876 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 17877 { 17878 int rc = 0, error; 17879 LPFC_MBOXQ_t *mboxq; 17880 17881 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17882 if (!mboxq) { 17883 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 17884 "2763 Failed to allocate mbox for " 17885 "READ_FCF cmd\n"); 17886 error = -ENOMEM; 17887 goto fail_fcf_read; 17888 } 17889 /* Construct the read FCF record mailbox command */ 17890 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 17891 if (rc) { 17892 error = -EINVAL; 17893 goto fail_fcf_read; 17894 } 17895 /* Issue the mailbox command asynchronously */ 17896 mboxq->vport = phba->pport; 17897 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 17898 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17899 if (rc == MBX_NOT_FINISHED) 17900 error = -EIO; 17901 else 17902 error = 0; 17903 17904 fail_fcf_read: 17905 if (error && mboxq) 17906 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17907 return error; 17908 } 17909 17910 /** 17911 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 17912 * @phba: pointer to lpfc hba data structure. 17913 * @fcf_index: FCF table entry offset. 17914 * 17915 * This routine is invoked to read an FCF record indicated by @fcf_index to 17916 * determine whether it's eligible for FLOGI roundrobin failover list. 17917 * 17918 * Return 0 if the mailbox command is submitted successfully, none 0 17919 * otherwise. 
17920 **/ 17921 int 17922 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 17923 { 17924 int rc = 0, error; 17925 LPFC_MBOXQ_t *mboxq; 17926 17927 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 17928 if (!mboxq) { 17929 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 17930 "2758 Failed to allocate mbox for " 17931 "READ_FCF cmd\n"); 17932 error = -ENOMEM; 17933 goto fail_fcf_read; 17934 } 17935 /* Construct the read FCF record mailbox command */ 17936 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 17937 if (rc) { 17938 error = -EINVAL; 17939 goto fail_fcf_read; 17940 } 17941 /* Issue the mailbox command asynchronously */ 17942 mboxq->vport = phba->pport; 17943 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 17944 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 17945 if (rc == MBX_NOT_FINISHED) 17946 error = -EIO; 17947 else 17948 error = 0; 17949 17950 fail_fcf_read: 17951 if (error && mboxq) 17952 lpfc_sli4_mbox_cmd_free(phba, mboxq); 17953 return error; 17954 } 17955 17956 /** 17957 * lpfc_check_next_fcf_pri_level 17958 * phba pointer to the lpfc_hba struct for this port. 17959 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 17960 * routine when the rr_bmask is empty. The FCF indecies are put into the 17961 * rr_bmask based on their priority level. Starting from the highest priority 17962 * to the lowest. The most likely FCF candidate will be in the highest 17963 * priority group. When this routine is called it searches the fcf_pri list for 17964 * next lowest priority group and repopulates the rr_bmask with only those 17965 * fcf_indexes. 17966 * returns: 17967 * 1=success 0=failure 17968 **/ 17969 static int 17970 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 17971 { 17972 uint16_t next_fcf_pri; 17973 uint16_t last_index; 17974 struct lpfc_fcf_pri *fcf_pri; 17975 int rc; 17976 int ret = 0; 17977 17978 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 17979 LPFC_SLI4_FCF_TBL_INDX_MAX); 17980 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 17981 "3060 Last IDX %d\n", last_index); 17982 17983 /* Verify the priority list has 2 or more entries */ 17984 spin_lock_irq(&phba->hbalock); 17985 if (list_empty(&phba->fcf.fcf_pri_list) || 17986 list_is_singular(&phba->fcf.fcf_pri_list)) { 17987 spin_unlock_irq(&phba->hbalock); 17988 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 17989 "3061 Last IDX %d\n", last_index); 17990 return 0; /* Empty rr list */ 17991 } 17992 spin_unlock_irq(&phba->hbalock); 17993 17994 next_fcf_pri = 0; 17995 /* 17996 * Clear the rr_bmask and set all of the bits that are at this 17997 * priority. 17998 */ 17999 memset(phba->fcf.fcf_rr_bmask, 0, 18000 sizeof(*phba->fcf.fcf_rr_bmask)); 18001 spin_lock_irq(&phba->hbalock); 18002 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18003 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 18004 continue; 18005 /* 18006 * the 1st priority that has not FLOGI failed 18007 * will be the highest. 18008 */ 18009 if (!next_fcf_pri) 18010 next_fcf_pri = fcf_pri->fcf_rec.priority; 18011 spin_unlock_irq(&phba->hbalock); 18012 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18013 rc = lpfc_sli4_fcf_rr_index_set(phba, 18014 fcf_pri->fcf_rec.fcf_index); 18015 if (rc) 18016 return 0; 18017 } 18018 spin_lock_irq(&phba->hbalock); 18019 } 18020 /* 18021 * if next_fcf_pri was not set above and the list is not empty then 18022 * we have failed flogis on all of them. So reset flogi failed 18023 * and start at the beginning. 
18024 */ 18025 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 18026 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 18027 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 18028 /* 18029 * the 1st priority that has not FLOGI failed 18030 * will be the highest. 18031 */ 18032 if (!next_fcf_pri) 18033 next_fcf_pri = fcf_pri->fcf_rec.priority; 18034 spin_unlock_irq(&phba->hbalock); 18035 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 18036 rc = lpfc_sli4_fcf_rr_index_set(phba, 18037 fcf_pri->fcf_rec.fcf_index); 18038 if (rc) 18039 return 0; 18040 } 18041 spin_lock_irq(&phba->hbalock); 18042 } 18043 } else 18044 ret = 1; 18045 spin_unlock_irq(&phba->hbalock); 18046 18047 return ret; 18048 } 18049 /** 18050 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 18051 * @phba: pointer to lpfc hba data structure. 18052 * 18053 * This routine is to get the next eligible FCF record index in a round 18054 * robin fashion. If the next eligible FCF record index equals to the 18055 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 18056 * shall be returned, otherwise, the next eligible FCF record's index 18057 * shall be returned. 18058 **/ 18059 uint16_t 18060 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 18061 { 18062 uint16_t next_fcf_index; 18063 18064 initial_priority: 18065 /* Search start from next bit of currently registered FCF index */ 18066 next_fcf_index = phba->fcf.current_rec.fcf_indx; 18067 18068 next_priority: 18069 /* Determine the next fcf index to check */ 18070 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 18071 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18072 LPFC_SLI4_FCF_TBL_INDX_MAX, 18073 next_fcf_index); 18074 18075 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 18076 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18077 /* 18078 * If we have wrapped then we need to clear the bits that 18079 * have been tested so that we can detect when we should 18080 * change the priority level. 18081 */ 18082 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 18083 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 18084 } 18085 18086 18087 /* Check roundrobin failover list empty condition */ 18088 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 18089 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 18090 /* 18091 * If next fcf index is not found check if there are lower 18092 * Priority level fcf's in the fcf_priority list. 18093 * Set up the rr_bmask with all of the avaiable fcf bits 18094 * at that level and continue the selection process. 
18095 */ 18096 if (lpfc_check_next_fcf_pri_level(phba)) 18097 goto initial_priority; 18098 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18099 "2844 No roundrobin failover FCF available\n"); 18100 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 18101 return LPFC_FCOE_FCF_NEXT_NONE; 18102 else { 18103 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 18104 "3063 Only FCF available idx %d, flag %x\n", 18105 next_fcf_index, 18106 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 18107 return next_fcf_index; 18108 } 18109 } 18110 18111 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 18112 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 18113 LPFC_FCF_FLOGI_FAILED) { 18114 if (list_is_singular(&phba->fcf.fcf_pri_list)) 18115 return LPFC_FCOE_FCF_NEXT_NONE; 18116 18117 goto next_priority; 18118 } 18119 18120 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18121 "2845 Get next roundrobin failover FCF (x%x)\n", 18122 next_fcf_index); 18123 18124 return next_fcf_index; 18125 } 18126 18127 /** 18128 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 18129 * @phba: pointer to lpfc hba data structure. 18130 * 18131 * This routine sets the FCF record index in to the eligible bmask for 18132 * roundrobin failover search. It checks to make sure that the index 18133 * does not go beyond the range of the driver allocated bmask dimension 18134 * before setting the bit. 18135 * 18136 * Returns 0 if the index bit successfully set, otherwise, it returns 18137 * -EINVAL. 18138 **/ 18139 int 18140 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 18141 { 18142 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18143 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18144 "2610 FCF (x%x) reached driver's book " 18145 "keeping dimension:x%x\n", 18146 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18147 return -EINVAL; 18148 } 18149 /* Set the eligible FCF record index bmask */ 18150 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18151 18152 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18153 "2790 Set FCF (x%x) to roundrobin FCF failover " 18154 "bmask\n", fcf_index); 18155 18156 return 0; 18157 } 18158 18159 /** 18160 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 18161 * @phba: pointer to lpfc hba data structure. 18162 * 18163 * This routine clears the FCF record index from the eligible bmask for 18164 * roundrobin failover search. It checks to make sure that the index 18165 * does not go beyond the range of the driver allocated bmask dimension 18166 * before clearing the bit. 
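* The matching @fcf_index entry is also removed from the driver's fcf_pri list before the bit is cleared from the roundrobin bmask.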
18167 **/ 18168 void 18169 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 18170 { 18171 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 18172 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 18173 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18174 "2762 FCF (x%x) reached driver's book " 18175 "keeping dimension:x%x\n", 18176 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 18177 return; 18178 } 18179 /* Clear the eligible FCF record index bmask */ 18180 spin_lock_irq(&phba->hbalock); 18181 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 18182 list) { 18183 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 18184 list_del_init(&fcf_pri->list); 18185 break; 18186 } 18187 } 18188 spin_unlock_irq(&phba->hbalock); 18189 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 18190 18191 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18192 "2791 Clear FCF (x%x) from roundrobin failover " 18193 "bmask\n", fcf_index); 18194 } 18195 18196 /** 18197 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 18198 * @phba: pointer to lpfc hba data structure. 18199 * 18200 * This routine is the completion routine for the rediscover FCF table mailbox 18201 * command. If the mailbox command returned failure, it will try to stop the 18202 * FCF rediscover wait timer. 18203 **/ 18204 static void 18205 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 18206 { 18207 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18208 uint32_t shdr_status, shdr_add_status; 18209 18210 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18211 18212 shdr_status = bf_get(lpfc_mbox_hdr_status, 18213 &redisc_fcf->header.cfg_shdr.response); 18214 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 18215 &redisc_fcf->header.cfg_shdr.response); 18216 if (shdr_status || shdr_add_status) { 18217 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 18218 "2746 Requesting for FCF rediscovery failed " 18219 "status x%x add_status x%x\n", 18220 shdr_status, shdr_add_status); 18221 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 18222 spin_lock_irq(&phba->hbalock); 18223 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 18224 spin_unlock_irq(&phba->hbalock); 18225 /* 18226 * CVL event triggered FCF rediscover request failed, 18227 * last resort to re-try current registered FCF entry. 18228 */ 18229 lpfc_retry_pport_discovery(phba); 18230 } else { 18231 spin_lock_irq(&phba->hbalock); 18232 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 18233 spin_unlock_irq(&phba->hbalock); 18234 /* 18235 * DEAD FCF event triggered FCF rediscover request 18236 * failed, last resort to fail over as a link down 18237 * to FCF registration. 18238 */ 18239 lpfc_sli4_fcf_dead_failthrough(phba); 18240 } 18241 } else { 18242 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 18243 "2775 Start FCF rediscover quiescent timer\n"); 18244 /* 18245 * Start FCF rediscovery wait timer for pending FCF 18246 * before rescan FCF record table. 18247 */ 18248 lpfc_fcf_redisc_wait_start_timer(phba); 18249 } 18250 18251 mempool_free(mbox, phba->mbox_mem_pool); 18252 } 18253 18254 /** 18255 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 18256 * @phba: pointer to lpfc hba data structure. 18257 * 18258 * This routine is invoked to request for rediscovery of the entire FCF table 18259 * by the port. 
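* Returns 0 if the mailbox command is submitted successfully, -ENOMEM if the mailbox could not be allocated, or -EIO if it could not be issued.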
18260 **/ 18261 int 18262 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 18263 { 18264 LPFC_MBOXQ_t *mbox; 18265 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 18266 int rc, length; 18267 18268 /* Cancel retry delay timers to all vports before FCF rediscover */ 18269 lpfc_cancel_all_vport_retry_delay_timer(phba); 18270 18271 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18272 if (!mbox) { 18273 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 18274 "2745 Failed to allocate mbox for " 18275 "requesting FCF rediscover.\n"); 18276 return -ENOMEM; 18277 } 18278 18279 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 18280 sizeof(struct lpfc_sli4_cfg_mhdr)); 18281 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 18282 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 18283 length, LPFC_SLI4_MBX_EMBED); 18284 18285 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 18286 /* Set count to 0 for invalidating the entire FCF database */ 18287 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 18288 18289 /* Issue the mailbox command asynchronously */ 18290 mbox->vport = phba->pport; 18291 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 18292 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 18293 18294 if (rc == MBX_NOT_FINISHED) { 18295 mempool_free(mbox, phba->mbox_mem_pool); 18296 return -EIO; 18297 } 18298 return 0; 18299 } 18300 18301 /** 18302 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 18303 * @phba: pointer to lpfc hba data structure. 18304 * 18305 * This function is the failover routine as a last resort to the FCF DEAD 18306 * event when driver failed to perform fast FCF failover. 18307 **/ 18308 void 18309 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 18310 { 18311 uint32_t link_state; 18312 18313 /* 18314 * Last resort as FCF DEAD event failover will treat this as 18315 * a link down, but save the link state because we don't want 18316 * it to be changed to Link Down unless it is already down. 18317 */ 18318 link_state = phba->link_state; 18319 lpfc_linkdown(phba); 18320 phba->link_state = link_state; 18321 18322 /* Unregister FCF if no devices connected to it */ 18323 lpfc_unregister_unused_fcf(phba); 18324 } 18325 18326 /** 18327 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 18328 * @phba: pointer to lpfc hba data structure. 18329 * @rgn23_data: pointer to configure region 23 data. 18330 * 18331 * This function gets SLI3 port configure region 23 data through memory dump 18332 * mailbox command. When it successfully retrieves data, the size of the data 18333 * will be returned, otherwise, 0 will be returned. 
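* The caller must provide a @rgn23_data buffer of at least DMP_RGN23_SIZE bytes; the routine never copies beyond that size.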
18334 **/ 18335 static uint32_t 18336 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18337 { 18338 LPFC_MBOXQ_t *pmb = NULL; 18339 MAILBOX_t *mb; 18340 uint32_t offset = 0; 18341 int rc; 18342 18343 if (!rgn23_data) 18344 return 0; 18345 18346 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18347 if (!pmb) { 18348 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18349 "2600 failed to allocate mailbox memory\n"); 18350 return 0; 18351 } 18352 mb = &pmb->u.mb; 18353 18354 do { 18355 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 18356 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 18357 18358 if (rc != MBX_SUCCESS) { 18359 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 18360 "2601 failed to read config " 18361 "region 23, rc 0x%x Status 0x%x\n", 18362 rc, mb->mbxStatus); 18363 mb->un.varDmp.word_cnt = 0; 18364 } 18365 /* 18366 * dump mem may return a zero when finished or we got a 18367 * mailbox error, either way we are done. 18368 */ 18369 if (mb->un.varDmp.word_cnt == 0) 18370 break; 18371 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 18372 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 18373 18374 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 18375 rgn23_data + offset, 18376 mb->un.varDmp.word_cnt); 18377 offset += mb->un.varDmp.word_cnt; 18378 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 18379 18380 mempool_free(pmb, phba->mbox_mem_pool); 18381 return offset; 18382 } 18383 18384 /** 18385 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 18386 * @phba: pointer to lpfc hba data structure. 18387 * @rgn23_data: pointer to configure region 23 data. 18388 * 18389 * This function gets SLI4 port configure region 23 data through memory dump 18390 * mailbox command. When it successfully retrieves data, the size of the data 18391 * will be returned, otherwise, 0 will be returned. 18392 **/ 18393 static uint32_t 18394 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 18395 { 18396 LPFC_MBOXQ_t *mboxq = NULL; 18397 struct lpfc_dmabuf *mp = NULL; 18398 struct lpfc_mqe *mqe; 18399 uint32_t data_length = 0; 18400 int rc; 18401 18402 if (!rgn23_data) 18403 return 0; 18404 18405 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 18406 if (!mboxq) { 18407 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18408 "3105 failed to allocate mailbox memory\n"); 18409 return 0; 18410 } 18411 18412 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 18413 goto out; 18414 mqe = &mboxq->u.mqe; 18415 mp = (struct lpfc_dmabuf *) mboxq->context1; 18416 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 18417 if (rc) 18418 goto out; 18419 data_length = mqe->un.mb_words[5]; 18420 if (data_length == 0) 18421 goto out; 18422 if (data_length > DMP_RGN23_SIZE) { 18423 data_length = 0; 18424 goto out; 18425 } 18426 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 18427 out: 18428 mempool_free(mboxq, phba->mbox_mem_pool); 18429 if (mp) { 18430 lpfc_mbuf_free(phba, mp->virt, mp->phys); 18431 kfree(mp); 18432 } 18433 return data_length; 18434 } 18435 18436 /** 18437 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 18438 * @phba: pointer to lpfc hba data structure. 18439 * 18440 * This function read region 23 and parse TLV for port status to 18441 * decide if the user disaled the port. If the TLV indicates the 18442 * port is disabled, the hba_flag is set accordingly. 
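* Specifically, the LINK_DISABLED bit is set in phba->hba_flag when the port state sub-TLV marks the port as disabled.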
18443 **/ 18444 void 18445 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 18446 { 18447 uint8_t *rgn23_data = NULL; 18448 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 18449 uint32_t offset = 0; 18450 18451 /* Get adapter Region 23 data */ 18452 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 18453 if (!rgn23_data) 18454 goto out; 18455 18456 if (phba->sli_rev < LPFC_SLI_REV4) 18457 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 18458 else { 18459 if_type = bf_get(lpfc_sli_intf_if_type, 18460 &phba->sli4_hba.sli_intf); 18461 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 18462 goto out; 18463 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 18464 } 18465 18466 if (!data_size) 18467 goto out; 18468 18469 /* Check the region signature first */ 18470 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 18471 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18472 "2619 Config region 23 has bad signature\n"); 18473 goto out; 18474 } 18475 offset += 4; 18476 18477 /* Check the data structure version */ 18478 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 18479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 18480 "2620 Config region 23 has bad version\n"); 18481 goto out; 18482 } 18483 offset += 4; 18484 18485 /* Parse TLV entries in the region */ 18486 while (offset < data_size) { 18487 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 18488 break; 18489 /* 18490 * If the TLV is not driver specific TLV or driver id is 18491 * not linux driver id, skip the record. 18492 */ 18493 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 18494 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 18495 (rgn23_data[offset + 3] != 0)) { 18496 offset += rgn23_data[offset + 1] * 4 + 4; 18497 continue; 18498 } 18499 18500 /* Driver found a driver specific TLV in the config region */ 18501 sub_tlv_len = rgn23_data[offset + 1] * 4; 18502 offset += 4; 18503 tlv_offset = 0; 18504 18505 /* 18506 * Search for configured port state sub-TLV. 18507 */ 18508 while ((offset < data_size) && 18509 (tlv_offset < sub_tlv_len)) { 18510 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 18511 offset += 4; 18512 tlv_offset += 4; 18513 break; 18514 } 18515 if (rgn23_data[offset] != PORT_STE_TYPE) { 18516 offset += rgn23_data[offset + 1] * 4 + 4; 18517 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 18518 continue; 18519 } 18520 18521 /* This HBA contains PORT_STE configured */ 18522 if (!rgn23_data[offset + 2]) 18523 phba->hba_flag |= LINK_DISABLED; 18524 18525 goto out; 18526 } 18527 } 18528 18529 out: 18530 kfree(rgn23_data); 18531 return; 18532 } 18533 18534 /** 18535 * lpfc_wr_object - write an object to the firmware 18536 * @phba: HBA structure that indicates port to create a queue on. 18537 * @dmabuf_list: list of dmabufs to write to the port. 18538 * @size: the total byte value of the objects to write to the port. 18539 * @offset: the current offset to be used to start the transfer. 18540 * 18541 * This routine will create a wr_object mailbox command to send to the port. 18542 * the mailbox command will be constructed using the dma buffers described in 18543 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 18544 * BDEs that the imbedded mailbox can support. The @offset variable will be 18545 * used to indicate the starting offset of the transfer and will also return 18546 * the offset after the write object mailbox has completed. @size is used to 18547 * determine the end of the object and whether the eof bit should be set. 
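* At most LPFC_MBX_WR_CONFIG_MAX_BDE buffers from @dmabuf_list are consumed per call, so writing a large object may take several calls, each resuming at the returned @offset.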
 *
 * Return 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Return a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailbox queue and cleans up all
 * REG_LOGIN and REG_VPI mailbox commands associated with the vport. This
 * function is called when the driver wants to restart discovery of the
 * vport due to a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Clean up any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport, ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/

uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe128;
	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
	uint32_t txq_cnt = 0;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDEs is
 * converted to sli4_sges. If the WQE contains a single
 * BDE then it is converted to a single sli4_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
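 *
 * As an illustrative example (addresses and lengths are hypothetical), a
 * two-entry BPL for a CMD_GEN_REQUEST64_WQE
 *
 *	bpl[0] = { addr A, len 0x400, BUFF_TYPE_BDE_64  }   request payload
 *	bpl[1] = { addr B, len 0x100, BUFF_TYPE_BDE_64I }   reply payload
 *
 * would be rewritten as
 *
 *	sgl[0] = { addr A, len 0x400, offset 0x0, last = 0 }
 *	sgl[1] = { addr B, len 0x100, offset 0x0, last = 1 }
 *
 * because the running offset is reset when the first inbound (reply) entry
 * is seen and the last flag is set only on the final entry.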
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
				       bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}

/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @ring_number: Base sli ring number
 * @pwqe: Pointer to command WQE.
 *
 * This routine posts the WQE in @pwqe to the work queue that matches the
 * I/O type (NVME LS, NVME FCP, or NVMET), assigning an XRI and SGL where
 * one is required, and tracks the request on the ring's txcmplq.
 *
 * Returns 0 on success; WQE_BUSY, WQE_ERROR, or the work queue put error
 * code on failure.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		spin_lock_irqsave(&pring->ring_lock, iflags);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment.
		 */
		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
		bf_set(wqe_cqid, &wqe->generic.wqe_com,
		       phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
		return 0;
	}
	return WQE_ERROR;
}
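
/*
 * Usage sketch for lpfc_sli4_issue_wqe() (illustrative only; the variables
 * shown are hypothetical). A caller is expected to mark the iocbq with the
 * appropriate LPFC_IO_* flag and, for NVME FCP or NVMET traffic, choose the
 * hardware queue index (hba_wqidx) before issuing, then treat a WQE_BUSY
 * return as a retryable out-of-resource condition:
 *
 *	pwqe->iocb_flag |= LPFC_IO_NVME;
 *	pwqe->hba_wqidx = idx;			(chosen NVME I/O channel)
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, pwqe);
 *	if (rc == WQE_BUSY)
 *		... no resources right now, retry the request later ...
 *	else if (rc)
 *		... hard failure, fail the I/O back to the upper layer ...
 */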