1 /******************************************************************* 2 * This file is part of the Emulex Linux Device Driver for * 3 * Fibre Channel Host Bus Adapters. * 4 * Copyright (C) 2004-2014 Emulex. All rights reserved. * 5 * EMULEX and SLI are trademarks of Emulex. * 6 * www.emulex.com * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 8 * * 9 * This program is free software; you can redistribute it and/or * 10 * modify it under the terms of version 2 of the GNU General * 11 * Public License as published by the Free Software Foundation. * 12 * This program is distributed in the hope that it will be useful. * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 18 * more details, a copy of which can be found in the file COPYING * 19 * included with this package. * 20 *******************************************************************/ 21 22 #include <linux/blkdev.h> 23 #include <linux/pci.h> 24 #include <linux/interrupt.h> 25 #include <linux/delay.h> 26 #include <linux/slab.h> 27 28 #include <scsi/scsi.h> 29 #include <scsi/scsi_cmnd.h> 30 #include <scsi/scsi_device.h> 31 #include <scsi/scsi_host.h> 32 #include <scsi/scsi_transport_fc.h> 33 #include <scsi/fc/fc_fs.h> 34 #include <linux/aer.h> 35 36 #include "lpfc_hw4.h" 37 #include "lpfc_hw.h" 38 #include "lpfc_sli.h" 39 #include "lpfc_sli4.h" 40 #include "lpfc_nl.h" 41 #include "lpfc_disc.h" 42 #include "lpfc_scsi.h" 43 #include "lpfc.h" 44 #include "lpfc_crtn.h" 45 #include "lpfc_logmsg.h" 46 #include "lpfc_compat.h" 47 #include "lpfc_debugfs.h" 48 #include "lpfc_vport.h" 49 50 /* There are only four IOCB completion types. */ 51 typedef enum _lpfc_iocb_type { 52 LPFC_UNKNOWN_IOCB, 53 LPFC_UNSOL_IOCB, 54 LPFC_SOL_IOCB, 55 LPFC_ABORT_IOCB 56 } lpfc_iocb_type; 57 58 59 /* Provide function prototypes local to this module. */ 60 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, 61 uint32_t); 62 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, 63 uint8_t *, uint32_t *); 64 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *, 65 struct lpfc_iocbq *); 66 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *, 67 struct hbq_dmabuf *); 68 static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *, 69 struct lpfc_cqe *); 70 static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *, 71 int); 72 static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *, 73 uint32_t); 74 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); 75 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); 76 77 static IOCB_t * 78 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) 79 { 80 return &iocbq->iocb; 81 } 82 83 /** 84 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue 85 * @q: The Work Queue to operate on. 86 * @wqe: The work Queue Entry to put on the Work queue. 87 * 88 * This routine will copy the contents of @wqe to the next available entry on 89 * the @q. This function will then ring the Work Queue Doorbell to signal the 90 * HBA to start processing the Work Queue Entry. This function returns 0 if 91 * successful. If no entries are available on @q then this function will return 92 * -ENOMEM. 93 * The caller is expected to hold the hbalock when calling this routine. 94 **/ 95 static uint32_t 96 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) 97 { 98 union lpfc_wqe *temp_wqe; 99 struct lpfc_register doorbell; 100 uint32_t host_index; 101 uint32_t idx; 102 103 /* sanity check on queue memory */ 104 if (unlikely(!q)) 105 return -ENOMEM; 106 temp_wqe = q->qe[q->host_index].wqe; 107 108 /* If the host has not yet processed the next entry then we are done */ 109 idx = ((q->host_index + 1) % q->entry_count); 110 if (idx == q->hba_index) { 111 q->WQ_overflow++; 112 return -ENOMEM; 113 } 114 q->WQ_posted++; 115 /* set consumption flag every once in a while */ 116 if (!((q->host_index + 1) % q->entry_repost)) 117 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); 118 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) 119 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); 120 lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); 121 122 /* Update the host index before invoking device */ 123 host_index = q->host_index; 124 125 q->host_index = idx; 126 127 /* Ring Doorbell */ 128 doorbell.word0 = 0; 129 if (q->db_format == LPFC_DB_LIST_FORMAT) { 130 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); 131 bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); 132 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); 133 } else if (q->db_format == LPFC_DB_RING_FORMAT) { 134 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); 135 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); 136 } else { 137 return -EINVAL; 138 } 139 writel(doorbell.word0, q->db_regaddr); 140 141 return 0; 142 } 143 144 /** 145 * lpfc_sli4_wq_release - Updates internal hba index for WQ 146 * @q: The Work Queue to operate on. 147 * @index: The index to advance the hba index to. 148 * 149 * This routine will update the HBA index of a queue to reflect consumption of 150 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed 151 * an entry the host calls this function to update the queue's internal 152 * pointers. This routine returns the number of entries that were consumed by 153 * the HBA. 154 **/ 155 static uint32_t 156 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) 157 { 158 uint32_t released = 0; 159 160 /* sanity check on queue memory */ 161 if (unlikely(!q)) 162 return 0; 163 164 if (q->hba_index == index) 165 return 0; 166 do { 167 q->hba_index = ((q->hba_index + 1) % q->entry_count); 168 released++; 169 } while (q->hba_index != index); 170 return released; 171 } 172 173 /** 174 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue 175 * @q: The Mailbox Queue to operate on. 176 * @wqe: The Mailbox Queue Entry to put on the Work queue. 177 * 178 * This routine will copy the contents of @mqe to the next available entry on 179 * the @q. This function will then ring the Work Queue Doorbell to signal the 180 * HBA to start processing the Work Queue Entry. This function returns 0 if 181 * successful. If no entries are available on @q then this function will return 182 * -ENOMEM. 183 * The caller is expected to hold the hbalock when calling this routine. 184 **/ 185 static uint32_t 186 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) 187 { 188 struct lpfc_mqe *temp_mqe; 189 struct lpfc_register doorbell; 190 uint32_t host_index; 191 192 /* sanity check on queue memory */ 193 if (unlikely(!q)) 194 return -ENOMEM; 195 temp_mqe = q->qe[q->host_index].mqe; 196 197 /* If the host has not yet processed the next entry then we are done */ 198 if (((q->host_index + 1) % q->entry_count) == q->hba_index) 199 return -ENOMEM; 200 lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); 201 /* Save off the mailbox pointer for completion */ 202 q->phba->mbox = (MAILBOX_t *)temp_mqe; 203 204 /* Update the host index before invoking device */ 205 host_index = q->host_index; 206 q->host_index = ((q->host_index + 1) % q->entry_count); 207 208 /* Ring Doorbell */ 209 doorbell.word0 = 0; 210 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); 211 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); 212 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); 213 return 0; 214 } 215 216 /** 217 * lpfc_sli4_mq_release - Updates internal hba index for MQ 218 * @q: The Mailbox Queue to operate on. 219 * 220 * This routine will update the HBA index of a queue to reflect consumption of 221 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed 222 * an entry the host calls this function to update the queue's internal 223 * pointers. This routine returns the number of entries that were consumed by 224 * the HBA. 225 **/ 226 static uint32_t 227 lpfc_sli4_mq_release(struct lpfc_queue *q) 228 { 229 /* sanity check on queue memory */ 230 if (unlikely(!q)) 231 return 0; 232 233 /* Clear the mailbox pointer for completion */ 234 q->phba->mbox = NULL; 235 q->hba_index = ((q->hba_index + 1) % q->entry_count); 236 return 1; 237 } 238 239 /** 240 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ 241 * @q: The Event Queue to get the first valid EQE from 242 * 243 * This routine will get the first valid Event Queue Entry from @q, update 244 * the queue's internal hba index, and return the EQE. If no valid EQEs are in 245 * the Queue (no more work to do), or the Queue is full of EQEs that have been 246 * processed, but not popped back to the HBA then this routine will return NULL. 247 **/ 248 static struct lpfc_eqe * 249 lpfc_sli4_eq_get(struct lpfc_queue *q) 250 { 251 struct lpfc_eqe *eqe; 252 uint32_t idx; 253 254 /* sanity check on queue memory */ 255 if (unlikely(!q)) 256 return NULL; 257 eqe = q->qe[q->hba_index].eqe; 258 259 /* If the next EQE is not valid then we are done */ 260 if (!bf_get_le32(lpfc_eqe_valid, eqe)) 261 return NULL; 262 /* If the host has not yet processed the next entry then we are done */ 263 idx = ((q->hba_index + 1) % q->entry_count); 264 if (idx == q->host_index) 265 return NULL; 266 267 q->hba_index = idx; 268 269 /* 270 * insert barrier for instruction interlock : data from the hardware 271 * must have the valid bit checked before it can be copied and acted 272 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative 273 * instructions allowing action on content before valid bit checked, 274 * add barrier here as well. May not be needed as "content" is a 275 * single 32-bit entity here (vs multi word structure for cq's). 276 */ 277 mb(); 278 return eqe; 279 } 280 281 /** 282 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ 283 * @q: The Event Queue to disable interrupts 284 * 285 **/ 286 static inline void 287 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) 288 { 289 struct lpfc_register doorbell; 290 291 doorbell.word0 = 0; 292 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 293 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 294 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 295 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 296 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 297 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 298 } 299 300 /** 301 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ 302 * @q: The Event Queue that the host has completed processing for. 303 * @arm: Indicates whether the host wants to arms this CQ. 304 * 305 * This routine will mark all Event Queue Entries on @q, from the last 306 * known completed entry to the last entry that was processed, as completed 307 * by clearing the valid bit for each completion queue entry. Then it will 308 * notify the HBA, by ringing the doorbell, that the EQEs have been processed. 309 * The internal host index in the @q will be updated by this routine to indicate 310 * that the host has finished processing the entries. The @arm parameter 311 * indicates that the queue should be rearmed when ringing the doorbell. 312 * 313 * This function will return the number of EQEs that were popped. 314 **/ 315 uint32_t 316 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) 317 { 318 uint32_t released = 0; 319 struct lpfc_eqe *temp_eqe; 320 struct lpfc_register doorbell; 321 322 /* sanity check on queue memory */ 323 if (unlikely(!q)) 324 return 0; 325 326 /* while there are valid entries */ 327 while (q->hba_index != q->host_index) { 328 temp_eqe = q->qe[q->host_index].eqe; 329 bf_set_le32(lpfc_eqe_valid, temp_eqe, 0); 330 released++; 331 q->host_index = ((q->host_index + 1) % q->entry_count); 332 } 333 if (unlikely(released == 0 && !arm)) 334 return 0; 335 336 /* ring doorbell for number popped */ 337 doorbell.word0 = 0; 338 if (arm) { 339 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 340 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); 341 } 342 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 343 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); 344 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell, 345 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); 346 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); 347 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 348 /* PCI read to flush PCI pipeline on re-arming for INTx mode */ 349 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) 350 readl(q->phba->sli4_hba.EQCQDBregaddr); 351 return released; 352 } 353 354 /** 355 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ 356 * @q: The Completion Queue to get the first valid CQE from 357 * 358 * This routine will get the first valid Completion Queue Entry from @q, update 359 * the queue's internal hba index, and return the CQE. If no valid CQEs are in 360 * the Queue (no more work to do), or the Queue is full of CQEs that have been 361 * processed, but not popped back to the HBA then this routine will return NULL. 362 **/ 363 static struct lpfc_cqe * 364 lpfc_sli4_cq_get(struct lpfc_queue *q) 365 { 366 struct lpfc_cqe *cqe; 367 uint32_t idx; 368 369 /* sanity check on queue memory */ 370 if (unlikely(!q)) 371 return NULL; 372 373 /* If the next CQE is not valid then we are done */ 374 if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) 375 return NULL; 376 /* If the host has not yet processed the next entry then we are done */ 377 idx = ((q->hba_index + 1) % q->entry_count); 378 if (idx == q->host_index) 379 return NULL; 380 381 cqe = q->qe[q->hba_index].cqe; 382 q->hba_index = idx; 383 384 /* 385 * insert barrier for instruction interlock : data from the hardware 386 * must have the valid bit checked before it can be copied and acted 387 * upon. Speculative instructions were allowing a bcopy at the start 388 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately 389 * after our return, to copy data before the valid bit check above 390 * was done. As such, some of the copied data was stale. The barrier 391 * ensures the check is before any data is copied. 392 */ 393 mb(); 394 return cqe; 395 } 396 397 /** 398 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ 399 * @q: The Completion Queue that the host has completed processing for. 400 * @arm: Indicates whether the host wants to arms this CQ. 401 * 402 * This routine will mark all Completion queue entries on @q, from the last 403 * known completed entry to the last entry that was processed, as completed 404 * by clearing the valid bit for each completion queue entry. Then it will 405 * notify the HBA, by ringing the doorbell, that the CQEs have been processed. 406 * The internal host index in the @q will be updated by this routine to indicate 407 * that the host has finished processing the entries. The @arm parameter 408 * indicates that the queue should be rearmed when ringing the doorbell. 409 * 410 * This function will return the number of CQEs that were released. 411 **/ 412 uint32_t 413 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) 414 { 415 uint32_t released = 0; 416 struct lpfc_cqe *temp_qe; 417 struct lpfc_register doorbell; 418 419 /* sanity check on queue memory */ 420 if (unlikely(!q)) 421 return 0; 422 /* while there are valid entries */ 423 while (q->hba_index != q->host_index) { 424 temp_qe = q->qe[q->host_index].cqe; 425 bf_set_le32(lpfc_cqe_valid, temp_qe, 0); 426 released++; 427 q->host_index = ((q->host_index + 1) % q->entry_count); 428 } 429 if (unlikely(released == 0 && !arm)) 430 return 0; 431 432 /* ring doorbell for number popped */ 433 doorbell.word0 = 0; 434 if (arm) 435 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); 436 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); 437 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); 438 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell, 439 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); 440 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id); 441 writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); 442 return released; 443 } 444 445 /** 446 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue 447 * @q: The Header Receive Queue to operate on. 448 * @wqe: The Receive Queue Entry to put on the Receive queue. 449 * 450 * This routine will copy the contents of @wqe to the next available entry on 451 * the @q. This function will then ring the Receive Queue Doorbell to signal the 452 * HBA to start processing the Receive Queue Entry. This function returns the 453 * index that the rqe was copied to if successful. If no entries are available 454 * on @q then this function will return -ENOMEM. 455 * The caller is expected to hold the hbalock when calling this routine. 456 **/ 457 static int 458 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, 459 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) 460 { 461 struct lpfc_rqe *temp_hrqe; 462 struct lpfc_rqe *temp_drqe; 463 struct lpfc_register doorbell; 464 int put_index; 465 466 /* sanity check on queue memory */ 467 if (unlikely(!hq) || unlikely(!dq)) 468 return -ENOMEM; 469 put_index = hq->host_index; 470 temp_hrqe = hq->qe[hq->host_index].rqe; 471 temp_drqe = dq->qe[dq->host_index].rqe; 472 473 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) 474 return -EINVAL; 475 if (hq->host_index != dq->host_index) 476 return -EINVAL; 477 /* If the host has not yet processed the next entry then we are done */ 478 if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) 479 return -EBUSY; 480 lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); 481 lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); 482 483 /* Update the host index to point to the next slot */ 484 hq->host_index = ((hq->host_index + 1) % hq->entry_count); 485 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 486 487 /* Ring The Header Receive Queue Doorbell */ 488 if (!(hq->host_index % hq->entry_repost)) { 489 doorbell.word0 = 0; 490 if (hq->db_format == LPFC_DB_RING_FORMAT) { 491 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell, 492 hq->entry_repost); 493 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id); 494 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) { 495 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell, 496 hq->entry_repost); 497 bf_set(lpfc_rq_db_list_fm_index, &doorbell, 498 hq->host_index); 499 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id); 500 } else { 501 return -EINVAL; 502 } 503 writel(doorbell.word0, hq->db_regaddr); 504 } 505 return put_index; 506 } 507 508 /** 509 * lpfc_sli4_rq_release - Updates internal hba index for RQ 510 * @q: The Header Receive Queue to operate on. 511 * 512 * This routine will update the HBA index of a queue to reflect consumption of 513 * one Receive Queue Entry by the HBA. When the HBA indicates that it has 514 * consumed an entry the host calls this function to update the queue's 515 * internal pointers. This routine returns the number of entries that were 516 * consumed by the HBA. 517 **/ 518 static uint32_t 519 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) 520 { 521 /* sanity check on queue memory */ 522 if (unlikely(!hq) || unlikely(!dq)) 523 return 0; 524 525 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) 526 return 0; 527 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); 528 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); 529 return 1; 530 } 531 532 /** 533 * lpfc_cmd_iocb - Get next command iocb entry in the ring 534 * @phba: Pointer to HBA context object. 535 * @pring: Pointer to driver SLI ring object. 536 * 537 * This function returns pointer to next command iocb entry 538 * in the command ring. The caller must hold hbalock to prevent 539 * other threads consume the next command iocb. 540 * SLI-2/SLI-3 provide different sized iocbs. 541 **/ 542 static inline IOCB_t * 543 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 544 { 545 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) + 546 pring->sli.sli3.cmdidx * phba->iocb_cmd_size); 547 } 548 549 /** 550 * lpfc_resp_iocb - Get next response iocb entry in the ring 551 * @phba: Pointer to HBA context object. 552 * @pring: Pointer to driver SLI ring object. 553 * 554 * This function returns pointer to next response iocb entry 555 * in the response ring. The caller must hold hbalock to make sure 556 * that no other thread consume the next response iocb. 557 * SLI-2/SLI-3 provide different sized iocbs. 558 **/ 559 static inline IOCB_t * 560 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 561 { 562 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) + 563 pring->sli.sli3.rspidx * phba->iocb_rsp_size); 564 } 565 566 /** 567 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 568 * @phba: Pointer to HBA context object. 569 * 570 * This function is called with hbalock held. This function 571 * allocates a new driver iocb object from the iocb pool. If the 572 * allocation is successful, it returns pointer to the newly 573 * allocated iocb object else it returns NULL. 574 **/ 575 struct lpfc_iocbq * 576 __lpfc_sli_get_iocbq(struct lpfc_hba *phba) 577 { 578 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list; 579 struct lpfc_iocbq * iocbq = NULL; 580 581 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list); 582 if (iocbq) 583 phba->iocb_cnt++; 584 if (phba->iocb_cnt > phba->iocb_max) 585 phba->iocb_max = phba->iocb_cnt; 586 return iocbq; 587 } 588 589 /** 590 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. 591 * @phba: Pointer to HBA context object. 592 * @xritag: XRI value. 593 * 594 * This function clears the sglq pointer from the array of acive 595 * sglq's. The xritag that is passed in is used to index into the 596 * array. Before the xritag can be used it needs to be adjusted 597 * by subtracting the xribase. 598 * 599 * Returns sglq ponter = success, NULL = Failure. 600 **/ 601 static struct lpfc_sglq * 602 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 603 { 604 struct lpfc_sglq *sglq; 605 606 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 607 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL; 608 return sglq; 609 } 610 611 /** 612 * __lpfc_get_active_sglq - Get the active sglq for this XRI. 613 * @phba: Pointer to HBA context object. 614 * @xritag: XRI value. 615 * 616 * This function returns the sglq pointer from the array of acive 617 * sglq's. The xritag that is passed in is used to index into the 618 * array. Before the xritag can be used it needs to be adjusted 619 * by subtracting the xribase. 620 * 621 * Returns sglq ponter = success, NULL = Failure. 622 **/ 623 struct lpfc_sglq * 624 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) 625 { 626 struct lpfc_sglq *sglq; 627 628 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag]; 629 return sglq; 630 } 631 632 /** 633 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap. 634 * @phba: Pointer to HBA context object. 635 * @xritag: xri used in this exchange. 636 * @rrq: The RRQ to be cleared. 637 * 638 **/ 639 void 640 lpfc_clr_rrq_active(struct lpfc_hba *phba, 641 uint16_t xritag, 642 struct lpfc_node_rrq *rrq) 643 { 644 struct lpfc_nodelist *ndlp = NULL; 645 646 if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp)) 647 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID); 648 649 /* The target DID could have been swapped (cable swap) 650 * we should use the ndlp from the findnode if it is 651 * available. 652 */ 653 if ((!ndlp) && rrq->ndlp) 654 ndlp = rrq->ndlp; 655 656 if (!ndlp) 657 goto out; 658 659 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) { 660 rrq->send_rrq = 0; 661 rrq->xritag = 0; 662 rrq->rrq_stop_time = 0; 663 } 664 out: 665 mempool_free(rrq, phba->rrq_pool); 666 } 667 668 /** 669 * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV. 670 * @phba: Pointer to HBA context object. 671 * 672 * This function is called with hbalock held. This function 673 * Checks if stop_time (ratov from setting rrq active) has 674 * been reached, if it has and the send_rrq flag is set then 675 * it will call lpfc_send_rrq. If the send_rrq flag is not set 676 * then it will just call the routine to clear the rrq and 677 * free the rrq resource. 678 * The timer is set to the next rrq that is going to expire before 679 * leaving the routine. 680 * 681 **/ 682 void 683 lpfc_handle_rrq_active(struct lpfc_hba *phba) 684 { 685 struct lpfc_node_rrq *rrq; 686 struct lpfc_node_rrq *nextrrq; 687 unsigned long next_time; 688 unsigned long iflags; 689 LIST_HEAD(send_rrq); 690 691 spin_lock_irqsave(&phba->hbalock, iflags); 692 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 693 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 694 list_for_each_entry_safe(rrq, nextrrq, 695 &phba->active_rrq_list, list) { 696 if (time_after(jiffies, rrq->rrq_stop_time)) 697 list_move(&rrq->list, &send_rrq); 698 else if (time_before(rrq->rrq_stop_time, next_time)) 699 next_time = rrq->rrq_stop_time; 700 } 701 spin_unlock_irqrestore(&phba->hbalock, iflags); 702 if ((!list_empty(&phba->active_rrq_list)) && 703 (!(phba->pport->load_flag & FC_UNLOADING))) 704 mod_timer(&phba->rrq_tmr, next_time); 705 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) { 706 list_del(&rrq->list); 707 if (!rrq->send_rrq) 708 /* this call will free the rrq */ 709 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 710 else if (lpfc_send_rrq(phba, rrq)) { 711 /* if we send the rrq then the completion handler 712 * will clear the bit in the xribitmap. 713 */ 714 lpfc_clr_rrq_active(phba, rrq->xritag, 715 rrq); 716 } 717 } 718 } 719 720 /** 721 * lpfc_get_active_rrq - Get the active RRQ for this exchange. 722 * @vport: Pointer to vport context object. 723 * @xri: The xri used in the exchange. 724 * @did: The targets DID for this exchange. 725 * 726 * returns NULL = rrq not found in the phba->active_rrq_list. 727 * rrq = rrq for this xri and target. 728 **/ 729 struct lpfc_node_rrq * 730 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did) 731 { 732 struct lpfc_hba *phba = vport->phba; 733 struct lpfc_node_rrq *rrq; 734 struct lpfc_node_rrq *nextrrq; 735 unsigned long iflags; 736 737 if (phba->sli_rev != LPFC_SLI_REV4) 738 return NULL; 739 spin_lock_irqsave(&phba->hbalock, iflags); 740 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { 741 if (rrq->vport == vport && rrq->xritag == xri && 742 rrq->nlp_DID == did){ 743 list_del(&rrq->list); 744 spin_unlock_irqrestore(&phba->hbalock, iflags); 745 return rrq; 746 } 747 } 748 spin_unlock_irqrestore(&phba->hbalock, iflags); 749 return NULL; 750 } 751 752 /** 753 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport. 754 * @vport: Pointer to vport context object. 755 * @ndlp: Pointer to the lpfc_node_list structure. 756 * If ndlp is NULL Remove all active RRQs for this vport from the 757 * phba->active_rrq_list and clear the rrq. 758 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp. 759 **/ 760 void 761 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 762 763 { 764 struct lpfc_hba *phba = vport->phba; 765 struct lpfc_node_rrq *rrq; 766 struct lpfc_node_rrq *nextrrq; 767 unsigned long iflags; 768 LIST_HEAD(rrq_list); 769 770 if (phba->sli_rev != LPFC_SLI_REV4) 771 return; 772 if (!ndlp) { 773 lpfc_sli4_vport_delete_els_xri_aborted(vport); 774 lpfc_sli4_vport_delete_fcp_xri_aborted(vport); 775 } 776 spin_lock_irqsave(&phba->hbalock, iflags); 777 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) 778 if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp)) 779 list_move(&rrq->list, &rrq_list); 780 spin_unlock_irqrestore(&phba->hbalock, iflags); 781 782 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 783 list_del(&rrq->list); 784 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 785 } 786 } 787 788 /** 789 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list. 790 * @phba: Pointer to HBA context object. 791 * 792 * Remove all rrqs from the phba->active_rrq_list and free them by 793 * calling __lpfc_clr_active_rrq 794 * 795 **/ 796 void 797 lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) 798 { 799 struct lpfc_node_rrq *rrq; 800 struct lpfc_node_rrq *nextrrq; 801 unsigned long next_time; 802 unsigned long iflags; 803 LIST_HEAD(rrq_list); 804 805 if (phba->sli_rev != LPFC_SLI_REV4) 806 return; 807 spin_lock_irqsave(&phba->hbalock, iflags); 808 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 809 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); 810 list_splice_init(&phba->active_rrq_list, &rrq_list); 811 spin_unlock_irqrestore(&phba->hbalock, iflags); 812 813 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) { 814 list_del(&rrq->list); 815 lpfc_clr_rrq_active(phba, rrq->xritag, rrq); 816 } 817 if ((!list_empty(&phba->active_rrq_list)) && 818 (!(phba->pport->load_flag & FC_UNLOADING))) 819 820 mod_timer(&phba->rrq_tmr, next_time); 821 } 822 823 824 /** 825 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 826 * @phba: Pointer to HBA context object. 827 * @ndlp: Targets nodelist pointer for this exchange. 828 * @xritag the xri in the bitmap to test. 829 * 830 * This function is called with hbalock held. This function 831 * returns 0 = rrq not active for this xri 832 * 1 = rrq is valid for this xri. 833 **/ 834 int 835 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 836 uint16_t xritag) 837 { 838 if (!ndlp) 839 return 0; 840 if (!ndlp->active_rrqs_xri_bitmap) 841 return 0; 842 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 843 return 1; 844 else 845 return 0; 846 } 847 848 /** 849 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap. 850 * @phba: Pointer to HBA context object. 851 * @ndlp: nodelist pointer for this target. 852 * @xritag: xri used in this exchange. 853 * @rxid: Remote Exchange ID. 854 * @send_rrq: Flag used to determine if we should send rrq els cmd. 855 * 856 * This function takes the hbalock. 857 * The active bit is always set in the active rrq xri_bitmap even 858 * if there is no slot avaiable for the other rrq information. 859 * 860 * returns 0 rrq actived for this xri 861 * < 0 No memory or invalid ndlp. 862 **/ 863 int 864 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, 865 uint16_t xritag, uint16_t rxid, uint16_t send_rrq) 866 { 867 unsigned long iflags; 868 struct lpfc_node_rrq *rrq; 869 int empty; 870 871 if (!ndlp) 872 return -EINVAL; 873 874 if (!phba->cfg_enable_rrq) 875 return -EINVAL; 876 877 spin_lock_irqsave(&phba->hbalock, iflags); 878 if (phba->pport->load_flag & FC_UNLOADING) { 879 phba->hba_flag &= ~HBA_RRQ_ACTIVE; 880 goto out; 881 } 882 883 /* 884 * set the active bit even if there is no mem available. 885 */ 886 if (NLP_CHK_FREE_REQ(ndlp)) 887 goto out; 888 889 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING)) 890 goto out; 891 892 if (!ndlp->active_rrqs_xri_bitmap) 893 goto out; 894 895 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap)) 896 goto out; 897 898 spin_unlock_irqrestore(&phba->hbalock, iflags); 899 rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL); 900 if (!rrq) { 901 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 902 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x" 903 " DID:0x%x Send:%d\n", 904 xritag, rxid, ndlp->nlp_DID, send_rrq); 905 return -EINVAL; 906 } 907 if (phba->cfg_enable_rrq == 1) 908 rrq->send_rrq = send_rrq; 909 else 910 rrq->send_rrq = 0; 911 rrq->xritag = xritag; 912 rrq->rrq_stop_time = jiffies + 913 msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); 914 rrq->ndlp = ndlp; 915 rrq->nlp_DID = ndlp->nlp_DID; 916 rrq->vport = ndlp->vport; 917 rrq->rxid = rxid; 918 spin_lock_irqsave(&phba->hbalock, iflags); 919 empty = list_empty(&phba->active_rrq_list); 920 list_add_tail(&rrq->list, &phba->active_rrq_list); 921 phba->hba_flag |= HBA_RRQ_ACTIVE; 922 if (empty) 923 lpfc_worker_wake_up(phba); 924 spin_unlock_irqrestore(&phba->hbalock, iflags); 925 return 0; 926 out: 927 spin_unlock_irqrestore(&phba->hbalock, iflags); 928 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 929 "2921 Can't set rrq active xri:0x%x rxid:0x%x" 930 " DID:0x%x Send:%d\n", 931 xritag, rxid, ndlp->nlp_DID, send_rrq); 932 return -EINVAL; 933 } 934 935 /** 936 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool 937 * @phba: Pointer to HBA context object. 938 * @piocb: Pointer to the iocbq. 939 * 940 * This function is called with hbalock held. This function 941 * gets a new driver sglq object from the sglq list. If the 942 * list is not empty then it is successful, it returns pointer to the newly 943 * allocated sglq object else it returns NULL. 944 **/ 945 static struct lpfc_sglq * 946 __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) 947 { 948 struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; 949 struct lpfc_sglq *sglq = NULL; 950 struct lpfc_sglq *start_sglq = NULL; 951 struct lpfc_scsi_buf *lpfc_cmd; 952 struct lpfc_nodelist *ndlp; 953 int found = 0; 954 955 if (piocbq->iocb_flag & LPFC_IO_FCP) { 956 lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1; 957 ndlp = lpfc_cmd->rdata->pnode; 958 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && 959 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) 960 ndlp = piocbq->context_un.ndlp; 961 else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) 962 ndlp = piocbq->context_un.ndlp; 963 else 964 ndlp = piocbq->context1; 965 966 list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); 967 start_sglq = sglq; 968 while (!found) { 969 if (!sglq) 970 return NULL; 971 if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) { 972 /* This xri has an rrq outstanding for this DID. 973 * put it back in the list and get another xri. 974 */ 975 list_add_tail(&sglq->list, lpfc_sgl_list); 976 sglq = NULL; 977 list_remove_head(lpfc_sgl_list, sglq, 978 struct lpfc_sglq, list); 979 if (sglq == start_sglq) { 980 sglq = NULL; 981 break; 982 } else 983 continue; 984 } 985 sglq->ndlp = ndlp; 986 found = 1; 987 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq; 988 sglq->state = SGL_ALLOCATED; 989 } 990 return sglq; 991 } 992 993 /** 994 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool 995 * @phba: Pointer to HBA context object. 996 * 997 * This function is called with no lock held. This function 998 * allocates a new driver iocb object from the iocb pool. If the 999 * allocation is successful, it returns pointer to the newly 1000 * allocated iocb object else it returns NULL. 1001 **/ 1002 struct lpfc_iocbq * 1003 lpfc_sli_get_iocbq(struct lpfc_hba *phba) 1004 { 1005 struct lpfc_iocbq * iocbq = NULL; 1006 unsigned long iflags; 1007 1008 spin_lock_irqsave(&phba->hbalock, iflags); 1009 iocbq = __lpfc_sli_get_iocbq(phba); 1010 spin_unlock_irqrestore(&phba->hbalock, iflags); 1011 return iocbq; 1012 } 1013 1014 /** 1015 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool 1016 * @phba: Pointer to HBA context object. 1017 * @iocbq: Pointer to driver iocb object. 1018 * 1019 * This function is called with hbalock held to release driver 1020 * iocb object to the iocb pool. The iotag in the iocb object 1021 * does not change for each use of the iocb object. This function 1022 * clears all other fields of the iocb object when it is freed. 1023 * The sqlq structure that holds the xritag and phys and virtual 1024 * mappings for the scatter gather list is retrieved from the 1025 * active array of sglq. The get of the sglq pointer also clears 1026 * the entry in the array. If the status of the IO indiactes that 1027 * this IO was aborted then the sglq entry it put on the 1028 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the 1029 * IO has good status or fails for any other reason then the sglq 1030 * entry is added to the free list (lpfc_sgl_list). 1031 **/ 1032 static void 1033 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1034 { 1035 struct lpfc_sglq *sglq; 1036 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1037 unsigned long iflag = 0; 1038 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 1039 1040 if (iocbq->sli4_xritag == NO_XRI) 1041 sglq = NULL; 1042 else 1043 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag); 1044 1045 1046 if (sglq) { 1047 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) && 1048 (sglq->state != SGL_XRI_ABORTED)) { 1049 spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, 1050 iflag); 1051 list_add(&sglq->list, 1052 &phba->sli4_hba.lpfc_abts_els_sgl_list); 1053 spin_unlock_irqrestore( 1054 &phba->sli4_hba.abts_sgl_list_lock, iflag); 1055 } else { 1056 sglq->state = SGL_FREED; 1057 sglq->ndlp = NULL; 1058 list_add_tail(&sglq->list, 1059 &phba->sli4_hba.lpfc_sgl_list); 1060 1061 /* Check if TXQ queue needs to be serviced */ 1062 if (!list_empty(&pring->txq)) 1063 lpfc_worker_wake_up(phba); 1064 } 1065 } 1066 1067 1068 /* 1069 * Clean all volatile data fields, preserve iotag and node struct. 1070 */ 1071 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1072 iocbq->sli4_lxritag = NO_XRI; 1073 iocbq->sli4_xritag = NO_XRI; 1074 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1075 } 1076 1077 1078 /** 1079 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool 1080 * @phba: Pointer to HBA context object. 1081 * @iocbq: Pointer to driver iocb object. 1082 * 1083 * This function is called with hbalock held to release driver 1084 * iocb object to the iocb pool. The iotag in the iocb object 1085 * does not change for each use of the iocb object. This function 1086 * clears all other fields of the iocb object when it is freed. 1087 **/ 1088 static void 1089 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1090 { 1091 size_t start_clean = offsetof(struct lpfc_iocbq, iocb); 1092 1093 1094 /* 1095 * Clean all volatile data fields, preserve iotag and node struct. 1096 */ 1097 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); 1098 iocbq->sli4_xritag = NO_XRI; 1099 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); 1100 } 1101 1102 /** 1103 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool 1104 * @phba: Pointer to HBA context object. 1105 * @iocbq: Pointer to driver iocb object. 1106 * 1107 * This function is called with hbalock held to release driver 1108 * iocb object to the iocb pool. The iotag in the iocb object 1109 * does not change for each use of the iocb object. This function 1110 * clears all other fields of the iocb object when it is freed. 1111 **/ 1112 static void 1113 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1114 { 1115 phba->__lpfc_sli_release_iocbq(phba, iocbq); 1116 phba->iocb_cnt--; 1117 } 1118 1119 /** 1120 * lpfc_sli_release_iocbq - Release iocb to the iocb pool 1121 * @phba: Pointer to HBA context object. 1122 * @iocbq: Pointer to driver iocb object. 1123 * 1124 * This function is called with no lock held to release the iocb to 1125 * iocb pool. 1126 **/ 1127 void 1128 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1129 { 1130 unsigned long iflags; 1131 1132 /* 1133 * Clean all volatile data fields, preserve iotag and node struct. 1134 */ 1135 spin_lock_irqsave(&phba->hbalock, iflags); 1136 __lpfc_sli_release_iocbq(phba, iocbq); 1137 spin_unlock_irqrestore(&phba->hbalock, iflags); 1138 } 1139 1140 /** 1141 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list. 1142 * @phba: Pointer to HBA context object. 1143 * @iocblist: List of IOCBs. 1144 * @ulpstatus: ULP status in IOCB command field. 1145 * @ulpWord4: ULP word-4 in IOCB command field. 1146 * 1147 * This function is called with a list of IOCBs to cancel. It cancels the IOCB 1148 * on the list by invoking the complete callback function associated with the 1149 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond 1150 * fields. 1151 **/ 1152 void 1153 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, 1154 uint32_t ulpstatus, uint32_t ulpWord4) 1155 { 1156 struct lpfc_iocbq *piocb; 1157 1158 while (!list_empty(iocblist)) { 1159 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); 1160 if (!piocb->iocb_cmpl) 1161 lpfc_sli_release_iocbq(phba, piocb); 1162 else { 1163 piocb->iocb.ulpStatus = ulpstatus; 1164 piocb->iocb.un.ulpWord[4] = ulpWord4; 1165 (piocb->iocb_cmpl) (phba, piocb, piocb); 1166 } 1167 } 1168 return; 1169 } 1170 1171 /** 1172 * lpfc_sli_iocb_cmd_type - Get the iocb type 1173 * @iocb_cmnd: iocb command code. 1174 * 1175 * This function is called by ring event handler function to get the iocb type. 1176 * This function translates the iocb command to an iocb command type used to 1177 * decide the final disposition of each completed IOCB. 1178 * The function returns 1179 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb 1180 * LPFC_SOL_IOCB if it is a solicited iocb completion 1181 * LPFC_ABORT_IOCB if it is an abort iocb 1182 * LPFC_UNSOL_IOCB if it is an unsolicited iocb 1183 * 1184 * The caller is not required to hold any lock. 1185 **/ 1186 static lpfc_iocb_type 1187 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) 1188 { 1189 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB; 1190 1191 if (iocb_cmnd > CMD_MAX_IOCB_CMD) 1192 return 0; 1193 1194 switch (iocb_cmnd) { 1195 case CMD_XMIT_SEQUENCE_CR: 1196 case CMD_XMIT_SEQUENCE_CX: 1197 case CMD_XMIT_BCAST_CN: 1198 case CMD_XMIT_BCAST_CX: 1199 case CMD_ELS_REQUEST_CR: 1200 case CMD_ELS_REQUEST_CX: 1201 case CMD_CREATE_XRI_CR: 1202 case CMD_CREATE_XRI_CX: 1203 case CMD_GET_RPI_CN: 1204 case CMD_XMIT_ELS_RSP_CX: 1205 case CMD_GET_RPI_CR: 1206 case CMD_FCP_IWRITE_CR: 1207 case CMD_FCP_IWRITE_CX: 1208 case CMD_FCP_IREAD_CR: 1209 case CMD_FCP_IREAD_CX: 1210 case CMD_FCP_ICMND_CR: 1211 case CMD_FCP_ICMND_CX: 1212 case CMD_FCP_TSEND_CX: 1213 case CMD_FCP_TRSP_CX: 1214 case CMD_FCP_TRECEIVE_CX: 1215 case CMD_FCP_AUTO_TRSP_CX: 1216 case CMD_ADAPTER_MSG: 1217 case CMD_ADAPTER_DUMP: 1218 case CMD_XMIT_SEQUENCE64_CR: 1219 case CMD_XMIT_SEQUENCE64_CX: 1220 case CMD_XMIT_BCAST64_CN: 1221 case CMD_XMIT_BCAST64_CX: 1222 case CMD_ELS_REQUEST64_CR: 1223 case CMD_ELS_REQUEST64_CX: 1224 case CMD_FCP_IWRITE64_CR: 1225 case CMD_FCP_IWRITE64_CX: 1226 case CMD_FCP_IREAD64_CR: 1227 case CMD_FCP_IREAD64_CX: 1228 case CMD_FCP_ICMND64_CR: 1229 case CMD_FCP_ICMND64_CX: 1230 case CMD_FCP_TSEND64_CX: 1231 case CMD_FCP_TRSP64_CX: 1232 case CMD_FCP_TRECEIVE64_CX: 1233 case CMD_GEN_REQUEST64_CR: 1234 case CMD_GEN_REQUEST64_CX: 1235 case CMD_XMIT_ELS_RSP64_CX: 1236 case DSSCMD_IWRITE64_CR: 1237 case DSSCMD_IWRITE64_CX: 1238 case DSSCMD_IREAD64_CR: 1239 case DSSCMD_IREAD64_CX: 1240 type = LPFC_SOL_IOCB; 1241 break; 1242 case CMD_ABORT_XRI_CN: 1243 case CMD_ABORT_XRI_CX: 1244 case CMD_CLOSE_XRI_CN: 1245 case CMD_CLOSE_XRI_CX: 1246 case CMD_XRI_ABORTED_CX: 1247 case CMD_ABORT_MXRI64_CN: 1248 case CMD_XMIT_BLS_RSP64_CX: 1249 type = LPFC_ABORT_IOCB; 1250 break; 1251 case CMD_RCV_SEQUENCE_CX: 1252 case CMD_RCV_ELS_REQ_CX: 1253 case CMD_RCV_SEQUENCE64_CX: 1254 case CMD_RCV_ELS_REQ64_CX: 1255 case CMD_ASYNC_STATUS: 1256 case CMD_IOCB_RCV_SEQ64_CX: 1257 case CMD_IOCB_RCV_ELS64_CX: 1258 case CMD_IOCB_RCV_CONT64_CX: 1259 case CMD_IOCB_RET_XRI64_CX: 1260 type = LPFC_UNSOL_IOCB; 1261 break; 1262 case CMD_IOCB_XMIT_MSEQ64_CR: 1263 case CMD_IOCB_XMIT_MSEQ64_CX: 1264 case CMD_IOCB_RCV_SEQ_LIST64_CX: 1265 case CMD_IOCB_RCV_ELS_LIST64_CX: 1266 case CMD_IOCB_CLOSE_EXTENDED_CN: 1267 case CMD_IOCB_ABORT_EXTENDED_CN: 1268 case CMD_IOCB_RET_HBQE64_CN: 1269 case CMD_IOCB_FCP_IBIDIR64_CR: 1270 case CMD_IOCB_FCP_IBIDIR64_CX: 1271 case CMD_IOCB_FCP_ITASKMGT64_CX: 1272 case CMD_IOCB_LOGENTRY_CN: 1273 case CMD_IOCB_LOGENTRY_ASYNC_CN: 1274 printk("%s - Unhandled SLI-3 Command x%x\n", 1275 __func__, iocb_cmnd); 1276 type = LPFC_UNKNOWN_IOCB; 1277 break; 1278 default: 1279 type = LPFC_UNKNOWN_IOCB; 1280 break; 1281 } 1282 1283 return type; 1284 } 1285 1286 /** 1287 * lpfc_sli_ring_map - Issue config_ring mbox for all rings 1288 * @phba: Pointer to HBA context object. 1289 * 1290 * This function is called from SLI initialization code 1291 * to configure every ring of the HBA's SLI interface. The 1292 * caller is not required to hold any lock. This function issues 1293 * a config_ring mailbox command for each ring. 1294 * This function returns zero if successful else returns a negative 1295 * error code. 1296 **/ 1297 static int 1298 lpfc_sli_ring_map(struct lpfc_hba *phba) 1299 { 1300 struct lpfc_sli *psli = &phba->sli; 1301 LPFC_MBOXQ_t *pmb; 1302 MAILBOX_t *pmbox; 1303 int i, rc, ret = 0; 1304 1305 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 1306 if (!pmb) 1307 return -ENOMEM; 1308 pmbox = &pmb->u.mb; 1309 phba->link_state = LPFC_INIT_MBX_CMDS; 1310 for (i = 0; i < psli->num_rings; i++) { 1311 lpfc_config_ring(phba, i, pmb); 1312 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 1313 if (rc != MBX_SUCCESS) { 1314 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1315 "0446 Adapter failed to init (%d), " 1316 "mbxCmd x%x CFG_RING, mbxStatus x%x, " 1317 "ring %d\n", 1318 rc, pmbox->mbxCommand, 1319 pmbox->mbxStatus, i); 1320 phba->link_state = LPFC_HBA_ERROR; 1321 ret = -ENXIO; 1322 break; 1323 } 1324 } 1325 mempool_free(pmb, phba->mbox_mem_pool); 1326 return ret; 1327 } 1328 1329 /** 1330 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq 1331 * @phba: Pointer to HBA context object. 1332 * @pring: Pointer to driver SLI ring object. 1333 * @piocb: Pointer to the driver iocb object. 1334 * 1335 * This function is called with hbalock held. The function adds the 1336 * new iocb to txcmplq of the given ring. This function always returns 1337 * 0. If this function is called for ELS ring, this function checks if 1338 * there is a vport associated with the ELS command. This function also 1339 * starts els_tmofunc timer if this is an ELS command. 1340 **/ 1341 static int 1342 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1343 struct lpfc_iocbq *piocb) 1344 { 1345 list_add_tail(&piocb->list, &pring->txcmplq); 1346 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ; 1347 1348 if ((unlikely(pring->ringno == LPFC_ELS_RING)) && 1349 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 1350 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && 1351 (!(piocb->vport->load_flag & FC_UNLOADING))) { 1352 if (!piocb->vport) 1353 BUG(); 1354 else 1355 mod_timer(&piocb->vport->els_tmofunc, 1356 jiffies + 1357 msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); 1358 } 1359 1360 1361 return 0; 1362 } 1363 1364 /** 1365 * lpfc_sli_ringtx_get - Get first element of the txq 1366 * @phba: Pointer to HBA context object. 1367 * @pring: Pointer to driver SLI ring object. 1368 * 1369 * This function is called with hbalock held to get next 1370 * iocb in txq of the given ring. If there is any iocb in 1371 * the txq, the function returns first iocb in the list after 1372 * removing the iocb from the list, else it returns NULL. 1373 **/ 1374 struct lpfc_iocbq * 1375 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1376 { 1377 struct lpfc_iocbq *cmd_iocb; 1378 1379 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list); 1380 return cmd_iocb; 1381 } 1382 1383 /** 1384 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring 1385 * @phba: Pointer to HBA context object. 1386 * @pring: Pointer to driver SLI ring object. 1387 * 1388 * This function is called with hbalock held and the caller must post the 1389 * iocb without releasing the lock. If the caller releases the lock, 1390 * iocb slot returned by the function is not guaranteed to be available. 1391 * The function returns pointer to the next available iocb slot if there 1392 * is available slot in the ring, else it returns NULL. 1393 * If the get index of the ring is ahead of the put index, the function 1394 * will post an error attention event to the worker thread to take the 1395 * HBA to offline state. 1396 **/ 1397 static IOCB_t * 1398 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1399 { 1400 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 1401 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb; 1402 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) && 1403 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx)) 1404 pring->sli.sli3.next_cmdidx = 0; 1405 1406 if (unlikely(pring->sli.sli3.local_getidx == 1407 pring->sli.sli3.next_cmdidx)) { 1408 1409 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 1410 1411 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) { 1412 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 1413 "0315 Ring %d issue: portCmdGet %d " 1414 "is bigger than cmd ring %d\n", 1415 pring->ringno, 1416 pring->sli.sli3.local_getidx, 1417 max_cmd_idx); 1418 1419 phba->link_state = LPFC_HBA_ERROR; 1420 /* 1421 * All error attention handlers are posted to 1422 * worker thread 1423 */ 1424 phba->work_ha |= HA_ERATT; 1425 phba->work_hs = HS_FFER3; 1426 1427 lpfc_worker_wake_up(phba); 1428 1429 return NULL; 1430 } 1431 1432 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx) 1433 return NULL; 1434 } 1435 1436 return lpfc_cmd_iocb(phba, pring); 1437 } 1438 1439 /** 1440 * lpfc_sli_next_iotag - Get an iotag for the iocb 1441 * @phba: Pointer to HBA context object. 1442 * @iocbq: Pointer to driver iocb object. 1443 * 1444 * This function gets an iotag for the iocb. If there is no unused iotag and 1445 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup 1446 * array and assigns a new iotag. 1447 * The function returns the allocated iotag if successful, else returns zero. 1448 * Zero is not a valid iotag. 1449 * The caller is not required to hold any lock. 1450 **/ 1451 uint16_t 1452 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) 1453 { 1454 struct lpfc_iocbq **new_arr; 1455 struct lpfc_iocbq **old_arr; 1456 size_t new_len; 1457 struct lpfc_sli *psli = &phba->sli; 1458 uint16_t iotag; 1459 1460 spin_lock_irq(&phba->hbalock); 1461 iotag = psli->last_iotag; 1462 if(++iotag < psli->iocbq_lookup_len) { 1463 psli->last_iotag = iotag; 1464 psli->iocbq_lookup[iotag] = iocbq; 1465 spin_unlock_irq(&phba->hbalock); 1466 iocbq->iotag = iotag; 1467 return iotag; 1468 } else if (psli->iocbq_lookup_len < (0xffff 1469 - LPFC_IOCBQ_LOOKUP_INCREMENT)) { 1470 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT; 1471 spin_unlock_irq(&phba->hbalock); 1472 new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *), 1473 GFP_KERNEL); 1474 if (new_arr) { 1475 spin_lock_irq(&phba->hbalock); 1476 old_arr = psli->iocbq_lookup; 1477 if (new_len <= psli->iocbq_lookup_len) { 1478 /* highly unprobable case */ 1479 kfree(new_arr); 1480 iotag = psli->last_iotag; 1481 if(++iotag < psli->iocbq_lookup_len) { 1482 psli->last_iotag = iotag; 1483 psli->iocbq_lookup[iotag] = iocbq; 1484 spin_unlock_irq(&phba->hbalock); 1485 iocbq->iotag = iotag; 1486 return iotag; 1487 } 1488 spin_unlock_irq(&phba->hbalock); 1489 return 0; 1490 } 1491 if (psli->iocbq_lookup) 1492 memcpy(new_arr, old_arr, 1493 ((psli->last_iotag + 1) * 1494 sizeof (struct lpfc_iocbq *))); 1495 psli->iocbq_lookup = new_arr; 1496 psli->iocbq_lookup_len = new_len; 1497 psli->last_iotag = iotag; 1498 psli->iocbq_lookup[iotag] = iocbq; 1499 spin_unlock_irq(&phba->hbalock); 1500 iocbq->iotag = iotag; 1501 kfree(old_arr); 1502 return iotag; 1503 } 1504 } else 1505 spin_unlock_irq(&phba->hbalock); 1506 1507 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 1508 "0318 Failed to allocate IOTAG.last IOTAG is %d\n", 1509 psli->last_iotag); 1510 1511 return 0; 1512 } 1513 1514 /** 1515 * lpfc_sli_submit_iocb - Submit an iocb to the firmware 1516 * @phba: Pointer to HBA context object. 1517 * @pring: Pointer to driver SLI ring object. 1518 * @iocb: Pointer to iocb slot in the ring. 1519 * @nextiocb: Pointer to driver iocb object which need to be 1520 * posted to firmware. 1521 * 1522 * This function is called with hbalock held to post a new iocb to 1523 * the firmware. This function copies the new iocb to ring iocb slot and 1524 * updates the ring pointers. It adds the new iocb to txcmplq if there is 1525 * a completion call back for this iocb else the function will free the 1526 * iocb object. 1527 **/ 1528 static void 1529 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 1530 IOCB_t *iocb, struct lpfc_iocbq *nextiocb) 1531 { 1532 /* 1533 * Set up an iotag 1534 */ 1535 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0; 1536 1537 1538 if (pring->ringno == LPFC_ELS_RING) { 1539 lpfc_debugfs_slow_ring_trc(phba, 1540 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x", 1541 *(((uint32_t *) &nextiocb->iocb) + 4), 1542 *(((uint32_t *) &nextiocb->iocb) + 6), 1543 *(((uint32_t *) &nextiocb->iocb) + 7)); 1544 } 1545 1546 /* 1547 * Issue iocb command to adapter 1548 */ 1549 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size); 1550 wmb(); 1551 pring->stats.iocb_cmd++; 1552 1553 /* 1554 * If there is no completion routine to call, we can release the 1555 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF, 1556 * that have no rsp ring completion, iocb_cmpl MUST be NULL. 1557 */ 1558 if (nextiocb->iocb_cmpl) 1559 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb); 1560 else 1561 __lpfc_sli_release_iocbq(phba, nextiocb); 1562 1563 /* 1564 * Let the HBA know what IOCB slot will be the next one the 1565 * driver will put a command into. 1566 */ 1567 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx; 1568 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx); 1569 } 1570 1571 /** 1572 * lpfc_sli_update_full_ring - Update the chip attention register 1573 * @phba: Pointer to HBA context object. 1574 * @pring: Pointer to driver SLI ring object. 1575 * 1576 * The caller is not required to hold any lock for calling this function. 1577 * This function updates the chip attention bits for the ring to inform firmware 1578 * that there are pending work to be done for this ring and requests an 1579 * interrupt when there is space available in the ring. This function is 1580 * called when the driver is unable to post more iocbs to the ring due 1581 * to unavailability of space in the ring. 1582 **/ 1583 static void 1584 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1585 { 1586 int ringno = pring->ringno; 1587 1588 pring->flag |= LPFC_CALL_RING_AVAILABLE; 1589 1590 wmb(); 1591 1592 /* 1593 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register. 1594 * The HBA will tell us when an IOCB entry is available. 1595 */ 1596 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr); 1597 readl(phba->CAregaddr); /* flush */ 1598 1599 pring->stats.iocb_cmd_full++; 1600 } 1601 1602 /** 1603 * lpfc_sli_update_ring - Update chip attention register 1604 * @phba: Pointer to HBA context object. 1605 * @pring: Pointer to driver SLI ring object. 1606 * 1607 * This function updates the chip attention register bit for the 1608 * given ring to inform HBA that there is more work to be done 1609 * in this ring. The caller is not required to hold any lock. 1610 **/ 1611 static void 1612 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1613 { 1614 int ringno = pring->ringno; 1615 1616 /* 1617 * Tell the HBA that there is work to do in this ring. 1618 */ 1619 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) { 1620 wmb(); 1621 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr); 1622 readl(phba->CAregaddr); /* flush */ 1623 } 1624 } 1625 1626 /** 1627 * lpfc_sli_resume_iocb - Process iocbs in the txq 1628 * @phba: Pointer to HBA context object. 1629 * @pring: Pointer to driver SLI ring object. 1630 * 1631 * This function is called with hbalock held to post pending iocbs 1632 * in the txq to the firmware. This function is called when driver 1633 * detects space available in the ring. 1634 **/ 1635 static void 1636 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 1637 { 1638 IOCB_t *iocb; 1639 struct lpfc_iocbq *nextiocb; 1640 1641 /* 1642 * Check to see if: 1643 * (a) there is anything on the txq to send 1644 * (b) link is up 1645 * (c) link attention events can be processed (fcp ring only) 1646 * (d) IOCB processing is not blocked by the outstanding mbox command. 1647 */ 1648 1649 if (lpfc_is_link_up(phba) && 1650 (!list_empty(&pring->txq)) && 1651 (pring->ringno != phba->sli.fcp_ring || 1652 phba->sli.sli_flag & LPFC_PROCESS_LA)) { 1653 1654 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 1655 (nextiocb = lpfc_sli_ringtx_get(phba, pring))) 1656 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 1657 1658 if (iocb) 1659 lpfc_sli_update_ring(phba, pring); 1660 else 1661 lpfc_sli_update_full_ring(phba, pring); 1662 } 1663 1664 return; 1665 } 1666 1667 /** 1668 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ 1669 * @phba: Pointer to HBA context object. 1670 * @hbqno: HBQ number. 1671 * 1672 * This function is called with hbalock held to get the next 1673 * available slot for the given HBQ. If there is free slot 1674 * available for the HBQ it will return pointer to the next available 1675 * HBQ entry else it will return NULL. 1676 **/ 1677 static struct lpfc_hbq_entry * 1678 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno) 1679 { 1680 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1681 1682 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx && 1683 ++hbqp->next_hbqPutIdx >= hbqp->entry_count) 1684 hbqp->next_hbqPutIdx = 0; 1685 1686 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) { 1687 uint32_t raw_index = phba->hbq_get[hbqno]; 1688 uint32_t getidx = le32_to_cpu(raw_index); 1689 1690 hbqp->local_hbqGetIdx = getidx; 1691 1692 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) { 1693 lpfc_printf_log(phba, KERN_ERR, 1694 LOG_SLI | LOG_VPORT, 1695 "1802 HBQ %d: local_hbqGetIdx " 1696 "%u is > than hbqp->entry_count %u\n", 1697 hbqno, hbqp->local_hbqGetIdx, 1698 hbqp->entry_count); 1699 1700 phba->link_state = LPFC_HBA_ERROR; 1701 return NULL; 1702 } 1703 1704 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx) 1705 return NULL; 1706 } 1707 1708 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt + 1709 hbqp->hbqPutIdx; 1710 } 1711 1712 /** 1713 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers 1714 * @phba: Pointer to HBA context object. 1715 * 1716 * This function is called with no lock held to free all the 1717 * hbq buffers while uninitializing the SLI interface. It also 1718 * frees the HBQ buffers returned by the firmware but not yet 1719 * processed by the upper layers. 1720 **/ 1721 void 1722 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) 1723 { 1724 struct lpfc_dmabuf *dmabuf, *next_dmabuf; 1725 struct hbq_dmabuf *hbq_buf; 1726 unsigned long flags; 1727 int i, hbq_count; 1728 uint32_t hbqno; 1729 1730 hbq_count = lpfc_sli_hbq_count(); 1731 /* Return all memory used by all HBQs */ 1732 spin_lock_irqsave(&phba->hbalock, flags); 1733 for (i = 0; i < hbq_count; ++i) { 1734 list_for_each_entry_safe(dmabuf, next_dmabuf, 1735 &phba->hbqs[i].hbq_buffer_list, list) { 1736 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1737 list_del(&hbq_buf->dbuf.list); 1738 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf); 1739 } 1740 phba->hbqs[i].buffer_count = 0; 1741 } 1742 /* Return all HBQ buffer that are in-fly */ 1743 list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, 1744 list) { 1745 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); 1746 list_del(&hbq_buf->dbuf.list); 1747 if (hbq_buf->tag == -1) { 1748 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1749 (phba, hbq_buf); 1750 } else { 1751 hbqno = hbq_buf->tag >> 16; 1752 if (hbqno >= LPFC_MAX_HBQS) 1753 (phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer) 1754 (phba, hbq_buf); 1755 else 1756 (phba->hbqs[hbqno].hbq_free_buffer)(phba, 1757 hbq_buf); 1758 } 1759 } 1760 1761 /* Mark the HBQs not in use */ 1762 phba->hbq_in_use = 0; 1763 spin_unlock_irqrestore(&phba->hbalock, flags); 1764 } 1765 1766 /** 1767 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware 1768 * @phba: Pointer to HBA context object. 1769 * @hbqno: HBQ number. 1770 * @hbq_buf: Pointer to HBQ buffer. 1771 * 1772 * This function is called with the hbalock held to post a 1773 * hbq buffer to the firmware. If the function finds an empty 1774 * slot in the HBQ, it will post the buffer. The function will return 1775 * pointer to the hbq entry if it successfully post the buffer 1776 * else it will return NULL. 1777 **/ 1778 static int 1779 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, 1780 struct hbq_dmabuf *hbq_buf) 1781 { 1782 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); 1783 } 1784 1785 /** 1786 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware 1787 * @phba: Pointer to HBA context object. 1788 * @hbqno: HBQ number. 1789 * @hbq_buf: Pointer to HBQ buffer. 1790 * 1791 * This function is called with the hbalock held to post a hbq buffer to the 1792 * firmware. If the function finds an empty slot in the HBQ, it will post the 1793 * buffer and place it on the hbq_buffer_list. The function will return zero if 1794 * it successfully post the buffer else it will return an error. 1795 **/ 1796 static int 1797 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, 1798 struct hbq_dmabuf *hbq_buf) 1799 { 1800 struct lpfc_hbq_entry *hbqe; 1801 dma_addr_t physaddr = hbq_buf->dbuf.phys; 1802 1803 /* Get next HBQ entry slot to use */ 1804 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno); 1805 if (hbqe) { 1806 struct hbq_s *hbqp = &phba->hbqs[hbqno]; 1807 1808 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr)); 1809 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr)); 1810 hbqe->bde.tus.f.bdeSize = hbq_buf->size; 1811 hbqe->bde.tus.f.bdeFlags = 0; 1812 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w); 1813 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag); 1814 /* Sync SLIM */ 1815 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx; 1816 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno); 1817 /* flush */ 1818 readl(phba->hbq_put + hbqno); 1819 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); 1820 return 0; 1821 } else 1822 return -ENOMEM; 1823 } 1824 1825 /** 1826 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware 1827 * @phba: Pointer to HBA context object. 1828 * @hbqno: HBQ number. 1829 * @hbq_buf: Pointer to HBQ buffer. 1830 * 1831 * This function is called with the hbalock held to post an RQE to the SLI4 1832 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to 1833 * the hbq_buffer_list and return zero, otherwise it will return an error. 1834 **/ 1835 static int 1836 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, 1837 struct hbq_dmabuf *hbq_buf) 1838 { 1839 int rc; 1840 struct lpfc_rqe hrqe; 1841 struct lpfc_rqe drqe; 1842 1843 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); 1844 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); 1845 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); 1846 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); 1847 rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 1848 &hrqe, &drqe); 1849 if (rc < 0) 1850 return rc; 1851 hbq_buf->tag = rc; 1852 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); 1853 return 0; 1854 } 1855 1856 /* HBQ for ELS and CT traffic. */ 1857 static struct lpfc_hbq_init lpfc_els_hbq = { 1858 .rn = 1, 1859 .entry_count = 256, 1860 .mask_count = 0, 1861 .profile = 0, 1862 .ring_mask = (1 << LPFC_ELS_RING), 1863 .buffer_count = 0, 1864 .init_count = 40, 1865 .add_count = 40, 1866 }; 1867 1868 /* HBQ for the extra ring if needed */ 1869 static struct lpfc_hbq_init lpfc_extra_hbq = { 1870 .rn = 1, 1871 .entry_count = 200, 1872 .mask_count = 0, 1873 .profile = 0, 1874 .ring_mask = (1 << LPFC_EXTRA_RING), 1875 .buffer_count = 0, 1876 .init_count = 0, 1877 .add_count = 5, 1878 }; 1879 1880 /* Array of HBQs */ 1881 struct lpfc_hbq_init *lpfc_hbq_defs[] = { 1882 &lpfc_els_hbq, 1883 &lpfc_extra_hbq, 1884 }; 1885 1886 /** 1887 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ 1888 * @phba: Pointer to HBA context object. 1889 * @hbqno: HBQ number. 1890 * @count: Number of HBQ buffers to be posted. 1891 * 1892 * This function is called with no lock held to post more hbq buffers to the 1893 * given HBQ. The function returns the number of HBQ buffers successfully 1894 * posted. 1895 **/ 1896 static int 1897 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) 1898 { 1899 uint32_t i, posted = 0; 1900 unsigned long flags; 1901 struct hbq_dmabuf *hbq_buffer; 1902 LIST_HEAD(hbq_buf_list); 1903 if (!phba->hbqs[hbqno].hbq_alloc_buffer) 1904 return 0; 1905 1906 if ((phba->hbqs[hbqno].buffer_count + count) > 1907 lpfc_hbq_defs[hbqno]->entry_count) 1908 count = lpfc_hbq_defs[hbqno]->entry_count - 1909 phba->hbqs[hbqno].buffer_count; 1910 if (!count) 1911 return 0; 1912 /* Allocate HBQ entries */ 1913 for (i = 0; i < count; i++) { 1914 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba); 1915 if (!hbq_buffer) 1916 break; 1917 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list); 1918 } 1919 /* Check whether HBQ is still in use */ 1920 spin_lock_irqsave(&phba->hbalock, flags); 1921 if (!phba->hbq_in_use) 1922 goto err; 1923 while (!list_empty(&hbq_buf_list)) { 1924 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1925 dbuf.list); 1926 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | 1927 (hbqno << 16)); 1928 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { 1929 phba->hbqs[hbqno].buffer_count++; 1930 posted++; 1931 } else 1932 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1933 } 1934 spin_unlock_irqrestore(&phba->hbalock, flags); 1935 return posted; 1936 err: 1937 spin_unlock_irqrestore(&phba->hbalock, flags); 1938 while (!list_empty(&hbq_buf_list)) { 1939 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf, 1940 dbuf.list); 1941 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 1942 } 1943 return 0; 1944 } 1945 1946 /** 1947 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware 1948 * @phba: Pointer to HBA context object. 1949 * @qno: HBQ number. 1950 * 1951 * This function posts more buffers to the HBQ. This function 1952 * is called with no lock held. The function returns the number of HBQ entries 1953 * successfully allocated. 1954 **/ 1955 int 1956 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno) 1957 { 1958 if (phba->sli_rev == LPFC_SLI_REV4) 1959 return 0; 1960 else 1961 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1962 lpfc_hbq_defs[qno]->add_count); 1963 } 1964 1965 /** 1966 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ 1967 * @phba: Pointer to HBA context object. 1968 * @qno: HBQ queue number. 1969 * 1970 * This function is called from SLI initialization code path with 1971 * no lock held to post initial HBQ buffers to firmware. The 1972 * function returns the number of HBQ entries successfully allocated. 1973 **/ 1974 static int 1975 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) 1976 { 1977 if (phba->sli_rev == LPFC_SLI_REV4) 1978 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1979 lpfc_hbq_defs[qno]->entry_count); 1980 else 1981 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1982 lpfc_hbq_defs[qno]->init_count); 1983 } 1984 1985 /** 1986 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list 1987 * @phba: Pointer to HBA context object. 1988 * @hbqno: HBQ number. 1989 * 1990 * This function removes the first hbq buffer on an hbq list and returns a 1991 * pointer to that buffer. If it finds no buffers on the list it returns NULL. 1992 **/ 1993 static struct hbq_dmabuf * 1994 lpfc_sli_hbqbuf_get(struct list_head *rb_list) 1995 { 1996 struct lpfc_dmabuf *d_buf; 1997 1998 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); 1999 if (!d_buf) 2000 return NULL; 2001 return container_of(d_buf, struct hbq_dmabuf, dbuf); 2002 } 2003 2004 /** 2005 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag 2006 * @phba: Pointer to HBA context object. 2007 * @tag: Tag of the hbq buffer. 2008 * 2009 * This function is called with hbalock held. This function searches 2010 * for the hbq buffer associated with the given tag in the hbq buffer 2011 * list. If it finds the hbq buffer, it returns the hbq_buffer other wise 2012 * it returns NULL. 2013 **/ 2014 static struct hbq_dmabuf * 2015 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) 2016 { 2017 struct lpfc_dmabuf *d_buf; 2018 struct hbq_dmabuf *hbq_buf; 2019 uint32_t hbqno; 2020 2021 hbqno = tag >> 16; 2022 if (hbqno >= LPFC_MAX_HBQS) 2023 return NULL; 2024 2025 spin_lock_irq(&phba->hbalock); 2026 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { 2027 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 2028 if (hbq_buf->tag == tag) { 2029 spin_unlock_irq(&phba->hbalock); 2030 return hbq_buf; 2031 } 2032 } 2033 spin_unlock_irq(&phba->hbalock); 2034 lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, 2035 "1803 Bad hbq tag. Data: x%x x%x\n", 2036 tag, phba->hbqs[tag >> 16].buffer_count); 2037 return NULL; 2038 } 2039 2040 /** 2041 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware 2042 * @phba: Pointer to HBA context object. 2043 * @hbq_buffer: Pointer to HBQ buffer. 2044 * 2045 * This function is called with hbalock. This function gives back 2046 * the hbq buffer to firmware. If the HBQ does not have space to 2047 * post the buffer, it will free the buffer. 2048 **/ 2049 void 2050 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) 2051 { 2052 uint32_t hbqno; 2053 2054 if (hbq_buffer) { 2055 hbqno = hbq_buffer->tag >> 16; 2056 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) 2057 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); 2058 } 2059 } 2060 2061 /** 2062 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox 2063 * @mbxCommand: mailbox command code. 2064 * 2065 * This function is called by the mailbox event handler function to verify 2066 * that the completed mailbox command is a legitimate mailbox command. If the 2067 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN 2068 * and the mailbox event handler will take the HBA offline. 2069 **/ 2070 static int 2071 lpfc_sli_chk_mbx_command(uint8_t mbxCommand) 2072 { 2073 uint8_t ret; 2074 2075 switch (mbxCommand) { 2076 case MBX_LOAD_SM: 2077 case MBX_READ_NV: 2078 case MBX_WRITE_NV: 2079 case MBX_WRITE_VPARMS: 2080 case MBX_RUN_BIU_DIAG: 2081 case MBX_INIT_LINK: 2082 case MBX_DOWN_LINK: 2083 case MBX_CONFIG_LINK: 2084 case MBX_CONFIG_RING: 2085 case MBX_RESET_RING: 2086 case MBX_READ_CONFIG: 2087 case MBX_READ_RCONFIG: 2088 case MBX_READ_SPARM: 2089 case MBX_READ_STATUS: 2090 case MBX_READ_RPI: 2091 case MBX_READ_XRI: 2092 case MBX_READ_REV: 2093 case MBX_READ_LNK_STAT: 2094 case MBX_REG_LOGIN: 2095 case MBX_UNREG_LOGIN: 2096 case MBX_CLEAR_LA: 2097 case MBX_DUMP_MEMORY: 2098 case MBX_DUMP_CONTEXT: 2099 case MBX_RUN_DIAGS: 2100 case MBX_RESTART: 2101 case MBX_UPDATE_CFG: 2102 case MBX_DOWN_LOAD: 2103 case MBX_DEL_LD_ENTRY: 2104 case MBX_RUN_PROGRAM: 2105 case MBX_SET_MASK: 2106 case MBX_SET_VARIABLE: 2107 case MBX_UNREG_D_ID: 2108 case MBX_KILL_BOARD: 2109 case MBX_CONFIG_FARP: 2110 case MBX_BEACON: 2111 case MBX_LOAD_AREA: 2112 case MBX_RUN_BIU_DIAG64: 2113 case MBX_CONFIG_PORT: 2114 case MBX_READ_SPARM64: 2115 case MBX_READ_RPI64: 2116 case MBX_REG_LOGIN64: 2117 case MBX_READ_TOPOLOGY: 2118 case MBX_WRITE_WWN: 2119 case MBX_SET_DEBUG: 2120 case MBX_LOAD_EXP_ROM: 2121 case MBX_ASYNCEVT_ENABLE: 2122 case MBX_REG_VPI: 2123 case MBX_UNREG_VPI: 2124 case MBX_HEARTBEAT: 2125 case MBX_PORT_CAPABILITIES: 2126 case MBX_PORT_IOV_CONTROL: 2127 case MBX_SLI4_CONFIG: 2128 case MBX_SLI4_REQ_FTRS: 2129 case MBX_REG_FCFI: 2130 case MBX_UNREG_FCFI: 2131 case MBX_REG_VFI: 2132 case MBX_UNREG_VFI: 2133 case MBX_INIT_VPI: 2134 case MBX_INIT_VFI: 2135 case MBX_RESUME_RPI: 2136 case MBX_READ_EVENT_LOG_STATUS: 2137 case MBX_READ_EVENT_LOG: 2138 case MBX_SECURITY_MGMT: 2139 case MBX_AUTH_PORT: 2140 case MBX_ACCESS_VDATA: 2141 ret = mbxCommand; 2142 break; 2143 default: 2144 ret = MBX_SHUTDOWN; 2145 break; 2146 } 2147 return ret; 2148 } 2149 2150 /** 2151 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler 2152 * @phba: Pointer to HBA context object. 2153 * @pmboxq: Pointer to mailbox command. 2154 * 2155 * This is completion handler function for mailbox commands issued from 2156 * lpfc_sli_issue_mbox_wait function. This function is called by the 2157 * mailbox event handler function with no lock held. This function 2158 * will wake up thread waiting on the wait queue pointed by context1 2159 * of the mailbox. 2160 **/ 2161 void 2162 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 2163 { 2164 wait_queue_head_t *pdone_q; 2165 unsigned long drvr_flag; 2166 2167 /* 2168 * If pdone_q is empty, the driver thread gave up waiting and 2169 * continued running. 2170 */ 2171 pmboxq->mbox_flag |= LPFC_MBX_WAKE; 2172 spin_lock_irqsave(&phba->hbalock, drvr_flag); 2173 pdone_q = (wait_queue_head_t *) pmboxq->context1; 2174 if (pdone_q) 2175 wake_up_interruptible(pdone_q); 2176 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 2177 return; 2178 } 2179 2180 2181 /** 2182 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler 2183 * @phba: Pointer to HBA context object. 2184 * @pmb: Pointer to mailbox object. 2185 * 2186 * This function is the default mailbox completion handler. It 2187 * frees the memory resources associated with the completed mailbox 2188 * command. If the completed command is a REG_LOGIN mailbox command, 2189 * this function will issue a UREG_LOGIN to re-claim the RPI. 2190 **/ 2191 void 2192 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2193 { 2194 struct lpfc_vport *vport = pmb->vport; 2195 struct lpfc_dmabuf *mp; 2196 struct lpfc_nodelist *ndlp; 2197 struct Scsi_Host *shost; 2198 uint16_t rpi, vpi; 2199 int rc; 2200 2201 mp = (struct lpfc_dmabuf *) (pmb->context1); 2202 2203 if (mp) { 2204 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2205 kfree(mp); 2206 } 2207 2208 /* 2209 * If a REG_LOGIN succeeded after node is destroyed or node 2210 * is in re-discovery driver need to cleanup the RPI. 2211 */ 2212 if (!(phba->pport->load_flag & FC_UNLOADING) && 2213 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && 2214 !pmb->u.mb.mbxStatus) { 2215 rpi = pmb->u.mb.un.varWords[0]; 2216 vpi = pmb->u.mb.un.varRegLogin.vpi; 2217 lpfc_unreg_login(phba, vpi, rpi, pmb); 2218 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 2219 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2220 if (rc != MBX_NOT_FINISHED) 2221 return; 2222 } 2223 2224 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && 2225 !(phba->pport->load_flag & FC_UNLOADING) && 2226 !pmb->u.mb.mbxStatus) { 2227 shost = lpfc_shost_from_vport(vport); 2228 spin_lock_irq(shost->host_lock); 2229 vport->vpi_state |= LPFC_VPI_REGISTERED; 2230 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2231 spin_unlock_irq(shost->host_lock); 2232 } 2233 2234 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 2235 ndlp = (struct lpfc_nodelist *)pmb->context2; 2236 lpfc_nlp_put(ndlp); 2237 pmb->context2 = NULL; 2238 } 2239 2240 /* Check security permission status on INIT_LINK mailbox command */ 2241 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) && 2242 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION)) 2243 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2244 "2860 SLI authentication is required " 2245 "for INIT_LINK but has not done yet\n"); 2246 2247 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) 2248 lpfc_sli4_mbox_cmd_free(phba, pmb); 2249 else 2250 mempool_free(pmb, phba->mbox_mem_pool); 2251 } 2252 2253 /** 2254 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware 2255 * @phba: Pointer to HBA context object. 2256 * 2257 * This function is called with no lock held. This function processes all 2258 * the completed mailbox commands and gives it to upper layers. The interrupt 2259 * service routine processes mailbox completion interrupt and adds completed 2260 * mailbox commands to the mboxq_cmpl queue and signals the worker thread. 2261 * Worker thread call lpfc_sli_handle_mb_event, which will return the 2262 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This 2263 * function returns the mailbox commands to the upper layer by calling the 2264 * completion handler function of each mailbox. 2265 **/ 2266 int 2267 lpfc_sli_handle_mb_event(struct lpfc_hba *phba) 2268 { 2269 MAILBOX_t *pmbox; 2270 LPFC_MBOXQ_t *pmb; 2271 int rc; 2272 LIST_HEAD(cmplq); 2273 2274 phba->sli.slistat.mbox_event++; 2275 2276 /* Get all completed mailboxe buffers into the cmplq */ 2277 spin_lock_irq(&phba->hbalock); 2278 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq); 2279 spin_unlock_irq(&phba->hbalock); 2280 2281 /* Get a Mailbox buffer to setup mailbox commands for callback */ 2282 do { 2283 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list); 2284 if (pmb == NULL) 2285 break; 2286 2287 pmbox = &pmb->u.mb; 2288 2289 if (pmbox->mbxCommand != MBX_HEARTBEAT) { 2290 if (pmb->vport) { 2291 lpfc_debugfs_disc_trc(pmb->vport, 2292 LPFC_DISC_TRC_MBOX_VPORT, 2293 "MBOX cmpl vport: cmd:x%x mb:x%x x%x", 2294 (uint32_t)pmbox->mbxCommand, 2295 pmbox->un.varWords[0], 2296 pmbox->un.varWords[1]); 2297 } 2298 else { 2299 lpfc_debugfs_disc_trc(phba->pport, 2300 LPFC_DISC_TRC_MBOX, 2301 "MBOX cmpl: cmd:x%x mb:x%x x%x", 2302 (uint32_t)pmbox->mbxCommand, 2303 pmbox->un.varWords[0], 2304 pmbox->un.varWords[1]); 2305 } 2306 } 2307 2308 /* 2309 * It is a fatal error if unknown mbox command completion. 2310 */ 2311 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) == 2312 MBX_SHUTDOWN) { 2313 /* Unknown mailbox command compl */ 2314 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2315 "(%d):0323 Unknown Mailbox command " 2316 "x%x (x%x/x%x) Cmpl\n", 2317 pmb->vport ? pmb->vport->vpi : 0, 2318 pmbox->mbxCommand, 2319 lpfc_sli_config_mbox_subsys_get(phba, 2320 pmb), 2321 lpfc_sli_config_mbox_opcode_get(phba, 2322 pmb)); 2323 phba->link_state = LPFC_HBA_ERROR; 2324 phba->work_hs = HS_FFER3; 2325 lpfc_handle_eratt(phba); 2326 continue; 2327 } 2328 2329 if (pmbox->mbxStatus) { 2330 phba->sli.slistat.mbox_stat_err++; 2331 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2332 /* Mbox cmd cmpl error - RETRYing */ 2333 lpfc_printf_log(phba, KERN_INFO, 2334 LOG_MBOX | LOG_SLI, 2335 "(%d):0305 Mbox cmd cmpl " 2336 "error - RETRYing Data: x%x " 2337 "(x%x/x%x) x%x x%x x%x\n", 2338 pmb->vport ? pmb->vport->vpi : 0, 2339 pmbox->mbxCommand, 2340 lpfc_sli_config_mbox_subsys_get(phba, 2341 pmb), 2342 lpfc_sli_config_mbox_opcode_get(phba, 2343 pmb), 2344 pmbox->mbxStatus, 2345 pmbox->un.varWords[0], 2346 pmb->vport->port_state); 2347 pmbox->mbxStatus = 0; 2348 pmbox->mbxOwner = OWN_HOST; 2349 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2350 if (rc != MBX_NOT_FINISHED) 2351 continue; 2352 } 2353 } 2354 2355 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2356 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2357 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " 2358 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 2359 "x%x x%x x%x\n", 2360 pmb->vport ? pmb->vport->vpi : 0, 2361 pmbox->mbxCommand, 2362 lpfc_sli_config_mbox_subsys_get(phba, pmb), 2363 lpfc_sli_config_mbox_opcode_get(phba, pmb), 2364 pmb->mbox_cmpl, 2365 *((uint32_t *) pmbox), 2366 pmbox->un.varWords[0], 2367 pmbox->un.varWords[1], 2368 pmbox->un.varWords[2], 2369 pmbox->un.varWords[3], 2370 pmbox->un.varWords[4], 2371 pmbox->un.varWords[5], 2372 pmbox->un.varWords[6], 2373 pmbox->un.varWords[7], 2374 pmbox->un.varWords[8], 2375 pmbox->un.varWords[9], 2376 pmbox->un.varWords[10]); 2377 2378 if (pmb->mbox_cmpl) 2379 pmb->mbox_cmpl(phba,pmb); 2380 } while (1); 2381 return 0; 2382 } 2383 2384 /** 2385 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag 2386 * @phba: Pointer to HBA context object. 2387 * @pring: Pointer to driver SLI ring object. 2388 * @tag: buffer tag. 2389 * 2390 * This function is called with no lock held. When QUE_BUFTAG_BIT bit 2391 * is set in the tag the buffer is posted for a particular exchange, 2392 * the function will return the buffer without replacing the buffer. 2393 * If the buffer is for unsolicited ELS or CT traffic, this function 2394 * returns the buffer and also posts another buffer to the firmware. 2395 **/ 2396 static struct lpfc_dmabuf * 2397 lpfc_sli_get_buff(struct lpfc_hba *phba, 2398 struct lpfc_sli_ring *pring, 2399 uint32_t tag) 2400 { 2401 struct hbq_dmabuf *hbq_entry; 2402 2403 if (tag & QUE_BUFTAG_BIT) 2404 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag); 2405 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag); 2406 if (!hbq_entry) 2407 return NULL; 2408 return &hbq_entry->dbuf; 2409 } 2410 2411 /** 2412 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence 2413 * @phba: Pointer to HBA context object. 2414 * @pring: Pointer to driver SLI ring object. 2415 * @saveq: Pointer to the iocbq struct representing the sequence starting frame. 2416 * @fch_r_ctl: the r_ctl for the first frame of the sequence. 2417 * @fch_type: the type for the first frame of the sequence. 2418 * 2419 * This function is called with no lock held. This function uses the r_ctl and 2420 * type of the received sequence to find the correct callback function to call 2421 * to process the sequence. 2422 **/ 2423 static int 2424 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2425 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, 2426 uint32_t fch_type) 2427 { 2428 int i; 2429 2430 /* unSolicited Responses */ 2431 if (pring->prt[0].profile) { 2432 if (pring->prt[0].lpfc_sli_rcv_unsol_event) 2433 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, 2434 saveq); 2435 return 1; 2436 } 2437 /* We must search, based on rctl / type 2438 for the right routine */ 2439 for (i = 0; i < pring->num_mask; i++) { 2440 if ((pring->prt[i].rctl == fch_r_ctl) && 2441 (pring->prt[i].type == fch_type)) { 2442 if (pring->prt[i].lpfc_sli_rcv_unsol_event) 2443 (pring->prt[i].lpfc_sli_rcv_unsol_event) 2444 (phba, pring, saveq); 2445 return 1; 2446 } 2447 } 2448 return 0; 2449 } 2450 2451 /** 2452 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler 2453 * @phba: Pointer to HBA context object. 2454 * @pring: Pointer to driver SLI ring object. 2455 * @saveq: Pointer to the unsolicited iocb. 2456 * 2457 * This function is called with no lock held by the ring event handler 2458 * when there is an unsolicited iocb posted to the response ring by the 2459 * firmware. This function gets the buffer associated with the iocbs 2460 * and calls the event handler for the ring. This function handles both 2461 * qring buffers and hbq buffers. 2462 * When the function returns 1 the caller can free the iocb object otherwise 2463 * upper layer functions will free the iocb objects. 2464 **/ 2465 static int 2466 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2467 struct lpfc_iocbq *saveq) 2468 { 2469 IOCB_t * irsp; 2470 WORD5 * w5p; 2471 uint32_t Rctl, Type; 2472 uint32_t match; 2473 struct lpfc_iocbq *iocbq; 2474 struct lpfc_dmabuf *dmzbuf; 2475 2476 match = 0; 2477 irsp = &(saveq->iocb); 2478 2479 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2480 if (pring->lpfc_sli_rcv_async_status) 2481 pring->lpfc_sli_rcv_async_status(phba, pring, saveq); 2482 else 2483 lpfc_printf_log(phba, 2484 KERN_WARNING, 2485 LOG_SLI, 2486 "0316 Ring %d handler: unexpected " 2487 "ASYNC_STATUS iocb received evt_code " 2488 "0x%x\n", 2489 pring->ringno, 2490 irsp->un.asyncstat.evt_code); 2491 return 1; 2492 } 2493 2494 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) && 2495 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) { 2496 if (irsp->ulpBdeCount > 0) { 2497 dmzbuf = lpfc_sli_get_buff(phba, pring, 2498 irsp->un.ulpWord[3]); 2499 lpfc_in_buf_free(phba, dmzbuf); 2500 } 2501 2502 if (irsp->ulpBdeCount > 1) { 2503 dmzbuf = lpfc_sli_get_buff(phba, pring, 2504 irsp->unsli3.sli3Words[3]); 2505 lpfc_in_buf_free(phba, dmzbuf); 2506 } 2507 2508 if (irsp->ulpBdeCount > 2) { 2509 dmzbuf = lpfc_sli_get_buff(phba, pring, 2510 irsp->unsli3.sli3Words[7]); 2511 lpfc_in_buf_free(phba, dmzbuf); 2512 } 2513 2514 return 1; 2515 } 2516 2517 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 2518 if (irsp->ulpBdeCount != 0) { 2519 saveq->context2 = lpfc_sli_get_buff(phba, pring, 2520 irsp->un.ulpWord[3]); 2521 if (!saveq->context2) 2522 lpfc_printf_log(phba, 2523 KERN_ERR, 2524 LOG_SLI, 2525 "0341 Ring %d Cannot find buffer for " 2526 "an unsolicited iocb. tag 0x%x\n", 2527 pring->ringno, 2528 irsp->un.ulpWord[3]); 2529 } 2530 if (irsp->ulpBdeCount == 2) { 2531 saveq->context3 = lpfc_sli_get_buff(phba, pring, 2532 irsp->unsli3.sli3Words[7]); 2533 if (!saveq->context3) 2534 lpfc_printf_log(phba, 2535 KERN_ERR, 2536 LOG_SLI, 2537 "0342 Ring %d Cannot find buffer for an" 2538 " unsolicited iocb. tag 0x%x\n", 2539 pring->ringno, 2540 irsp->unsli3.sli3Words[7]); 2541 } 2542 list_for_each_entry(iocbq, &saveq->list, list) { 2543 irsp = &(iocbq->iocb); 2544 if (irsp->ulpBdeCount != 0) { 2545 iocbq->context2 = lpfc_sli_get_buff(phba, pring, 2546 irsp->un.ulpWord[3]); 2547 if (!iocbq->context2) 2548 lpfc_printf_log(phba, 2549 KERN_ERR, 2550 LOG_SLI, 2551 "0343 Ring %d Cannot find " 2552 "buffer for an unsolicited iocb" 2553 ". tag 0x%x\n", pring->ringno, 2554 irsp->un.ulpWord[3]); 2555 } 2556 if (irsp->ulpBdeCount == 2) { 2557 iocbq->context3 = lpfc_sli_get_buff(phba, pring, 2558 irsp->unsli3.sli3Words[7]); 2559 if (!iocbq->context3) 2560 lpfc_printf_log(phba, 2561 KERN_ERR, 2562 LOG_SLI, 2563 "0344 Ring %d Cannot find " 2564 "buffer for an unsolicited " 2565 "iocb. tag 0x%x\n", 2566 pring->ringno, 2567 irsp->unsli3.sli3Words[7]); 2568 } 2569 } 2570 } 2571 if (irsp->ulpBdeCount != 0 && 2572 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX || 2573 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) { 2574 int found = 0; 2575 2576 /* search continue save q for same XRI */ 2577 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) { 2578 if (iocbq->iocb.unsli3.rcvsli3.ox_id == 2579 saveq->iocb.unsli3.rcvsli3.ox_id) { 2580 list_add_tail(&saveq->list, &iocbq->list); 2581 found = 1; 2582 break; 2583 } 2584 } 2585 if (!found) 2586 list_add_tail(&saveq->clist, 2587 &pring->iocb_continue_saveq); 2588 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) { 2589 list_del_init(&iocbq->clist); 2590 saveq = iocbq; 2591 irsp = &(saveq->iocb); 2592 } else 2593 return 0; 2594 } 2595 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) || 2596 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) || 2597 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) { 2598 Rctl = FC_RCTL_ELS_REQ; 2599 Type = FC_TYPE_ELS; 2600 } else { 2601 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]); 2602 Rctl = w5p->hcsw.Rctl; 2603 Type = w5p->hcsw.Type; 2604 2605 /* Firmware Workaround */ 2606 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) && 2607 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX || 2608 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { 2609 Rctl = FC_RCTL_ELS_REQ; 2610 Type = FC_TYPE_ELS; 2611 w5p->hcsw.Rctl = Rctl; 2612 w5p->hcsw.Type = Type; 2613 } 2614 } 2615 2616 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) 2617 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2618 "0313 Ring %d handler: unexpected Rctl x%x " 2619 "Type x%x received\n", 2620 pring->ringno, Rctl, Type); 2621 2622 return 1; 2623 } 2624 2625 /** 2626 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb 2627 * @phba: Pointer to HBA context object. 2628 * @pring: Pointer to driver SLI ring object. 2629 * @prspiocb: Pointer to response iocb object. 2630 * 2631 * This function looks up the iocb_lookup table to get the command iocb 2632 * corresponding to the given response iocb using the iotag of the 2633 * response iocb. This function is called with the hbalock held. 2634 * This function returns the command iocb object if it finds the command 2635 * iocb else returns NULL. 2636 **/ 2637 static struct lpfc_iocbq * 2638 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, 2639 struct lpfc_sli_ring *pring, 2640 struct lpfc_iocbq *prspiocb) 2641 { 2642 struct lpfc_iocbq *cmd_iocb = NULL; 2643 uint16_t iotag; 2644 2645 iotag = prspiocb->iocb.ulpIoTag; 2646 2647 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2648 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2649 list_del_init(&cmd_iocb->list); 2650 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2651 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2652 } 2653 return cmd_iocb; 2654 } 2655 2656 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2657 "0317 iotag x%x is out off " 2658 "range: max iotag x%x wd0 x%x\n", 2659 iotag, phba->sli.last_iotag, 2660 *(((uint32_t *) &prspiocb->iocb) + 7)); 2661 return NULL; 2662 } 2663 2664 /** 2665 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag 2666 * @phba: Pointer to HBA context object. 2667 * @pring: Pointer to driver SLI ring object. 2668 * @iotag: IOCB tag. 2669 * 2670 * This function looks up the iocb_lookup table to get the command iocb 2671 * corresponding to the given iotag. This function is called with the 2672 * hbalock held. 2673 * This function returns the command iocb object if it finds the command 2674 * iocb else returns NULL. 2675 **/ 2676 static struct lpfc_iocbq * 2677 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, 2678 struct lpfc_sli_ring *pring, uint16_t iotag) 2679 { 2680 struct lpfc_iocbq *cmd_iocb; 2681 2682 if (iotag != 0 && iotag <= phba->sli.last_iotag) { 2683 cmd_iocb = phba->sli.iocbq_lookup[iotag]; 2684 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) { 2685 /* remove from txcmpl queue list */ 2686 list_del_init(&cmd_iocb->list); 2687 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; 2688 return cmd_iocb; 2689 } 2690 } 2691 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2692 "0372 iotag x%x is out off range: max iotag (x%x)\n", 2693 iotag, phba->sli.last_iotag); 2694 return NULL; 2695 } 2696 2697 /** 2698 * lpfc_sli_process_sol_iocb - process solicited iocb completion 2699 * @phba: Pointer to HBA context object. 2700 * @pring: Pointer to driver SLI ring object. 2701 * @saveq: Pointer to the response iocb to be processed. 2702 * 2703 * This function is called by the ring event handler for non-fcp 2704 * rings when there is a new response iocb in the response ring. 2705 * The caller is not required to hold any locks. This function 2706 * gets the command iocb associated with the response iocb and 2707 * calls the completion handler for the command iocb. If there 2708 * is no completion handler, the function will free the resources 2709 * associated with command iocb. If the response iocb is for 2710 * an already aborted command iocb, the status of the completion 2711 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED. 2712 * This function always returns 1. 2713 **/ 2714 static int 2715 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 2716 struct lpfc_iocbq *saveq) 2717 { 2718 struct lpfc_iocbq *cmdiocbp; 2719 int rc = 1; 2720 unsigned long iflag; 2721 2722 /* Based on the iotag field, get the cmd IOCB from the txcmplq */ 2723 spin_lock_irqsave(&phba->hbalock, iflag); 2724 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq); 2725 spin_unlock_irqrestore(&phba->hbalock, iflag); 2726 2727 if (cmdiocbp) { 2728 if (cmdiocbp->iocb_cmpl) { 2729 /* 2730 * If an ELS command failed send an event to mgmt 2731 * application. 2732 */ 2733 if (saveq->iocb.ulpStatus && 2734 (pring->ringno == LPFC_ELS_RING) && 2735 (cmdiocbp->iocb.ulpCommand == 2736 CMD_ELS_REQUEST64_CR)) 2737 lpfc_send_els_failure_event(phba, 2738 cmdiocbp, saveq); 2739 2740 /* 2741 * Post all ELS completions to the worker thread. 2742 * All other are passed to the completion callback. 2743 */ 2744 if (pring->ringno == LPFC_ELS_RING) { 2745 if ((phba->sli_rev < LPFC_SLI_REV4) && 2746 (cmdiocbp->iocb_flag & 2747 LPFC_DRIVER_ABORTED)) { 2748 spin_lock_irqsave(&phba->hbalock, 2749 iflag); 2750 cmdiocbp->iocb_flag &= 2751 ~LPFC_DRIVER_ABORTED; 2752 spin_unlock_irqrestore(&phba->hbalock, 2753 iflag); 2754 saveq->iocb.ulpStatus = 2755 IOSTAT_LOCAL_REJECT; 2756 saveq->iocb.un.ulpWord[4] = 2757 IOERR_SLI_ABORTED; 2758 2759 /* Firmware could still be in progress 2760 * of DMAing payload, so don't free data 2761 * buffer till after a hbeat. 2762 */ 2763 spin_lock_irqsave(&phba->hbalock, 2764 iflag); 2765 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE; 2766 spin_unlock_irqrestore(&phba->hbalock, 2767 iflag); 2768 } 2769 if (phba->sli_rev == LPFC_SLI_REV4) { 2770 if (saveq->iocb_flag & 2771 LPFC_EXCHANGE_BUSY) { 2772 /* Set cmdiocb flag for the 2773 * exchange busy so sgl (xri) 2774 * will not be released until 2775 * the abort xri is received 2776 * from hba. 2777 */ 2778 spin_lock_irqsave( 2779 &phba->hbalock, iflag); 2780 cmdiocbp->iocb_flag |= 2781 LPFC_EXCHANGE_BUSY; 2782 spin_unlock_irqrestore( 2783 &phba->hbalock, iflag); 2784 } 2785 if (cmdiocbp->iocb_flag & 2786 LPFC_DRIVER_ABORTED) { 2787 /* 2788 * Clear LPFC_DRIVER_ABORTED 2789 * bit in case it was driver 2790 * initiated abort. 2791 */ 2792 spin_lock_irqsave( 2793 &phba->hbalock, iflag); 2794 cmdiocbp->iocb_flag &= 2795 ~LPFC_DRIVER_ABORTED; 2796 spin_unlock_irqrestore( 2797 &phba->hbalock, iflag); 2798 cmdiocbp->iocb.ulpStatus = 2799 IOSTAT_LOCAL_REJECT; 2800 cmdiocbp->iocb.un.ulpWord[4] = 2801 IOERR_ABORT_REQUESTED; 2802 /* 2803 * For SLI4, irsiocb contains 2804 * NO_XRI in sli_xritag, it 2805 * shall not affect releasing 2806 * sgl (xri) process. 2807 */ 2808 saveq->iocb.ulpStatus = 2809 IOSTAT_LOCAL_REJECT; 2810 saveq->iocb.un.ulpWord[4] = 2811 IOERR_SLI_ABORTED; 2812 spin_lock_irqsave( 2813 &phba->hbalock, iflag); 2814 saveq->iocb_flag |= 2815 LPFC_DELAY_MEM_FREE; 2816 spin_unlock_irqrestore( 2817 &phba->hbalock, iflag); 2818 } 2819 } 2820 } 2821 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq); 2822 } else 2823 lpfc_sli_release_iocbq(phba, cmdiocbp); 2824 } else { 2825 /* 2826 * Unknown initiating command based on the response iotag. 2827 * This could be the case on the ELS ring because of 2828 * lpfc_els_abort(). 2829 */ 2830 if (pring->ringno != LPFC_ELS_RING) { 2831 /* 2832 * Ring <ringno> handler: unexpected completion IoTag 2833 * <IoTag> 2834 */ 2835 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 2836 "0322 Ring %d handler: " 2837 "unexpected completion IoTag x%x " 2838 "Data: x%x x%x x%x x%x\n", 2839 pring->ringno, 2840 saveq->iocb.ulpIoTag, 2841 saveq->iocb.ulpStatus, 2842 saveq->iocb.un.ulpWord[4], 2843 saveq->iocb.ulpCommand, 2844 saveq->iocb.ulpContext); 2845 } 2846 } 2847 2848 return rc; 2849 } 2850 2851 /** 2852 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler 2853 * @phba: Pointer to HBA context object. 2854 * @pring: Pointer to driver SLI ring object. 2855 * 2856 * This function is called from the iocb ring event handlers when 2857 * put pointer is ahead of the get pointer for a ring. This function signal 2858 * an error attention condition to the worker thread and the worker 2859 * thread will transition the HBA to offline state. 2860 **/ 2861 static void 2862 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 2863 { 2864 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2865 /* 2866 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 2867 * rsp ring <portRspMax> 2868 */ 2869 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 2870 "0312 Ring %d handler: portRspPut %d " 2871 "is bigger than rsp ring %d\n", 2872 pring->ringno, le32_to_cpu(pgp->rspPutInx), 2873 pring->sli.sli3.numRiocb); 2874 2875 phba->link_state = LPFC_HBA_ERROR; 2876 2877 /* 2878 * All error attention handlers are posted to 2879 * worker thread 2880 */ 2881 phba->work_ha |= HA_ERATT; 2882 phba->work_hs = HS_FFER3; 2883 2884 lpfc_worker_wake_up(phba); 2885 2886 return; 2887 } 2888 2889 /** 2890 * lpfc_poll_eratt - Error attention polling timer timeout handler 2891 * @ptr: Pointer to address of HBA context object. 2892 * 2893 * This function is invoked by the Error Attention polling timer when the 2894 * timer times out. It will check the SLI Error Attention register for 2895 * possible attention events. If so, it will post an Error Attention event 2896 * and wake up worker thread to process it. Otherwise, it will set up the 2897 * Error Attention polling timer for the next poll. 2898 **/ 2899 void lpfc_poll_eratt(unsigned long ptr) 2900 { 2901 struct lpfc_hba *phba; 2902 uint32_t eratt = 0, rem; 2903 uint64_t sli_intr, cnt; 2904 2905 phba = (struct lpfc_hba *)ptr; 2906 2907 /* Here we will also keep track of interrupts per sec of the hba */ 2908 sli_intr = phba->sli.slistat.sli_intr; 2909 2910 if (phba->sli.slistat.sli_prev_intr > sli_intr) 2911 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) + 2912 sli_intr); 2913 else 2914 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr); 2915 2916 /* 64-bit integer division not supporte on 32-bit x86 - use do_div */ 2917 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL); 2918 phba->sli.slistat.sli_ips = cnt; 2919 2920 phba->sli.slistat.sli_prev_intr = sli_intr; 2921 2922 /* Check chip HA register for error event */ 2923 eratt = lpfc_sli_check_eratt(phba); 2924 2925 if (eratt) 2926 /* Tell the worker thread there is work to do */ 2927 lpfc_worker_wake_up(phba); 2928 else 2929 /* Restart the timer for next eratt poll */ 2930 mod_timer(&phba->eratt_poll, 2931 jiffies + 2932 msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); 2933 return; 2934 } 2935 2936 2937 /** 2938 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring 2939 * @phba: Pointer to HBA context object. 2940 * @pring: Pointer to driver SLI ring object. 2941 * @mask: Host attention register mask for this ring. 2942 * 2943 * This function is called from the interrupt context when there is a ring 2944 * event for the fcp ring. The caller does not hold any lock. 2945 * The function processes each response iocb in the response ring until it 2946 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with 2947 * LE bit set. The function will call the completion handler of the command iocb 2948 * if the response iocb indicates a completion for a command iocb or it is 2949 * an abort completion. The function will call lpfc_sli_process_unsol_iocb 2950 * function if this is an unsolicited iocb. 2951 * This routine presumes LPFC_FCP_RING handling and doesn't bother 2952 * to check it explicitly. 2953 */ 2954 int 2955 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, 2956 struct lpfc_sli_ring *pring, uint32_t mask) 2957 { 2958 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno]; 2959 IOCB_t *irsp = NULL; 2960 IOCB_t *entry = NULL; 2961 struct lpfc_iocbq *cmdiocbq = NULL; 2962 struct lpfc_iocbq rspiocbq; 2963 uint32_t status; 2964 uint32_t portRspPut, portRspMax; 2965 int rc = 1; 2966 lpfc_iocb_type type; 2967 unsigned long iflag; 2968 uint32_t rsp_cmpl = 0; 2969 2970 spin_lock_irqsave(&phba->hbalock, iflag); 2971 pring->stats.iocb_event++; 2972 2973 /* 2974 * The next available response entry should never exceed the maximum 2975 * entries. If it does, treat it as an adapter hardware error. 2976 */ 2977 portRspMax = pring->sli.sli3.numRiocb; 2978 portRspPut = le32_to_cpu(pgp->rspPutInx); 2979 if (unlikely(portRspPut >= portRspMax)) { 2980 lpfc_sli_rsp_pointers_error(phba, pring); 2981 spin_unlock_irqrestore(&phba->hbalock, iflag); 2982 return 1; 2983 } 2984 if (phba->fcp_ring_in_use) { 2985 spin_unlock_irqrestore(&phba->hbalock, iflag); 2986 return 1; 2987 } else 2988 phba->fcp_ring_in_use = 1; 2989 2990 rmb(); 2991 while (pring->sli.sli3.rspidx != portRspPut) { 2992 /* 2993 * Fetch an entry off the ring and copy it into a local data 2994 * structure. The copy involves a byte-swap since the 2995 * network byte order and pci byte orders are different. 2996 */ 2997 entry = lpfc_resp_iocb(phba, pring); 2998 phba->last_completion_time = jiffies; 2999 3000 if (++pring->sli.sli3.rspidx >= portRspMax) 3001 pring->sli.sli3.rspidx = 0; 3002 3003 lpfc_sli_pcimem_bcopy((uint32_t *) entry, 3004 (uint32_t *) &rspiocbq.iocb, 3005 phba->iocb_rsp_size); 3006 INIT_LIST_HEAD(&(rspiocbq.list)); 3007 irsp = &rspiocbq.iocb; 3008 3009 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK); 3010 pring->stats.iocb_rsp++; 3011 rsp_cmpl++; 3012 3013 if (unlikely(irsp->ulpStatus)) { 3014 /* 3015 * If resource errors reported from HBA, reduce 3016 * queuedepths of the SCSI device. 3017 */ 3018 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3019 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3020 IOERR_NO_RESOURCES)) { 3021 spin_unlock_irqrestore(&phba->hbalock, iflag); 3022 phba->lpfc_rampdown_queue_depth(phba); 3023 spin_lock_irqsave(&phba->hbalock, iflag); 3024 } 3025 3026 /* Rsp ring <ringno> error: IOCB */ 3027 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3028 "0336 Rsp Ring %d error: IOCB Data: " 3029 "x%x x%x x%x x%x x%x x%x x%x x%x\n", 3030 pring->ringno, 3031 irsp->un.ulpWord[0], 3032 irsp->un.ulpWord[1], 3033 irsp->un.ulpWord[2], 3034 irsp->un.ulpWord[3], 3035 irsp->un.ulpWord[4], 3036 irsp->un.ulpWord[5], 3037 *(uint32_t *)&irsp->un1, 3038 *((uint32_t *)&irsp->un1 + 1)); 3039 } 3040 3041 switch (type) { 3042 case LPFC_ABORT_IOCB: 3043 case LPFC_SOL_IOCB: 3044 /* 3045 * Idle exchange closed via ABTS from port. No iocb 3046 * resources need to be recovered. 3047 */ 3048 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) { 3049 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3050 "0333 IOCB cmd 0x%x" 3051 " processed. Skipping" 3052 " completion\n", 3053 irsp->ulpCommand); 3054 break; 3055 } 3056 3057 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, 3058 &rspiocbq); 3059 if (unlikely(!cmdiocbq)) 3060 break; 3061 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) 3062 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 3063 if (cmdiocbq->iocb_cmpl) { 3064 spin_unlock_irqrestore(&phba->hbalock, iflag); 3065 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, 3066 &rspiocbq); 3067 spin_lock_irqsave(&phba->hbalock, iflag); 3068 } 3069 break; 3070 case LPFC_UNSOL_IOCB: 3071 spin_unlock_irqrestore(&phba->hbalock, iflag); 3072 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq); 3073 spin_lock_irqsave(&phba->hbalock, iflag); 3074 break; 3075 default: 3076 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3077 char adaptermsg[LPFC_MAX_ADPTMSG]; 3078 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3079 memcpy(&adaptermsg[0], (uint8_t *) irsp, 3080 MAX_MSG_DATA); 3081 dev_warn(&((phba->pcidev)->dev), 3082 "lpfc%d: %s\n", 3083 phba->brd_no, adaptermsg); 3084 } else { 3085 /* Unknown IOCB command */ 3086 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3087 "0334 Unknown IOCB command " 3088 "Data: x%x, x%x x%x x%x x%x\n", 3089 type, irsp->ulpCommand, 3090 irsp->ulpStatus, 3091 irsp->ulpIoTag, 3092 irsp->ulpContext); 3093 } 3094 break; 3095 } 3096 3097 /* 3098 * The response IOCB has been processed. Update the ring 3099 * pointer in SLIM. If the port response put pointer has not 3100 * been updated, sync the pgp->rspPutInx and fetch the new port 3101 * response put pointer. 3102 */ 3103 writel(pring->sli.sli3.rspidx, 3104 &phba->host_gp[pring->ringno].rspGetInx); 3105 3106 if (pring->sli.sli3.rspidx == portRspPut) 3107 portRspPut = le32_to_cpu(pgp->rspPutInx); 3108 } 3109 3110 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) { 3111 pring->stats.iocb_rsp_full++; 3112 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3113 writel(status, phba->CAregaddr); 3114 readl(phba->CAregaddr); 3115 } 3116 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3117 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3118 pring->stats.iocb_cmd_empty++; 3119 3120 /* Force update of the local copy of cmdGetInx */ 3121 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3122 lpfc_sli_resume_iocb(phba, pring); 3123 3124 if ((pring->lpfc_sli_cmd_available)) 3125 (pring->lpfc_sli_cmd_available) (phba, pring); 3126 3127 } 3128 3129 phba->fcp_ring_in_use = 0; 3130 spin_unlock_irqrestore(&phba->hbalock, iflag); 3131 return rc; 3132 } 3133 3134 /** 3135 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb 3136 * @phba: Pointer to HBA context object. 3137 * @pring: Pointer to driver SLI ring object. 3138 * @rspiocbp: Pointer to driver response IOCB object. 3139 * 3140 * This function is called from the worker thread when there is a slow-path 3141 * response IOCB to process. This function chains all the response iocbs until 3142 * seeing the iocb with the LE bit set. The function will call 3143 * lpfc_sli_process_sol_iocb function if the response iocb indicates a 3144 * completion of a command iocb. The function will call the 3145 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. 3146 * The function frees the resources or calls the completion handler if this 3147 * iocb is an abort completion. The function returns NULL when the response 3148 * iocb has the LE bit set and all the chained iocbs are processed, otherwise 3149 * this function shall chain the iocb on to the iocb_continueq and return the 3150 * response iocb passed in. 3151 **/ 3152 static struct lpfc_iocbq * 3153 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 3154 struct lpfc_iocbq *rspiocbp) 3155 { 3156 struct lpfc_iocbq *saveq; 3157 struct lpfc_iocbq *cmdiocbp; 3158 struct lpfc_iocbq *next_iocb; 3159 IOCB_t *irsp = NULL; 3160 uint32_t free_saveq; 3161 uint8_t iocb_cmd_type; 3162 lpfc_iocb_type type; 3163 unsigned long iflag; 3164 int rc; 3165 3166 spin_lock_irqsave(&phba->hbalock, iflag); 3167 /* First add the response iocb to the countinueq list */ 3168 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); 3169 pring->iocb_continueq_cnt++; 3170 3171 /* Now, determine whether the list is completed for processing */ 3172 irsp = &rspiocbp->iocb; 3173 if (irsp->ulpLe) { 3174 /* 3175 * By default, the driver expects to free all resources 3176 * associated with this iocb completion. 3177 */ 3178 free_saveq = 1; 3179 saveq = list_get_first(&pring->iocb_continueq, 3180 struct lpfc_iocbq, list); 3181 irsp = &(saveq->iocb); 3182 list_del_init(&pring->iocb_continueq); 3183 pring->iocb_continueq_cnt = 0; 3184 3185 pring->stats.iocb_rsp++; 3186 3187 /* 3188 * If resource errors reported from HBA, reduce 3189 * queuedepths of the SCSI device. 3190 */ 3191 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && 3192 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == 3193 IOERR_NO_RESOURCES)) { 3194 spin_unlock_irqrestore(&phba->hbalock, iflag); 3195 phba->lpfc_rampdown_queue_depth(phba); 3196 spin_lock_irqsave(&phba->hbalock, iflag); 3197 } 3198 3199 if (irsp->ulpStatus) { 3200 /* Rsp ring <ringno> error: IOCB */ 3201 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 3202 "0328 Rsp Ring %d error: " 3203 "IOCB Data: " 3204 "x%x x%x x%x x%x " 3205 "x%x x%x x%x x%x " 3206 "x%x x%x x%x x%x " 3207 "x%x x%x x%x x%x\n", 3208 pring->ringno, 3209 irsp->un.ulpWord[0], 3210 irsp->un.ulpWord[1], 3211 irsp->un.ulpWord[2], 3212 irsp->un.ulpWord[3], 3213 irsp->un.ulpWord[4], 3214 irsp->un.ulpWord[5], 3215 *(((uint32_t *) irsp) + 6), 3216 *(((uint32_t *) irsp) + 7), 3217 *(((uint32_t *) irsp) + 8), 3218 *(((uint32_t *) irsp) + 9), 3219 *(((uint32_t *) irsp) + 10), 3220 *(((uint32_t *) irsp) + 11), 3221 *(((uint32_t *) irsp) + 12), 3222 *(((uint32_t *) irsp) + 13), 3223 *(((uint32_t *) irsp) + 14), 3224 *(((uint32_t *) irsp) + 15)); 3225 } 3226 3227 /* 3228 * Fetch the IOCB command type and call the correct completion 3229 * routine. Solicited and Unsolicited IOCBs on the ELS ring 3230 * get freed back to the lpfc_iocb_list by the discovery 3231 * kernel thread. 3232 */ 3233 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; 3234 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); 3235 switch (type) { 3236 case LPFC_SOL_IOCB: 3237 spin_unlock_irqrestore(&phba->hbalock, iflag); 3238 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); 3239 spin_lock_irqsave(&phba->hbalock, iflag); 3240 break; 3241 3242 case LPFC_UNSOL_IOCB: 3243 spin_unlock_irqrestore(&phba->hbalock, iflag); 3244 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); 3245 spin_lock_irqsave(&phba->hbalock, iflag); 3246 if (!rc) 3247 free_saveq = 0; 3248 break; 3249 3250 case LPFC_ABORT_IOCB: 3251 cmdiocbp = NULL; 3252 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) 3253 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, 3254 saveq); 3255 if (cmdiocbp) { 3256 /* Call the specified completion routine */ 3257 if (cmdiocbp->iocb_cmpl) { 3258 spin_unlock_irqrestore(&phba->hbalock, 3259 iflag); 3260 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, 3261 saveq); 3262 spin_lock_irqsave(&phba->hbalock, 3263 iflag); 3264 } else 3265 __lpfc_sli_release_iocbq(phba, 3266 cmdiocbp); 3267 } 3268 break; 3269 3270 case LPFC_UNKNOWN_IOCB: 3271 if (irsp->ulpCommand == CMD_ADAPTER_MSG) { 3272 char adaptermsg[LPFC_MAX_ADPTMSG]; 3273 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); 3274 memcpy(&adaptermsg[0], (uint8_t *)irsp, 3275 MAX_MSG_DATA); 3276 dev_warn(&((phba->pcidev)->dev), 3277 "lpfc%d: %s\n", 3278 phba->brd_no, adaptermsg); 3279 } else { 3280 /* Unknown IOCB command */ 3281 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3282 "0335 Unknown IOCB " 3283 "command Data: x%x " 3284 "x%x x%x x%x\n", 3285 irsp->ulpCommand, 3286 irsp->ulpStatus, 3287 irsp->ulpIoTag, 3288 irsp->ulpContext); 3289 } 3290 break; 3291 } 3292 3293 if (free_saveq) { 3294 list_for_each_entry_safe(rspiocbp, next_iocb, 3295 &saveq->list, list) { 3296 list_del_init(&rspiocbp->list); 3297 __lpfc_sli_release_iocbq(phba, rspiocbp); 3298 } 3299 __lpfc_sli_release_iocbq(phba, saveq); 3300 } 3301 rspiocbp = NULL; 3302 } 3303 spin_unlock_irqrestore(&phba->hbalock, iflag); 3304 return rspiocbp; 3305 } 3306 3307 /** 3308 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs 3309 * @phba: Pointer to HBA context object. 3310 * @pring: Pointer to driver SLI ring object. 3311 * @mask: Host attention register mask for this ring. 3312 * 3313 * This routine wraps the actual slow_ring event process routine from the 3314 * API jump table function pointer from the lpfc_hba struct. 3315 **/ 3316 void 3317 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, 3318 struct lpfc_sli_ring *pring, uint32_t mask) 3319 { 3320 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); 3321 } 3322 3323 /** 3324 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings 3325 * @phba: Pointer to HBA context object. 3326 * @pring: Pointer to driver SLI ring object. 3327 * @mask: Host attention register mask for this ring. 3328 * 3329 * This function is called from the worker thread when there is a ring event 3330 * for non-fcp rings. The caller does not hold any lock. The function will 3331 * remove each response iocb in the response ring and calls the handle 3332 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3333 **/ 3334 static void 3335 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, 3336 struct lpfc_sli_ring *pring, uint32_t mask) 3337 { 3338 struct lpfc_pgp *pgp; 3339 IOCB_t *entry; 3340 IOCB_t *irsp = NULL; 3341 struct lpfc_iocbq *rspiocbp = NULL; 3342 uint32_t portRspPut, portRspMax; 3343 unsigned long iflag; 3344 uint32_t status; 3345 3346 pgp = &phba->port_gp[pring->ringno]; 3347 spin_lock_irqsave(&phba->hbalock, iflag); 3348 pring->stats.iocb_event++; 3349 3350 /* 3351 * The next available response entry should never exceed the maximum 3352 * entries. If it does, treat it as an adapter hardware error. 3353 */ 3354 portRspMax = pring->sli.sli3.numRiocb; 3355 portRspPut = le32_to_cpu(pgp->rspPutInx); 3356 if (portRspPut >= portRspMax) { 3357 /* 3358 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than 3359 * rsp ring <portRspMax> 3360 */ 3361 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3362 "0303 Ring %d handler: portRspPut %d " 3363 "is bigger than rsp ring %d\n", 3364 pring->ringno, portRspPut, portRspMax); 3365 3366 phba->link_state = LPFC_HBA_ERROR; 3367 spin_unlock_irqrestore(&phba->hbalock, iflag); 3368 3369 phba->work_hs = HS_FFER3; 3370 lpfc_handle_eratt(phba); 3371 3372 return; 3373 } 3374 3375 rmb(); 3376 while (pring->sli.sli3.rspidx != portRspPut) { 3377 /* 3378 * Build a completion list and call the appropriate handler. 3379 * The process is to get the next available response iocb, get 3380 * a free iocb from the list, copy the response data into the 3381 * free iocb, insert to the continuation list, and update the 3382 * next response index to slim. This process makes response 3383 * iocb's in the ring available to DMA as fast as possible but 3384 * pays a penalty for a copy operation. Since the iocb is 3385 * only 32 bytes, this penalty is considered small relative to 3386 * the PCI reads for register values and a slim write. When 3387 * the ulpLe field is set, the entire Command has been 3388 * received. 3389 */ 3390 entry = lpfc_resp_iocb(phba, pring); 3391 3392 phba->last_completion_time = jiffies; 3393 rspiocbp = __lpfc_sli_get_iocbq(phba); 3394 if (rspiocbp == NULL) { 3395 printk(KERN_ERR "%s: out of buffers! Failing " 3396 "completion.\n", __func__); 3397 break; 3398 } 3399 3400 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, 3401 phba->iocb_rsp_size); 3402 irsp = &rspiocbp->iocb; 3403 3404 if (++pring->sli.sli3.rspidx >= portRspMax) 3405 pring->sli.sli3.rspidx = 0; 3406 3407 if (pring->ringno == LPFC_ELS_RING) { 3408 lpfc_debugfs_slow_ring_trc(phba, 3409 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x", 3410 *(((uint32_t *) irsp) + 4), 3411 *(((uint32_t *) irsp) + 6), 3412 *(((uint32_t *) irsp) + 7)); 3413 } 3414 3415 writel(pring->sli.sli3.rspidx, 3416 &phba->host_gp[pring->ringno].rspGetInx); 3417 3418 spin_unlock_irqrestore(&phba->hbalock, iflag); 3419 /* Handle the response IOCB */ 3420 rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); 3421 spin_lock_irqsave(&phba->hbalock, iflag); 3422 3423 /* 3424 * If the port response put pointer has not been updated, sync 3425 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port 3426 * response put pointer. 3427 */ 3428 if (pring->sli.sli3.rspidx == portRspPut) { 3429 portRspPut = le32_to_cpu(pgp->rspPutInx); 3430 } 3431 } /* while (pring->sli.sli3.rspidx != portRspPut) */ 3432 3433 if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) { 3434 /* At least one response entry has been freed */ 3435 pring->stats.iocb_rsp_full++; 3436 /* SET RxRE_RSP in Chip Att register */ 3437 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4)); 3438 writel(status, phba->CAregaddr); 3439 readl(phba->CAregaddr); /* flush */ 3440 } 3441 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) { 3442 pring->flag &= ~LPFC_CALL_RING_AVAILABLE; 3443 pring->stats.iocb_cmd_empty++; 3444 3445 /* Force update of the local copy of cmdGetInx */ 3446 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx); 3447 lpfc_sli_resume_iocb(phba, pring); 3448 3449 if ((pring->lpfc_sli_cmd_available)) 3450 (pring->lpfc_sli_cmd_available) (phba, pring); 3451 3452 } 3453 3454 spin_unlock_irqrestore(&phba->hbalock, iflag); 3455 return; 3456 } 3457 3458 /** 3459 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events 3460 * @phba: Pointer to HBA context object. 3461 * @pring: Pointer to driver SLI ring object. 3462 * @mask: Host attention register mask for this ring. 3463 * 3464 * This function is called from the worker thread when there is a pending 3465 * ELS response iocb on the driver internal slow-path response iocb worker 3466 * queue. The caller does not hold any lock. The function will remove each 3467 * response iocb from the response worker queue and calls the handle 3468 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. 3469 **/ 3470 static void 3471 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, 3472 struct lpfc_sli_ring *pring, uint32_t mask) 3473 { 3474 struct lpfc_iocbq *irspiocbq; 3475 struct hbq_dmabuf *dmabuf; 3476 struct lpfc_cq_event *cq_event; 3477 unsigned long iflag; 3478 3479 spin_lock_irqsave(&phba->hbalock, iflag); 3480 phba->hba_flag &= ~HBA_SP_QUEUE_EVT; 3481 spin_unlock_irqrestore(&phba->hbalock, iflag); 3482 while (!list_empty(&phba->sli4_hba.sp_queue_event)) { 3483 /* Get the response iocb from the head of work queue */ 3484 spin_lock_irqsave(&phba->hbalock, iflag); 3485 list_remove_head(&phba->sli4_hba.sp_queue_event, 3486 cq_event, struct lpfc_cq_event, list); 3487 spin_unlock_irqrestore(&phba->hbalock, iflag); 3488 3489 switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { 3490 case CQE_CODE_COMPL_WQE: 3491 irspiocbq = container_of(cq_event, struct lpfc_iocbq, 3492 cq_event); 3493 /* Translate ELS WCQE to response IOCBQ */ 3494 irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba, 3495 irspiocbq); 3496 if (irspiocbq) 3497 lpfc_sli_sp_handle_rspiocb(phba, pring, 3498 irspiocbq); 3499 break; 3500 case CQE_CODE_RECEIVE: 3501 case CQE_CODE_RECEIVE_V1: 3502 dmabuf = container_of(cq_event, struct hbq_dmabuf, 3503 cq_event); 3504 lpfc_sli4_handle_received_buffer(phba, dmabuf); 3505 break; 3506 default: 3507 break; 3508 } 3509 } 3510 } 3511 3512 /** 3513 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring 3514 * @phba: Pointer to HBA context object. 3515 * @pring: Pointer to driver SLI ring object. 3516 * 3517 * This function aborts all iocbs in the given ring and frees all the iocb 3518 * objects in txq. This function issues an abort iocb for all the iocb commands 3519 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3520 * the return of this function. The caller is not required to hold any locks. 3521 **/ 3522 void 3523 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) 3524 { 3525 LIST_HEAD(completions); 3526 struct lpfc_iocbq *iocb, *next_iocb; 3527 3528 if (pring->ringno == LPFC_ELS_RING) { 3529 lpfc_fabric_abort_hba(phba); 3530 } 3531 3532 /* Error everything on txq and txcmplq 3533 * First do the txq. 3534 */ 3535 if (phba->sli_rev >= LPFC_SLI_REV4) { 3536 spin_lock_irq(&pring->ring_lock); 3537 list_splice_init(&pring->txq, &completions); 3538 pring->txq_cnt = 0; 3539 spin_unlock_irq(&pring->ring_lock); 3540 3541 spin_lock_irq(&phba->hbalock); 3542 /* Next issue ABTS for everything on the txcmplq */ 3543 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3544 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3545 spin_unlock_irq(&phba->hbalock); 3546 } else { 3547 spin_lock_irq(&phba->hbalock); 3548 list_splice_init(&pring->txq, &completions); 3549 pring->txq_cnt = 0; 3550 3551 /* Next issue ABTS for everything on the txcmplq */ 3552 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) 3553 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 3554 spin_unlock_irq(&phba->hbalock); 3555 } 3556 3557 /* Cancel all the IOCBs from the completions list */ 3558 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 3559 IOERR_SLI_ABORTED); 3560 } 3561 3562 /** 3563 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings 3564 * @phba: Pointer to HBA context object. 3565 * @pring: Pointer to driver SLI ring object. 3566 * 3567 * This function aborts all iocbs in FCP rings and frees all the iocb 3568 * objects in txq. This function issues an abort iocb for all the iocb commands 3569 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before 3570 * the return of this function. The caller is not required to hold any locks. 3571 **/ 3572 void 3573 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) 3574 { 3575 struct lpfc_sli *psli = &phba->sli; 3576 struct lpfc_sli_ring *pring; 3577 uint32_t i; 3578 3579 /* Look on all the FCP Rings for the iotag */ 3580 if (phba->sli_rev >= LPFC_SLI_REV4) { 3581 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3582 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; 3583 lpfc_sli_abort_iocb_ring(phba, pring); 3584 } 3585 } else { 3586 pring = &psli->ring[psli->fcp_ring]; 3587 lpfc_sli_abort_iocb_ring(phba, pring); 3588 } 3589 } 3590 3591 3592 /** 3593 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring 3594 * @phba: Pointer to HBA context object. 3595 * 3596 * This function flushes all iocbs in the fcp ring and frees all the iocb 3597 * objects in txq and txcmplq. This function will not issue abort iocbs 3598 * for all the iocb commands in txcmplq, they will just be returned with 3599 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI 3600 * slot has been permanently disabled. 3601 **/ 3602 void 3603 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) 3604 { 3605 LIST_HEAD(txq); 3606 LIST_HEAD(txcmplq); 3607 struct lpfc_sli *psli = &phba->sli; 3608 struct lpfc_sli_ring *pring; 3609 uint32_t i; 3610 3611 spin_lock_irq(&phba->hbalock); 3612 /* Indicate the I/O queues are flushed */ 3613 phba->hba_flag |= HBA_FCP_IOQ_FLUSH; 3614 spin_unlock_irq(&phba->hbalock); 3615 3616 /* Look on all the FCP Rings for the iotag */ 3617 if (phba->sli_rev >= LPFC_SLI_REV4) { 3618 for (i = 0; i < phba->cfg_fcp_io_channel; i++) { 3619 pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; 3620 3621 spin_lock_irq(&pring->ring_lock); 3622 /* Retrieve everything on txq */ 3623 list_splice_init(&pring->txq, &txq); 3624 /* Retrieve everything on the txcmplq */ 3625 list_splice_init(&pring->txcmplq, &txcmplq); 3626 pring->txq_cnt = 0; 3627 pring->txcmplq_cnt = 0; 3628 spin_unlock_irq(&pring->ring_lock); 3629 3630 /* Flush the txq */ 3631 lpfc_sli_cancel_iocbs(phba, &txq, 3632 IOSTAT_LOCAL_REJECT, 3633 IOERR_SLI_DOWN); 3634 /* Flush the txcmpq */ 3635 lpfc_sli_cancel_iocbs(phba, &txcmplq, 3636 IOSTAT_LOCAL_REJECT, 3637 IOERR_SLI_DOWN); 3638 } 3639 } else { 3640 pring = &psli->ring[psli->fcp_ring]; 3641 3642 spin_lock_irq(&phba->hbalock); 3643 /* Retrieve everything on txq */ 3644 list_splice_init(&pring->txq, &txq); 3645 /* Retrieve everything on the txcmplq */ 3646 list_splice_init(&pring->txcmplq, &txcmplq); 3647 pring->txq_cnt = 0; 3648 pring->txcmplq_cnt = 0; 3649 spin_unlock_irq(&phba->hbalock); 3650 3651 /* Flush the txq */ 3652 lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, 3653 IOERR_SLI_DOWN); 3654 /* Flush the txcmpq */ 3655 lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, 3656 IOERR_SLI_DOWN); 3657 } 3658 } 3659 3660 /** 3661 * lpfc_sli_brdready_s3 - Check for sli3 host ready status 3662 * @phba: Pointer to HBA context object. 3663 * @mask: Bit mask to be checked. 3664 * 3665 * This function reads the host status register and compares 3666 * with the provided bit mask to check if HBA completed 3667 * the restart. This function will wait in a loop for the 3668 * HBA to complete restart. If the HBA does not restart within 3669 * 15 iterations, the function will reset the HBA again. The 3670 * function returns 1 when HBA fail to restart otherwise returns 3671 * zero. 3672 **/ 3673 static int 3674 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) 3675 { 3676 uint32_t status; 3677 int i = 0; 3678 int retval = 0; 3679 3680 /* Read the HBA Host Status Register */ 3681 if (lpfc_readl(phba->HSregaddr, &status)) 3682 return 1; 3683 3684 /* 3685 * Check status register every 100ms for 5 retries, then every 3686 * 500ms for 5, then every 2.5 sec for 5, then reset board and 3687 * every 2.5 sec for 4. 3688 * Break our of the loop if errors occurred during init. 3689 */ 3690 while (((status & mask) != mask) && 3691 !(status & HS_FFERM) && 3692 i++ < 20) { 3693 3694 if (i <= 5) 3695 msleep(10); 3696 else if (i <= 10) 3697 msleep(500); 3698 else 3699 msleep(2500); 3700 3701 if (i == 15) { 3702 /* Do post */ 3703 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3704 lpfc_sli_brdrestart(phba); 3705 } 3706 /* Read the HBA Host Status Register */ 3707 if (lpfc_readl(phba->HSregaddr, &status)) { 3708 retval = 1; 3709 break; 3710 } 3711 } 3712 3713 /* Check to see if any errors occurred during init */ 3714 if ((status & HS_FFERM) || (i >= 20)) { 3715 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3716 "2751 Adapter failed to restart, " 3717 "status reg x%x, FW Data: A8 x%x AC x%x\n", 3718 status, 3719 readl(phba->MBslimaddr + 0xa8), 3720 readl(phba->MBslimaddr + 0xac)); 3721 phba->link_state = LPFC_HBA_ERROR; 3722 retval = 1; 3723 } 3724 3725 return retval; 3726 } 3727 3728 /** 3729 * lpfc_sli_brdready_s4 - Check for sli4 host ready status 3730 * @phba: Pointer to HBA context object. 3731 * @mask: Bit mask to be checked. 3732 * 3733 * This function checks the host status register to check if HBA is 3734 * ready. This function will wait in a loop for the HBA to be ready 3735 * If the HBA is not ready , the function will will reset the HBA PCI 3736 * function again. The function returns 1 when HBA fail to be ready 3737 * otherwise returns zero. 3738 **/ 3739 static int 3740 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) 3741 { 3742 uint32_t status; 3743 int retval = 0; 3744 3745 /* Read the HBA Host Status Register */ 3746 status = lpfc_sli4_post_status_check(phba); 3747 3748 if (status) { 3749 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 3750 lpfc_sli_brdrestart(phba); 3751 status = lpfc_sli4_post_status_check(phba); 3752 } 3753 3754 /* Check to see if any errors occurred during init */ 3755 if (status) { 3756 phba->link_state = LPFC_HBA_ERROR; 3757 retval = 1; 3758 } else 3759 phba->sli4_hba.intr_enable = 0; 3760 3761 return retval; 3762 } 3763 3764 /** 3765 * lpfc_sli_brdready - Wrapper func for checking the hba readyness 3766 * @phba: Pointer to HBA context object. 3767 * @mask: Bit mask to be checked. 3768 * 3769 * This routine wraps the actual SLI3 or SLI4 hba readyness check routine 3770 * from the API jump table function pointer from the lpfc_hba struct. 3771 **/ 3772 int 3773 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) 3774 { 3775 return phba->lpfc_sli_brdready(phba, mask); 3776 } 3777 3778 #define BARRIER_TEST_PATTERN (0xdeadbeef) 3779 3780 /** 3781 * lpfc_reset_barrier - Make HBA ready for HBA reset 3782 * @phba: Pointer to HBA context object. 3783 * 3784 * This function is called before resetting an HBA. This function is called 3785 * with hbalock held and requests HBA to quiesce DMAs before a reset. 3786 **/ 3787 void lpfc_reset_barrier(struct lpfc_hba *phba) 3788 { 3789 uint32_t __iomem *resp_buf; 3790 uint32_t __iomem *mbox_buf; 3791 volatile uint32_t mbox; 3792 uint32_t hc_copy, ha_copy, resp_data; 3793 int i; 3794 uint8_t hdrtype; 3795 3796 pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); 3797 if (hdrtype != 0x80 || 3798 (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID && 3799 FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID)) 3800 return; 3801 3802 /* 3803 * Tell the other part of the chip to suspend temporarily all 3804 * its DMA activity. 3805 */ 3806 resp_buf = phba->MBslimaddr; 3807 3808 /* Disable the error attention */ 3809 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 3810 return; 3811 writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr); 3812 readl(phba->HCregaddr); /* flush */ 3813 phba->link_flag |= LS_IGNORE_ERATT; 3814 3815 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3816 return; 3817 if (ha_copy & HA_ERATT) { 3818 /* Clear Chip error bit */ 3819 writel(HA_ERATT, phba->HAregaddr); 3820 phba->pport->stopped = 1; 3821 } 3822 3823 mbox = 0; 3824 ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD; 3825 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP; 3826 3827 writel(BARRIER_TEST_PATTERN, (resp_buf + 1)); 3828 mbox_buf = phba->MBslimaddr; 3829 writel(mbox, mbox_buf); 3830 3831 for (i = 0; i < 50; i++) { 3832 if (lpfc_readl((resp_buf + 1), &resp_data)) 3833 return; 3834 if (resp_data != ~(BARRIER_TEST_PATTERN)) 3835 mdelay(1); 3836 else 3837 break; 3838 } 3839 resp_data = 0; 3840 if (lpfc_readl((resp_buf + 1), &resp_data)) 3841 return; 3842 if (resp_data != ~(BARRIER_TEST_PATTERN)) { 3843 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || 3844 phba->pport->stopped) 3845 goto restore_hc; 3846 else 3847 goto clear_errat; 3848 } 3849 3850 ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST; 3851 resp_data = 0; 3852 for (i = 0; i < 500; i++) { 3853 if (lpfc_readl(resp_buf, &resp_data)) 3854 return; 3855 if (resp_data != mbox) 3856 mdelay(1); 3857 else 3858 break; 3859 } 3860 3861 clear_errat: 3862 3863 while (++i < 500) { 3864 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3865 return; 3866 if (!(ha_copy & HA_ERATT)) 3867 mdelay(1); 3868 else 3869 break; 3870 } 3871 3872 if (readl(phba->HAregaddr) & HA_ERATT) { 3873 writel(HA_ERATT, phba->HAregaddr); 3874 phba->pport->stopped = 1; 3875 } 3876 3877 restore_hc: 3878 phba->link_flag &= ~LS_IGNORE_ERATT; 3879 writel(hc_copy, phba->HCregaddr); 3880 readl(phba->HCregaddr); /* flush */ 3881 } 3882 3883 /** 3884 * lpfc_sli_brdkill - Issue a kill_board mailbox command 3885 * @phba: Pointer to HBA context object. 3886 * 3887 * This function issues a kill_board mailbox command and waits for 3888 * the error attention interrupt. This function is called for stopping 3889 * the firmware processing. The caller is not required to hold any 3890 * locks. This function calls lpfc_hba_down_post function to free 3891 * any pending commands after the kill. The function will return 1 when it 3892 * fails to kill the board else will return 0. 3893 **/ 3894 int 3895 lpfc_sli_brdkill(struct lpfc_hba *phba) 3896 { 3897 struct lpfc_sli *psli; 3898 LPFC_MBOXQ_t *pmb; 3899 uint32_t status; 3900 uint32_t ha_copy; 3901 int retval; 3902 int i = 0; 3903 3904 psli = &phba->sli; 3905 3906 /* Kill HBA */ 3907 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3908 "0329 Kill HBA Data: x%x x%x\n", 3909 phba->pport->port_state, psli->sli_flag); 3910 3911 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3912 if (!pmb) 3913 return 1; 3914 3915 /* Disable the error attention */ 3916 spin_lock_irq(&phba->hbalock); 3917 if (lpfc_readl(phba->HCregaddr, &status)) { 3918 spin_unlock_irq(&phba->hbalock); 3919 mempool_free(pmb, phba->mbox_mem_pool); 3920 return 1; 3921 } 3922 status &= ~HC_ERINT_ENA; 3923 writel(status, phba->HCregaddr); 3924 readl(phba->HCregaddr); /* flush */ 3925 phba->link_flag |= LS_IGNORE_ERATT; 3926 spin_unlock_irq(&phba->hbalock); 3927 3928 lpfc_kill_board(phba, pmb); 3929 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 3930 retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 3931 3932 if (retval != MBX_SUCCESS) { 3933 if (retval != MBX_BUSY) 3934 mempool_free(pmb, phba->mbox_mem_pool); 3935 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 3936 "2752 KILL_BOARD command failed retval %d\n", 3937 retval); 3938 spin_lock_irq(&phba->hbalock); 3939 phba->link_flag &= ~LS_IGNORE_ERATT; 3940 spin_unlock_irq(&phba->hbalock); 3941 return 1; 3942 } 3943 3944 spin_lock_irq(&phba->hbalock); 3945 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 3946 spin_unlock_irq(&phba->hbalock); 3947 3948 mempool_free(pmb, phba->mbox_mem_pool); 3949 3950 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error 3951 * attention every 100ms for 3 seconds. If we don't get ERATT after 3952 * 3 seconds we still set HBA_ERROR state because the status of the 3953 * board is now undefined. 3954 */ 3955 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3956 return 1; 3957 while ((i++ < 30) && !(ha_copy & HA_ERATT)) { 3958 mdelay(100); 3959 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 3960 return 1; 3961 } 3962 3963 del_timer_sync(&psli->mbox_tmo); 3964 if (ha_copy & HA_ERATT) { 3965 writel(HA_ERATT, phba->HAregaddr); 3966 phba->pport->stopped = 1; 3967 } 3968 spin_lock_irq(&phba->hbalock); 3969 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 3970 psli->mbox_active = NULL; 3971 phba->link_flag &= ~LS_IGNORE_ERATT; 3972 spin_unlock_irq(&phba->hbalock); 3973 3974 lpfc_hba_down_post(phba); 3975 phba->link_state = LPFC_HBA_ERROR; 3976 3977 return ha_copy & HA_ERATT ? 0 : 1; 3978 } 3979 3980 /** 3981 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA 3982 * @phba: Pointer to HBA context object. 3983 * 3984 * This function resets the HBA by writing HC_INITFF to the control 3985 * register. After the HBA resets, this function resets all the iocb ring 3986 * indices. This function disables PCI layer parity checking during 3987 * the reset. 3988 * This function returns 0 always. 3989 * The caller is not required to hold any locks. 3990 **/ 3991 int 3992 lpfc_sli_brdreset(struct lpfc_hba *phba) 3993 { 3994 struct lpfc_sli *psli; 3995 struct lpfc_sli_ring *pring; 3996 uint16_t cfg_value; 3997 int i; 3998 3999 psli = &phba->sli; 4000 4001 /* Reset HBA */ 4002 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4003 "0325 Reset HBA Data: x%x x%x\n", 4004 phba->pport->port_state, psli->sli_flag); 4005 4006 /* perform board reset */ 4007 phba->fc_eventTag = 0; 4008 phba->link_events = 0; 4009 phba->pport->fc_myDID = 0; 4010 phba->pport->fc_prevDID = 0; 4011 4012 /* Turn off parity checking and serr during the physical reset */ 4013 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4014 pci_write_config_word(phba->pcidev, PCI_COMMAND, 4015 (cfg_value & 4016 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4017 4018 psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); 4019 4020 /* Now toggle INITFF bit in the Host Control Register */ 4021 writel(HC_INITFF, phba->HCregaddr); 4022 mdelay(1); 4023 readl(phba->HCregaddr); /* flush */ 4024 writel(0, phba->HCregaddr); 4025 readl(phba->HCregaddr); /* flush */ 4026 4027 /* Restore PCI cmd register */ 4028 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4029 4030 /* Initialize relevant SLI info */ 4031 for (i = 0; i < psli->num_rings; i++) { 4032 pring = &psli->ring[i]; 4033 pring->flag = 0; 4034 pring->sli.sli3.rspidx = 0; 4035 pring->sli.sli3.next_cmdidx = 0; 4036 pring->sli.sli3.local_getidx = 0; 4037 pring->sli.sli3.cmdidx = 0; 4038 pring->missbufcnt = 0; 4039 } 4040 4041 phba->link_state = LPFC_WARM_START; 4042 return 0; 4043 } 4044 4045 /** 4046 * lpfc_sli4_brdreset - Reset a sli-4 HBA 4047 * @phba: Pointer to HBA context object. 4048 * 4049 * This function resets a SLI4 HBA. This function disables PCI layer parity 4050 * checking during resets the device. The caller is not required to hold 4051 * any locks. 4052 * 4053 * This function returns 0 always. 4054 **/ 4055 int 4056 lpfc_sli4_brdreset(struct lpfc_hba *phba) 4057 { 4058 struct lpfc_sli *psli = &phba->sli; 4059 uint16_t cfg_value; 4060 int rc = 0; 4061 4062 /* Reset HBA */ 4063 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4064 "0295 Reset HBA Data: x%x x%x x%x\n", 4065 phba->pport->port_state, psli->sli_flag, 4066 phba->hba_flag); 4067 4068 /* perform board reset */ 4069 phba->fc_eventTag = 0; 4070 phba->link_events = 0; 4071 phba->pport->fc_myDID = 0; 4072 phba->pport->fc_prevDID = 0; 4073 4074 spin_lock_irq(&phba->hbalock); 4075 psli->sli_flag &= ~(LPFC_PROCESS_LA); 4076 phba->fcf.fcf_flag = 0; 4077 spin_unlock_irq(&phba->hbalock); 4078 4079 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ 4080 if (phba->hba_flag & HBA_FW_DUMP_OP) { 4081 phba->hba_flag &= ~HBA_FW_DUMP_OP; 4082 return rc; 4083 } 4084 4085 /* Now physically reset the device */ 4086 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4087 "0389 Performing PCI function reset!\n"); 4088 4089 /* Turn off parity checking and serr during the physical reset */ 4090 pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); 4091 pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value & 4092 ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); 4093 4094 /* Perform FCoE PCI function reset before freeing queue memory */ 4095 rc = lpfc_pci_function_reset(phba); 4096 lpfc_sli4_queue_destroy(phba); 4097 4098 /* Restore PCI cmd register */ 4099 pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value); 4100 4101 return rc; 4102 } 4103 4104 /** 4105 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba 4106 * @phba: Pointer to HBA context object. 4107 * 4108 * This function is called in the SLI initialization code path to 4109 * restart the HBA. The caller is not required to hold any lock. 4110 * This function writes MBX_RESTART mailbox command to the SLIM and 4111 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post 4112 * function to free any pending commands. The function enables 4113 * POST only during the first initialization. The function returns zero. 4114 * The function does not guarantee completion of MBX_RESTART mailbox 4115 * command before the return of this function. 4116 **/ 4117 static int 4118 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) 4119 { 4120 MAILBOX_t *mb; 4121 struct lpfc_sli *psli; 4122 volatile uint32_t word0; 4123 void __iomem *to_slim; 4124 uint32_t hba_aer_enabled; 4125 4126 spin_lock_irq(&phba->hbalock); 4127 4128 /* Take PCIe device Advanced Error Reporting (AER) state */ 4129 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4130 4131 psli = &phba->sli; 4132 4133 /* Restart HBA */ 4134 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4135 "0337 Restart HBA Data: x%x x%x\n", 4136 phba->pport->port_state, psli->sli_flag); 4137 4138 word0 = 0; 4139 mb = (MAILBOX_t *) &word0; 4140 mb->mbxCommand = MBX_RESTART; 4141 mb->mbxHc = 1; 4142 4143 lpfc_reset_barrier(phba); 4144 4145 to_slim = phba->MBslimaddr; 4146 writel(*(uint32_t *) mb, to_slim); 4147 readl(to_slim); /* flush */ 4148 4149 /* Only skip post after fc_ffinit is completed */ 4150 if (phba->pport->port_state) 4151 word0 = 1; /* This is really setting up word1 */ 4152 else 4153 word0 = 0; /* This is really setting up word1 */ 4154 to_slim = phba->MBslimaddr + sizeof (uint32_t); 4155 writel(*(uint32_t *) mb, to_slim); 4156 readl(to_slim); /* flush */ 4157 4158 lpfc_sli_brdreset(phba); 4159 phba->pport->stopped = 0; 4160 phba->link_state = LPFC_INIT_START; 4161 phba->hba_flag = 0; 4162 spin_unlock_irq(&phba->hbalock); 4163 4164 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4165 psli->stats_start = get_seconds(); 4166 4167 /* Give the INITFF and Post time to settle. */ 4168 mdelay(100); 4169 4170 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4171 if (hba_aer_enabled) 4172 pci_disable_pcie_error_reporting(phba->pcidev); 4173 4174 lpfc_hba_down_post(phba); 4175 4176 return 0; 4177 } 4178 4179 /** 4180 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba 4181 * @phba: Pointer to HBA context object. 4182 * 4183 * This function is called in the SLI initialization code path to restart 4184 * a SLI4 HBA. The caller is not required to hold any lock. 4185 * At the end of the function, it calls lpfc_hba_down_post function to 4186 * free any pending commands. 4187 **/ 4188 static int 4189 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) 4190 { 4191 struct lpfc_sli *psli = &phba->sli; 4192 uint32_t hba_aer_enabled; 4193 int rc; 4194 4195 /* Restart HBA */ 4196 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4197 "0296 Restart HBA Data: x%x x%x\n", 4198 phba->pport->port_state, psli->sli_flag); 4199 4200 /* Take PCIe device Advanced Error Reporting (AER) state */ 4201 hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; 4202 4203 rc = lpfc_sli4_brdreset(phba); 4204 4205 spin_lock_irq(&phba->hbalock); 4206 phba->pport->stopped = 0; 4207 phba->link_state = LPFC_INIT_START; 4208 phba->hba_flag = 0; 4209 spin_unlock_irq(&phba->hbalock); 4210 4211 memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); 4212 psli->stats_start = get_seconds(); 4213 4214 /* Reset HBA AER if it was enabled, note hba_flag was reset above */ 4215 if (hba_aer_enabled) 4216 pci_disable_pcie_error_reporting(phba->pcidev); 4217 4218 lpfc_hba_down_post(phba); 4219 4220 return rc; 4221 } 4222 4223 /** 4224 * lpfc_sli_brdrestart - Wrapper func for restarting hba 4225 * @phba: Pointer to HBA context object. 4226 * 4227 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the 4228 * API jump table function pointer from the lpfc_hba struct. 4229 **/ 4230 int 4231 lpfc_sli_brdrestart(struct lpfc_hba *phba) 4232 { 4233 return phba->lpfc_sli_brdrestart(phba); 4234 } 4235 4236 /** 4237 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart 4238 * @phba: Pointer to HBA context object. 4239 * 4240 * This function is called after a HBA restart to wait for successful 4241 * restart of the HBA. Successful restart of the HBA is indicated by 4242 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15 4243 * iteration, the function will restart the HBA again. The function returns 4244 * zero if HBA successfully restarted else returns negative error code. 4245 **/ 4246 static int 4247 lpfc_sli_chipset_init(struct lpfc_hba *phba) 4248 { 4249 uint32_t status, i = 0; 4250 4251 /* Read the HBA Host Status Register */ 4252 if (lpfc_readl(phba->HSregaddr, &status)) 4253 return -EIO; 4254 4255 /* Check status register to see what current state is */ 4256 i = 0; 4257 while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) { 4258 4259 /* Check every 10ms for 10 retries, then every 100ms for 90 4260 * retries, then every 1 sec for 50 retires for a total of 4261 * ~60 seconds before reset the board again and check every 4262 * 1 sec for 50 retries. The up to 60 seconds before the 4263 * board ready is required by the Falcon FIPS zeroization 4264 * complete, and any reset the board in between shall cause 4265 * restart of zeroization, further delay the board ready. 4266 */ 4267 if (i++ >= 200) { 4268 /* Adapter failed to init, timeout, status reg 4269 <status> */ 4270 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4271 "0436 Adapter failed to init, " 4272 "timeout, status reg x%x, " 4273 "FW Data: A8 x%x AC x%x\n", status, 4274 readl(phba->MBslimaddr + 0xa8), 4275 readl(phba->MBslimaddr + 0xac)); 4276 phba->link_state = LPFC_HBA_ERROR; 4277 return -ETIMEDOUT; 4278 } 4279 4280 /* Check to see if any errors occurred during init */ 4281 if (status & HS_FFERM) { 4282 /* ERROR: During chipset initialization */ 4283 /* Adapter failed to init, chipset, status reg 4284 <status> */ 4285 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4286 "0437 Adapter failed to init, " 4287 "chipset, status reg x%x, " 4288 "FW Data: A8 x%x AC x%x\n", status, 4289 readl(phba->MBslimaddr + 0xa8), 4290 readl(phba->MBslimaddr + 0xac)); 4291 phba->link_state = LPFC_HBA_ERROR; 4292 return -EIO; 4293 } 4294 4295 if (i <= 10) 4296 msleep(10); 4297 else if (i <= 100) 4298 msleep(100); 4299 else 4300 msleep(1000); 4301 4302 if (i == 150) { 4303 /* Do post */ 4304 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4305 lpfc_sli_brdrestart(phba); 4306 } 4307 /* Read the HBA Host Status Register */ 4308 if (lpfc_readl(phba->HSregaddr, &status)) 4309 return -EIO; 4310 } 4311 4312 /* Check to see if any errors occurred during init */ 4313 if (status & HS_FFERM) { 4314 /* ERROR: During chipset initialization */ 4315 /* Adapter failed to init, chipset, status reg <status> */ 4316 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4317 "0438 Adapter failed to init, chipset, " 4318 "status reg x%x, " 4319 "FW Data: A8 x%x AC x%x\n", status, 4320 readl(phba->MBslimaddr + 0xa8), 4321 readl(phba->MBslimaddr + 0xac)); 4322 phba->link_state = LPFC_HBA_ERROR; 4323 return -EIO; 4324 } 4325 4326 /* Clear all interrupt enable conditions */ 4327 writel(0, phba->HCregaddr); 4328 readl(phba->HCregaddr); /* flush */ 4329 4330 /* setup host attn register */ 4331 writel(0xffffffff, phba->HAregaddr); 4332 readl(phba->HAregaddr); /* flush */ 4333 return 0; 4334 } 4335 4336 /** 4337 * lpfc_sli_hbq_count - Get the number of HBQs to be configured 4338 * 4339 * This function calculates and returns the number of HBQs required to be 4340 * configured. 4341 **/ 4342 int 4343 lpfc_sli_hbq_count(void) 4344 { 4345 return ARRAY_SIZE(lpfc_hbq_defs); 4346 } 4347 4348 /** 4349 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries 4350 * 4351 * This function adds the number of hbq entries in every HBQ to get 4352 * the total number of hbq entries required for the HBA and returns 4353 * the total count. 4354 **/ 4355 static int 4356 lpfc_sli_hbq_entry_count(void) 4357 { 4358 int hbq_count = lpfc_sli_hbq_count(); 4359 int count = 0; 4360 int i; 4361 4362 for (i = 0; i < hbq_count; ++i) 4363 count += lpfc_hbq_defs[i]->entry_count; 4364 return count; 4365 } 4366 4367 /** 4368 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries 4369 * 4370 * This function calculates amount of memory required for all hbq entries 4371 * to be configured and returns the total memory required. 4372 **/ 4373 int 4374 lpfc_sli_hbq_size(void) 4375 { 4376 return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry); 4377 } 4378 4379 /** 4380 * lpfc_sli_hbq_setup - configure and initialize HBQs 4381 * @phba: Pointer to HBA context object. 4382 * 4383 * This function is called during the SLI initialization to configure 4384 * all the HBQs and post buffers to the HBQ. The caller is not 4385 * required to hold any locks. This function will return zero if successful 4386 * else it will return negative error code. 4387 **/ 4388 static int 4389 lpfc_sli_hbq_setup(struct lpfc_hba *phba) 4390 { 4391 int hbq_count = lpfc_sli_hbq_count(); 4392 LPFC_MBOXQ_t *pmb; 4393 MAILBOX_t *pmbox; 4394 uint32_t hbqno; 4395 uint32_t hbq_entry_index; 4396 4397 /* Get a Mailbox buffer to setup mailbox 4398 * commands for HBA initialization 4399 */ 4400 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4401 4402 if (!pmb) 4403 return -ENOMEM; 4404 4405 pmbox = &pmb->u.mb; 4406 4407 /* Initialize the struct lpfc_sli_hbq structure for each hbq */ 4408 phba->link_state = LPFC_INIT_MBX_CMDS; 4409 phba->hbq_in_use = 1; 4410 4411 hbq_entry_index = 0; 4412 for (hbqno = 0; hbqno < hbq_count; ++hbqno) { 4413 phba->hbqs[hbqno].next_hbqPutIdx = 0; 4414 phba->hbqs[hbqno].hbqPutIdx = 0; 4415 phba->hbqs[hbqno].local_hbqGetIdx = 0; 4416 phba->hbqs[hbqno].entry_count = 4417 lpfc_hbq_defs[hbqno]->entry_count; 4418 lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno], 4419 hbq_entry_index, pmb); 4420 hbq_entry_index += phba->hbqs[hbqno].entry_count; 4421 4422 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { 4423 /* Adapter failed to init, mbxCmd <cmd> CFG_RING, 4424 mbxStatus <status>, ring <num> */ 4425 4426 lpfc_printf_log(phba, KERN_ERR, 4427 LOG_SLI | LOG_VPORT, 4428 "1805 Adapter failed to init. " 4429 "Data: x%x x%x x%x\n", 4430 pmbox->mbxCommand, 4431 pmbox->mbxStatus, hbqno); 4432 4433 phba->link_state = LPFC_HBA_ERROR; 4434 mempool_free(pmb, phba->mbox_mem_pool); 4435 return -ENXIO; 4436 } 4437 } 4438 phba->hbq_count = hbq_count; 4439 4440 mempool_free(pmb, phba->mbox_mem_pool); 4441 4442 /* Initially populate or replenish the HBQs */ 4443 for (hbqno = 0; hbqno < hbq_count; ++hbqno) 4444 lpfc_sli_hbqbuf_init_hbqs(phba, hbqno); 4445 return 0; 4446 } 4447 4448 /** 4449 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA 4450 * @phba: Pointer to HBA context object. 4451 * 4452 * This function is called during the SLI initialization to configure 4453 * all the HBQs and post buffers to the HBQ. The caller is not 4454 * required to hold any locks. This function will return zero if successful 4455 * else it will return negative error code. 4456 **/ 4457 static int 4458 lpfc_sli4_rb_setup(struct lpfc_hba *phba) 4459 { 4460 phba->hbq_in_use = 1; 4461 phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; 4462 phba->hbq_count = 1; 4463 /* Initially populate or replenish the HBQs */ 4464 lpfc_sli_hbqbuf_init_hbqs(phba, 0); 4465 return 0; 4466 } 4467 4468 /** 4469 * lpfc_sli_config_port - Issue config port mailbox command 4470 * @phba: Pointer to HBA context object. 4471 * @sli_mode: sli mode - 2/3 4472 * 4473 * This function is called by the sli intialization code path 4474 * to issue config_port mailbox command. This function restarts the 4475 * HBA firmware and issues a config_port mailbox command to configure 4476 * the SLI interface in the sli mode specified by sli_mode 4477 * variable. The caller is not required to hold any locks. 4478 * The function returns 0 if successful, else returns negative error 4479 * code. 4480 **/ 4481 int 4482 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) 4483 { 4484 LPFC_MBOXQ_t *pmb; 4485 uint32_t resetcount = 0, rc = 0, done = 0; 4486 4487 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4488 if (!pmb) { 4489 phba->link_state = LPFC_HBA_ERROR; 4490 return -ENOMEM; 4491 } 4492 4493 phba->sli_rev = sli_mode; 4494 while (resetcount < 2 && !done) { 4495 spin_lock_irq(&phba->hbalock); 4496 phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE; 4497 spin_unlock_irq(&phba->hbalock); 4498 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 4499 lpfc_sli_brdrestart(phba); 4500 rc = lpfc_sli_chipset_init(phba); 4501 if (rc) 4502 break; 4503 4504 spin_lock_irq(&phba->hbalock); 4505 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 4506 spin_unlock_irq(&phba->hbalock); 4507 resetcount++; 4508 4509 /* Call pre CONFIG_PORT mailbox command initialization. A 4510 * value of 0 means the call was successful. Any other 4511 * nonzero value is a failure, but if ERESTART is returned, 4512 * the driver may reset the HBA and try again. 4513 */ 4514 rc = lpfc_config_port_prep(phba); 4515 if (rc == -ERESTART) { 4516 phba->link_state = LPFC_LINK_UNKNOWN; 4517 continue; 4518 } else if (rc) 4519 break; 4520 4521 phba->link_state = LPFC_INIT_MBX_CMDS; 4522 lpfc_config_port(phba, pmb); 4523 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 4524 phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED | 4525 LPFC_SLI3_HBQ_ENABLED | 4526 LPFC_SLI3_CRP_ENABLED | 4527 LPFC_SLI3_BG_ENABLED | 4528 LPFC_SLI3_DSS_ENABLED); 4529 if (rc != MBX_SUCCESS) { 4530 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4531 "0442 Adapter failed to init, mbxCmd x%x " 4532 "CONFIG_PORT, mbxStatus x%x Data: x%x\n", 4533 pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); 4534 spin_lock_irq(&phba->hbalock); 4535 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; 4536 spin_unlock_irq(&phba->hbalock); 4537 rc = -ENXIO; 4538 } else { 4539 /* Allow asynchronous mailbox command to go through */ 4540 spin_lock_irq(&phba->hbalock); 4541 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 4542 spin_unlock_irq(&phba->hbalock); 4543 done = 1; 4544 4545 if ((pmb->u.mb.un.varCfgPort.casabt == 1) && 4546 (pmb->u.mb.un.varCfgPort.gasabt == 0)) 4547 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 4548 "3110 Port did not grant ASABT\n"); 4549 } 4550 } 4551 if (!done) { 4552 rc = -EINVAL; 4553 goto do_prep_failed; 4554 } 4555 if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { 4556 if (!pmb->u.mb.un.varCfgPort.cMA) { 4557 rc = -ENXIO; 4558 goto do_prep_failed; 4559 } 4560 if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { 4561 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 4562 phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; 4563 phba->max_vports = (phba->max_vpi > phba->max_vports) ? 4564 phba->max_vpi : phba->max_vports; 4565 4566 } else 4567 phba->max_vpi = 0; 4568 phba->fips_level = 0; 4569 phba->fips_spec_rev = 0; 4570 if (pmb->u.mb.un.varCfgPort.gdss) { 4571 phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; 4572 phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level; 4573 phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev; 4574 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4575 "2850 Security Crypto Active. FIPS x%d " 4576 "(Spec Rev: x%d)", 4577 phba->fips_level, phba->fips_spec_rev); 4578 } 4579 if (pmb->u.mb.un.varCfgPort.sec_err) { 4580 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4581 "2856 Config Port Security Crypto " 4582 "Error: x%x ", 4583 pmb->u.mb.un.varCfgPort.sec_err); 4584 } 4585 if (pmb->u.mb.un.varCfgPort.gerbm) 4586 phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; 4587 if (pmb->u.mb.un.varCfgPort.gcrp) 4588 phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; 4589 4590 phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get; 4591 phba->port_gp = phba->mbox->us.s3_pgp.port; 4592 4593 if (phba->cfg_enable_bg) { 4594 if (pmb->u.mb.un.varCfgPort.gbg) 4595 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 4596 else 4597 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4598 "0443 Adapter did not grant " 4599 "BlockGuard\n"); 4600 } 4601 } else { 4602 phba->hbq_get = NULL; 4603 phba->port_gp = phba->mbox->us.s2.port; 4604 phba->max_vpi = 0; 4605 } 4606 do_prep_failed: 4607 mempool_free(pmb, phba->mbox_mem_pool); 4608 return rc; 4609 } 4610 4611 4612 /** 4613 * lpfc_sli_hba_setup - SLI intialization function 4614 * @phba: Pointer to HBA context object. 4615 * 4616 * This function is the main SLI intialization function. This function 4617 * is called by the HBA intialization code, HBA reset code and HBA 4618 * error attention handler code. Caller is not required to hold any 4619 * locks. This function issues config_port mailbox command to configure 4620 * the SLI, setup iocb rings and HBQ rings. In the end the function 4621 * calls the config_port_post function to issue init_link mailbox 4622 * command and to start the discovery. The function will return zero 4623 * if successful, else it will return negative error code. 4624 **/ 4625 int 4626 lpfc_sli_hba_setup(struct lpfc_hba *phba) 4627 { 4628 uint32_t rc; 4629 int mode = 3, i; 4630 int longs; 4631 4632 switch (lpfc_sli_mode) { 4633 case 2: 4634 if (phba->cfg_enable_npiv) { 4635 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4636 "1824 NPIV enabled: Override lpfc_sli_mode " 4637 "parameter (%d) to auto (0).\n", 4638 lpfc_sli_mode); 4639 break; 4640 } 4641 mode = 2; 4642 break; 4643 case 0: 4644 case 3: 4645 break; 4646 default: 4647 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4648 "1819 Unrecognized lpfc_sli_mode " 4649 "parameter: %d.\n", lpfc_sli_mode); 4650 4651 break; 4652 } 4653 4654 rc = lpfc_sli_config_port(phba, mode); 4655 4656 if (rc && lpfc_sli_mode == 3) 4657 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT, 4658 "1820 Unable to select SLI-3. " 4659 "Not supported by adapter.\n"); 4660 if (rc && mode != 2) 4661 rc = lpfc_sli_config_port(phba, 2); 4662 if (rc) 4663 goto lpfc_sli_hba_setup_error; 4664 4665 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 4666 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 4667 rc = pci_enable_pcie_error_reporting(phba->pcidev); 4668 if (!rc) { 4669 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4670 "2709 This device supports " 4671 "Advanced Error Reporting (AER)\n"); 4672 spin_lock_irq(&phba->hbalock); 4673 phba->hba_flag |= HBA_AER_ENABLED; 4674 spin_unlock_irq(&phba->hbalock); 4675 } else { 4676 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4677 "2708 This device does not support " 4678 "Advanced Error Reporting (AER): %d\n", 4679 rc); 4680 phba->cfg_aer_support = 0; 4681 } 4682 } 4683 4684 if (phba->sli_rev == 3) { 4685 phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE; 4686 phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE; 4687 } else { 4688 phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE; 4689 phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE; 4690 phba->sli3_options = 0; 4691 } 4692 4693 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 4694 "0444 Firmware in SLI %x mode. Max_vpi %d\n", 4695 phba->sli_rev, phba->max_vpi); 4696 rc = lpfc_sli_ring_map(phba); 4697 4698 if (rc) 4699 goto lpfc_sli_hba_setup_error; 4700 4701 /* Initialize VPIs. */ 4702 if (phba->sli_rev == LPFC_SLI_REV3) { 4703 /* 4704 * The VPI bitmask and physical ID array are allocated 4705 * and initialized once only - at driver load. A port 4706 * reset doesn't need to reinitialize this memory. 4707 */ 4708 if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) { 4709 longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG; 4710 phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), 4711 GFP_KERNEL); 4712 if (!phba->vpi_bmask) { 4713 rc = -ENOMEM; 4714 goto lpfc_sli_hba_setup_error; 4715 } 4716 4717 phba->vpi_ids = kzalloc( 4718 (phba->max_vpi+1) * sizeof(uint16_t), 4719 GFP_KERNEL); 4720 if (!phba->vpi_ids) { 4721 kfree(phba->vpi_bmask); 4722 rc = -ENOMEM; 4723 goto lpfc_sli_hba_setup_error; 4724 } 4725 for (i = 0; i < phba->max_vpi; i++) 4726 phba->vpi_ids[i] = i; 4727 } 4728 } 4729 4730 /* Init HBQs */ 4731 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) { 4732 rc = lpfc_sli_hbq_setup(phba); 4733 if (rc) 4734 goto lpfc_sli_hba_setup_error; 4735 } 4736 spin_lock_irq(&phba->hbalock); 4737 phba->sli.sli_flag |= LPFC_PROCESS_LA; 4738 spin_unlock_irq(&phba->hbalock); 4739 4740 rc = lpfc_config_port_post(phba); 4741 if (rc) 4742 goto lpfc_sli_hba_setup_error; 4743 4744 return rc; 4745 4746 lpfc_sli_hba_setup_error: 4747 phba->link_state = LPFC_HBA_ERROR; 4748 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4749 "0445 Firmware initialization failed\n"); 4750 return rc; 4751 } 4752 4753 /** 4754 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region 4755 * @phba: Pointer to HBA context object. 4756 * @mboxq: mailbox pointer. 4757 * This function issue a dump mailbox command to read config region 4758 * 23 and parse the records in the region and populate driver 4759 * data structure. 4760 **/ 4761 static int 4762 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba) 4763 { 4764 LPFC_MBOXQ_t *mboxq; 4765 struct lpfc_dmabuf *mp; 4766 struct lpfc_mqe *mqe; 4767 uint32_t data_length; 4768 int rc; 4769 4770 /* Program the default value of vlan_id and fc_map */ 4771 phba->valid_vlan = 0; 4772 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; 4773 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4774 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4775 4776 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4777 if (!mboxq) 4778 return -ENOMEM; 4779 4780 mqe = &mboxq->u.mqe; 4781 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) { 4782 rc = -ENOMEM; 4783 goto out_free_mboxq; 4784 } 4785 4786 mp = (struct lpfc_dmabuf *) mboxq->context1; 4787 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4788 4789 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 4790 "(%d):2571 Mailbox cmd x%x Status x%x " 4791 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4792 "x%x x%x x%x x%x x%x x%x x%x x%x x%x " 4793 "CQ: x%x x%x x%x x%x\n", 4794 mboxq->vport ? mboxq->vport->vpi : 0, 4795 bf_get(lpfc_mqe_command, mqe), 4796 bf_get(lpfc_mqe_status, mqe), 4797 mqe->un.mb_words[0], mqe->un.mb_words[1], 4798 mqe->un.mb_words[2], mqe->un.mb_words[3], 4799 mqe->un.mb_words[4], mqe->un.mb_words[5], 4800 mqe->un.mb_words[6], mqe->un.mb_words[7], 4801 mqe->un.mb_words[8], mqe->un.mb_words[9], 4802 mqe->un.mb_words[10], mqe->un.mb_words[11], 4803 mqe->un.mb_words[12], mqe->un.mb_words[13], 4804 mqe->un.mb_words[14], mqe->un.mb_words[15], 4805 mqe->un.mb_words[16], mqe->un.mb_words[50], 4806 mboxq->mcqe.word0, 4807 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 4808 mboxq->mcqe.trailer); 4809 4810 if (rc) { 4811 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4812 kfree(mp); 4813 rc = -EIO; 4814 goto out_free_mboxq; 4815 } 4816 data_length = mqe->un.mb_words[5]; 4817 if (data_length > DMP_RGN23_SIZE) { 4818 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4819 kfree(mp); 4820 rc = -EIO; 4821 goto out_free_mboxq; 4822 } 4823 4824 lpfc_parse_fcoe_conf(phba, mp->virt, data_length); 4825 lpfc_mbuf_free(phba, mp->virt, mp->phys); 4826 kfree(mp); 4827 rc = 0; 4828 4829 out_free_mboxq: 4830 mempool_free(mboxq, phba->mbox_mem_pool); 4831 return rc; 4832 } 4833 4834 /** 4835 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data 4836 * @phba: pointer to lpfc hba data structure. 4837 * @mboxq: pointer to the LPFC_MBOXQ_t structure. 4838 * @vpd: pointer to the memory to hold resulting port vpd data. 4839 * @vpd_size: On input, the number of bytes allocated to @vpd. 4840 * On output, the number of data bytes in @vpd. 4841 * 4842 * This routine executes a READ_REV SLI4 mailbox command. In 4843 * addition, this routine gets the port vpd data. 4844 * 4845 * Return codes 4846 * 0 - successful 4847 * -ENOMEM - could not allocated memory. 4848 **/ 4849 static int 4850 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 4851 uint8_t *vpd, uint32_t *vpd_size) 4852 { 4853 int rc = 0; 4854 uint32_t dma_size; 4855 struct lpfc_dmabuf *dmabuf; 4856 struct lpfc_mqe *mqe; 4857 4858 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 4859 if (!dmabuf) 4860 return -ENOMEM; 4861 4862 /* 4863 * Get a DMA buffer for the vpd data resulting from the READ_REV 4864 * mailbox command. 4865 */ 4866 dma_size = *vpd_size; 4867 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 4868 dma_size, 4869 &dmabuf->phys, 4870 GFP_KERNEL); 4871 if (!dmabuf->virt) { 4872 kfree(dmabuf); 4873 return -ENOMEM; 4874 } 4875 memset(dmabuf->virt, 0, dma_size); 4876 4877 /* 4878 * The SLI4 implementation of READ_REV conflicts at word1, 4879 * bits 31:16 and SLI4 adds vpd functionality not present 4880 * in SLI3. This code corrects the conflicts. 4881 */ 4882 lpfc_read_rev(phba, mboxq); 4883 mqe = &mboxq->u.mqe; 4884 mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); 4885 mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); 4886 mqe->un.read_rev.word1 &= 0x0000FFFF; 4887 bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); 4888 bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); 4889 4890 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4891 if (rc) { 4892 dma_free_coherent(&phba->pcidev->dev, dma_size, 4893 dmabuf->virt, dmabuf->phys); 4894 kfree(dmabuf); 4895 return -EIO; 4896 } 4897 4898 /* 4899 * The available vpd length cannot be bigger than the 4900 * DMA buffer passed to the port. Catch the less than 4901 * case and update the caller's size. 4902 */ 4903 if (mqe->un.read_rev.avail_vpd_len < *vpd_size) 4904 *vpd_size = mqe->un.read_rev.avail_vpd_len; 4905 4906 memcpy(vpd, dmabuf->virt, *vpd_size); 4907 4908 dma_free_coherent(&phba->pcidev->dev, dma_size, 4909 dmabuf->virt, dmabuf->phys); 4910 kfree(dmabuf); 4911 return 0; 4912 } 4913 4914 /** 4915 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name 4916 * @phba: pointer to lpfc hba data structure. 4917 * 4918 * This routine retrieves SLI4 device physical port name this PCI function 4919 * is attached to. 4920 * 4921 * Return codes 4922 * 0 - successful 4923 * otherwise - failed to retrieve physical port name 4924 **/ 4925 static int 4926 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba) 4927 { 4928 LPFC_MBOXQ_t *mboxq; 4929 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr; 4930 struct lpfc_controller_attribute *cntl_attr; 4931 struct lpfc_mbx_get_port_name *get_port_name; 4932 void *virtaddr = NULL; 4933 uint32_t alloclen, reqlen; 4934 uint32_t shdr_status, shdr_add_status; 4935 union lpfc_sli4_cfg_shdr *shdr; 4936 char cport_name = 0; 4937 int rc; 4938 4939 /* We assume nothing at this point */ 4940 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4941 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON; 4942 4943 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 4944 if (!mboxq) 4945 return -ENOMEM; 4946 /* obtain link type and link number via READ_CONFIG */ 4947 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL; 4948 lpfc_sli4_read_config(phba); 4949 if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) 4950 goto retrieve_ppname; 4951 4952 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */ 4953 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes); 4954 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4955 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen, 4956 LPFC_SLI4_MBX_NEMBED); 4957 if (alloclen < reqlen) { 4958 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 4959 "3084 Allocated DMA memory size (%d) is " 4960 "less than the requested DMA memory size " 4961 "(%d)\n", alloclen, reqlen); 4962 rc = -ENOMEM; 4963 goto out_free_mboxq; 4964 } 4965 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 4966 virtaddr = mboxq->sge_array->addr[0]; 4967 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr; 4968 shdr = &mbx_cntl_attr->cfg_shdr; 4969 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 4970 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 4971 if (shdr_status || shdr_add_status || rc) { 4972 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 4973 "3085 Mailbox x%x (x%x/x%x) failed, " 4974 "rc:x%x, status:x%x, add_status:x%x\n", 4975 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 4976 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 4977 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 4978 rc, shdr_status, shdr_add_status); 4979 rc = -ENXIO; 4980 goto out_free_mboxq; 4981 } 4982 cntl_attr = &mbx_cntl_attr->cntl_attr; 4983 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL; 4984 phba->sli4_hba.lnk_info.lnk_tp = 4985 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr); 4986 phba->sli4_hba.lnk_info.lnk_no = 4987 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr); 4988 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 4989 "3086 lnk_type:%d, lnk_numb:%d\n", 4990 phba->sli4_hba.lnk_info.lnk_tp, 4991 phba->sli4_hba.lnk_info.lnk_no); 4992 4993 retrieve_ppname: 4994 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 4995 LPFC_MBOX_OPCODE_GET_PORT_NAME, 4996 sizeof(struct lpfc_mbx_get_port_name) - 4997 sizeof(struct lpfc_sli4_cfg_mhdr), 4998 LPFC_SLI4_MBX_EMBED); 4999 get_port_name = &mboxq->u.mqe.un.get_port_name; 5000 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; 5001 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); 5002 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, 5003 phba->sli4_hba.lnk_info.lnk_tp); 5004 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 5005 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 5006 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 5007 if (shdr_status || shdr_add_status || rc) { 5008 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 5009 "3087 Mailbox x%x (x%x/x%x) failed: " 5010 "rc:x%x, status:x%x, add_status:x%x\n", 5011 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 5012 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 5013 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 5014 rc, shdr_status, shdr_add_status); 5015 rc = -ENXIO; 5016 goto out_free_mboxq; 5017 } 5018 switch (phba->sli4_hba.lnk_info.lnk_no) { 5019 case LPFC_LINK_NUMBER_0: 5020 cport_name = bf_get(lpfc_mbx_get_port_name_name0, 5021 &get_port_name->u.response); 5022 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5023 break; 5024 case LPFC_LINK_NUMBER_1: 5025 cport_name = bf_get(lpfc_mbx_get_port_name_name1, 5026 &get_port_name->u.response); 5027 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5028 break; 5029 case LPFC_LINK_NUMBER_2: 5030 cport_name = bf_get(lpfc_mbx_get_port_name_name2, 5031 &get_port_name->u.response); 5032 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5033 break; 5034 case LPFC_LINK_NUMBER_3: 5035 cport_name = bf_get(lpfc_mbx_get_port_name_name3, 5036 &get_port_name->u.response); 5037 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; 5038 break; 5039 default: 5040 break; 5041 } 5042 5043 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { 5044 phba->Port[0] = cport_name; 5045 phba->Port[1] = '\0'; 5046 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5047 "3091 SLI get port name: %s\n", phba->Port); 5048 } 5049 5050 out_free_mboxq: 5051 if (rc != MBX_TIMEOUT) { 5052 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) 5053 lpfc_sli4_mbox_cmd_free(phba, mboxq); 5054 else 5055 mempool_free(mboxq, phba->mbox_mem_pool); 5056 } 5057 return rc; 5058 } 5059 5060 /** 5061 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 5062 * @phba: pointer to lpfc hba data structure. 5063 * 5064 * This routine is called to explicitly arm the SLI4 device's completion and 5065 * event queues 5066 **/ 5067 static void 5068 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) 5069 { 5070 int fcp_eqidx; 5071 5072 lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); 5073 lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); 5074 fcp_eqidx = 0; 5075 if (phba->sli4_hba.fcp_cq) { 5076 do { 5077 lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], 5078 LPFC_QUEUE_REARM); 5079 } while (++fcp_eqidx < phba->cfg_fcp_io_channel); 5080 } 5081 5082 if (phba->cfg_fof) 5083 lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); 5084 5085 if (phba->sli4_hba.hba_eq) { 5086 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; 5087 fcp_eqidx++) 5088 lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx], 5089 LPFC_QUEUE_REARM); 5090 } 5091 5092 if (phba->cfg_fof) 5093 lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM); 5094 } 5095 5096 /** 5097 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count. 5098 * @phba: Pointer to HBA context object. 5099 * @type: The resource extent type. 5100 * @extnt_count: buffer to hold port available extent count. 5101 * @extnt_size: buffer to hold element count per extent. 5102 * 5103 * This function calls the port and retrievs the number of available 5104 * extents and their size for a particular extent type. 5105 * 5106 * Returns: 0 if successful. Nonzero otherwise. 5107 **/ 5108 int 5109 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type, 5110 uint16_t *extnt_count, uint16_t *extnt_size) 5111 { 5112 int rc = 0; 5113 uint32_t length; 5114 uint32_t mbox_tmo; 5115 struct lpfc_mbx_get_rsrc_extent_info *rsrc_info; 5116 LPFC_MBOXQ_t *mbox; 5117 5118 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5119 if (!mbox) 5120 return -ENOMEM; 5121 5122 /* Find out how many extents are available for this resource type */ 5123 length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) - 5124 sizeof(struct lpfc_sli4_cfg_mhdr)); 5125 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5126 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO, 5127 length, LPFC_SLI4_MBX_EMBED); 5128 5129 /* Send an extents count of 0 - the GET doesn't use it. */ 5130 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5131 LPFC_SLI4_MBX_EMBED); 5132 if (unlikely(rc)) { 5133 rc = -EIO; 5134 goto err_exit; 5135 } 5136 5137 if (!phba->sli4_hba.intr_enable) 5138 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5139 else { 5140 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5141 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5142 } 5143 if (unlikely(rc)) { 5144 rc = -EIO; 5145 goto err_exit; 5146 } 5147 5148 rsrc_info = &mbox->u.mqe.un.rsrc_extent_info; 5149 if (bf_get(lpfc_mbox_hdr_status, 5150 &rsrc_info->header.cfg_shdr.response)) { 5151 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5152 "2930 Failed to get resource extents " 5153 "Status 0x%x Add'l Status 0x%x\n", 5154 bf_get(lpfc_mbox_hdr_status, 5155 &rsrc_info->header.cfg_shdr.response), 5156 bf_get(lpfc_mbox_hdr_add_status, 5157 &rsrc_info->header.cfg_shdr.response)); 5158 rc = -EIO; 5159 goto err_exit; 5160 } 5161 5162 *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt, 5163 &rsrc_info->u.rsp); 5164 *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size, 5165 &rsrc_info->u.rsp); 5166 5167 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 5168 "3162 Retrieved extents type-%d from port: count:%d, " 5169 "size:%d\n", type, *extnt_count, *extnt_size); 5170 5171 err_exit: 5172 mempool_free(mbox, phba->mbox_mem_pool); 5173 return rc; 5174 } 5175 5176 /** 5177 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents. 5178 * @phba: Pointer to HBA context object. 5179 * @type: The extent type to check. 5180 * 5181 * This function reads the current available extents from the port and checks 5182 * if the extent count or extent size has changed since the last access. 5183 * Callers use this routine post port reset to understand if there is a 5184 * extent reprovisioning requirement. 5185 * 5186 * Returns: 5187 * -Error: error indicates problem. 5188 * 1: Extent count or size has changed. 5189 * 0: No changes. 5190 **/ 5191 static int 5192 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type) 5193 { 5194 uint16_t curr_ext_cnt, rsrc_ext_cnt; 5195 uint16_t size_diff, rsrc_ext_size; 5196 int rc = 0; 5197 struct lpfc_rsrc_blks *rsrc_entry; 5198 struct list_head *rsrc_blk_list = NULL; 5199 5200 size_diff = 0; 5201 curr_ext_cnt = 0; 5202 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5203 &rsrc_ext_cnt, 5204 &rsrc_ext_size); 5205 if (unlikely(rc)) 5206 return -EIO; 5207 5208 switch (type) { 5209 case LPFC_RSC_TYPE_FCOE_RPI: 5210 rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5211 break; 5212 case LPFC_RSC_TYPE_FCOE_VPI: 5213 rsrc_blk_list = &phba->lpfc_vpi_blk_list; 5214 break; 5215 case LPFC_RSC_TYPE_FCOE_XRI: 5216 rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5217 break; 5218 case LPFC_RSC_TYPE_FCOE_VFI: 5219 rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5220 break; 5221 default: 5222 break; 5223 } 5224 5225 list_for_each_entry(rsrc_entry, rsrc_blk_list, list) { 5226 curr_ext_cnt++; 5227 if (rsrc_entry->rsrc_size != rsrc_ext_size) 5228 size_diff++; 5229 } 5230 5231 if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0) 5232 rc = 1; 5233 5234 return rc; 5235 } 5236 5237 /** 5238 * lpfc_sli4_cfg_post_extnts - 5239 * @phba: Pointer to HBA context object. 5240 * @extnt_cnt - number of available extents. 5241 * @type - the extent type (rpi, xri, vfi, vpi). 5242 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation. 5243 * @mbox - pointer to the caller's allocated mailbox structure. 5244 * 5245 * This function executes the extents allocation request. It also 5246 * takes care of the amount of memory needed to allocate or get the 5247 * allocated extents. It is the caller's responsibility to evaluate 5248 * the response. 5249 * 5250 * Returns: 5251 * -Error: Error value describes the condition found. 5252 * 0: if successful 5253 **/ 5254 static int 5255 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt, 5256 uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox) 5257 { 5258 int rc = 0; 5259 uint32_t req_len; 5260 uint32_t emb_len; 5261 uint32_t alloc_len, mbox_tmo; 5262 5263 /* Calculate the total requested length of the dma memory */ 5264 req_len = extnt_cnt * sizeof(uint16_t); 5265 5266 /* 5267 * Calculate the size of an embedded mailbox. The uint32_t 5268 * accounts for extents-specific word. 5269 */ 5270 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 5271 sizeof(uint32_t); 5272 5273 /* 5274 * Presume the allocation and response will fit into an embedded 5275 * mailbox. If not true, reconfigure to a non-embedded mailbox. 5276 */ 5277 *emb = LPFC_SLI4_MBX_EMBED; 5278 if (req_len > emb_len) { 5279 req_len = extnt_cnt * sizeof(uint16_t) + 5280 sizeof(union lpfc_sli4_cfg_shdr) + 5281 sizeof(uint32_t); 5282 *emb = LPFC_SLI4_MBX_NEMBED; 5283 } 5284 5285 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5286 LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT, 5287 req_len, *emb); 5288 if (alloc_len < req_len) { 5289 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 5290 "2982 Allocated DMA memory size (x%x) is " 5291 "less than the requested DMA memory " 5292 "size (x%x)\n", alloc_len, req_len); 5293 return -ENOMEM; 5294 } 5295 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb); 5296 if (unlikely(rc)) 5297 return -EIO; 5298 5299 if (!phba->sli4_hba.intr_enable) 5300 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5301 else { 5302 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5303 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5304 } 5305 5306 if (unlikely(rc)) 5307 rc = -EIO; 5308 return rc; 5309 } 5310 5311 /** 5312 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent. 5313 * @phba: Pointer to HBA context object. 5314 * @type: The resource extent type to allocate. 5315 * 5316 * This function allocates the number of elements for the specified 5317 * resource type. 5318 **/ 5319 static int 5320 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type) 5321 { 5322 bool emb = false; 5323 uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size; 5324 uint16_t rsrc_id, rsrc_start, j, k; 5325 uint16_t *ids; 5326 int i, rc; 5327 unsigned long longs; 5328 unsigned long *bmask; 5329 struct lpfc_rsrc_blks *rsrc_blks; 5330 LPFC_MBOXQ_t *mbox; 5331 uint32_t length; 5332 struct lpfc_id_range *id_array = NULL; 5333 void *virtaddr = NULL; 5334 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5335 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5336 struct list_head *ext_blk_list; 5337 5338 rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type, 5339 &rsrc_cnt, 5340 &rsrc_size); 5341 if (unlikely(rc)) 5342 return -EIO; 5343 5344 if ((rsrc_cnt == 0) || (rsrc_size == 0)) { 5345 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5346 "3009 No available Resource Extents " 5347 "for resource type 0x%x: Count: 0x%x, " 5348 "Size 0x%x\n", type, rsrc_cnt, 5349 rsrc_size); 5350 return -ENOMEM; 5351 } 5352 5353 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI, 5354 "2903 Post resource extents type-0x%x: " 5355 "count:%d, size %d\n", type, rsrc_cnt, rsrc_size); 5356 5357 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5358 if (!mbox) 5359 return -ENOMEM; 5360 5361 rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox); 5362 if (unlikely(rc)) { 5363 rc = -EIO; 5364 goto err_exit; 5365 } 5366 5367 /* 5368 * Figure out where the response is located. Then get local pointers 5369 * to the response data. The port does not guarantee to respond to 5370 * all extents counts request so update the local variable with the 5371 * allocated count from the port. 5372 */ 5373 if (emb == LPFC_SLI4_MBX_EMBED) { 5374 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 5375 id_array = &rsrc_ext->u.rsp.id[0]; 5376 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 5377 } else { 5378 virtaddr = mbox->sge_array->addr[0]; 5379 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 5380 rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 5381 id_array = &n_rsrc->id; 5382 } 5383 5384 longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG; 5385 rsrc_id_cnt = rsrc_cnt * rsrc_size; 5386 5387 /* 5388 * Based on the resource size and count, correct the base and max 5389 * resource values. 5390 */ 5391 length = sizeof(struct lpfc_rsrc_blks); 5392 switch (type) { 5393 case LPFC_RSC_TYPE_FCOE_RPI: 5394 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5395 sizeof(unsigned long), 5396 GFP_KERNEL); 5397 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5398 rc = -ENOMEM; 5399 goto err_exit; 5400 } 5401 phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt * 5402 sizeof(uint16_t), 5403 GFP_KERNEL); 5404 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5405 kfree(phba->sli4_hba.rpi_bmask); 5406 rc = -ENOMEM; 5407 goto err_exit; 5408 } 5409 5410 /* 5411 * The next_rpi was initialized with the maximum available 5412 * count but the port may allocate a smaller number. Catch 5413 * that case and update the next_rpi. 5414 */ 5415 phba->sli4_hba.next_rpi = rsrc_id_cnt; 5416 5417 /* Initialize local ptrs for common extent processing later. */ 5418 bmask = phba->sli4_hba.rpi_bmask; 5419 ids = phba->sli4_hba.rpi_ids; 5420 ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list; 5421 break; 5422 case LPFC_RSC_TYPE_FCOE_VPI: 5423 phba->vpi_bmask = kzalloc(longs * 5424 sizeof(unsigned long), 5425 GFP_KERNEL); 5426 if (unlikely(!phba->vpi_bmask)) { 5427 rc = -ENOMEM; 5428 goto err_exit; 5429 } 5430 phba->vpi_ids = kzalloc(rsrc_id_cnt * 5431 sizeof(uint16_t), 5432 GFP_KERNEL); 5433 if (unlikely(!phba->vpi_ids)) { 5434 kfree(phba->vpi_bmask); 5435 rc = -ENOMEM; 5436 goto err_exit; 5437 } 5438 5439 /* Initialize local ptrs for common extent processing later. */ 5440 bmask = phba->vpi_bmask; 5441 ids = phba->vpi_ids; 5442 ext_blk_list = &phba->lpfc_vpi_blk_list; 5443 break; 5444 case LPFC_RSC_TYPE_FCOE_XRI: 5445 phba->sli4_hba.xri_bmask = kzalloc(longs * 5446 sizeof(unsigned long), 5447 GFP_KERNEL); 5448 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5449 rc = -ENOMEM; 5450 goto err_exit; 5451 } 5452 phba->sli4_hba.max_cfg_param.xri_used = 0; 5453 phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt * 5454 sizeof(uint16_t), 5455 GFP_KERNEL); 5456 if (unlikely(!phba->sli4_hba.xri_ids)) { 5457 kfree(phba->sli4_hba.xri_bmask); 5458 rc = -ENOMEM; 5459 goto err_exit; 5460 } 5461 5462 /* Initialize local ptrs for common extent processing later. */ 5463 bmask = phba->sli4_hba.xri_bmask; 5464 ids = phba->sli4_hba.xri_ids; 5465 ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list; 5466 break; 5467 case LPFC_RSC_TYPE_FCOE_VFI: 5468 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5469 sizeof(unsigned long), 5470 GFP_KERNEL); 5471 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5472 rc = -ENOMEM; 5473 goto err_exit; 5474 } 5475 phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt * 5476 sizeof(uint16_t), 5477 GFP_KERNEL); 5478 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5479 kfree(phba->sli4_hba.vfi_bmask); 5480 rc = -ENOMEM; 5481 goto err_exit; 5482 } 5483 5484 /* Initialize local ptrs for common extent processing later. */ 5485 bmask = phba->sli4_hba.vfi_bmask; 5486 ids = phba->sli4_hba.vfi_ids; 5487 ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list; 5488 break; 5489 default: 5490 /* Unsupported Opcode. Fail call. */ 5491 id_array = NULL; 5492 bmask = NULL; 5493 ids = NULL; 5494 ext_blk_list = NULL; 5495 goto err_exit; 5496 } 5497 5498 /* 5499 * Complete initializing the extent configuration with the 5500 * allocated ids assigned to this function. The bitmask serves 5501 * as an index into the array and manages the available ids. The 5502 * array just stores the ids communicated to the port via the wqes. 5503 */ 5504 for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) { 5505 if ((i % 2) == 0) 5506 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0, 5507 &id_array[k]); 5508 else 5509 rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1, 5510 &id_array[k]); 5511 5512 rsrc_blks = kzalloc(length, GFP_KERNEL); 5513 if (unlikely(!rsrc_blks)) { 5514 rc = -ENOMEM; 5515 kfree(bmask); 5516 kfree(ids); 5517 goto err_exit; 5518 } 5519 rsrc_blks->rsrc_start = rsrc_id; 5520 rsrc_blks->rsrc_size = rsrc_size; 5521 list_add_tail(&rsrc_blks->list, ext_blk_list); 5522 rsrc_start = rsrc_id; 5523 if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) 5524 phba->sli4_hba.scsi_xri_start = rsrc_start + 5525 lpfc_sli4_get_els_iocb_cnt(phba); 5526 5527 while (rsrc_id < (rsrc_start + rsrc_size)) { 5528 ids[j] = rsrc_id; 5529 rsrc_id++; 5530 j++; 5531 } 5532 /* Entire word processed. Get next word.*/ 5533 if ((i % 2) == 1) 5534 k++; 5535 } 5536 err_exit: 5537 lpfc_sli4_mbox_cmd_free(phba, mbox); 5538 return rc; 5539 } 5540 5541 /** 5542 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent. 5543 * @phba: Pointer to HBA context object. 5544 * @type: the extent's type. 5545 * 5546 * This function deallocates all extents of a particular resource type. 5547 * SLI4 does not allow for deallocating a particular extent range. It 5548 * is the caller's responsibility to release all kernel memory resources. 5549 **/ 5550 static int 5551 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) 5552 { 5553 int rc; 5554 uint32_t length, mbox_tmo = 0; 5555 LPFC_MBOXQ_t *mbox; 5556 struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc; 5557 struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next; 5558 5559 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5560 if (!mbox) 5561 return -ENOMEM; 5562 5563 /* 5564 * This function sends an embedded mailbox because it only sends the 5565 * the resource type. All extents of this type are released by the 5566 * port. 5567 */ 5568 length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) - 5569 sizeof(struct lpfc_sli4_cfg_mhdr)); 5570 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 5571 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT, 5572 length, LPFC_SLI4_MBX_EMBED); 5573 5574 /* Send an extents count of 0 - the dealloc doesn't use it. */ 5575 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type, 5576 LPFC_SLI4_MBX_EMBED); 5577 if (unlikely(rc)) { 5578 rc = -EIO; 5579 goto out_free_mbox; 5580 } 5581 if (!phba->sli4_hba.intr_enable) 5582 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5583 else { 5584 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 5585 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5586 } 5587 if (unlikely(rc)) { 5588 rc = -EIO; 5589 goto out_free_mbox; 5590 } 5591 5592 dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents; 5593 if (bf_get(lpfc_mbox_hdr_status, 5594 &dealloc_rsrc->header.cfg_shdr.response)) { 5595 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 5596 "2919 Failed to release resource extents " 5597 "for type %d - Status 0x%x Add'l Status 0x%x. " 5598 "Resource memory not released.\n", 5599 type, 5600 bf_get(lpfc_mbox_hdr_status, 5601 &dealloc_rsrc->header.cfg_shdr.response), 5602 bf_get(lpfc_mbox_hdr_add_status, 5603 &dealloc_rsrc->header.cfg_shdr.response)); 5604 rc = -EIO; 5605 goto out_free_mbox; 5606 } 5607 5608 /* Release kernel memory resources for the specific type. */ 5609 switch (type) { 5610 case LPFC_RSC_TYPE_FCOE_VPI: 5611 kfree(phba->vpi_bmask); 5612 kfree(phba->vpi_ids); 5613 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5614 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5615 &phba->lpfc_vpi_blk_list, list) { 5616 list_del_init(&rsrc_blk->list); 5617 kfree(rsrc_blk); 5618 } 5619 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5620 break; 5621 case LPFC_RSC_TYPE_FCOE_XRI: 5622 kfree(phba->sli4_hba.xri_bmask); 5623 kfree(phba->sli4_hba.xri_ids); 5624 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5625 &phba->sli4_hba.lpfc_xri_blk_list, list) { 5626 list_del_init(&rsrc_blk->list); 5627 kfree(rsrc_blk); 5628 } 5629 break; 5630 case LPFC_RSC_TYPE_FCOE_VFI: 5631 kfree(phba->sli4_hba.vfi_bmask); 5632 kfree(phba->sli4_hba.vfi_ids); 5633 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5634 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5635 &phba->sli4_hba.lpfc_vfi_blk_list, list) { 5636 list_del_init(&rsrc_blk->list); 5637 kfree(rsrc_blk); 5638 } 5639 break; 5640 case LPFC_RSC_TYPE_FCOE_RPI: 5641 /* RPI bitmask and physical id array are cleaned up earlier. */ 5642 list_for_each_entry_safe(rsrc_blk, rsrc_blk_next, 5643 &phba->sli4_hba.lpfc_rpi_blk_list, list) { 5644 list_del_init(&rsrc_blk->list); 5645 kfree(rsrc_blk); 5646 } 5647 break; 5648 default: 5649 break; 5650 } 5651 5652 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5653 5654 out_free_mbox: 5655 mempool_free(mbox, phba->mbox_mem_pool); 5656 return rc; 5657 } 5658 5659 /** 5660 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents. 5661 * @phba: Pointer to HBA context object. 5662 * 5663 * This function allocates all SLI4 resource identifiers. 5664 **/ 5665 int 5666 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba) 5667 { 5668 int i, rc, error = 0; 5669 uint16_t count, base; 5670 unsigned long longs; 5671 5672 if (!phba->sli4_hba.rpi_hdrs_in_use) 5673 phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 5674 if (phba->sli4_hba.extents_in_use) { 5675 /* 5676 * The port supports resource extents. The XRI, VPI, VFI, RPI 5677 * resource extent count must be read and allocated before 5678 * provisioning the resource id arrays. 5679 */ 5680 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5681 LPFC_IDX_RSRC_RDY) { 5682 /* 5683 * Extent-based resources are set - the driver could 5684 * be in a port reset. Figure out if any corrective 5685 * actions need to be taken. 5686 */ 5687 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5688 LPFC_RSC_TYPE_FCOE_VFI); 5689 if (rc != 0) 5690 error++; 5691 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5692 LPFC_RSC_TYPE_FCOE_VPI); 5693 if (rc != 0) 5694 error++; 5695 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5696 LPFC_RSC_TYPE_FCOE_XRI); 5697 if (rc != 0) 5698 error++; 5699 rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, 5700 LPFC_RSC_TYPE_FCOE_RPI); 5701 if (rc != 0) 5702 error++; 5703 5704 /* 5705 * It's possible that the number of resources 5706 * provided to this port instance changed between 5707 * resets. Detect this condition and reallocate 5708 * resources. Otherwise, there is no action. 5709 */ 5710 if (error) { 5711 lpfc_printf_log(phba, KERN_INFO, 5712 LOG_MBOX | LOG_INIT, 5713 "2931 Detected extent resource " 5714 "change. Reallocating all " 5715 "extents.\n"); 5716 rc = lpfc_sli4_dealloc_extent(phba, 5717 LPFC_RSC_TYPE_FCOE_VFI); 5718 rc = lpfc_sli4_dealloc_extent(phba, 5719 LPFC_RSC_TYPE_FCOE_VPI); 5720 rc = lpfc_sli4_dealloc_extent(phba, 5721 LPFC_RSC_TYPE_FCOE_XRI); 5722 rc = lpfc_sli4_dealloc_extent(phba, 5723 LPFC_RSC_TYPE_FCOE_RPI); 5724 } else 5725 return 0; 5726 } 5727 5728 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5729 if (unlikely(rc)) 5730 goto err_exit; 5731 5732 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5733 if (unlikely(rc)) 5734 goto err_exit; 5735 5736 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5737 if (unlikely(rc)) 5738 goto err_exit; 5739 5740 rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5741 if (unlikely(rc)) 5742 goto err_exit; 5743 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5744 LPFC_IDX_RSRC_RDY); 5745 return rc; 5746 } else { 5747 /* 5748 * The port does not support resource extents. The XRI, VPI, 5749 * VFI, RPI resource ids were determined from READ_CONFIG. 5750 * Just allocate the bitmasks and provision the resource id 5751 * arrays. If a port reset is active, the resources don't 5752 * need any action - just exit. 5753 */ 5754 if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) == 5755 LPFC_IDX_RSRC_RDY) { 5756 lpfc_sli4_dealloc_resource_identifiers(phba); 5757 lpfc_sli4_remove_rpis(phba); 5758 } 5759 /* RPIs. */ 5760 count = phba->sli4_hba.max_cfg_param.max_rpi; 5761 if (count <= 0) { 5762 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5763 "3279 Invalid provisioning of " 5764 "rpi:%d\n", count); 5765 rc = -EINVAL; 5766 goto err_exit; 5767 } 5768 base = phba->sli4_hba.max_cfg_param.rpi_base; 5769 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5770 phba->sli4_hba.rpi_bmask = kzalloc(longs * 5771 sizeof(unsigned long), 5772 GFP_KERNEL); 5773 if (unlikely(!phba->sli4_hba.rpi_bmask)) { 5774 rc = -ENOMEM; 5775 goto err_exit; 5776 } 5777 phba->sli4_hba.rpi_ids = kzalloc(count * 5778 sizeof(uint16_t), 5779 GFP_KERNEL); 5780 if (unlikely(!phba->sli4_hba.rpi_ids)) { 5781 rc = -ENOMEM; 5782 goto free_rpi_bmask; 5783 } 5784 5785 for (i = 0; i < count; i++) 5786 phba->sli4_hba.rpi_ids[i] = base + i; 5787 5788 /* VPIs. */ 5789 count = phba->sli4_hba.max_cfg_param.max_vpi; 5790 if (count <= 0) { 5791 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5792 "3280 Invalid provisioning of " 5793 "vpi:%d\n", count); 5794 rc = -EINVAL; 5795 goto free_rpi_ids; 5796 } 5797 base = phba->sli4_hba.max_cfg_param.vpi_base; 5798 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5799 phba->vpi_bmask = kzalloc(longs * 5800 sizeof(unsigned long), 5801 GFP_KERNEL); 5802 if (unlikely(!phba->vpi_bmask)) { 5803 rc = -ENOMEM; 5804 goto free_rpi_ids; 5805 } 5806 phba->vpi_ids = kzalloc(count * 5807 sizeof(uint16_t), 5808 GFP_KERNEL); 5809 if (unlikely(!phba->vpi_ids)) { 5810 rc = -ENOMEM; 5811 goto free_vpi_bmask; 5812 } 5813 5814 for (i = 0; i < count; i++) 5815 phba->vpi_ids[i] = base + i; 5816 5817 /* XRIs. */ 5818 count = phba->sli4_hba.max_cfg_param.max_xri; 5819 if (count <= 0) { 5820 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5821 "3281 Invalid provisioning of " 5822 "xri:%d\n", count); 5823 rc = -EINVAL; 5824 goto free_vpi_ids; 5825 } 5826 base = phba->sli4_hba.max_cfg_param.xri_base; 5827 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5828 phba->sli4_hba.xri_bmask = kzalloc(longs * 5829 sizeof(unsigned long), 5830 GFP_KERNEL); 5831 if (unlikely(!phba->sli4_hba.xri_bmask)) { 5832 rc = -ENOMEM; 5833 goto free_vpi_ids; 5834 } 5835 phba->sli4_hba.max_cfg_param.xri_used = 0; 5836 phba->sli4_hba.xri_ids = kzalloc(count * 5837 sizeof(uint16_t), 5838 GFP_KERNEL); 5839 if (unlikely(!phba->sli4_hba.xri_ids)) { 5840 rc = -ENOMEM; 5841 goto free_xri_bmask; 5842 } 5843 5844 for (i = 0; i < count; i++) 5845 phba->sli4_hba.xri_ids[i] = base + i; 5846 5847 /* VFIs. */ 5848 count = phba->sli4_hba.max_cfg_param.max_vfi; 5849 if (count <= 0) { 5850 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5851 "3282 Invalid provisioning of " 5852 "vfi:%d\n", count); 5853 rc = -EINVAL; 5854 goto free_xri_ids; 5855 } 5856 base = phba->sli4_hba.max_cfg_param.vfi_base; 5857 longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG; 5858 phba->sli4_hba.vfi_bmask = kzalloc(longs * 5859 sizeof(unsigned long), 5860 GFP_KERNEL); 5861 if (unlikely(!phba->sli4_hba.vfi_bmask)) { 5862 rc = -ENOMEM; 5863 goto free_xri_ids; 5864 } 5865 phba->sli4_hba.vfi_ids = kzalloc(count * 5866 sizeof(uint16_t), 5867 GFP_KERNEL); 5868 if (unlikely(!phba->sli4_hba.vfi_ids)) { 5869 rc = -ENOMEM; 5870 goto free_vfi_bmask; 5871 } 5872 5873 for (i = 0; i < count; i++) 5874 phba->sli4_hba.vfi_ids[i] = base + i; 5875 5876 /* 5877 * Mark all resources ready. An HBA reset doesn't need 5878 * to reset the initialization. 5879 */ 5880 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 5881 LPFC_IDX_RSRC_RDY); 5882 return 0; 5883 } 5884 5885 free_vfi_bmask: 5886 kfree(phba->sli4_hba.vfi_bmask); 5887 free_xri_ids: 5888 kfree(phba->sli4_hba.xri_ids); 5889 free_xri_bmask: 5890 kfree(phba->sli4_hba.xri_bmask); 5891 free_vpi_ids: 5892 kfree(phba->vpi_ids); 5893 free_vpi_bmask: 5894 kfree(phba->vpi_bmask); 5895 free_rpi_ids: 5896 kfree(phba->sli4_hba.rpi_ids); 5897 free_rpi_bmask: 5898 kfree(phba->sli4_hba.rpi_bmask); 5899 err_exit: 5900 return rc; 5901 } 5902 5903 /** 5904 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents. 5905 * @phba: Pointer to HBA context object. 5906 * 5907 * This function allocates the number of elements for the specified 5908 * resource type. 5909 **/ 5910 int 5911 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) 5912 { 5913 if (phba->sli4_hba.extents_in_use) { 5914 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI); 5915 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI); 5916 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI); 5917 lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); 5918 } else { 5919 kfree(phba->vpi_bmask); 5920 phba->sli4_hba.max_cfg_param.vpi_used = 0; 5921 kfree(phba->vpi_ids); 5922 bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5923 kfree(phba->sli4_hba.xri_bmask); 5924 kfree(phba->sli4_hba.xri_ids); 5925 kfree(phba->sli4_hba.vfi_bmask); 5926 kfree(phba->sli4_hba.vfi_ids); 5927 bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5928 bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 5929 } 5930 5931 return 0; 5932 } 5933 5934 /** 5935 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents. 5936 * @phba: Pointer to HBA context object. 5937 * @type: The resource extent type. 5938 * @extnt_count: buffer to hold port extent count response 5939 * @extnt_size: buffer to hold port extent size response. 5940 * 5941 * This function calls the port to read the host allocated extents 5942 * for a particular type. 5943 **/ 5944 int 5945 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type, 5946 uint16_t *extnt_cnt, uint16_t *extnt_size) 5947 { 5948 bool emb; 5949 int rc = 0; 5950 uint16_t curr_blks = 0; 5951 uint32_t req_len, emb_len; 5952 uint32_t alloc_len, mbox_tmo; 5953 struct list_head *blk_list_head; 5954 struct lpfc_rsrc_blks *rsrc_blk; 5955 LPFC_MBOXQ_t *mbox; 5956 void *virtaddr = NULL; 5957 struct lpfc_mbx_nembed_rsrc_extent *n_rsrc; 5958 struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext; 5959 union lpfc_sli4_cfg_shdr *shdr; 5960 5961 switch (type) { 5962 case LPFC_RSC_TYPE_FCOE_VPI: 5963 blk_list_head = &phba->lpfc_vpi_blk_list; 5964 break; 5965 case LPFC_RSC_TYPE_FCOE_XRI: 5966 blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list; 5967 break; 5968 case LPFC_RSC_TYPE_FCOE_VFI: 5969 blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list; 5970 break; 5971 case LPFC_RSC_TYPE_FCOE_RPI: 5972 blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list; 5973 break; 5974 default: 5975 return -EIO; 5976 } 5977 5978 /* Count the number of extents currently allocatd for this type. */ 5979 list_for_each_entry(rsrc_blk, blk_list_head, list) { 5980 if (curr_blks == 0) { 5981 /* 5982 * The GET_ALLOCATED mailbox does not return the size, 5983 * just the count. The size should be just the size 5984 * stored in the current allocated block and all sizes 5985 * for an extent type are the same so set the return 5986 * value now. 5987 */ 5988 *extnt_size = rsrc_blk->rsrc_size; 5989 } 5990 curr_blks++; 5991 } 5992 5993 /* Calculate the total requested length of the dma memory. */ 5994 req_len = curr_blks * sizeof(uint16_t); 5995 5996 /* 5997 * Calculate the size of an embedded mailbox. The uint32_t 5998 * accounts for extents-specific word. 5999 */ 6000 emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) - 6001 sizeof(uint32_t); 6002 6003 /* 6004 * Presume the allocation and response will fit into an embedded 6005 * mailbox. If not true, reconfigure to a non-embedded mailbox. 6006 */ 6007 emb = LPFC_SLI4_MBX_EMBED; 6008 req_len = emb_len; 6009 if (req_len > emb_len) { 6010 req_len = curr_blks * sizeof(uint16_t) + 6011 sizeof(union lpfc_sli4_cfg_shdr) + 6012 sizeof(uint32_t); 6013 emb = LPFC_SLI4_MBX_NEMBED; 6014 } 6015 6016 mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6017 if (!mbox) 6018 return -ENOMEM; 6019 memset(mbox, 0, sizeof(LPFC_MBOXQ_t)); 6020 6021 alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 6022 LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT, 6023 req_len, emb); 6024 if (alloc_len < req_len) { 6025 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6026 "2983 Allocated DMA memory size (x%x) is " 6027 "less than the requested DMA memory " 6028 "size (x%x)\n", alloc_len, req_len); 6029 rc = -ENOMEM; 6030 goto err_exit; 6031 } 6032 rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb); 6033 if (unlikely(rc)) { 6034 rc = -EIO; 6035 goto err_exit; 6036 } 6037 6038 if (!phba->sli4_hba.intr_enable) 6039 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 6040 else { 6041 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 6042 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 6043 } 6044 6045 if (unlikely(rc)) { 6046 rc = -EIO; 6047 goto err_exit; 6048 } 6049 6050 /* 6051 * Figure out where the response is located. Then get local pointers 6052 * to the response data. The port does not guarantee to respond to 6053 * all extents counts request so update the local variable with the 6054 * allocated count from the port. 6055 */ 6056 if (emb == LPFC_SLI4_MBX_EMBED) { 6057 rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents; 6058 shdr = &rsrc_ext->header.cfg_shdr; 6059 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp); 6060 } else { 6061 virtaddr = mbox->sge_array->addr[0]; 6062 n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr; 6063 shdr = &n_rsrc->cfg_shdr; 6064 *extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc); 6065 } 6066 6067 if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) { 6068 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, 6069 "2984 Failed to read allocated resources " 6070 "for type %d - Status 0x%x Add'l Status 0x%x.\n", 6071 type, 6072 bf_get(lpfc_mbox_hdr_status, &shdr->response), 6073 bf_get(lpfc_mbox_hdr_add_status, &shdr->response)); 6074 rc = -EIO; 6075 goto err_exit; 6076 } 6077 err_exit: 6078 lpfc_sli4_mbox_cmd_free(phba, mbox); 6079 return rc; 6080 } 6081 6082 /** 6083 * lpfc_sli4_repost_els_sgl_list - Repsot the els buffers sgl pages as block 6084 * @phba: pointer to lpfc hba data structure. 6085 * 6086 * This routine walks the list of els buffers that have been allocated and 6087 * repost them to the port by using SGL block post. This is needed after a 6088 * pci_function_reset/warm_start or start. It attempts to construct blocks 6089 * of els buffer sgls which contains contiguous xris and uses the non-embedded 6090 * SGL block post mailbox commands to post them to the port. For single els 6091 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post 6092 * mailbox command for posting. 6093 * 6094 * Returns: 0 = success, non-zero failure. 6095 **/ 6096 static int 6097 lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) 6098 { 6099 struct lpfc_sglq *sglq_entry = NULL; 6100 struct lpfc_sglq *sglq_entry_next = NULL; 6101 struct lpfc_sglq *sglq_entry_first = NULL; 6102 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; 6103 int last_xritag = NO_XRI; 6104 LIST_HEAD(prep_sgl_list); 6105 LIST_HEAD(blck_sgl_list); 6106 LIST_HEAD(allc_sgl_list); 6107 LIST_HEAD(post_sgl_list); 6108 LIST_HEAD(free_sgl_list); 6109 6110 spin_lock_irq(&phba->hbalock); 6111 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6112 spin_unlock_irq(&phba->hbalock); 6113 6114 total_cnt = phba->sli4_hba.els_xri_cnt; 6115 list_for_each_entry_safe(sglq_entry, sglq_entry_next, 6116 &allc_sgl_list, list) { 6117 list_del_init(&sglq_entry->list); 6118 block_cnt++; 6119 if ((last_xritag != NO_XRI) && 6120 (sglq_entry->sli4_xritag != last_xritag + 1)) { 6121 /* a hole in xri block, form a sgl posting block */ 6122 list_splice_init(&prep_sgl_list, &blck_sgl_list); 6123 post_cnt = block_cnt - 1; 6124 /* prepare list for next posting block */ 6125 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6126 block_cnt = 1; 6127 } else { 6128 /* prepare list for next posting block */ 6129 list_add_tail(&sglq_entry->list, &prep_sgl_list); 6130 /* enough sgls for non-embed sgl mbox command */ 6131 if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) { 6132 list_splice_init(&prep_sgl_list, 6133 &blck_sgl_list); 6134 post_cnt = block_cnt; 6135 block_cnt = 0; 6136 } 6137 } 6138 num_posted++; 6139 6140 /* keep track of last sgl's xritag */ 6141 last_xritag = sglq_entry->sli4_xritag; 6142 6143 /* end of repost sgl list condition for els buffers */ 6144 if (num_posted == phba->sli4_hba.els_xri_cnt) { 6145 if (post_cnt == 0) { 6146 list_splice_init(&prep_sgl_list, 6147 &blck_sgl_list); 6148 post_cnt = block_cnt; 6149 } else if (block_cnt == 1) { 6150 status = lpfc_sli4_post_sgl(phba, 6151 sglq_entry->phys, 0, 6152 sglq_entry->sli4_xritag); 6153 if (!status) { 6154 /* successful, put sgl to posted list */ 6155 list_add_tail(&sglq_entry->list, 6156 &post_sgl_list); 6157 } else { 6158 /* Failure, put sgl to free list */ 6159 lpfc_printf_log(phba, KERN_WARNING, 6160 LOG_SLI, 6161 "3159 Failed to post els " 6162 "sgl, xritag:x%x\n", 6163 sglq_entry->sli4_xritag); 6164 list_add_tail(&sglq_entry->list, 6165 &free_sgl_list); 6166 total_cnt--; 6167 } 6168 } 6169 } 6170 6171 /* continue until a nembed page worth of sgls */ 6172 if (post_cnt == 0) 6173 continue; 6174 6175 /* post the els buffer list sgls as a block */ 6176 status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list, 6177 post_cnt); 6178 6179 if (!status) { 6180 /* success, put sgl list to posted sgl list */ 6181 list_splice_init(&blck_sgl_list, &post_sgl_list); 6182 } else { 6183 /* Failure, put sgl list to free sgl list */ 6184 sglq_entry_first = list_first_entry(&blck_sgl_list, 6185 struct lpfc_sglq, 6186 list); 6187 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 6188 "3160 Failed to post els sgl-list, " 6189 "xritag:x%x-x%x\n", 6190 sglq_entry_first->sli4_xritag, 6191 (sglq_entry_first->sli4_xritag + 6192 post_cnt - 1)); 6193 list_splice_init(&blck_sgl_list, &free_sgl_list); 6194 total_cnt -= post_cnt; 6195 } 6196 6197 /* don't reset xirtag due to hole in xri block */ 6198 if (block_cnt == 0) 6199 last_xritag = NO_XRI; 6200 6201 /* reset els sgl post count for next round of posting */ 6202 post_cnt = 0; 6203 } 6204 /* update the number of XRIs posted for ELS */ 6205 phba->sli4_hba.els_xri_cnt = total_cnt; 6206 6207 /* free the els sgls failed to post */ 6208 lpfc_free_sgl_list(phba, &free_sgl_list); 6209 6210 /* push els sgls posted to the availble list */ 6211 if (!list_empty(&post_sgl_list)) { 6212 spin_lock_irq(&phba->hbalock); 6213 list_splice_init(&post_sgl_list, 6214 &phba->sli4_hba.lpfc_sgl_list); 6215 spin_unlock_irq(&phba->hbalock); 6216 } else { 6217 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6218 "3161 Failure to post els sgl to port.\n"); 6219 return -EIO; 6220 } 6221 return 0; 6222 } 6223 6224 /** 6225 * lpfc_sli4_hba_setup - SLI4 device intialization PCI function 6226 * @phba: Pointer to HBA context object. 6227 * 6228 * This function is the main SLI4 device intialization PCI function. This 6229 * function is called by the HBA intialization code, HBA reset code and 6230 * HBA error attention handler code. Caller is not required to hold any 6231 * locks. 6232 **/ 6233 int 6234 lpfc_sli4_hba_setup(struct lpfc_hba *phba) 6235 { 6236 int rc; 6237 LPFC_MBOXQ_t *mboxq; 6238 struct lpfc_mqe *mqe; 6239 uint8_t *vpd; 6240 uint32_t vpd_size; 6241 uint32_t ftr_rsp = 0; 6242 struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); 6243 struct lpfc_vport *vport = phba->pport; 6244 struct lpfc_dmabuf *mp; 6245 6246 /* Perform a PCI function reset to start from clean */ 6247 rc = lpfc_pci_function_reset(phba); 6248 if (unlikely(rc)) 6249 return -ENODEV; 6250 6251 /* Check the HBA Host Status Register for readyness */ 6252 rc = lpfc_sli4_post_status_check(phba); 6253 if (unlikely(rc)) 6254 return -ENODEV; 6255 else { 6256 spin_lock_irq(&phba->hbalock); 6257 phba->sli.sli_flag |= LPFC_SLI_ACTIVE; 6258 spin_unlock_irq(&phba->hbalock); 6259 } 6260 6261 /* 6262 * Allocate a single mailbox container for initializing the 6263 * port. 6264 */ 6265 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6266 if (!mboxq) 6267 return -ENOMEM; 6268 6269 /* Issue READ_REV to collect vpd and FW information. */ 6270 vpd_size = SLI4_PAGE_SIZE; 6271 vpd = kzalloc(vpd_size, GFP_KERNEL); 6272 if (!vpd) { 6273 rc = -ENOMEM; 6274 goto out_free_mbox; 6275 } 6276 6277 rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); 6278 if (unlikely(rc)) { 6279 kfree(vpd); 6280 goto out_free_mbox; 6281 } 6282 6283 mqe = &mboxq->u.mqe; 6284 phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev); 6285 if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) 6286 phba->hba_flag |= HBA_FCOE_MODE; 6287 else 6288 phba->hba_flag &= ~HBA_FCOE_MODE; 6289 6290 if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) == 6291 LPFC_DCBX_CEE_MODE) 6292 phba->hba_flag |= HBA_FIP_SUPPORT; 6293 else 6294 phba->hba_flag &= ~HBA_FIP_SUPPORT; 6295 6296 phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; 6297 6298 if (phba->sli_rev != LPFC_SLI_REV4) { 6299 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6300 "0376 READ_REV Error. SLI Level %d " 6301 "FCoE enabled %d\n", 6302 phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE); 6303 rc = -EIO; 6304 kfree(vpd); 6305 goto out_free_mbox; 6306 } 6307 6308 /* 6309 * Continue initialization with default values even if driver failed 6310 * to read FCoE param config regions, only read parameters if the 6311 * board is FCoE 6312 */ 6313 if (phba->hba_flag & HBA_FCOE_MODE && 6314 lpfc_sli4_read_fcoe_params(phba)) 6315 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT, 6316 "2570 Failed to read FCoE parameters\n"); 6317 6318 /* 6319 * Retrieve sli4 device physical port name, failure of doing it 6320 * is considered as non-fatal. 6321 */ 6322 rc = lpfc_sli4_retrieve_pport_name(phba); 6323 if (!rc) 6324 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6325 "3080 Successful retrieving SLI4 device " 6326 "physical port name: %s.\n", phba->Port); 6327 6328 /* 6329 * Evaluate the read rev and vpd data. Populate the driver 6330 * state with the results. If this routine fails, the failure 6331 * is not fatal as the driver will use generic values. 6332 */ 6333 rc = lpfc_parse_vpd(phba, vpd, vpd_size); 6334 if (unlikely(!rc)) { 6335 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6336 "0377 Error %d parsing vpd. " 6337 "Using defaults.\n", rc); 6338 rc = 0; 6339 } 6340 kfree(vpd); 6341 6342 /* Save information as VPD data */ 6343 phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev; 6344 phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev; 6345 phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev; 6346 phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high, 6347 &mqe->un.read_rev); 6348 phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low, 6349 &mqe->un.read_rev); 6350 phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high, 6351 &mqe->un.read_rev); 6352 phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low, 6353 &mqe->un.read_rev); 6354 phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev; 6355 memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16); 6356 phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev; 6357 memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16); 6358 phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev; 6359 memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16); 6360 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6361 "(%d):0380 READ_REV Status x%x " 6362 "fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n", 6363 mboxq->vport ? mboxq->vport->vpi : 0, 6364 bf_get(lpfc_mqe_status, mqe), 6365 phba->vpd.rev.opFwName, 6366 phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow, 6367 phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow); 6368 6369 /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */ 6370 rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3); 6371 if (phba->pport->cfg_lun_queue_depth > rc) { 6372 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 6373 "3362 LUN queue depth changed from %d to %d\n", 6374 phba->pport->cfg_lun_queue_depth, rc); 6375 phba->pport->cfg_lun_queue_depth = rc; 6376 } 6377 6378 6379 /* 6380 * Discover the port's supported feature set and match it against the 6381 * hosts requests. 6382 */ 6383 lpfc_request_features(phba, mboxq); 6384 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6385 if (unlikely(rc)) { 6386 rc = -EIO; 6387 goto out_free_mbox; 6388 } 6389 6390 /* 6391 * The port must support FCP initiator mode as this is the 6392 * only mode running in the host. 6393 */ 6394 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { 6395 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6396 "0378 No support for fcpi mode.\n"); 6397 ftr_rsp++; 6398 } 6399 if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs)) 6400 phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED; 6401 else 6402 phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED; 6403 /* 6404 * If the port cannot support the host's requested features 6405 * then turn off the global config parameters to disable the 6406 * feature in the driver. This is not a fatal error. 6407 */ 6408 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; 6409 if (phba->cfg_enable_bg) { 6410 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) 6411 phba->sli3_options |= LPFC_SLI3_BG_ENABLED; 6412 else 6413 ftr_rsp++; 6414 } 6415 6416 if (phba->max_vpi && phba->cfg_enable_npiv && 6417 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6418 ftr_rsp++; 6419 6420 if (ftr_rsp) { 6421 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 6422 "0379 Feature Mismatch Data: x%08x %08x " 6423 "x%x x%x x%x\n", mqe->un.req_ftrs.word2, 6424 mqe->un.req_ftrs.word3, phba->cfg_enable_bg, 6425 phba->cfg_enable_npiv, phba->max_vpi); 6426 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6427 phba->cfg_enable_bg = 0; 6428 if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6429 phba->cfg_enable_npiv = 0; 6430 } 6431 6432 /* These SLI3 features are assumed in SLI4 */ 6433 spin_lock_irq(&phba->hbalock); 6434 phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); 6435 spin_unlock_irq(&phba->hbalock); 6436 6437 /* 6438 * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent 6439 * calls depends on these resources to complete port setup. 6440 */ 6441 rc = lpfc_sli4_alloc_resource_identifiers(phba); 6442 if (rc) { 6443 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6444 "2920 Failed to alloc Resource IDs " 6445 "rc = x%x\n", rc); 6446 goto out_free_mbox; 6447 } 6448 6449 /* Read the port's service parameters. */ 6450 rc = lpfc_read_sparam(phba, mboxq, vport->vpi); 6451 if (rc) { 6452 phba->link_state = LPFC_HBA_ERROR; 6453 rc = -ENOMEM; 6454 goto out_free_mbox; 6455 } 6456 6457 mboxq->vport = vport; 6458 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6459 mp = (struct lpfc_dmabuf *) mboxq->context1; 6460 if (rc == MBX_SUCCESS) { 6461 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); 6462 rc = 0; 6463 } 6464 6465 /* 6466 * This memory was allocated by the lpfc_read_sparam routine. Release 6467 * it to the mbuf pool. 6468 */ 6469 lpfc_mbuf_free(phba, mp->virt, mp->phys); 6470 kfree(mp); 6471 mboxq->context1 = NULL; 6472 if (unlikely(rc)) { 6473 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6474 "0382 READ_SPARAM command failed " 6475 "status %d, mbxStatus x%x\n", 6476 rc, bf_get(lpfc_mqe_status, mqe)); 6477 phba->link_state = LPFC_HBA_ERROR; 6478 rc = -EIO; 6479 goto out_free_mbox; 6480 } 6481 6482 lpfc_update_vport_wwn(vport); 6483 6484 /* Update the fc_host data structures with new wwn. */ 6485 fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); 6486 fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); 6487 6488 /* update host els and scsi xri-sgl sizes and mappings */ 6489 rc = lpfc_sli4_xri_sgl_update(phba); 6490 if (unlikely(rc)) { 6491 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6492 "1400 Failed to update xri-sgl size and " 6493 "mapping: %d\n", rc); 6494 goto out_free_mbox; 6495 } 6496 6497 /* register the els sgl pool to the port */ 6498 rc = lpfc_sli4_repost_els_sgl_list(phba); 6499 if (unlikely(rc)) { 6500 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6501 "0582 Error %d during els sgl post " 6502 "operation\n", rc); 6503 rc = -ENODEV; 6504 goto out_free_mbox; 6505 } 6506 6507 /* register the allocated scsi sgl pool to the port */ 6508 rc = lpfc_sli4_repost_scsi_sgl_list(phba); 6509 if (unlikely(rc)) { 6510 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6511 "0383 Error %d during scsi sgl post " 6512 "operation\n", rc); 6513 /* Some Scsi buffers were moved to the abort scsi list */ 6514 /* A pci function reset will repost them */ 6515 rc = -ENODEV; 6516 goto out_free_mbox; 6517 } 6518 6519 /* Post the rpi header region to the device. */ 6520 rc = lpfc_sli4_post_all_rpi_hdrs(phba); 6521 if (unlikely(rc)) { 6522 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6523 "0393 Error %d during rpi post operation\n", 6524 rc); 6525 rc = -ENODEV; 6526 goto out_free_mbox; 6527 } 6528 lpfc_sli4_node_prep(phba); 6529 6530 /* Create all the SLI4 queues */ 6531 rc = lpfc_sli4_queue_create(phba); 6532 if (rc) { 6533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6534 "3089 Failed to allocate queues\n"); 6535 rc = -ENODEV; 6536 goto out_stop_timers; 6537 } 6538 /* Set up all the queues to the device */ 6539 rc = lpfc_sli4_queue_setup(phba); 6540 if (unlikely(rc)) { 6541 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6542 "0381 Error %d during queue setup.\n ", rc); 6543 goto out_destroy_queue; 6544 } 6545 6546 /* Arm the CQs and then EQs on device */ 6547 lpfc_sli4_arm_cqeq_intr(phba); 6548 6549 /* Indicate device interrupt mode */ 6550 phba->sli4_hba.intr_enable = 1; 6551 6552 /* Allow asynchronous mailbox command to go through */ 6553 spin_lock_irq(&phba->hbalock); 6554 phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 6555 spin_unlock_irq(&phba->hbalock); 6556 6557 /* Post receive buffers to the device */ 6558 lpfc_sli4_rb_setup(phba); 6559 6560 /* Reset HBA FCF states after HBA reset */ 6561 phba->fcf.fcf_flag = 0; 6562 phba->fcf.current_rec.flag = 0; 6563 6564 /* Start the ELS watchdog timer */ 6565 mod_timer(&vport->els_tmofunc, 6566 jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); 6567 6568 /* Start heart beat timer */ 6569 mod_timer(&phba->hb_tmofunc, 6570 jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); 6571 phba->hb_outstanding = 0; 6572 phba->last_completion_time = jiffies; 6573 6574 /* Start error attention (ERATT) polling timer */ 6575 mod_timer(&phba->eratt_poll, 6576 jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); 6577 6578 /* Enable PCIe device Advanced Error Reporting (AER) if configured */ 6579 if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { 6580 rc = pci_enable_pcie_error_reporting(phba->pcidev); 6581 if (!rc) { 6582 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6583 "2829 This device supports " 6584 "Advanced Error Reporting (AER)\n"); 6585 spin_lock_irq(&phba->hbalock); 6586 phba->hba_flag |= HBA_AER_ENABLED; 6587 spin_unlock_irq(&phba->hbalock); 6588 } else { 6589 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6590 "2830 This device does not support " 6591 "Advanced Error Reporting (AER)\n"); 6592 phba->cfg_aer_support = 0; 6593 } 6594 rc = 0; 6595 } 6596 6597 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 6598 /* 6599 * The FC Port needs to register FCFI (index 0) 6600 */ 6601 lpfc_reg_fcfi(phba, mboxq); 6602 mboxq->vport = phba->pport; 6603 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6604 if (rc != MBX_SUCCESS) 6605 goto out_unset_queue; 6606 rc = 0; 6607 phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, 6608 &mboxq->u.mqe.un.reg_fcfi); 6609 6610 /* Check if the port is configured to be disabled */ 6611 lpfc_sli_read_link_ste(phba); 6612 } 6613 6614 /* 6615 * The port is ready, set the host's link state to LINK_DOWN 6616 * in preparation for link interrupts. 6617 */ 6618 spin_lock_irq(&phba->hbalock); 6619 phba->link_state = LPFC_LINK_DOWN; 6620 spin_unlock_irq(&phba->hbalock); 6621 if (!(phba->hba_flag & HBA_FCOE_MODE) && 6622 (phba->hba_flag & LINK_DISABLED)) { 6623 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6624 "3103 Adapter Link is disabled.\n"); 6625 lpfc_down_link(phba, mboxq); 6626 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 6627 if (rc != MBX_SUCCESS) { 6628 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI, 6629 "3104 Adapter failed to issue " 6630 "DOWN_LINK mbox cmd, rc:x%x\n", rc); 6631 goto out_unset_queue; 6632 } 6633 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { 6634 /* don't perform init_link on SLI4 FC port loopback test */ 6635 if (!(phba->link_flag & LS_LOOPBACK_MODE)) { 6636 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6637 if (rc) 6638 goto out_unset_queue; 6639 } 6640 } 6641 mempool_free(mboxq, phba->mbox_mem_pool); 6642 return rc; 6643 out_unset_queue: 6644 /* Unset all the queues set up in this routine when error out */ 6645 lpfc_sli4_queue_unset(phba); 6646 out_destroy_queue: 6647 lpfc_sli4_queue_destroy(phba); 6648 out_stop_timers: 6649 lpfc_stop_hba_timers(phba); 6650 out_free_mbox: 6651 mempool_free(mboxq, phba->mbox_mem_pool); 6652 return rc; 6653 } 6654 6655 /** 6656 * lpfc_mbox_timeout - Timeout call back function for mbox timer 6657 * @ptr: context object - pointer to hba structure. 6658 * 6659 * This is the callback function for mailbox timer. The mailbox 6660 * timer is armed when a new mailbox command is issued and the timer 6661 * is deleted when the mailbox complete. The function is called by 6662 * the kernel timer code when a mailbox does not complete within 6663 * expected time. This function wakes up the worker thread to 6664 * process the mailbox timeout and returns. All the processing is 6665 * done by the worker thread function lpfc_mbox_timeout_handler. 6666 **/ 6667 void 6668 lpfc_mbox_timeout(unsigned long ptr) 6669 { 6670 struct lpfc_hba *phba = (struct lpfc_hba *) ptr; 6671 unsigned long iflag; 6672 uint32_t tmo_posted; 6673 6674 spin_lock_irqsave(&phba->pport->work_port_lock, iflag); 6675 tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO; 6676 if (!tmo_posted) 6677 phba->pport->work_port_events |= WORKER_MBOX_TMO; 6678 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); 6679 6680 if (!tmo_posted) 6681 lpfc_worker_wake_up(phba); 6682 return; 6683 } 6684 6685 /** 6686 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions 6687 * are pending 6688 * @phba: Pointer to HBA context object. 6689 * 6690 * This function checks if any mailbox completions are present on the mailbox 6691 * completion queue. 6692 **/ 6693 bool 6694 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) 6695 { 6696 6697 uint32_t idx; 6698 struct lpfc_queue *mcq; 6699 struct lpfc_mcqe *mcqe; 6700 bool pending_completions = false; 6701 6702 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6703 return false; 6704 6705 /* Check for completions on mailbox completion queue */ 6706 6707 mcq = phba->sli4_hba.mbx_cq; 6708 idx = mcq->hba_index; 6709 while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { 6710 mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; 6711 if (bf_get_le32(lpfc_trailer_completed, mcqe) && 6712 (!bf_get_le32(lpfc_trailer_async, mcqe))) { 6713 pending_completions = true; 6714 break; 6715 } 6716 idx = (idx + 1) % mcq->entry_count; 6717 if (mcq->hba_index == idx) 6718 break; 6719 } 6720 return pending_completions; 6721 6722 } 6723 6724 /** 6725 * lpfc_sli4_process_missed_mbox_completions - process mbox completions 6726 * that were missed. 6727 * @phba: Pointer to HBA context object. 6728 * 6729 * For sli4, it is possible to miss an interrupt. As such mbox completions 6730 * maybe missed causing erroneous mailbox timeouts to occur. This function 6731 * checks to see if mbox completions are on the mailbox completion queue 6732 * and will process all the completions associated with the eq for the 6733 * mailbox completion queue. 6734 **/ 6735 bool 6736 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) 6737 { 6738 6739 uint32_t eqidx; 6740 struct lpfc_queue *fpeq = NULL; 6741 struct lpfc_eqe *eqe; 6742 bool mbox_pending; 6743 6744 if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) 6745 return false; 6746 6747 /* Find the eq associated with the mcq */ 6748 6749 if (phba->sli4_hba.hba_eq) 6750 for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) 6751 if (phba->sli4_hba.hba_eq[eqidx]->queue_id == 6752 phba->sli4_hba.mbx_cq->assoc_qid) { 6753 fpeq = phba->sli4_hba.hba_eq[eqidx]; 6754 break; 6755 } 6756 if (!fpeq) 6757 return false; 6758 6759 /* Turn off interrupts from this EQ */ 6760 6761 lpfc_sli4_eq_clr_intr(fpeq); 6762 6763 /* Check to see if a mbox completion is pending */ 6764 6765 mbox_pending = lpfc_sli4_mbox_completions_pending(phba); 6766 6767 /* 6768 * If a mbox completion is pending, process all the events on EQ 6769 * associated with the mbox completion queue (this could include 6770 * mailbox commands, async events, els commands, receive queue data 6771 * and fcp commands) 6772 */ 6773 6774 if (mbox_pending) 6775 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 6776 lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); 6777 fpeq->EQ_processed++; 6778 } 6779 6780 /* Always clear and re-arm the EQ */ 6781 6782 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 6783 6784 return mbox_pending; 6785 6786 } 6787 6788 /** 6789 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout 6790 * @phba: Pointer to HBA context object. 6791 * 6792 * This function is called from worker thread when a mailbox command times out. 6793 * The caller is not required to hold any locks. This function will reset the 6794 * HBA and recover all the pending commands. 6795 **/ 6796 void 6797 lpfc_mbox_timeout_handler(struct lpfc_hba *phba) 6798 { 6799 LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; 6800 MAILBOX_t *mb = &pmbox->u.mb; 6801 struct lpfc_sli *psli = &phba->sli; 6802 6803 /* If the mailbox completed, process the completion and return */ 6804 if (lpfc_sli4_process_missed_mbox_completions(phba)) 6805 return; 6806 6807 /* Check the pmbox pointer first. There is a race condition 6808 * between the mbox timeout handler getting executed in the 6809 * worklist and the mailbox actually completing. When this 6810 * race condition occurs, the mbox_active will be NULL. 6811 */ 6812 spin_lock_irq(&phba->hbalock); 6813 if (pmbox == NULL) { 6814 lpfc_printf_log(phba, KERN_WARNING, 6815 LOG_MBOX | LOG_SLI, 6816 "0353 Active Mailbox cleared - mailbox timeout " 6817 "exiting\n"); 6818 spin_unlock_irq(&phba->hbalock); 6819 return; 6820 } 6821 6822 /* Mbox cmd <mbxCommand> timeout */ 6823 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6824 "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", 6825 mb->mbxCommand, 6826 phba->pport->port_state, 6827 phba->sli.sli_flag, 6828 phba->sli.mbox_active); 6829 spin_unlock_irq(&phba->hbalock); 6830 6831 /* Setting state unknown so lpfc_sli_abort_iocb_ring 6832 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing 6833 * it to fail all outstanding SCSI IO. 6834 */ 6835 spin_lock_irq(&phba->pport->work_port_lock); 6836 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 6837 spin_unlock_irq(&phba->pport->work_port_lock); 6838 spin_lock_irq(&phba->hbalock); 6839 phba->link_state = LPFC_LINK_UNKNOWN; 6840 psli->sli_flag &= ~LPFC_SLI_ACTIVE; 6841 spin_unlock_irq(&phba->hbalock); 6842 6843 lpfc_sli_abort_fcp_rings(phba); 6844 6845 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6846 "0345 Resetting board due to mailbox timeout\n"); 6847 6848 /* Reset the HBA device */ 6849 lpfc_reset_hba(phba); 6850 } 6851 6852 /** 6853 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware 6854 * @phba: Pointer to HBA context object. 6855 * @pmbox: Pointer to mailbox object. 6856 * @flag: Flag indicating how the mailbox need to be processed. 6857 * 6858 * This function is called by discovery code and HBA management code 6859 * to submit a mailbox command to firmware with SLI-3 interface spec. This 6860 * function gets the hbalock to protect the data structures. 6861 * The mailbox command can be submitted in polling mode, in which case 6862 * this function will wait in a polling loop for the completion of the 6863 * mailbox. 6864 * If the mailbox is submitted in no_wait mode (not polling) the 6865 * function will submit the command and returns immediately without waiting 6866 * for the mailbox completion. The no_wait is supported only when HBA 6867 * is in SLI2/SLI3 mode - interrupts are enabled. 6868 * The SLI interface allows only one mailbox pending at a time. If the 6869 * mailbox is issued in polling mode and there is already a mailbox 6870 * pending, then the function will return an error. If the mailbox is issued 6871 * in NO_WAIT mode and there is a mailbox pending already, the function 6872 * will return MBX_BUSY after queuing the mailbox into mailbox queue. 6873 * The sli layer owns the mailbox object until the completion of mailbox 6874 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other 6875 * return codes the caller owns the mailbox command after the return of 6876 * the function. 6877 **/ 6878 static int 6879 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, 6880 uint32_t flag) 6881 { 6882 MAILBOX_t *mbx; 6883 struct lpfc_sli *psli = &phba->sli; 6884 uint32_t status, evtctr; 6885 uint32_t ha_copy, hc_copy; 6886 int i; 6887 unsigned long timeout; 6888 unsigned long drvr_flag = 0; 6889 uint32_t word0, ldata; 6890 void __iomem *to_slim; 6891 int processing_queue = 0; 6892 6893 spin_lock_irqsave(&phba->hbalock, drvr_flag); 6894 if (!pmbox) { 6895 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 6896 /* processing mbox queue from intr_handler */ 6897 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 6898 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6899 return MBX_SUCCESS; 6900 } 6901 processing_queue = 1; 6902 pmbox = lpfc_mbox_get(phba); 6903 if (!pmbox) { 6904 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6905 return MBX_SUCCESS; 6906 } 6907 } 6908 6909 if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl && 6910 pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) { 6911 if(!pmbox->vport) { 6912 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6913 lpfc_printf_log(phba, KERN_ERR, 6914 LOG_MBOX | LOG_VPORT, 6915 "1806 Mbox x%x failed. No vport\n", 6916 pmbox->u.mb.mbxCommand); 6917 dump_stack(); 6918 goto out_not_finished; 6919 } 6920 } 6921 6922 /* If the PCI channel is in offline state, do not post mbox. */ 6923 if (unlikely(pci_channel_offline(phba->pcidev))) { 6924 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6925 goto out_not_finished; 6926 } 6927 6928 /* If HBA has a deferred error attention, fail the iocb. */ 6929 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 6930 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6931 goto out_not_finished; 6932 } 6933 6934 psli = &phba->sli; 6935 6936 mbx = &pmbox->u.mb; 6937 status = MBX_SUCCESS; 6938 6939 if (phba->link_state == LPFC_HBA_ERROR) { 6940 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6941 6942 /* Mbox command <mbxCommand> cannot issue */ 6943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6944 "(%d):0311 Mailbox command x%x cannot " 6945 "issue Data: x%x x%x\n", 6946 pmbox->vport ? pmbox->vport->vpi : 0, 6947 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6948 goto out_not_finished; 6949 } 6950 6951 if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) { 6952 if (lpfc_readl(phba->HCregaddr, &hc_copy) || 6953 !(hc_copy & HC_MBINT_ENA)) { 6954 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6955 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6956 "(%d):2528 Mailbox command x%x cannot " 6957 "issue Data: x%x x%x\n", 6958 pmbox->vport ? pmbox->vport->vpi : 0, 6959 pmbox->u.mb.mbxCommand, psli->sli_flag, flag); 6960 goto out_not_finished; 6961 } 6962 } 6963 6964 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6965 /* Polling for a mbox command when another one is already active 6966 * is not allowed in SLI. Also, the driver must have established 6967 * SLI2 mode to queue and process multiple mbox commands. 6968 */ 6969 6970 if (flag & MBX_POLL) { 6971 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6972 6973 /* Mbox command <mbxCommand> cannot issue */ 6974 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6975 "(%d):2529 Mailbox command x%x " 6976 "cannot issue Data: x%x x%x\n", 6977 pmbox->vport ? pmbox->vport->vpi : 0, 6978 pmbox->u.mb.mbxCommand, 6979 psli->sli_flag, flag); 6980 goto out_not_finished; 6981 } 6982 6983 if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { 6984 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 6985 /* Mbox command <mbxCommand> cannot issue */ 6986 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6987 "(%d):2530 Mailbox command x%x " 6988 "cannot issue Data: x%x x%x\n", 6989 pmbox->vport ? pmbox->vport->vpi : 0, 6990 pmbox->u.mb.mbxCommand, 6991 psli->sli_flag, flag); 6992 goto out_not_finished; 6993 } 6994 6995 /* Another mailbox command is still being processed, queue this 6996 * command to be processed later. 6997 */ 6998 lpfc_mbox_put(phba, pmbox); 6999 7000 /* Mbox cmd issue - BUSY */ 7001 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7002 "(%d):0308 Mbox cmd issue - BUSY Data: " 7003 "x%x x%x x%x x%x\n", 7004 pmbox->vport ? pmbox->vport->vpi : 0xffffff, 7005 mbx->mbxCommand, phba->pport->port_state, 7006 psli->sli_flag, flag); 7007 7008 psli->slistat.mbox_busy++; 7009 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7010 7011 if (pmbox->vport) { 7012 lpfc_debugfs_disc_trc(pmbox->vport, 7013 LPFC_DISC_TRC_MBOX_VPORT, 7014 "MBOX Bsy vport: cmd:x%x mb:x%x x%x", 7015 (uint32_t)mbx->mbxCommand, 7016 mbx->un.varWords[0], mbx->un.varWords[1]); 7017 } 7018 else { 7019 lpfc_debugfs_disc_trc(phba->pport, 7020 LPFC_DISC_TRC_MBOX, 7021 "MBOX Bsy: cmd:x%x mb:x%x x%x", 7022 (uint32_t)mbx->mbxCommand, 7023 mbx->un.varWords[0], mbx->un.varWords[1]); 7024 } 7025 7026 return MBX_BUSY; 7027 } 7028 7029 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7030 7031 /* If we are not polling, we MUST be in SLI2 mode */ 7032 if (flag != MBX_POLL) { 7033 if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && 7034 (mbx->mbxCommand != MBX_KILL_BOARD)) { 7035 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7036 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7037 /* Mbox command <mbxCommand> cannot issue */ 7038 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7039 "(%d):2531 Mailbox command x%x " 7040 "cannot issue Data: x%x x%x\n", 7041 pmbox->vport ? pmbox->vport->vpi : 0, 7042 pmbox->u.mb.mbxCommand, 7043 psli->sli_flag, flag); 7044 goto out_not_finished; 7045 } 7046 /* timeout active mbox command */ 7047 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7048 1000); 7049 mod_timer(&psli->mbox_tmo, jiffies + timeout); 7050 } 7051 7052 /* Mailbox cmd <cmd> issue */ 7053 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7054 "(%d):0309 Mailbox cmd x%x issue Data: x%x x%x " 7055 "x%x\n", 7056 pmbox->vport ? pmbox->vport->vpi : 0, 7057 mbx->mbxCommand, phba->pport->port_state, 7058 psli->sli_flag, flag); 7059 7060 if (mbx->mbxCommand != MBX_HEARTBEAT) { 7061 if (pmbox->vport) { 7062 lpfc_debugfs_disc_trc(pmbox->vport, 7063 LPFC_DISC_TRC_MBOX_VPORT, 7064 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7065 (uint32_t)mbx->mbxCommand, 7066 mbx->un.varWords[0], mbx->un.varWords[1]); 7067 } 7068 else { 7069 lpfc_debugfs_disc_trc(phba->pport, 7070 LPFC_DISC_TRC_MBOX, 7071 "MBOX Send: cmd:x%x mb:x%x x%x", 7072 (uint32_t)mbx->mbxCommand, 7073 mbx->un.varWords[0], mbx->un.varWords[1]); 7074 } 7075 } 7076 7077 psli->slistat.mbox_cmd++; 7078 evtctr = psli->slistat.mbox_event; 7079 7080 /* next set own bit for the adapter and copy over command word */ 7081 mbx->mbxOwner = OWN_CHIP; 7082 7083 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7084 /* Populate mbox extension offset word. */ 7085 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) { 7086 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7087 = (uint8_t *)phba->mbox_ext 7088 - (uint8_t *)phba->mbox; 7089 } 7090 7091 /* Copy the mailbox extension data */ 7092 if (pmbox->in_ext_byte_len && pmbox->context2) { 7093 lpfc_sli_pcimem_bcopy(pmbox->context2, 7094 (uint8_t *)phba->mbox_ext, 7095 pmbox->in_ext_byte_len); 7096 } 7097 /* Copy command data to host SLIM area */ 7098 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7099 } else { 7100 /* Populate mbox extension offset word. */ 7101 if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) 7102 *(((uint32_t *)mbx) + pmbox->mbox_offset_word) 7103 = MAILBOX_HBA_EXT_OFFSET; 7104 7105 /* Copy the mailbox extension data */ 7106 if (pmbox->in_ext_byte_len && pmbox->context2) { 7107 lpfc_memcpy_to_slim(phba->MBslimaddr + 7108 MAILBOX_HBA_EXT_OFFSET, 7109 pmbox->context2, pmbox->in_ext_byte_len); 7110 7111 } 7112 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7113 /* copy command data into host mbox for cmpl */ 7114 lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE); 7115 } 7116 7117 /* First copy mbox command data to HBA SLIM, skip past first 7118 word */ 7119 to_slim = phba->MBslimaddr + sizeof (uint32_t); 7120 lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0], 7121 MAILBOX_CMD_SIZE - sizeof (uint32_t)); 7122 7123 /* Next copy over first word, with mbxOwner set */ 7124 ldata = *((uint32_t *)mbx); 7125 to_slim = phba->MBslimaddr; 7126 writel(ldata, to_slim); 7127 readl(to_slim); /* flush */ 7128 7129 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7130 /* switch over to host mailbox */ 7131 psli->sli_flag |= LPFC_SLI_ACTIVE; 7132 } 7133 } 7134 7135 wmb(); 7136 7137 switch (flag) { 7138 case MBX_NOWAIT: 7139 /* Set up reference to mailbox command */ 7140 psli->mbox_active = pmbox; 7141 /* Interrupt board to do it */ 7142 writel(CA_MBATT, phba->CAregaddr); 7143 readl(phba->CAregaddr); /* flush */ 7144 /* Don't wait for it to finish, just return */ 7145 break; 7146 7147 case MBX_POLL: 7148 /* Set up null reference to mailbox command */ 7149 psli->mbox_active = NULL; 7150 /* Interrupt board to do it */ 7151 writel(CA_MBATT, phba->CAregaddr); 7152 readl(phba->CAregaddr); /* flush */ 7153 7154 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7155 /* First read mbox status word */ 7156 word0 = *((uint32_t *)phba->mbox); 7157 word0 = le32_to_cpu(word0); 7158 } else { 7159 /* First read mbox status word */ 7160 if (lpfc_readl(phba->MBslimaddr, &word0)) { 7161 spin_unlock_irqrestore(&phba->hbalock, 7162 drvr_flag); 7163 goto out_not_finished; 7164 } 7165 } 7166 7167 /* Read the HBA Host Attention Register */ 7168 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7169 spin_unlock_irqrestore(&phba->hbalock, 7170 drvr_flag); 7171 goto out_not_finished; 7172 } 7173 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 7174 1000) + jiffies; 7175 i = 0; 7176 /* Wait for command to complete */ 7177 while (((word0 & OWN_CHIP) == OWN_CHIP) || 7178 (!(ha_copy & HA_MBATT) && 7179 (phba->link_state > LPFC_WARM_START))) { 7180 if (time_after(jiffies, timeout)) { 7181 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7182 spin_unlock_irqrestore(&phba->hbalock, 7183 drvr_flag); 7184 goto out_not_finished; 7185 } 7186 7187 /* Check if we took a mbox interrupt while we were 7188 polling */ 7189 if (((word0 & OWN_CHIP) != OWN_CHIP) 7190 && (evtctr != psli->slistat.mbox_event)) 7191 break; 7192 7193 if (i++ > 10) { 7194 spin_unlock_irqrestore(&phba->hbalock, 7195 drvr_flag); 7196 msleep(1); 7197 spin_lock_irqsave(&phba->hbalock, drvr_flag); 7198 } 7199 7200 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7201 /* First copy command data */ 7202 word0 = *((uint32_t *)phba->mbox); 7203 word0 = le32_to_cpu(word0); 7204 if (mbx->mbxCommand == MBX_CONFIG_PORT) { 7205 MAILBOX_t *slimmb; 7206 uint32_t slimword0; 7207 /* Check real SLIM for any errors */ 7208 slimword0 = readl(phba->MBslimaddr); 7209 slimmb = (MAILBOX_t *) & slimword0; 7210 if (((slimword0 & OWN_CHIP) != OWN_CHIP) 7211 && slimmb->mbxStatus) { 7212 psli->sli_flag &= 7213 ~LPFC_SLI_ACTIVE; 7214 word0 = slimword0; 7215 } 7216 } 7217 } else { 7218 /* First copy command data */ 7219 word0 = readl(phba->MBslimaddr); 7220 } 7221 /* Read the HBA Host Attention Register */ 7222 if (lpfc_readl(phba->HAregaddr, &ha_copy)) { 7223 spin_unlock_irqrestore(&phba->hbalock, 7224 drvr_flag); 7225 goto out_not_finished; 7226 } 7227 } 7228 7229 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 7230 /* copy results back to user */ 7231 lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE); 7232 /* Copy the mailbox extension data */ 7233 if (pmbox->out_ext_byte_len && pmbox->context2) { 7234 lpfc_sli_pcimem_bcopy(phba->mbox_ext, 7235 pmbox->context2, 7236 pmbox->out_ext_byte_len); 7237 } 7238 } else { 7239 /* First copy command data */ 7240 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr, 7241 MAILBOX_CMD_SIZE); 7242 /* Copy the mailbox extension data */ 7243 if (pmbox->out_ext_byte_len && pmbox->context2) { 7244 lpfc_memcpy_from_slim(pmbox->context2, 7245 phba->MBslimaddr + 7246 MAILBOX_HBA_EXT_OFFSET, 7247 pmbox->out_ext_byte_len); 7248 } 7249 } 7250 7251 writel(HA_MBATT, phba->HAregaddr); 7252 readl(phba->HAregaddr); /* flush */ 7253 7254 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7255 status = mbx->mbxStatus; 7256 } 7257 7258 spin_unlock_irqrestore(&phba->hbalock, drvr_flag); 7259 return status; 7260 7261 out_not_finished: 7262 if (processing_queue) { 7263 pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; 7264 lpfc_mbox_cmpl_put(phba, pmbox); 7265 } 7266 return MBX_NOT_FINISHED; 7267 } 7268 7269 /** 7270 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command 7271 * @phba: Pointer to HBA context object. 7272 * 7273 * The function blocks the posting of SLI4 asynchronous mailbox commands from 7274 * the driver internal pending mailbox queue. It will then try to wait out the 7275 * possible outstanding mailbox command before return. 7276 * 7277 * Returns: 7278 * 0 - the outstanding mailbox command completed; otherwise, the wait for 7279 * the outstanding mailbox command timed out. 7280 **/ 7281 static int 7282 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 7283 { 7284 struct lpfc_sli *psli = &phba->sli; 7285 int rc = 0; 7286 unsigned long timeout = 0; 7287 7288 /* Mark the asynchronous mailbox command posting as blocked */ 7289 spin_lock_irq(&phba->hbalock); 7290 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 7291 /* Determine how long we might wait for the active mailbox 7292 * command to be gracefully completed by firmware. 7293 */ 7294 if (phba->sli.mbox_active) 7295 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 7296 phba->sli.mbox_active) * 7297 1000) + jiffies; 7298 spin_unlock_irq(&phba->hbalock); 7299 7300 /* Make sure the mailbox is really active */ 7301 if (timeout) 7302 lpfc_sli4_process_missed_mbox_completions(phba); 7303 7304 /* Wait for the outstnading mailbox command to complete */ 7305 while (phba->sli.mbox_active) { 7306 /* Check active mailbox complete status every 2ms */ 7307 msleep(2); 7308 if (time_after(jiffies, timeout)) { 7309 /* Timeout, marked the outstanding cmd not complete */ 7310 rc = 1; 7311 break; 7312 } 7313 } 7314 7315 /* Can not cleanly block async mailbox command, fails it */ 7316 if (rc) { 7317 spin_lock_irq(&phba->hbalock); 7318 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7319 spin_unlock_irq(&phba->hbalock); 7320 } 7321 return rc; 7322 } 7323 7324 /** 7325 * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command 7326 * @phba: Pointer to HBA context object. 7327 * 7328 * The function unblocks and resume posting of SLI4 asynchronous mailbox 7329 * commands from the driver internal pending mailbox queue. It makes sure 7330 * that there is no outstanding mailbox command before resuming posting 7331 * asynchronous mailbox commands. If, for any reason, there is outstanding 7332 * mailbox command, it will try to wait it out before resuming asynchronous 7333 * mailbox command posting. 7334 **/ 7335 static void 7336 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) 7337 { 7338 struct lpfc_sli *psli = &phba->sli; 7339 7340 spin_lock_irq(&phba->hbalock); 7341 if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7342 /* Asynchronous mailbox posting is not blocked, do nothing */ 7343 spin_unlock_irq(&phba->hbalock); 7344 return; 7345 } 7346 7347 /* Outstanding synchronous mailbox command is guaranteed to be done, 7348 * successful or timeout, after timing-out the outstanding mailbox 7349 * command shall always be removed, so just unblock posting async 7350 * mailbox command and resume 7351 */ 7352 psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; 7353 spin_unlock_irq(&phba->hbalock); 7354 7355 /* wake up worker thread to post asynchronlous mailbox command */ 7356 lpfc_worker_wake_up(phba); 7357 } 7358 7359 /** 7360 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready 7361 * @phba: Pointer to HBA context object. 7362 * @mboxq: Pointer to mailbox object. 7363 * 7364 * The function waits for the bootstrap mailbox register ready bit from 7365 * port for twice the regular mailbox command timeout value. 7366 * 7367 * 0 - no timeout on waiting for bootstrap mailbox register ready. 7368 * MBXERR_ERROR - wait for bootstrap mailbox register timed out. 7369 **/ 7370 static int 7371 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7372 { 7373 uint32_t db_ready; 7374 unsigned long timeout; 7375 struct lpfc_register bmbx_reg; 7376 7377 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq) 7378 * 1000) + jiffies; 7379 7380 do { 7381 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 7382 db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); 7383 if (!db_ready) 7384 msleep(2); 7385 7386 if (time_after(jiffies, timeout)) 7387 return MBXERR_ERROR; 7388 } while (!db_ready); 7389 7390 return 0; 7391 } 7392 7393 /** 7394 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox 7395 * @phba: Pointer to HBA context object. 7396 * @mboxq: Pointer to mailbox object. 7397 * 7398 * The function posts a mailbox to the port. The mailbox is expected 7399 * to be comletely filled in and ready for the port to operate on it. 7400 * This routine executes a synchronous completion operation on the 7401 * mailbox by polling for its completion. 7402 * 7403 * The caller must not be holding any locks when calling this routine. 7404 * 7405 * Returns: 7406 * MBX_SUCCESS - mailbox posted successfully 7407 * Any of the MBX error values. 7408 **/ 7409 static int 7410 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 7411 { 7412 int rc = MBX_SUCCESS; 7413 unsigned long iflag; 7414 uint32_t mcqe_status; 7415 uint32_t mbx_cmnd; 7416 struct lpfc_sli *psli = &phba->sli; 7417 struct lpfc_mqe *mb = &mboxq->u.mqe; 7418 struct lpfc_bmbx_create *mbox_rgn; 7419 struct dma_address *dma_address; 7420 7421 /* 7422 * Only one mailbox can be active to the bootstrap mailbox region 7423 * at a time and there is no queueing provided. 7424 */ 7425 spin_lock_irqsave(&phba->hbalock, iflag); 7426 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7427 spin_unlock_irqrestore(&phba->hbalock, iflag); 7428 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7429 "(%d):2532 Mailbox command x%x (x%x/x%x) " 7430 "cannot issue Data: x%x x%x\n", 7431 mboxq->vport ? mboxq->vport->vpi : 0, 7432 mboxq->u.mb.mbxCommand, 7433 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7434 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7435 psli->sli_flag, MBX_POLL); 7436 return MBXERR_ERROR; 7437 } 7438 /* The server grabs the token and owns it until release */ 7439 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7440 phba->sli.mbox_active = mboxq; 7441 spin_unlock_irqrestore(&phba->hbalock, iflag); 7442 7443 /* wait for bootstrap mbox register for readyness */ 7444 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7445 if (rc) 7446 goto exit; 7447 7448 /* 7449 * Initialize the bootstrap memory region to avoid stale data areas 7450 * in the mailbox post. Then copy the caller's mailbox contents to 7451 * the bmbx mailbox region. 7452 */ 7453 mbx_cmnd = bf_get(lpfc_mqe_command, mb); 7454 memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); 7455 lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, 7456 sizeof(struct lpfc_mqe)); 7457 7458 /* Post the high mailbox dma address to the port and wait for ready. */ 7459 dma_address = &phba->sli4_hba.bmbx.dma_address; 7460 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 7461 7462 /* wait for bootstrap mbox register for hi-address write done */ 7463 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7464 if (rc) 7465 goto exit; 7466 7467 /* Post the low mailbox dma address to the port. */ 7468 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 7469 7470 /* wait for bootstrap mbox register for low address write done */ 7471 rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq); 7472 if (rc) 7473 goto exit; 7474 7475 /* 7476 * Read the CQ to ensure the mailbox has completed. 7477 * If so, update the mailbox status so that the upper layers 7478 * can complete the request normally. 7479 */ 7480 lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, 7481 sizeof(struct lpfc_mqe)); 7482 mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; 7483 lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, 7484 sizeof(struct lpfc_mcqe)); 7485 mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); 7486 /* 7487 * When the CQE status indicates a failure and the mailbox status 7488 * indicates success then copy the CQE status into the mailbox status 7489 * (and prefix it with x4000). 7490 */ 7491 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 7492 if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS) 7493 bf_set(lpfc_mqe_status, mb, 7494 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 7495 rc = MBXERR_ERROR; 7496 } else 7497 lpfc_sli4_swap_str(phba, mboxq); 7498 7499 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7500 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x " 7501 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 7502 " x%x x%x CQ: x%x x%x x%x x%x\n", 7503 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7504 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7505 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7506 bf_get(lpfc_mqe_status, mb), 7507 mb->un.mb_words[0], mb->un.mb_words[1], 7508 mb->un.mb_words[2], mb->un.mb_words[3], 7509 mb->un.mb_words[4], mb->un.mb_words[5], 7510 mb->un.mb_words[6], mb->un.mb_words[7], 7511 mb->un.mb_words[8], mb->un.mb_words[9], 7512 mb->un.mb_words[10], mb->un.mb_words[11], 7513 mb->un.mb_words[12], mboxq->mcqe.word0, 7514 mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, 7515 mboxq->mcqe.trailer); 7516 exit: 7517 /* We are holding the token, no needed for lock when release */ 7518 spin_lock_irqsave(&phba->hbalock, iflag); 7519 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7520 phba->sli.mbox_active = NULL; 7521 spin_unlock_irqrestore(&phba->hbalock, iflag); 7522 return rc; 7523 } 7524 7525 /** 7526 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware 7527 * @phba: Pointer to HBA context object. 7528 * @pmbox: Pointer to mailbox object. 7529 * @flag: Flag indicating how the mailbox need to be processed. 7530 * 7531 * This function is called by discovery code and HBA management code to submit 7532 * a mailbox command to firmware with SLI-4 interface spec. 7533 * 7534 * Return codes the caller owns the mailbox command after the return of the 7535 * function. 7536 **/ 7537 static int 7538 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 7539 uint32_t flag) 7540 { 7541 struct lpfc_sli *psli = &phba->sli; 7542 unsigned long iflags; 7543 int rc; 7544 7545 /* dump from issue mailbox command if setup */ 7546 lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb); 7547 7548 rc = lpfc_mbox_dev_check(phba); 7549 if (unlikely(rc)) { 7550 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7551 "(%d):2544 Mailbox command x%x (x%x/x%x) " 7552 "cannot issue Data: x%x x%x\n", 7553 mboxq->vport ? mboxq->vport->vpi : 0, 7554 mboxq->u.mb.mbxCommand, 7555 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7556 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7557 psli->sli_flag, flag); 7558 goto out_not_finished; 7559 } 7560 7561 /* Detect polling mode and jump to a handler */ 7562 if (!phba->sli4_hba.intr_enable) { 7563 if (flag == MBX_POLL) 7564 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7565 else 7566 rc = -EIO; 7567 if (rc != MBX_SUCCESS) 7568 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7569 "(%d):2541 Mailbox command x%x " 7570 "(x%x/x%x) failure: " 7571 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7572 "Data: x%x x%x\n,", 7573 mboxq->vport ? mboxq->vport->vpi : 0, 7574 mboxq->u.mb.mbxCommand, 7575 lpfc_sli_config_mbox_subsys_get(phba, 7576 mboxq), 7577 lpfc_sli_config_mbox_opcode_get(phba, 7578 mboxq), 7579 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7580 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7581 bf_get(lpfc_mcqe_ext_status, 7582 &mboxq->mcqe), 7583 psli->sli_flag, flag); 7584 return rc; 7585 } else if (flag == MBX_POLL) { 7586 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7587 "(%d):2542 Try to issue mailbox command " 7588 "x%x (x%x/x%x) synchronously ahead of async" 7589 "mailbox command queue: x%x x%x\n", 7590 mboxq->vport ? mboxq->vport->vpi : 0, 7591 mboxq->u.mb.mbxCommand, 7592 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7593 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7594 psli->sli_flag, flag); 7595 /* Try to block the asynchronous mailbox posting */ 7596 rc = lpfc_sli4_async_mbox_block(phba); 7597 if (!rc) { 7598 /* Successfully blocked, now issue sync mbox cmd */ 7599 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7600 if (rc != MBX_SUCCESS) 7601 lpfc_printf_log(phba, KERN_WARNING, 7602 LOG_MBOX | LOG_SLI, 7603 "(%d):2597 Sync Mailbox command " 7604 "x%x (x%x/x%x) failure: " 7605 "mqe_sta: x%x mcqe_sta: x%x/x%x " 7606 "Data: x%x x%x\n,", 7607 mboxq->vport ? mboxq->vport->vpi : 0, 7608 mboxq->u.mb.mbxCommand, 7609 lpfc_sli_config_mbox_subsys_get(phba, 7610 mboxq), 7611 lpfc_sli_config_mbox_opcode_get(phba, 7612 mboxq), 7613 bf_get(lpfc_mqe_status, &mboxq->u.mqe), 7614 bf_get(lpfc_mcqe_status, &mboxq->mcqe), 7615 bf_get(lpfc_mcqe_ext_status, 7616 &mboxq->mcqe), 7617 psli->sli_flag, flag); 7618 /* Unblock the async mailbox posting afterward */ 7619 lpfc_sli4_async_mbox_unblock(phba); 7620 } 7621 return rc; 7622 } 7623 7624 /* Now, interrupt mode asynchrous mailbox command */ 7625 rc = lpfc_mbox_cmd_check(phba, mboxq); 7626 if (rc) { 7627 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7628 "(%d):2543 Mailbox command x%x (x%x/x%x) " 7629 "cannot issue Data: x%x x%x\n", 7630 mboxq->vport ? mboxq->vport->vpi : 0, 7631 mboxq->u.mb.mbxCommand, 7632 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7633 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7634 psli->sli_flag, flag); 7635 goto out_not_finished; 7636 } 7637 7638 /* Put the mailbox command to the driver internal FIFO */ 7639 psli->slistat.mbox_busy++; 7640 spin_lock_irqsave(&phba->hbalock, iflags); 7641 lpfc_mbox_put(phba, mboxq); 7642 spin_unlock_irqrestore(&phba->hbalock, iflags); 7643 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7644 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7645 "x%x (x%x/x%x) x%x x%x x%x\n", 7646 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7647 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7648 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7649 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7650 phba->pport->port_state, 7651 psli->sli_flag, MBX_NOWAIT); 7652 /* Wake up worker thread to transport mailbox command from head */ 7653 lpfc_worker_wake_up(phba); 7654 7655 return MBX_BUSY; 7656 7657 out_not_finished: 7658 return MBX_NOT_FINISHED; 7659 } 7660 7661 /** 7662 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device 7663 * @phba: Pointer to HBA context object. 7664 * 7665 * This function is called by worker thread to send a mailbox command to 7666 * SLI4 HBA firmware. 7667 * 7668 **/ 7669 int 7670 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) 7671 { 7672 struct lpfc_sli *psli = &phba->sli; 7673 LPFC_MBOXQ_t *mboxq; 7674 int rc = MBX_SUCCESS; 7675 unsigned long iflags; 7676 struct lpfc_mqe *mqe; 7677 uint32_t mbx_cmnd; 7678 7679 /* Check interrupt mode before post async mailbox command */ 7680 if (unlikely(!phba->sli4_hba.intr_enable)) 7681 return MBX_NOT_FINISHED; 7682 7683 /* Check for mailbox command service token */ 7684 spin_lock_irqsave(&phba->hbalock, iflags); 7685 if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { 7686 spin_unlock_irqrestore(&phba->hbalock, iflags); 7687 return MBX_NOT_FINISHED; 7688 } 7689 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 7690 spin_unlock_irqrestore(&phba->hbalock, iflags); 7691 return MBX_NOT_FINISHED; 7692 } 7693 if (unlikely(phba->sli.mbox_active)) { 7694 spin_unlock_irqrestore(&phba->hbalock, iflags); 7695 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7696 "0384 There is pending active mailbox cmd\n"); 7697 return MBX_NOT_FINISHED; 7698 } 7699 /* Take the mailbox command service token */ 7700 psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; 7701 7702 /* Get the next mailbox command from head of queue */ 7703 mboxq = lpfc_mbox_get(phba); 7704 7705 /* If no more mailbox command waiting for post, we're done */ 7706 if (!mboxq) { 7707 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7708 spin_unlock_irqrestore(&phba->hbalock, iflags); 7709 return MBX_SUCCESS; 7710 } 7711 phba->sli.mbox_active = mboxq; 7712 spin_unlock_irqrestore(&phba->hbalock, iflags); 7713 7714 /* Check device readiness for posting mailbox command */ 7715 rc = lpfc_mbox_dev_check(phba); 7716 if (unlikely(rc)) 7717 /* Driver clean routine will clean up pending mailbox */ 7718 goto out_not_finished; 7719 7720 /* Prepare the mbox command to be posted */ 7721 mqe = &mboxq->u.mqe; 7722 mbx_cmnd = bf_get(lpfc_mqe_command, mqe); 7723 7724 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7725 mod_timer(&psli->mbox_tmo, (jiffies + 7726 msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); 7727 7728 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7729 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " 7730 "x%x x%x\n", 7731 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7732 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7733 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7734 phba->pport->port_state, psli->sli_flag); 7735 7736 if (mbx_cmnd != MBX_HEARTBEAT) { 7737 if (mboxq->vport) { 7738 lpfc_debugfs_disc_trc(mboxq->vport, 7739 LPFC_DISC_TRC_MBOX_VPORT, 7740 "MBOX Send vport: cmd:x%x mb:x%x x%x", 7741 mbx_cmnd, mqe->un.mb_words[0], 7742 mqe->un.mb_words[1]); 7743 } else { 7744 lpfc_debugfs_disc_trc(phba->pport, 7745 LPFC_DISC_TRC_MBOX, 7746 "MBOX Send: cmd:x%x mb:x%x x%x", 7747 mbx_cmnd, mqe->un.mb_words[0], 7748 mqe->un.mb_words[1]); 7749 } 7750 } 7751 psli->slistat.mbox_cmd++; 7752 7753 /* Post the mailbox command to the port */ 7754 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7755 if (rc != MBX_SUCCESS) { 7756 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7757 "(%d):2533 Mailbox command x%x (x%x/x%x) " 7758 "cannot issue Data: x%x x%x\n", 7759 mboxq->vport ? mboxq->vport->vpi : 0, 7760 mboxq->u.mb.mbxCommand, 7761 lpfc_sli_config_mbox_subsys_get(phba, mboxq), 7762 lpfc_sli_config_mbox_opcode_get(phba, mboxq), 7763 psli->sli_flag, MBX_NOWAIT); 7764 goto out_not_finished; 7765 } 7766 7767 return rc; 7768 7769 out_not_finished: 7770 spin_lock_irqsave(&phba->hbalock, iflags); 7771 if (phba->sli.mbox_active) { 7772 mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; 7773 __lpfc_mbox_cmpl_put(phba, mboxq); 7774 /* Release the token */ 7775 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 7776 phba->sli.mbox_active = NULL; 7777 } 7778 spin_unlock_irqrestore(&phba->hbalock, iflags); 7779 7780 return MBX_NOT_FINISHED; 7781 } 7782 7783 /** 7784 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command 7785 * @phba: Pointer to HBA context object. 7786 * @pmbox: Pointer to mailbox object. 7787 * @flag: Flag indicating how the mailbox need to be processed. 7788 * 7789 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from 7790 * the API jump table function pointer from the lpfc_hba struct. 7791 * 7792 * Return codes the caller owns the mailbox command after the return of the 7793 * function. 7794 **/ 7795 int 7796 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) 7797 { 7798 return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); 7799 } 7800 7801 /** 7802 * lpfc_mbox_api_table_setup - Set up mbox api function jump table 7803 * @phba: The hba struct for which this call is being executed. 7804 * @dev_grp: The HBA PCI-Device group number. 7805 * 7806 * This routine sets up the mbox interface API function jump table in @phba 7807 * struct. 7808 * Returns: 0 - success, -ENODEV - failure. 7809 **/ 7810 int 7811 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 7812 { 7813 7814 switch (dev_grp) { 7815 case LPFC_PCI_DEV_LP: 7816 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; 7817 phba->lpfc_sli_handle_slow_ring_event = 7818 lpfc_sli_handle_slow_ring_event_s3; 7819 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; 7820 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; 7821 phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; 7822 break; 7823 case LPFC_PCI_DEV_OC: 7824 phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; 7825 phba->lpfc_sli_handle_slow_ring_event = 7826 lpfc_sli_handle_slow_ring_event_s4; 7827 phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; 7828 phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; 7829 phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; 7830 break; 7831 default: 7832 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 7833 "1420 Invalid HBA PCI-device group: 0x%x\n", 7834 dev_grp); 7835 return -ENODEV; 7836 break; 7837 } 7838 return 0; 7839 } 7840 7841 /** 7842 * __lpfc_sli_ringtx_put - Add an iocb to the txq 7843 * @phba: Pointer to HBA context object. 7844 * @pring: Pointer to driver SLI ring object. 7845 * @piocb: Pointer to address of newly added command iocb. 7846 * 7847 * This function is called with hbalock held to add a command 7848 * iocb to the txq when SLI layer cannot submit the command iocb 7849 * to the ring. 7850 **/ 7851 void 7852 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7853 struct lpfc_iocbq *piocb) 7854 { 7855 /* Insert the caller's iocb in the txq tail for later processing. */ 7856 list_add_tail(&piocb->list, &pring->txq); 7857 } 7858 7859 /** 7860 * lpfc_sli_next_iocb - Get the next iocb in the txq 7861 * @phba: Pointer to HBA context object. 7862 * @pring: Pointer to driver SLI ring object. 7863 * @piocb: Pointer to address of newly added command iocb. 7864 * 7865 * This function is called with hbalock held before a new 7866 * iocb is submitted to the firmware. This function checks 7867 * txq to flush the iocbs in txq to Firmware before 7868 * submitting new iocbs to the Firmware. 7869 * If there are iocbs in the txq which need to be submitted 7870 * to firmware, lpfc_sli_next_iocb returns the first element 7871 * of the txq after dequeuing it from txq. 7872 * If there is no iocb in the txq then the function will return 7873 * *piocb and *piocb is set to NULL. Caller needs to check 7874 * *piocb to find if there are more commands in the txq. 7875 **/ 7876 static struct lpfc_iocbq * 7877 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 7878 struct lpfc_iocbq **piocb) 7879 { 7880 struct lpfc_iocbq * nextiocb; 7881 7882 nextiocb = lpfc_sli_ringtx_get(phba, pring); 7883 if (!nextiocb) { 7884 nextiocb = *piocb; 7885 *piocb = NULL; 7886 } 7887 7888 return nextiocb; 7889 } 7890 7891 /** 7892 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb 7893 * @phba: Pointer to HBA context object. 7894 * @ring_number: SLI ring number to issue iocb on. 7895 * @piocb: Pointer to command iocb. 7896 * @flag: Flag indicating if this command can be put into txq. 7897 * 7898 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue 7899 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is 7900 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT 7901 * flag is turned on, the function returns IOCB_ERROR. When the link is down, 7902 * this function allows only iocbs for posting buffers. This function finds 7903 * next available slot in the command ring and posts the command to the 7904 * available slot and writes the port attention register to request HBA start 7905 * processing new iocb. If there is no slot available in the ring and 7906 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise 7907 * the function returns IOCB_BUSY. 7908 * 7909 * This function is called with hbalock held. The function will return success 7910 * after it successfully submit the iocb to firmware or after adding to the 7911 * txq. 7912 **/ 7913 static int 7914 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, 7915 struct lpfc_iocbq *piocb, uint32_t flag) 7916 { 7917 struct lpfc_iocbq *nextiocb; 7918 IOCB_t *iocb; 7919 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 7920 7921 if (piocb->iocb_cmpl && (!piocb->vport) && 7922 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && 7923 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { 7924 lpfc_printf_log(phba, KERN_ERR, 7925 LOG_SLI | LOG_VPORT, 7926 "1807 IOCB x%x failed. No vport\n", 7927 piocb->iocb.ulpCommand); 7928 dump_stack(); 7929 return IOCB_ERROR; 7930 } 7931 7932 7933 /* If the PCI channel is in offline state, do not post iocbs. */ 7934 if (unlikely(pci_channel_offline(phba->pcidev))) 7935 return IOCB_ERROR; 7936 7937 /* If HBA has a deferred error attention, fail the iocb. */ 7938 if (unlikely(phba->hba_flag & DEFER_ERATT)) 7939 return IOCB_ERROR; 7940 7941 /* 7942 * We should never get an IOCB if we are in a < LINK_DOWN state 7943 */ 7944 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 7945 return IOCB_ERROR; 7946 7947 /* 7948 * Check to see if we are blocking IOCB processing because of a 7949 * outstanding event. 7950 */ 7951 if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT)) 7952 goto iocb_busy; 7953 7954 if (unlikely(phba->link_state == LPFC_LINK_DOWN)) { 7955 /* 7956 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF 7957 * can be issued if the link is not up. 7958 */ 7959 switch (piocb->iocb.ulpCommand) { 7960 case CMD_GEN_REQUEST64_CR: 7961 case CMD_GEN_REQUEST64_CX: 7962 if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) || 7963 (piocb->iocb.un.genreq64.w5.hcsw.Rctl != 7964 FC_RCTL_DD_UNSOL_CMD) || 7965 (piocb->iocb.un.genreq64.w5.hcsw.Type != 7966 MENLO_TRANSPORT_TYPE)) 7967 7968 goto iocb_busy; 7969 break; 7970 case CMD_QUE_RING_BUF_CN: 7971 case CMD_QUE_RING_BUF64_CN: 7972 /* 7973 * For IOCBs, like QUE_RING_BUF, that have no rsp ring 7974 * completion, iocb_cmpl MUST be 0. 7975 */ 7976 if (piocb->iocb_cmpl) 7977 piocb->iocb_cmpl = NULL; 7978 /*FALLTHROUGH*/ 7979 case CMD_CREATE_XRI_CR: 7980 case CMD_CLOSE_XRI_CN: 7981 case CMD_CLOSE_XRI_CX: 7982 break; 7983 default: 7984 goto iocb_busy; 7985 } 7986 7987 /* 7988 * For FCP commands, we must be in a state where we can process link 7989 * attention events. 7990 */ 7991 } else if (unlikely(pring->ringno == phba->sli.fcp_ring && 7992 !(phba->sli.sli_flag & LPFC_PROCESS_LA))) { 7993 goto iocb_busy; 7994 } 7995 7996 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) && 7997 (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb))) 7998 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb); 7999 8000 if (iocb) 8001 lpfc_sli_update_ring(phba, pring); 8002 else 8003 lpfc_sli_update_full_ring(phba, pring); 8004 8005 if (!piocb) 8006 return IOCB_SUCCESS; 8007 8008 goto out_busy; 8009 8010 iocb_busy: 8011 pring->stats.iocb_cmd_delay++; 8012 8013 out_busy: 8014 8015 if (!(flag & SLI_IOCB_RET_IOCB)) { 8016 __lpfc_sli_ringtx_put(phba, pring, piocb); 8017 return IOCB_SUCCESS; 8018 } 8019 8020 return IOCB_BUSY; 8021 } 8022 8023 /** 8024 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. 8025 * @phba: Pointer to HBA context object. 8026 * @piocb: Pointer to command iocb. 8027 * @sglq: Pointer to the scatter gather queue object. 8028 * 8029 * This routine converts the bpl or bde that is in the IOCB 8030 * to a sgl list for the sli4 hardware. The physical address 8031 * of the bpl/bde is converted back to a virtual address. 8032 * If the IOCB contains a BPL then the list of BDE's is 8033 * converted to sli4_sge's. If the IOCB contains a single 8034 * BDE then it is converted to a single sli_sge. 8035 * The IOCB is still in cpu endianess so the contents of 8036 * the bpl can be used without byte swapping. 8037 * 8038 * Returns valid XRI = Success, NO_XRI = Failure. 8039 **/ 8040 static uint16_t 8041 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, 8042 struct lpfc_sglq *sglq) 8043 { 8044 uint16_t xritag = NO_XRI; 8045 struct ulp_bde64 *bpl = NULL; 8046 struct ulp_bde64 bde; 8047 struct sli4_sge *sgl = NULL; 8048 struct lpfc_dmabuf *dmabuf; 8049 IOCB_t *icmd; 8050 int numBdes = 0; 8051 int i = 0; 8052 uint32_t offset = 0; /* accumulated offset in the sg request list */ 8053 int inbound = 0; /* number of sg reply entries inbound from firmware */ 8054 8055 if (!piocbq || !sglq) 8056 return xritag; 8057 8058 sgl = (struct sli4_sge *)sglq->sgl; 8059 icmd = &piocbq->iocb; 8060 if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX) 8061 return sglq->sli4_xritag; 8062 if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8063 numBdes = icmd->un.genreq64.bdl.bdeSize / 8064 sizeof(struct ulp_bde64); 8065 /* The addrHigh and addrLow fields within the IOCB 8066 * have not been byteswapped yet so there is no 8067 * need to swap them back. 8068 */ 8069 if (piocbq->context3) 8070 dmabuf = (struct lpfc_dmabuf *)piocbq->context3; 8071 else 8072 return xritag; 8073 8074 bpl = (struct ulp_bde64 *)dmabuf->virt; 8075 if (!bpl) 8076 return xritag; 8077 8078 for (i = 0; i < numBdes; i++) { 8079 /* Should already be byte swapped. */ 8080 sgl->addr_hi = bpl->addrHigh; 8081 sgl->addr_lo = bpl->addrLow; 8082 8083 sgl->word2 = le32_to_cpu(sgl->word2); 8084 if ((i+1) == numBdes) 8085 bf_set(lpfc_sli4_sge_last, sgl, 1); 8086 else 8087 bf_set(lpfc_sli4_sge_last, sgl, 0); 8088 /* swap the size field back to the cpu so we 8089 * can assign it to the sgl. 8090 */ 8091 bde.tus.w = le32_to_cpu(bpl->tus.w); 8092 sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize); 8093 /* The offsets in the sgl need to be accumulated 8094 * separately for the request and reply lists. 8095 * The request is always first, the reply follows. 8096 */ 8097 if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) { 8098 /* add up the reply sg entries */ 8099 if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I) 8100 inbound++; 8101 /* first inbound? reset the offset */ 8102 if (inbound == 1) 8103 offset = 0; 8104 bf_set(lpfc_sli4_sge_offset, sgl, offset); 8105 bf_set(lpfc_sli4_sge_type, sgl, 8106 LPFC_SGE_TYPE_DATA); 8107 offset += bde.tus.f.bdeSize; 8108 } 8109 sgl->word2 = cpu_to_le32(sgl->word2); 8110 bpl++; 8111 sgl++; 8112 } 8113 } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { 8114 /* The addrHigh and addrLow fields of the BDE have not 8115 * been byteswapped yet so they need to be swapped 8116 * before putting them in the sgl. 8117 */ 8118 sgl->addr_hi = 8119 cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); 8120 sgl->addr_lo = 8121 cpu_to_le32(icmd->un.genreq64.bdl.addrLow); 8122 sgl->word2 = le32_to_cpu(sgl->word2); 8123 bf_set(lpfc_sli4_sge_last, sgl, 1); 8124 sgl->word2 = cpu_to_le32(sgl->word2); 8125 sgl->sge_len = 8126 cpu_to_le32(icmd->un.genreq64.bdl.bdeSize); 8127 } 8128 return sglq->sli4_xritag; 8129 } 8130 8131 /** 8132 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution 8133 * @phba: Pointer to HBA context object. 8134 * 8135 * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index 8136 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock 8137 * held. 8138 * 8139 * Return: index into SLI4 fast-path FCP queue index. 8140 **/ 8141 static inline uint32_t 8142 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 8143 { 8144 struct lpfc_vector_map_info *cpup; 8145 int chann, cpu; 8146 8147 if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU 8148 && phba->cfg_fcp_io_channel > 1) { 8149 cpu = smp_processor_id(); 8150 if (cpu < phba->sli4_hba.num_present_cpu) { 8151 cpup = phba->sli4_hba.cpu_map; 8152 cpup += cpu; 8153 return cpup->channel_id; 8154 } 8155 chann = cpu; 8156 } 8157 chann = atomic_add_return(1, &phba->fcp_qidx); 8158 chann = (chann % phba->cfg_fcp_io_channel); 8159 return chann; 8160 } 8161 8162 /** 8163 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry. 8164 * @phba: Pointer to HBA context object. 8165 * @piocb: Pointer to command iocb. 8166 * @wqe: Pointer to the work queue entry. 8167 * 8168 * This routine converts the iocb command to its Work Queue Entry 8169 * equivalent. The wqe pointer should not have any fields set when 8170 * this routine is called because it will memcpy over them. 8171 * This routine does not set the CQ_ID or the WQEC bits in the 8172 * wqe. 8173 * 8174 * Returns: 0 = Success, IOCB_ERROR = Failure. 8175 **/ 8176 static int 8177 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, 8178 union lpfc_wqe *wqe) 8179 { 8180 uint32_t xmit_len = 0, total_len = 0; 8181 uint8_t ct = 0; 8182 uint32_t fip; 8183 uint32_t abort_tag; 8184 uint8_t command_type = ELS_COMMAND_NON_FIP; 8185 uint8_t cmnd; 8186 uint16_t xritag; 8187 uint16_t abrt_iotag; 8188 struct lpfc_iocbq *abrtiocbq; 8189 struct ulp_bde64 *bpl = NULL; 8190 uint32_t els_id = LPFC_ELS_ID_DEFAULT; 8191 int numBdes, i; 8192 struct ulp_bde64 bde; 8193 struct lpfc_nodelist *ndlp; 8194 uint32_t *pcmd; 8195 uint32_t if_type; 8196 8197 fip = phba->hba_flag & HBA_FIP_SUPPORT; 8198 /* The fcp commands will set command type */ 8199 if (iocbq->iocb_flag & LPFC_IO_FCP) 8200 command_type = FCP_COMMAND; 8201 else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)) 8202 command_type = ELS_COMMAND_FIP; 8203 else 8204 command_type = ELS_COMMAND_NON_FIP; 8205 8206 /* Some of the fields are in the right position already */ 8207 memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); 8208 abort_tag = (uint32_t) iocbq->iotag; 8209 xritag = iocbq->sli4_xritag; 8210 wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ 8211 wqe->generic.wqe_com.word10 = 0; 8212 /* words0-2 bpl convert bde */ 8213 if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { 8214 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8215 sizeof(struct ulp_bde64); 8216 bpl = (struct ulp_bde64 *) 8217 ((struct lpfc_dmabuf *)iocbq->context3)->virt; 8218 if (!bpl) 8219 return IOCB_ERROR; 8220 8221 /* Should already be byte swapped. */ 8222 wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); 8223 wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); 8224 /* swap the size field back to the cpu so we 8225 * can assign it to the sgl. 8226 */ 8227 wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); 8228 xmit_len = wqe->generic.bde.tus.f.bdeSize; 8229 total_len = 0; 8230 for (i = 0; i < numBdes; i++) { 8231 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8232 total_len += bde.tus.f.bdeSize; 8233 } 8234 } else 8235 xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; 8236 8237 iocbq->iocb.ulpIoTag = iocbq->iotag; 8238 cmnd = iocbq->iocb.ulpCommand; 8239 8240 switch (iocbq->iocb.ulpCommand) { 8241 case CMD_ELS_REQUEST64_CR: 8242 if (iocbq->iocb_flag & LPFC_IO_LIBDFC) 8243 ndlp = iocbq->context_un.ndlp; 8244 else 8245 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8246 if (!iocbq->iocb.ulpLe) { 8247 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8248 "2007 Only Limited Edition cmd Format" 8249 " supported 0x%x\n", 8250 iocbq->iocb.ulpCommand); 8251 return IOCB_ERROR; 8252 } 8253 8254 wqe->els_req.payload_len = xmit_len; 8255 /* Els_reguest64 has a TMO */ 8256 bf_set(wqe_tmo, &wqe->els_req.wqe_com, 8257 iocbq->iocb.ulpTimeout); 8258 /* Need a VF for word 4 set the vf bit*/ 8259 bf_set(els_req64_vf, &wqe->els_req, 0); 8260 /* And a VFID for word 12 */ 8261 bf_set(els_req64_vfid, &wqe->els_req, 0); 8262 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8263 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8264 iocbq->iocb.ulpContext); 8265 bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); 8266 bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); 8267 /* CCP CCPE PV PRI in word10 were set in the memcpy */ 8268 if (command_type == ELS_COMMAND_FIP) 8269 els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) 8270 >> LPFC_FIP_ELS_ID_SHIFT); 8271 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8272 iocbq->context2)->virt); 8273 if_type = bf_get(lpfc_sli_intf_if_type, 8274 &phba->sli4_hba.sli_intf); 8275 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8276 if (pcmd && (*pcmd == ELS_CMD_FLOGI || 8277 *pcmd == ELS_CMD_SCR || 8278 *pcmd == ELS_CMD_FDISC || 8279 *pcmd == ELS_CMD_LOGO || 8280 *pcmd == ELS_CMD_PLOGI)) { 8281 bf_set(els_req64_sp, &wqe->els_req, 1); 8282 bf_set(els_req64_sid, &wqe->els_req, 8283 iocbq->vport->fc_myDID); 8284 if ((*pcmd == ELS_CMD_FLOGI) && 8285 !(phba->fc_topology == 8286 LPFC_TOPOLOGY_LOOP)) 8287 bf_set(els_req64_sid, &wqe->els_req, 0); 8288 bf_set(wqe_ct, &wqe->els_req.wqe_com, 1); 8289 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8290 phba->vpi_ids[iocbq->vport->vpi]); 8291 } else if (pcmd && iocbq->context1) { 8292 bf_set(wqe_ct, &wqe->els_req.wqe_com, 0); 8293 bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, 8294 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8295 } 8296 } 8297 bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com, 8298 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8299 bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); 8300 bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); 8301 bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); 8302 bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); 8303 bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8304 bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); 8305 wqe->els_req.max_response_payload_len = total_len - xmit_len; 8306 break; 8307 case CMD_XMIT_SEQUENCE64_CX: 8308 bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, 8309 iocbq->iocb.un.ulpWord[3]); 8310 bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, 8311 iocbq->iocb.unsli3.rcvsli3.ox_id); 8312 /* The entire sequence is transmitted for this IOCB */ 8313 xmit_len = total_len; 8314 cmnd = CMD_XMIT_SEQUENCE64_CR; 8315 if (phba->link_flag & LS_LOOPBACK_MODE) 8316 bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1); 8317 case CMD_XMIT_SEQUENCE64_CR: 8318 /* word3 iocb=io_tag32 wqe=reserved */ 8319 wqe->xmit_sequence.rsvd3 = 0; 8320 /* word4 relative_offset memcpy */ 8321 /* word5 r_ctl/df_ctl memcpy */ 8322 bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); 8323 bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); 8324 bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, 8325 LPFC_WQE_IOD_WRITE); 8326 bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, 8327 LPFC_WQE_LENLOC_WORD12); 8328 bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); 8329 wqe->xmit_sequence.xmit_len = xmit_len; 8330 command_type = OTHER_COMMAND; 8331 break; 8332 case CMD_XMIT_BCAST64_CN: 8333 /* word3 iocb=iotag32 wqe=seq_payload_len */ 8334 wqe->xmit_bcast64.seq_payload_len = xmit_len; 8335 /* word4 iocb=rsvd wqe=rsvd */ 8336 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ 8337 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ 8338 bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, 8339 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8340 bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); 8341 bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); 8342 bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, 8343 LPFC_WQE_LENLOC_WORD3); 8344 bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); 8345 break; 8346 case CMD_FCP_IWRITE64_CR: 8347 command_type = FCP_COMMAND_DATA_OUT; 8348 /* word3 iocb=iotag wqe=payload_offset_len */ 8349 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8350 bf_set(payload_offset_len, &wqe->fcp_iwrite, 8351 xmit_len + sizeof(struct fcp_rsp)); 8352 bf_set(cmd_buff_len, &wqe->fcp_iwrite, 8353 0); 8354 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8355 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8356 bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, 8357 iocbq->iocb.ulpFCP2Rcvy); 8358 bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); 8359 /* Always open the exchange */ 8360 bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); 8361 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); 8362 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, 8363 LPFC_WQE_LENLOC_WORD4); 8364 bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); 8365 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); 8366 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); 8367 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8368 bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); 8369 if (phba->cfg_XLanePriority) { 8370 bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); 8371 bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, 8372 (phba->cfg_XLanePriority << 1)); 8373 } 8374 } 8375 break; 8376 case CMD_FCP_IREAD64_CR: 8377 /* word3 iocb=iotag wqe=payload_offset_len */ 8378 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8379 bf_set(payload_offset_len, &wqe->fcp_iread, 8380 xmit_len + sizeof(struct fcp_rsp)); 8381 bf_set(cmd_buff_len, &wqe->fcp_iread, 8382 0); 8383 /* word4 iocb=parameter wqe=total_xfer_length memcpy */ 8384 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ 8385 bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, 8386 iocbq->iocb.ulpFCP2Rcvy); 8387 bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); 8388 /* Always open the exchange */ 8389 bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); 8390 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); 8391 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, 8392 LPFC_WQE_LENLOC_WORD4); 8393 bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); 8394 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); 8395 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); 8396 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8397 bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); 8398 if (phba->cfg_XLanePriority) { 8399 bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); 8400 bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, 8401 (phba->cfg_XLanePriority << 1)); 8402 } 8403 } 8404 break; 8405 case CMD_FCP_ICMND64_CR: 8406 /* word3 iocb=iotag wqe=payload_offset_len */ 8407 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ 8408 bf_set(payload_offset_len, &wqe->fcp_icmd, 8409 xmit_len + sizeof(struct fcp_rsp)); 8410 bf_set(cmd_buff_len, &wqe->fcp_icmd, 8411 0); 8412 /* word3 iocb=IO_TAG wqe=reserved */ 8413 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); 8414 /* Always open the exchange */ 8415 bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); 8416 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); 8417 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); 8418 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); 8419 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, 8420 LPFC_WQE_LENLOC_NONE); 8421 bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); 8422 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com, 8423 iocbq->iocb.ulpFCP2Rcvy); 8424 if (iocbq->iocb_flag & LPFC_IO_OAS) { 8425 bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); 8426 if (phba->cfg_XLanePriority) { 8427 bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); 8428 bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, 8429 (phba->cfg_XLanePriority << 1)); 8430 } 8431 } 8432 break; 8433 case CMD_GEN_REQUEST64_CR: 8434 /* For this command calculate the xmit length of the 8435 * request bde. 8436 */ 8437 xmit_len = 0; 8438 numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / 8439 sizeof(struct ulp_bde64); 8440 for (i = 0; i < numBdes; i++) { 8441 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 8442 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 8443 break; 8444 xmit_len += bde.tus.f.bdeSize; 8445 } 8446 /* word3 iocb=IO_TAG wqe=request_payload_len */ 8447 wqe->gen_req.request_payload_len = xmit_len; 8448 /* word4 iocb=parameter wqe=relative_offset memcpy */ 8449 /* word5 [rctl, type, df_ctl, la] copied in memcpy */ 8450 /* word6 context tag copied in memcpy */ 8451 if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { 8452 ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); 8453 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8454 "2015 Invalid CT %x command 0x%x\n", 8455 ct, iocbq->iocb.ulpCommand); 8456 return IOCB_ERROR; 8457 } 8458 bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); 8459 bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); 8460 bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); 8461 bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); 8462 bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); 8463 bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); 8464 bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); 8465 bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); 8466 wqe->gen_req.max_response_payload_len = total_len - xmit_len; 8467 command_type = OTHER_COMMAND; 8468 break; 8469 case CMD_XMIT_ELS_RSP64_CX: 8470 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8471 /* words0-2 BDE memcpy */ 8472 /* word3 iocb=iotag32 wqe=response_payload_len */ 8473 wqe->xmit_els_rsp.response_payload_len = xmit_len; 8474 /* word4 */ 8475 wqe->xmit_els_rsp.word4 = 0; 8476 /* word5 iocb=rsvd wge=did */ 8477 bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, 8478 iocbq->iocb.un.xseq64.xmit_els_remoteID); 8479 8480 if_type = bf_get(lpfc_sli_intf_if_type, 8481 &phba->sli4_hba.sli_intf); 8482 if (if_type == LPFC_SLI_INTF_IF_TYPE_2) { 8483 if (iocbq->vport->fc_flag & FC_PT2PT) { 8484 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8485 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8486 iocbq->vport->fc_myDID); 8487 if (iocbq->vport->fc_myDID == Fabric_DID) { 8488 bf_set(wqe_els_did, 8489 &wqe->xmit_els_rsp.wqe_dest, 0); 8490 } 8491 } 8492 } 8493 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 8494 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8495 bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); 8496 bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, 8497 iocbq->iocb.unsli3.rcvsli3.ox_id); 8498 if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) 8499 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8500 phba->vpi_ids[iocbq->vport->vpi]); 8501 bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); 8502 bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); 8503 bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); 8504 bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, 8505 LPFC_WQE_LENLOC_WORD3); 8506 bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); 8507 bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp, 8508 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 8509 pcmd = (uint32_t *) (((struct lpfc_dmabuf *) 8510 iocbq->context2)->virt); 8511 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 8512 bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1); 8513 bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, 8514 iocbq->vport->fc_myDID); 8515 bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1); 8516 bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, 8517 phba->vpi_ids[phba->pport->vpi]); 8518 } 8519 command_type = OTHER_COMMAND; 8520 break; 8521 case CMD_CLOSE_XRI_CN: 8522 case CMD_ABORT_XRI_CN: 8523 case CMD_ABORT_XRI_CX: 8524 /* words 0-2 memcpy should be 0 rserved */ 8525 /* port will send abts */ 8526 abrt_iotag = iocbq->iocb.un.acxri.abortContextTag; 8527 if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) { 8528 abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag]; 8529 fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK; 8530 } else 8531 fip = 0; 8532 8533 if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip) 8534 /* 8535 * The link is down, or the command was ELS_FIP 8536 * so the fw does not need to send abts 8537 * on the wire. 8538 */ 8539 bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); 8540 else 8541 bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); 8542 bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); 8543 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ 8544 wqe->abort_cmd.rsrvd5 = 0; 8545 bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, 8546 ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); 8547 abort_tag = iocbq->iocb.un.acxri.abortIoTag; 8548 /* 8549 * The abort handler will send us CMD_ABORT_XRI_CN or 8550 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX 8551 */ 8552 bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); 8553 bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); 8554 bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, 8555 LPFC_WQE_LENLOC_NONE); 8556 cmnd = CMD_ABORT_XRI_CX; 8557 command_type = OTHER_COMMAND; 8558 xritag = 0; 8559 break; 8560 case CMD_XMIT_BLS_RSP64_CX: 8561 ndlp = (struct lpfc_nodelist *)iocbq->context1; 8562 /* As BLS ABTS RSP WQE is very different from other WQEs, 8563 * we re-construct this WQE here based on information in 8564 * iocbq from scratch. 8565 */ 8566 memset(wqe, 0, sizeof(union lpfc_wqe)); 8567 /* OX_ID is invariable to who sent ABTS to CT exchange */ 8568 bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, 8569 bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); 8570 if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) == 8571 LPFC_ABTS_UNSOL_INT) { 8572 /* ABTS sent by initiator to CT exchange, the 8573 * RX_ID field will be filled with the newly 8574 * allocated responder XRI. 8575 */ 8576 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8577 iocbq->sli4_xritag); 8578 } else { 8579 /* ABTS sent by responder to CT exchange, the 8580 * RX_ID field will be filled with the responder 8581 * RX_ID from ABTS. 8582 */ 8583 bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp, 8584 bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp)); 8585 } 8586 bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff); 8587 bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); 8588 8589 /* Use CT=VPI */ 8590 bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest, 8591 ndlp->nlp_DID); 8592 bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp, 8593 iocbq->iocb.ulpContext); 8594 bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1); 8595 bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, 8596 phba->vpi_ids[phba->pport->vpi]); 8597 bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); 8598 bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, 8599 LPFC_WQE_LENLOC_NONE); 8600 /* Overwrite the pre-set comnd type with OTHER_COMMAND */ 8601 command_type = OTHER_COMMAND; 8602 if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) { 8603 bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp, 8604 bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp)); 8605 bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp, 8606 bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp)); 8607 bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp, 8608 bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp)); 8609 } 8610 8611 break; 8612 case CMD_XRI_ABORTED_CX: 8613 case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ 8614 case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ 8615 case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ 8616 case CMD_FCP_TRSP64_CX: /* Target mode rcv */ 8617 case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ 8618 default: 8619 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 8620 "2014 Invalid command 0x%x\n", 8621 iocbq->iocb.ulpCommand); 8622 return IOCB_ERROR; 8623 break; 8624 } 8625 8626 if (iocbq->iocb_flag & LPFC_IO_DIF_PASS) 8627 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU); 8628 else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP) 8629 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP); 8630 else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT) 8631 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT); 8632 iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP | 8633 LPFC_IO_DIF_INSERT); 8634 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); 8635 bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); 8636 wqe->generic.wqe_com.abort_tag = abort_tag; 8637 bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); 8638 bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); 8639 bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); 8640 bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); 8641 return 0; 8642 } 8643 8644 /** 8645 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb 8646 * @phba: Pointer to HBA context object. 8647 * @ring_number: SLI ring number to issue iocb on. 8648 * @piocb: Pointer to command iocb. 8649 * @flag: Flag indicating if this command can be put into txq. 8650 * 8651 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue 8652 * an iocb command to an HBA with SLI-4 interface spec. 8653 * 8654 * This function is called with hbalock held. The function will return success 8655 * after it successfully submit the iocb to firmware or after adding to the 8656 * txq. 8657 **/ 8658 static int 8659 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, 8660 struct lpfc_iocbq *piocb, uint32_t flag) 8661 { 8662 struct lpfc_sglq *sglq; 8663 union lpfc_wqe wqe; 8664 struct lpfc_queue *wq; 8665 struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; 8666 8667 if (piocb->sli4_xritag == NO_XRI) { 8668 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 8669 piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) 8670 sglq = NULL; 8671 else { 8672 if (!list_empty(&pring->txq)) { 8673 if (!(flag & SLI_IOCB_RET_IOCB)) { 8674 __lpfc_sli_ringtx_put(phba, 8675 pring, piocb); 8676 return IOCB_SUCCESS; 8677 } else { 8678 return IOCB_BUSY; 8679 } 8680 } else { 8681 sglq = __lpfc_sli_get_sglq(phba, piocb); 8682 if (!sglq) { 8683 if (!(flag & SLI_IOCB_RET_IOCB)) { 8684 __lpfc_sli_ringtx_put(phba, 8685 pring, 8686 piocb); 8687 return IOCB_SUCCESS; 8688 } else 8689 return IOCB_BUSY; 8690 } 8691 } 8692 } 8693 } else if (piocb->iocb_flag & LPFC_IO_FCP) { 8694 /* These IO's already have an XRI and a mapped sgl. */ 8695 sglq = NULL; 8696 } else { 8697 /* 8698 * This is a continuation of a commandi,(CX) so this 8699 * sglq is on the active list 8700 */ 8701 sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag); 8702 if (!sglq) 8703 return IOCB_ERROR; 8704 } 8705 8706 if (sglq) { 8707 piocb->sli4_lxritag = sglq->sli4_lxritag; 8708 piocb->sli4_xritag = sglq->sli4_xritag; 8709 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq)) 8710 return IOCB_ERROR; 8711 } 8712 8713 if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) 8714 return IOCB_ERROR; 8715 8716 if ((piocb->iocb_flag & LPFC_IO_FCP) || 8717 (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { 8718 if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { 8719 wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; 8720 } else { 8721 wq = phba->sli4_hba.oas_wq; 8722 } 8723 if (lpfc_sli4_wq_put(wq, &wqe)) 8724 return IOCB_ERROR; 8725 } else { 8726 if (unlikely(!phba->sli4_hba.els_wq)) 8727 return IOCB_ERROR; 8728 if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 8729 return IOCB_ERROR; 8730 } 8731 lpfc_sli_ringtxcmpl_put(phba, pring, piocb); 8732 8733 return 0; 8734 } 8735 8736 /** 8737 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb 8738 * 8739 * This routine wraps the actual lockless version for issusing IOCB function 8740 * pointer from the lpfc_hba struct. 8741 * 8742 * Return codes: 8743 * IOCB_ERROR - Error 8744 * IOCB_SUCCESS - Success 8745 * IOCB_BUSY - Busy 8746 **/ 8747 int 8748 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8749 struct lpfc_iocbq *piocb, uint32_t flag) 8750 { 8751 return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8752 } 8753 8754 /** 8755 * lpfc_sli_api_table_setup - Set up sli api function jump table 8756 * @phba: The hba struct for which this call is being executed. 8757 * @dev_grp: The HBA PCI-Device group number. 8758 * 8759 * This routine sets up the SLI interface API function jump table in @phba 8760 * struct. 8761 * Returns: 0 - success, -ENODEV - failure. 8762 **/ 8763 int 8764 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) 8765 { 8766 8767 switch (dev_grp) { 8768 case LPFC_PCI_DEV_LP: 8769 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; 8770 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; 8771 break; 8772 case LPFC_PCI_DEV_OC: 8773 phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; 8774 phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; 8775 break; 8776 default: 8777 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8778 "1419 Invalid HBA PCI-device group: 0x%x\n", 8779 dev_grp); 8780 return -ENODEV; 8781 break; 8782 } 8783 phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; 8784 return 0; 8785 } 8786 8787 /** 8788 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8789 * @phba: Pointer to HBA context object. 8790 * @pring: Pointer to driver SLI ring object. 8791 * @piocb: Pointer to command iocb. 8792 * @flag: Flag indicating if this command can be put into txq. 8793 * 8794 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb 8795 * function. This function gets the hbalock and calls 8796 * __lpfc_sli_issue_iocb function and will return the error returned 8797 * by __lpfc_sli_issue_iocb function. This wrapper is used by 8798 * functions which do not hold hbalock. 8799 **/ 8800 int 8801 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, 8802 struct lpfc_iocbq *piocb, uint32_t flag) 8803 { 8804 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 8805 struct lpfc_sli_ring *pring; 8806 struct lpfc_queue *fpeq; 8807 struct lpfc_eqe *eqe; 8808 unsigned long iflags; 8809 int rc, idx; 8810 8811 if (phba->sli_rev == LPFC_SLI_REV4) { 8812 if (piocb->iocb_flag & LPFC_IO_FCP) { 8813 if (!phba->cfg_fof || (!(piocb->iocb_flag & 8814 LPFC_IO_OAS))) { 8815 if (unlikely(!phba->sli4_hba.fcp_wq)) 8816 return IOCB_ERROR; 8817 idx = lpfc_sli4_scmd_to_wqidx_distr(phba); 8818 piocb->fcp_wqidx = idx; 8819 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; 8820 } else { 8821 if (unlikely(!phba->sli4_hba.oas_wq)) 8822 return IOCB_ERROR; 8823 idx = 0; 8824 piocb->fcp_wqidx = 0; 8825 ring_number = LPFC_FCP_OAS_RING; 8826 } 8827 pring = &phba->sli.ring[ring_number]; 8828 spin_lock_irqsave(&pring->ring_lock, iflags); 8829 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8830 flag); 8831 spin_unlock_irqrestore(&pring->ring_lock, iflags); 8832 8833 if (lpfc_fcp_look_ahead) { 8834 fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx]; 8835 8836 if (atomic_dec_and_test(&fcp_eq_hdl-> 8837 fcp_eq_in_use)) { 8838 8839 /* Get associated EQ with this index */ 8840 fpeq = phba->sli4_hba.hba_eq[idx]; 8841 8842 /* Turn off interrupts from this EQ */ 8843 lpfc_sli4_eq_clr_intr(fpeq); 8844 8845 /* 8846 * Process all the events on FCP EQ 8847 */ 8848 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 8849 lpfc_sli4_hba_handle_eqe(phba, 8850 eqe, idx); 8851 fpeq->EQ_processed++; 8852 } 8853 8854 /* Always clear and re-arm the EQ */ 8855 lpfc_sli4_eq_release(fpeq, 8856 LPFC_QUEUE_REARM); 8857 } 8858 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 8859 } 8860 } else { 8861 pring = &phba->sli.ring[ring_number]; 8862 spin_lock_irqsave(&pring->ring_lock, iflags); 8863 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, 8864 flag); 8865 spin_unlock_irqrestore(&pring->ring_lock, iflags); 8866 8867 } 8868 } else { 8869 /* For now, SLI2/3 will still use hbalock */ 8870 spin_lock_irqsave(&phba->hbalock, iflags); 8871 rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); 8872 spin_unlock_irqrestore(&phba->hbalock, iflags); 8873 } 8874 return rc; 8875 } 8876 8877 /** 8878 * lpfc_extra_ring_setup - Extra ring setup function 8879 * @phba: Pointer to HBA context object. 8880 * 8881 * This function is called while driver attaches with the 8882 * HBA to setup the extra ring. The extra ring is used 8883 * only when driver needs to support target mode functionality 8884 * or IP over FC functionalities. 8885 * 8886 * This function is called with no lock held. 8887 **/ 8888 static int 8889 lpfc_extra_ring_setup( struct lpfc_hba *phba) 8890 { 8891 struct lpfc_sli *psli; 8892 struct lpfc_sli_ring *pring; 8893 8894 psli = &phba->sli; 8895 8896 /* Adjust cmd/rsp ring iocb entries more evenly */ 8897 8898 /* Take some away from the FCP ring */ 8899 pring = &psli->ring[psli->fcp_ring]; 8900 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8901 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8902 pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8903 pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8904 8905 /* and give them to the extra ring */ 8906 pring = &psli->ring[psli->extra_ring]; 8907 8908 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; 8909 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; 8910 pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; 8911 pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; 8912 8913 /* Setup default profile for this ring */ 8914 pring->iotag_max = 4096; 8915 pring->num_mask = 1; 8916 pring->prt[0].profile = 0; /* Mask 0 */ 8917 pring->prt[0].rctl = phba->cfg_multi_ring_rctl; 8918 pring->prt[0].type = phba->cfg_multi_ring_type; 8919 pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; 8920 return 0; 8921 } 8922 8923 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port. 8924 * @phba: Pointer to HBA context object. 8925 * @iocbq: Pointer to iocb object. 8926 * 8927 * The async_event handler calls this routine when it receives 8928 * an ASYNC_STATUS_CN event from the port. The port generates 8929 * this event when an Abort Sequence request to an rport fails 8930 * twice in succession. The abort could be originated by the 8931 * driver or by the port. The ABTS could have been for an ELS 8932 * or FCP IO. The port only generates this event when an ABTS 8933 * fails to complete after one retry. 8934 */ 8935 static void 8936 lpfc_sli_abts_err_handler(struct lpfc_hba *phba, 8937 struct lpfc_iocbq *iocbq) 8938 { 8939 struct lpfc_nodelist *ndlp = NULL; 8940 uint16_t rpi = 0, vpi = 0; 8941 struct lpfc_vport *vport = NULL; 8942 8943 /* The rpi in the ulpContext is vport-sensitive. */ 8944 vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag; 8945 rpi = iocbq->iocb.ulpContext; 8946 8947 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8948 "3092 Port generated ABTS async event " 8949 "on vpi %d rpi %d status 0x%x\n", 8950 vpi, rpi, iocbq->iocb.ulpStatus); 8951 8952 vport = lpfc_find_vport_by_vpid(phba, vpi); 8953 if (!vport) 8954 goto err_exit; 8955 ndlp = lpfc_findnode_rpi(vport, rpi); 8956 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 8957 goto err_exit; 8958 8959 if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) 8960 lpfc_sli_abts_recover_port(vport, ndlp); 8961 return; 8962 8963 err_exit: 8964 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8965 "3095 Event Context not found, no " 8966 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n", 8967 iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus, 8968 vpi, rpi); 8969 } 8970 8971 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port. 8972 * @phba: pointer to HBA context object. 8973 * @ndlp: nodelist pointer for the impacted rport. 8974 * @axri: pointer to the wcqe containing the failed exchange. 8975 * 8976 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the 8977 * port. The port generates this event when an abort exchange request to an 8978 * rport fails twice in succession with no reply. The abort could be originated 8979 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO. 8980 */ 8981 void 8982 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba, 8983 struct lpfc_nodelist *ndlp, 8984 struct sli4_wcqe_xri_aborted *axri) 8985 { 8986 struct lpfc_vport *vport; 8987 uint32_t ext_status = 0; 8988 8989 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 8990 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 8991 "3115 Node Context not found, driver " 8992 "ignoring abts err event\n"); 8993 return; 8994 } 8995 8996 vport = ndlp->vport; 8997 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 8998 "3116 Port generated FCP XRI ABORT event on " 8999 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n", 9000 ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi], 9001 bf_get(lpfc_wcqe_xa_xri, axri), 9002 bf_get(lpfc_wcqe_xa_status, axri), 9003 axri->parameter); 9004 9005 /* 9006 * Catch the ABTS protocol failure case. Older OCe FW releases returned 9007 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and 9008 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT. 9009 */ 9010 ext_status = axri->parameter & IOERR_PARAM_MASK; 9011 if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) && 9012 ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0))) 9013 lpfc_sli_abts_recover_port(vport, ndlp); 9014 } 9015 9016 /** 9017 * lpfc_sli_async_event_handler - ASYNC iocb handler function 9018 * @phba: Pointer to HBA context object. 9019 * @pring: Pointer to driver SLI ring object. 9020 * @iocbq: Pointer to iocb object. 9021 * 9022 * This function is called by the slow ring event handler 9023 * function when there is an ASYNC event iocb in the ring. 9024 * This function is called with no lock held. 9025 * Currently this function handles only temperature related 9026 * ASYNC events. The function decodes the temperature sensor 9027 * event message and posts events for the management applications. 9028 **/ 9029 static void 9030 lpfc_sli_async_event_handler(struct lpfc_hba * phba, 9031 struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq) 9032 { 9033 IOCB_t *icmd; 9034 uint16_t evt_code; 9035 struct temp_event temp_event_data; 9036 struct Scsi_Host *shost; 9037 uint32_t *iocb_w; 9038 9039 icmd = &iocbq->iocb; 9040 evt_code = icmd->un.asyncstat.evt_code; 9041 9042 switch (evt_code) { 9043 case ASYNC_TEMP_WARN: 9044 case ASYNC_TEMP_SAFE: 9045 temp_event_data.data = (uint32_t) icmd->ulpContext; 9046 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT; 9047 if (evt_code == ASYNC_TEMP_WARN) { 9048 temp_event_data.event_code = LPFC_THRESHOLD_TEMP; 9049 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9050 "0347 Adapter is very hot, please take " 9051 "corrective action. temperature : %d Celsius\n", 9052 (uint32_t) icmd->ulpContext); 9053 } else { 9054 temp_event_data.event_code = LPFC_NORMAL_TEMP; 9055 lpfc_printf_log(phba, KERN_ERR, LOG_TEMP, 9056 "0340 Adapter temperature is OK now. " 9057 "temperature : %d Celsius\n", 9058 (uint32_t) icmd->ulpContext); 9059 } 9060 9061 /* Send temperature change event to applications */ 9062 shost = lpfc_shost_from_vport(phba->pport); 9063 fc_host_post_vendor_event(shost, fc_get_event_number(), 9064 sizeof(temp_event_data), (char *) &temp_event_data, 9065 LPFC_NL_VENDOR_ID); 9066 break; 9067 case ASYNC_STATUS_CN: 9068 lpfc_sli_abts_err_handler(phba, iocbq); 9069 break; 9070 default: 9071 iocb_w = (uint32_t *) icmd; 9072 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 9073 "0346 Ring %d handler: unexpected ASYNC_STATUS" 9074 " evt_code 0x%x\n" 9075 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n" 9076 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n" 9077 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n" 9078 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n", 9079 pring->ringno, icmd->un.asyncstat.evt_code, 9080 iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3], 9081 iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7], 9082 iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11], 9083 iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]); 9084 9085 break; 9086 } 9087 } 9088 9089 9090 /** 9091 * lpfc_sli_setup - SLI ring setup function 9092 * @phba: Pointer to HBA context object. 9093 * 9094 * lpfc_sli_setup sets up rings of the SLI interface with 9095 * number of iocbs per ring and iotags. This function is 9096 * called while driver attach to the HBA and before the 9097 * interrupts are enabled. So there is no need for locking. 9098 * 9099 * This function always returns 0. 9100 **/ 9101 int 9102 lpfc_sli_setup(struct lpfc_hba *phba) 9103 { 9104 int i, totiocbsize = 0; 9105 struct lpfc_sli *psli = &phba->sli; 9106 struct lpfc_sli_ring *pring; 9107 9108 psli->num_rings = MAX_SLI3_CONFIGURED_RINGS; 9109 if (phba->sli_rev == LPFC_SLI_REV4) 9110 psli->num_rings += phba->cfg_fcp_io_channel; 9111 psli->sli_flag = 0; 9112 psli->fcp_ring = LPFC_FCP_RING; 9113 psli->next_ring = LPFC_FCP_NEXT_RING; 9114 psli->extra_ring = LPFC_EXTRA_RING; 9115 9116 psli->iocbq_lookup = NULL; 9117 psli->iocbq_lookup_len = 0; 9118 psli->last_iotag = 0; 9119 9120 for (i = 0; i < psli->num_rings; i++) { 9121 pring = &psli->ring[i]; 9122 switch (i) { 9123 case LPFC_FCP_RING: /* ring 0 - FCP */ 9124 /* numCiocb and numRiocb are used in config_port */ 9125 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES; 9126 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES; 9127 pring->sli.sli3.numCiocb += 9128 SLI2_IOCB_CMD_R1XTRA_ENTRIES; 9129 pring->sli.sli3.numRiocb += 9130 SLI2_IOCB_RSP_R1XTRA_ENTRIES; 9131 pring->sli.sli3.numCiocb += 9132 SLI2_IOCB_CMD_R3XTRA_ENTRIES; 9133 pring->sli.sli3.numRiocb += 9134 SLI2_IOCB_RSP_R3XTRA_ENTRIES; 9135 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9136 SLI3_IOCB_CMD_SIZE : 9137 SLI2_IOCB_CMD_SIZE; 9138 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9139 SLI3_IOCB_RSP_SIZE : 9140 SLI2_IOCB_RSP_SIZE; 9141 pring->iotag_ctr = 0; 9142 pring->iotag_max = 9143 (phba->cfg_hba_queue_depth * 2); 9144 pring->fast_iotag = pring->iotag_max; 9145 pring->num_mask = 0; 9146 break; 9147 case LPFC_EXTRA_RING: /* ring 1 - EXTRA */ 9148 /* numCiocb and numRiocb are used in config_port */ 9149 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES; 9150 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES; 9151 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9152 SLI3_IOCB_CMD_SIZE : 9153 SLI2_IOCB_CMD_SIZE; 9154 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9155 SLI3_IOCB_RSP_SIZE : 9156 SLI2_IOCB_RSP_SIZE; 9157 pring->iotag_max = phba->cfg_hba_queue_depth; 9158 pring->num_mask = 0; 9159 break; 9160 case LPFC_ELS_RING: /* ring 2 - ELS / CT */ 9161 /* numCiocb and numRiocb are used in config_port */ 9162 pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES; 9163 pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES; 9164 pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ? 9165 SLI3_IOCB_CMD_SIZE : 9166 SLI2_IOCB_CMD_SIZE; 9167 pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ? 9168 SLI3_IOCB_RSP_SIZE : 9169 SLI2_IOCB_RSP_SIZE; 9170 pring->fast_iotag = 0; 9171 pring->iotag_ctr = 0; 9172 pring->iotag_max = 4096; 9173 pring->lpfc_sli_rcv_async_status = 9174 lpfc_sli_async_event_handler; 9175 pring->num_mask = LPFC_MAX_RING_MASK; 9176 pring->prt[0].profile = 0; /* Mask 0 */ 9177 pring->prt[0].rctl = FC_RCTL_ELS_REQ; 9178 pring->prt[0].type = FC_TYPE_ELS; 9179 pring->prt[0].lpfc_sli_rcv_unsol_event = 9180 lpfc_els_unsol_event; 9181 pring->prt[1].profile = 0; /* Mask 1 */ 9182 pring->prt[1].rctl = FC_RCTL_ELS_REP; 9183 pring->prt[1].type = FC_TYPE_ELS; 9184 pring->prt[1].lpfc_sli_rcv_unsol_event = 9185 lpfc_els_unsol_event; 9186 pring->prt[2].profile = 0; /* Mask 2 */ 9187 /* NameServer Inquiry */ 9188 pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL; 9189 /* NameServer */ 9190 pring->prt[2].type = FC_TYPE_CT; 9191 pring->prt[2].lpfc_sli_rcv_unsol_event = 9192 lpfc_ct_unsol_event; 9193 pring->prt[3].profile = 0; /* Mask 3 */ 9194 /* NameServer response */ 9195 pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL; 9196 /* NameServer */ 9197 pring->prt[3].type = FC_TYPE_CT; 9198 pring->prt[3].lpfc_sli_rcv_unsol_event = 9199 lpfc_ct_unsol_event; 9200 break; 9201 } 9202 totiocbsize += (pring->sli.sli3.numCiocb * 9203 pring->sli.sli3.sizeCiocb) + 9204 (pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb); 9205 } 9206 if (totiocbsize > MAX_SLIM_IOCB_SIZE) { 9207 /* Too many cmd / rsp ring entries in SLI2 SLIM */ 9208 printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in " 9209 "SLI2 SLIM Data: x%x x%lx\n", 9210 phba->brd_no, totiocbsize, 9211 (unsigned long) MAX_SLIM_IOCB_SIZE); 9212 } 9213 if (phba->cfg_multi_ring_support == 2) 9214 lpfc_extra_ring_setup(phba); 9215 9216 return 0; 9217 } 9218 9219 /** 9220 * lpfc_sli_queue_setup - Queue initialization function 9221 * @phba: Pointer to HBA context object. 9222 * 9223 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each 9224 * ring. This function also initializes ring indices of each ring. 9225 * This function is called during the initialization of the SLI 9226 * interface of an HBA. 9227 * This function is called with no lock held and always returns 9228 * 1. 9229 **/ 9230 int 9231 lpfc_sli_queue_setup(struct lpfc_hba *phba) 9232 { 9233 struct lpfc_sli *psli; 9234 struct lpfc_sli_ring *pring; 9235 int i; 9236 9237 psli = &phba->sli; 9238 spin_lock_irq(&phba->hbalock); 9239 INIT_LIST_HEAD(&psli->mboxq); 9240 INIT_LIST_HEAD(&psli->mboxq_cmpl); 9241 /* Initialize list headers for txq and txcmplq as double linked lists */ 9242 for (i = 0; i < psli->num_rings; i++) { 9243 pring = &psli->ring[i]; 9244 pring->ringno = i; 9245 pring->sli.sli3.next_cmdidx = 0; 9246 pring->sli.sli3.local_getidx = 0; 9247 pring->sli.sli3.cmdidx = 0; 9248 pring->flag = 0; 9249 INIT_LIST_HEAD(&pring->txq); 9250 INIT_LIST_HEAD(&pring->txcmplq); 9251 INIT_LIST_HEAD(&pring->iocb_continueq); 9252 INIT_LIST_HEAD(&pring->iocb_continue_saveq); 9253 INIT_LIST_HEAD(&pring->postbufq); 9254 spin_lock_init(&pring->ring_lock); 9255 } 9256 spin_unlock_irq(&phba->hbalock); 9257 return 1; 9258 } 9259 9260 /** 9261 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system 9262 * @phba: Pointer to HBA context object. 9263 * 9264 * This routine flushes the mailbox command subsystem. It will unconditionally 9265 * flush all the mailbox commands in the three possible stages in the mailbox 9266 * command sub-system: pending mailbox command queue; the outstanding mailbox 9267 * command; and completed mailbox command queue. It is caller's responsibility 9268 * to make sure that the driver is in the proper state to flush the mailbox 9269 * command sub-system. Namely, the posting of mailbox commands into the 9270 * pending mailbox command queue from the various clients must be stopped; 9271 * either the HBA is in a state that it will never works on the outstanding 9272 * mailbox command (such as in EEH or ERATT conditions) or the outstanding 9273 * mailbox command has been completed. 9274 **/ 9275 static void 9276 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) 9277 { 9278 LIST_HEAD(completions); 9279 struct lpfc_sli *psli = &phba->sli; 9280 LPFC_MBOXQ_t *pmb; 9281 unsigned long iflag; 9282 9283 /* Flush all the mailbox commands in the mbox system */ 9284 spin_lock_irqsave(&phba->hbalock, iflag); 9285 /* The pending mailbox command queue */ 9286 list_splice_init(&phba->sli.mboxq, &completions); 9287 /* The outstanding active mailbox command */ 9288 if (psli->mbox_active) { 9289 list_add_tail(&psli->mbox_active->list, &completions); 9290 psli->mbox_active = NULL; 9291 psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 9292 } 9293 /* The completed mailbox command queue */ 9294 list_splice_init(&phba->sli.mboxq_cmpl, &completions); 9295 spin_unlock_irqrestore(&phba->hbalock, iflag); 9296 9297 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ 9298 while (!list_empty(&completions)) { 9299 list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); 9300 pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; 9301 if (pmb->mbox_cmpl) 9302 pmb->mbox_cmpl(phba, pmb); 9303 } 9304 } 9305 9306 /** 9307 * lpfc_sli_host_down - Vport cleanup function 9308 * @vport: Pointer to virtual port object. 9309 * 9310 * lpfc_sli_host_down is called to clean up the resources 9311 * associated with a vport before destroying virtual 9312 * port data structures. 9313 * This function does following operations: 9314 * - Free discovery resources associated with this virtual 9315 * port. 9316 * - Free iocbs associated with this virtual port in 9317 * the txq. 9318 * - Send abort for all iocb commands associated with this 9319 * vport in txcmplq. 9320 * 9321 * This function is called with no lock held and always returns 1. 9322 **/ 9323 int 9324 lpfc_sli_host_down(struct lpfc_vport *vport) 9325 { 9326 LIST_HEAD(completions); 9327 struct lpfc_hba *phba = vport->phba; 9328 struct lpfc_sli *psli = &phba->sli; 9329 struct lpfc_sli_ring *pring; 9330 struct lpfc_iocbq *iocb, *next_iocb; 9331 int i; 9332 unsigned long flags = 0; 9333 uint16_t prev_pring_flag; 9334 9335 lpfc_cleanup_discovery_resources(vport); 9336 9337 spin_lock_irqsave(&phba->hbalock, flags); 9338 for (i = 0; i < psli->num_rings; i++) { 9339 pring = &psli->ring[i]; 9340 prev_pring_flag = pring->flag; 9341 /* Only slow rings */ 9342 if (pring->ringno == LPFC_ELS_RING) { 9343 pring->flag |= LPFC_DEFERRED_RING_EVENT; 9344 /* Set the lpfc data pending flag */ 9345 set_bit(LPFC_DATA_READY, &phba->data_flags); 9346 } 9347 /* 9348 * Error everything on the txq since these iocbs have not been 9349 * given to the FW yet. 9350 */ 9351 list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { 9352 if (iocb->vport != vport) 9353 continue; 9354 list_move_tail(&iocb->list, &completions); 9355 } 9356 9357 /* Next issue ABTS for everything on the txcmplq */ 9358 list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, 9359 list) { 9360 if (iocb->vport != vport) 9361 continue; 9362 lpfc_sli_issue_abort_iotag(phba, pring, iocb); 9363 } 9364 9365 pring->flag = prev_pring_flag; 9366 } 9367 9368 spin_unlock_irqrestore(&phba->hbalock, flags); 9369 9370 /* Cancel all the IOCBs from the completions list */ 9371 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9372 IOERR_SLI_DOWN); 9373 return 1; 9374 } 9375 9376 /** 9377 * lpfc_sli_hba_down - Resource cleanup function for the HBA 9378 * @phba: Pointer to HBA context object. 9379 * 9380 * This function cleans up all iocb, buffers, mailbox commands 9381 * while shutting down the HBA. This function is called with no 9382 * lock held and always returns 1. 9383 * This function does the following to cleanup driver resources: 9384 * - Free discovery resources for each virtual port 9385 * - Cleanup any pending fabric iocbs 9386 * - Iterate through the iocb txq and free each entry 9387 * in the list. 9388 * - Free up any buffer posted to the HBA 9389 * - Free mailbox commands in the mailbox queue. 9390 **/ 9391 int 9392 lpfc_sli_hba_down(struct lpfc_hba *phba) 9393 { 9394 LIST_HEAD(completions); 9395 struct lpfc_sli *psli = &phba->sli; 9396 struct lpfc_sli_ring *pring; 9397 struct lpfc_dmabuf *buf_ptr; 9398 unsigned long flags = 0; 9399 int i; 9400 9401 /* Shutdown the mailbox command sub-system */ 9402 lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT); 9403 9404 lpfc_hba_down_prep(phba); 9405 9406 lpfc_fabric_abort_hba(phba); 9407 9408 spin_lock_irqsave(&phba->hbalock, flags); 9409 for (i = 0; i < psli->num_rings; i++) { 9410 pring = &psli->ring[i]; 9411 /* Only slow rings */ 9412 if (pring->ringno == LPFC_ELS_RING) { 9413 pring->flag |= LPFC_DEFERRED_RING_EVENT; 9414 /* Set the lpfc data pending flag */ 9415 set_bit(LPFC_DATA_READY, &phba->data_flags); 9416 } 9417 9418 /* 9419 * Error everything on the txq since these iocbs have not been 9420 * given to the FW yet. 9421 */ 9422 list_splice_init(&pring->txq, &completions); 9423 } 9424 spin_unlock_irqrestore(&phba->hbalock, flags); 9425 9426 /* Cancel all the IOCBs from the completions list */ 9427 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 9428 IOERR_SLI_DOWN); 9429 9430 spin_lock_irqsave(&phba->hbalock, flags); 9431 list_splice_init(&phba->elsbuf, &completions); 9432 phba->elsbuf_cnt = 0; 9433 phba->elsbuf_prev_cnt = 0; 9434 spin_unlock_irqrestore(&phba->hbalock, flags); 9435 9436 while (!list_empty(&completions)) { 9437 list_remove_head(&completions, buf_ptr, 9438 struct lpfc_dmabuf, list); 9439 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys); 9440 kfree(buf_ptr); 9441 } 9442 9443 /* Return any active mbox cmds */ 9444 del_timer_sync(&psli->mbox_tmo); 9445 9446 spin_lock_irqsave(&phba->pport->work_port_lock, flags); 9447 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 9448 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); 9449 9450 return 1; 9451 } 9452 9453 /** 9454 * lpfc_sli_pcimem_bcopy - SLI memory copy function 9455 * @srcp: Source memory pointer. 9456 * @destp: Destination memory pointer. 9457 * @cnt: Number of words required to be copied. 9458 * 9459 * This function is used for copying data between driver memory 9460 * and the SLI memory. This function also changes the endianness 9461 * of each word if native endianness is different from SLI 9462 * endianness. This function can be called with or without 9463 * lock. 9464 **/ 9465 void 9466 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt) 9467 { 9468 uint32_t *src = srcp; 9469 uint32_t *dest = destp; 9470 uint32_t ldata; 9471 int i; 9472 9473 for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) { 9474 ldata = *src; 9475 ldata = le32_to_cpu(ldata); 9476 *dest = ldata; 9477 src++; 9478 dest++; 9479 } 9480 } 9481 9482 9483 /** 9484 * lpfc_sli_bemem_bcopy - SLI memory copy function 9485 * @srcp: Source memory pointer. 9486 * @destp: Destination memory pointer. 9487 * @cnt: Number of words required to be copied. 9488 * 9489 * This function is used for copying data between a data structure 9490 * with big endian representation to local endianness. 9491 * This function can be called with or without lock. 9492 **/ 9493 void 9494 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt) 9495 { 9496 uint32_t *src = srcp; 9497 uint32_t *dest = destp; 9498 uint32_t ldata; 9499 int i; 9500 9501 for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) { 9502 ldata = *src; 9503 ldata = be32_to_cpu(ldata); 9504 *dest = ldata; 9505 src++; 9506 dest++; 9507 } 9508 } 9509 9510 /** 9511 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq 9512 * @phba: Pointer to HBA context object. 9513 * @pring: Pointer to driver SLI ring object. 9514 * @mp: Pointer to driver buffer object. 9515 * 9516 * This function is called with no lock held. 9517 * It always return zero after adding the buffer to the postbufq 9518 * buffer list. 9519 **/ 9520 int 9521 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9522 struct lpfc_dmabuf *mp) 9523 { 9524 /* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up 9525 later */ 9526 spin_lock_irq(&phba->hbalock); 9527 list_add_tail(&mp->list, &pring->postbufq); 9528 pring->postbufq_cnt++; 9529 spin_unlock_irq(&phba->hbalock); 9530 return 0; 9531 } 9532 9533 /** 9534 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer 9535 * @phba: Pointer to HBA context object. 9536 * 9537 * When HBQ is enabled, buffers are searched based on tags. This function 9538 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The 9539 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag 9540 * does not conflict with tags of buffer posted for unsolicited events. 9541 * The function returns the allocated tag. The function is called with 9542 * no locks held. 9543 **/ 9544 uint32_t 9545 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba) 9546 { 9547 spin_lock_irq(&phba->hbalock); 9548 phba->buffer_tag_count++; 9549 /* 9550 * Always set the QUE_BUFTAG_BIT to distiguish between 9551 * a tag assigned by HBQ. 9552 */ 9553 phba->buffer_tag_count |= QUE_BUFTAG_BIT; 9554 spin_unlock_irq(&phba->hbalock); 9555 return phba->buffer_tag_count; 9556 } 9557 9558 /** 9559 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag 9560 * @phba: Pointer to HBA context object. 9561 * @pring: Pointer to driver SLI ring object. 9562 * @tag: Buffer tag. 9563 * 9564 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq 9565 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX 9566 * iocb is posted to the response ring with the tag of the buffer. 9567 * This function searches the pring->postbufq list using the tag 9568 * to find buffer associated with CMD_IOCB_RET_XRI64_CX 9569 * iocb. If the buffer is found then lpfc_dmabuf object of the 9570 * buffer is returned to the caller else NULL is returned. 9571 * This function is called with no lock held. 9572 **/ 9573 struct lpfc_dmabuf * 9574 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9575 uint32_t tag) 9576 { 9577 struct lpfc_dmabuf *mp, *next_mp; 9578 struct list_head *slp = &pring->postbufq; 9579 9580 /* Search postbufq, from the beginning, looking for a match on tag */ 9581 spin_lock_irq(&phba->hbalock); 9582 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9583 if (mp->buffer_tag == tag) { 9584 list_del_init(&mp->list); 9585 pring->postbufq_cnt--; 9586 spin_unlock_irq(&phba->hbalock); 9587 return mp; 9588 } 9589 } 9590 9591 spin_unlock_irq(&phba->hbalock); 9592 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9593 "0402 Cannot find virtual addr for buffer tag on " 9594 "ring %d Data x%lx x%p x%p x%x\n", 9595 pring->ringno, (unsigned long) tag, 9596 slp->next, slp->prev, pring->postbufq_cnt); 9597 9598 return NULL; 9599 } 9600 9601 /** 9602 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events 9603 * @phba: Pointer to HBA context object. 9604 * @pring: Pointer to driver SLI ring object. 9605 * @phys: DMA address of the buffer. 9606 * 9607 * This function searches the buffer list using the dma_address 9608 * of unsolicited event to find the driver's lpfc_dmabuf object 9609 * corresponding to the dma_address. The function returns the 9610 * lpfc_dmabuf object if a buffer is found else it returns NULL. 9611 * This function is called by the ct and els unsolicited event 9612 * handlers to get the buffer associated with the unsolicited 9613 * event. 9614 * 9615 * This function is called with no lock held. 9616 **/ 9617 struct lpfc_dmabuf * 9618 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9619 dma_addr_t phys) 9620 { 9621 struct lpfc_dmabuf *mp, *next_mp; 9622 struct list_head *slp = &pring->postbufq; 9623 9624 /* Search postbufq, from the beginning, looking for a match on phys */ 9625 spin_lock_irq(&phba->hbalock); 9626 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { 9627 if (mp->phys == phys) { 9628 list_del_init(&mp->list); 9629 pring->postbufq_cnt--; 9630 spin_unlock_irq(&phba->hbalock); 9631 return mp; 9632 } 9633 } 9634 9635 spin_unlock_irq(&phba->hbalock); 9636 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 9637 "0410 Cannot find virtual addr for mapped buf on " 9638 "ring %d Data x%llx x%p x%p x%x\n", 9639 pring->ringno, (unsigned long long)phys, 9640 slp->next, slp->prev, pring->postbufq_cnt); 9641 return NULL; 9642 } 9643 9644 /** 9645 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs 9646 * @phba: Pointer to HBA context object. 9647 * @cmdiocb: Pointer to driver command iocb object. 9648 * @rspiocb: Pointer to driver response iocb object. 9649 * 9650 * This function is the completion handler for the abort iocbs for 9651 * ELS commands. This function is called from the ELS ring event 9652 * handler with no lock held. This function frees memory resources 9653 * associated with the abort iocb. 9654 **/ 9655 static void 9656 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9657 struct lpfc_iocbq *rspiocb) 9658 { 9659 IOCB_t *irsp = &rspiocb->iocb; 9660 uint16_t abort_iotag, abort_context; 9661 struct lpfc_iocbq *abort_iocb = NULL; 9662 9663 if (irsp->ulpStatus) { 9664 9665 /* 9666 * Assume that the port already completed and returned, or 9667 * will return the iocb. Just Log the message. 9668 */ 9669 abort_context = cmdiocb->iocb.un.acxri.abortContextTag; 9670 abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag; 9671 9672 spin_lock_irq(&phba->hbalock); 9673 if (phba->sli_rev < LPFC_SLI_REV4) { 9674 if (abort_iotag != 0 && 9675 abort_iotag <= phba->sli.last_iotag) 9676 abort_iocb = 9677 phba->sli.iocbq_lookup[abort_iotag]; 9678 } else 9679 /* For sli4 the abort_tag is the XRI, 9680 * so the abort routine puts the iotag of the iocb 9681 * being aborted in the context field of the abort 9682 * IOCB. 9683 */ 9684 abort_iocb = phba->sli.iocbq_lookup[abort_context]; 9685 9686 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, 9687 "0327 Cannot abort els iocb %p " 9688 "with tag %x context %x, abort status %x, " 9689 "abort code %x\n", 9690 abort_iocb, abort_iotag, abort_context, 9691 irsp->ulpStatus, irsp->un.ulpWord[4]); 9692 9693 spin_unlock_irq(&phba->hbalock); 9694 } 9695 lpfc_sli_release_iocbq(phba, cmdiocb); 9696 return; 9697 } 9698 9699 /** 9700 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command 9701 * @phba: Pointer to HBA context object. 9702 * @cmdiocb: Pointer to driver command iocb object. 9703 * @rspiocb: Pointer to driver response iocb object. 9704 * 9705 * The function is called from SLI ring event handler with no 9706 * lock held. This function is the completion handler for ELS commands 9707 * which are aborted. The function frees memory resources used for 9708 * the aborted ELS commands. 9709 **/ 9710 static void 9711 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 9712 struct lpfc_iocbq *rspiocb) 9713 { 9714 IOCB_t *irsp = &rspiocb->iocb; 9715 9716 /* ELS cmd tag <ulpIoTag> completes */ 9717 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 9718 "0139 Ignoring ELS cmd tag x%x completion Data: " 9719 "x%x x%x x%x\n", 9720 irsp->ulpIoTag, irsp->ulpStatus, 9721 irsp->un.ulpWord[4], irsp->ulpTimeout); 9722 if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) 9723 lpfc_ct_free_iocb(phba, cmdiocb); 9724 else 9725 lpfc_els_free_iocb(phba, cmdiocb); 9726 return; 9727 } 9728 9729 /** 9730 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb 9731 * @phba: Pointer to HBA context object. 9732 * @pring: Pointer to driver SLI ring object. 9733 * @cmdiocb: Pointer to driver command iocb object. 9734 * 9735 * This function issues an abort iocb for the provided command iocb down to 9736 * the port. Other than the case the outstanding command iocb is an abort 9737 * request, this function issues abort out unconditionally. This function is 9738 * called with hbalock held. The function returns 0 when it fails due to 9739 * memory allocation failure or when the command iocb is an abort request. 9740 **/ 9741 static int 9742 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9743 struct lpfc_iocbq *cmdiocb) 9744 { 9745 struct lpfc_vport *vport = cmdiocb->vport; 9746 struct lpfc_iocbq *abtsiocbp; 9747 IOCB_t *icmd = NULL; 9748 IOCB_t *iabt = NULL; 9749 int retval; 9750 unsigned long iflags; 9751 9752 /* 9753 * There are certain command types we don't want to abort. And we 9754 * don't want to abort commands that are already in the process of 9755 * being aborted. 9756 */ 9757 icmd = &cmdiocb->iocb; 9758 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9759 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9760 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9761 return 0; 9762 9763 /* issue ABTS for this IOCB based on iotag */ 9764 abtsiocbp = __lpfc_sli_get_iocbq(phba); 9765 if (abtsiocbp == NULL) 9766 return 0; 9767 9768 /* This signals the response to set the correct status 9769 * before calling the completion handler 9770 */ 9771 cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED; 9772 9773 iabt = &abtsiocbp->iocb; 9774 iabt->un.acxri.abortType = ABORT_TYPE_ABTS; 9775 iabt->un.acxri.abortContextTag = icmd->ulpContext; 9776 if (phba->sli_rev == LPFC_SLI_REV4) { 9777 iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; 9778 iabt->un.acxri.abortContextTag = cmdiocb->iotag; 9779 } 9780 else 9781 iabt->un.acxri.abortIoTag = icmd->ulpIoTag; 9782 iabt->ulpLe = 1; 9783 iabt->ulpClass = icmd->ulpClass; 9784 9785 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 9786 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 9787 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 9788 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 9789 9790 if (phba->link_state >= LPFC_LINK_UP) 9791 iabt->ulpCommand = CMD_ABORT_XRI_CN; 9792 else 9793 iabt->ulpCommand = CMD_CLOSE_XRI_CN; 9794 9795 abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl; 9796 9797 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 9798 "0339 Abort xri x%x, original iotag x%x, " 9799 "abort cmd iotag x%x\n", 9800 iabt->un.acxri.abortIoTag, 9801 iabt->un.acxri.abortContextTag, 9802 abtsiocbp->iotag); 9803 9804 if (phba->sli_rev == LPFC_SLI_REV4) { 9805 /* Note: both hbalock and ring_lock need to be set here */ 9806 spin_lock_irqsave(&pring->ring_lock, iflags); 9807 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 9808 abtsiocbp, 0); 9809 spin_unlock_irqrestore(&pring->ring_lock, iflags); 9810 } else { 9811 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 9812 abtsiocbp, 0); 9813 } 9814 9815 if (retval) 9816 __lpfc_sli_release_iocbq(phba, abtsiocbp); 9817 9818 /* 9819 * Caller to this routine should check for IOCB_ERROR 9820 * and handle it properly. This routine no longer removes 9821 * iocb off txcmplq and call compl in case of IOCB_ERROR. 9822 */ 9823 return retval; 9824 } 9825 9826 /** 9827 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb 9828 * @phba: Pointer to HBA context object. 9829 * @pring: Pointer to driver SLI ring object. 9830 * @cmdiocb: Pointer to driver command iocb object. 9831 * 9832 * This function issues an abort iocb for the provided command iocb. In case 9833 * of unloading, the abort iocb will not be issued to commands on the ELS 9834 * ring. Instead, the callback function shall be changed to those commands 9835 * so that nothing happens when them finishes. This function is called with 9836 * hbalock held. The function returns 0 when the command iocb is an abort 9837 * request. 9838 **/ 9839 int 9840 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 9841 struct lpfc_iocbq *cmdiocb) 9842 { 9843 struct lpfc_vport *vport = cmdiocb->vport; 9844 int retval = IOCB_ERROR; 9845 IOCB_t *icmd = NULL; 9846 9847 /* 9848 * There are certain command types we don't want to abort. And we 9849 * don't want to abort commands that are already in the process of 9850 * being aborted. 9851 */ 9852 icmd = &cmdiocb->iocb; 9853 if (icmd->ulpCommand == CMD_ABORT_XRI_CN || 9854 icmd->ulpCommand == CMD_CLOSE_XRI_CN || 9855 (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) 9856 return 0; 9857 9858 /* 9859 * If we're unloading, don't abort iocb on the ELS ring, but change 9860 * the callback so that nothing happens when it finishes. 9861 */ 9862 if ((vport->load_flag & FC_UNLOADING) && 9863 (pring->ringno == LPFC_ELS_RING)) { 9864 if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) 9865 cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; 9866 else 9867 cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; 9868 goto abort_iotag_exit; 9869 } 9870 9871 /* Now, we try to issue the abort to the cmdiocb out */ 9872 retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); 9873 9874 abort_iotag_exit: 9875 /* 9876 * Caller to this routine should check for IOCB_ERROR 9877 * and handle it properly. This routine no longer removes 9878 * iocb off txcmplq and call compl in case of IOCB_ERROR. 9879 */ 9880 return retval; 9881 } 9882 9883 /** 9884 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. 9885 * @phba: pointer to lpfc HBA data structure. 9886 * 9887 * This routine will abort all pending and outstanding iocbs to an HBA. 9888 **/ 9889 void 9890 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) 9891 { 9892 struct lpfc_sli *psli = &phba->sli; 9893 struct lpfc_sli_ring *pring; 9894 int i; 9895 9896 for (i = 0; i < psli->num_rings; i++) { 9897 pring = &psli->ring[i]; 9898 lpfc_sli_abort_iocb_ring(phba, pring); 9899 } 9900 } 9901 9902 /** 9903 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN 9904 * @iocbq: Pointer to driver iocb object. 9905 * @vport: Pointer to driver virtual port object. 9906 * @tgt_id: SCSI ID of the target. 9907 * @lun_id: LUN ID of the scsi device. 9908 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST 9909 * 9910 * This function acts as an iocb filter for functions which abort or count 9911 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return 9912 * 0 if the filtering criteria is met for the given iocb and will return 9913 * 1 if the filtering criteria is not met. 9914 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the 9915 * given iocb is for the SCSI device specified by vport, tgt_id and 9916 * lun_id parameter. 9917 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the 9918 * given iocb is for the SCSI target specified by vport and tgt_id 9919 * parameters. 9920 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the 9921 * given iocb is for the SCSI host associated with the given vport. 9922 * This function is called with no locks held. 9923 **/ 9924 static int 9925 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, 9926 uint16_t tgt_id, uint64_t lun_id, 9927 lpfc_ctx_cmd ctx_cmd) 9928 { 9929 struct lpfc_scsi_buf *lpfc_cmd; 9930 int rc = 1; 9931 9932 if (!(iocbq->iocb_flag & LPFC_IO_FCP)) 9933 return rc; 9934 9935 if (iocbq->vport != vport) 9936 return rc; 9937 9938 lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); 9939 9940 if (lpfc_cmd->pCmd == NULL) 9941 return rc; 9942 9943 switch (ctx_cmd) { 9944 case LPFC_CTX_LUN: 9945 if ((lpfc_cmd->rdata->pnode) && 9946 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && 9947 (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) 9948 rc = 0; 9949 break; 9950 case LPFC_CTX_TGT: 9951 if ((lpfc_cmd->rdata->pnode) && 9952 (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) 9953 rc = 0; 9954 break; 9955 case LPFC_CTX_HOST: 9956 rc = 0; 9957 break; 9958 default: 9959 printk(KERN_ERR "%s: Unknown context cmd type, value %d\n", 9960 __func__, ctx_cmd); 9961 break; 9962 } 9963 9964 return rc; 9965 } 9966 9967 /** 9968 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending 9969 * @vport: Pointer to virtual port. 9970 * @tgt_id: SCSI ID of the target. 9971 * @lun_id: LUN ID of the scsi device. 9972 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 9973 * 9974 * This function returns number of FCP commands pending for the vport. 9975 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP 9976 * commands pending on the vport associated with SCSI device specified 9977 * by tgt_id and lun_id parameters. 9978 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP 9979 * commands pending on the vport associated with SCSI target specified 9980 * by tgt_id parameter. 9981 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP 9982 * commands pending on the vport. 9983 * This function returns the number of iocbs which satisfy the filter. 9984 * This function is called without any lock held. 9985 **/ 9986 int 9987 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id, 9988 lpfc_ctx_cmd ctx_cmd) 9989 { 9990 struct lpfc_hba *phba = vport->phba; 9991 struct lpfc_iocbq *iocbq; 9992 int sum, i; 9993 9994 for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) { 9995 iocbq = phba->sli.iocbq_lookup[i]; 9996 9997 if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id, 9998 ctx_cmd) == 0) 9999 sum++; 10000 } 10001 10002 return sum; 10003 } 10004 10005 /** 10006 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs 10007 * @phba: Pointer to HBA context object 10008 * @cmdiocb: Pointer to command iocb object. 10009 * @rspiocb: Pointer to response iocb object. 10010 * 10011 * This function is called when an aborted FCP iocb completes. This 10012 * function is called by the ring event handler with no lock held. 10013 * This function frees the iocb. 10014 **/ 10015 void 10016 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, 10017 struct lpfc_iocbq *rspiocb) 10018 { 10019 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10020 "3096 ABORT_XRI_CN completing on rpi x%x " 10021 "original iotag x%x, abort cmd iotag x%x " 10022 "status 0x%x, reason 0x%x\n", 10023 cmdiocb->iocb.un.acxri.abortContextTag, 10024 cmdiocb->iocb.un.acxri.abortIoTag, 10025 cmdiocb->iotag, rspiocb->iocb.ulpStatus, 10026 rspiocb->iocb.un.ulpWord[4]); 10027 lpfc_sli_release_iocbq(phba, cmdiocb); 10028 return; 10029 } 10030 10031 /** 10032 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN 10033 * @vport: Pointer to virtual port. 10034 * @pring: Pointer to driver SLI ring object. 10035 * @tgt_id: SCSI ID of the target. 10036 * @lun_id: LUN ID of the scsi device. 10037 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10038 * 10039 * This function sends an abort command for every SCSI command 10040 * associated with the given virtual port pending on the ring 10041 * filtered by lpfc_sli_validate_fcp_iocb function. 10042 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the 10043 * FCP iocbs associated with lun specified by tgt_id and lun_id 10044 * parameters 10045 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the 10046 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 10047 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all 10048 * FCP iocbs associated with virtual port. 10049 * This function returns number of iocbs it failed to abort. 10050 * This function is called with no locks held. 10051 **/ 10052 int 10053 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 10054 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd) 10055 { 10056 struct lpfc_hba *phba = vport->phba; 10057 struct lpfc_iocbq *iocbq; 10058 struct lpfc_iocbq *abtsiocb; 10059 IOCB_t *cmd = NULL; 10060 int errcnt = 0, ret_val = 0; 10061 int i; 10062 10063 for (i = 1; i <= phba->sli.last_iotag; i++) { 10064 iocbq = phba->sli.iocbq_lookup[i]; 10065 10066 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 10067 abort_cmd) != 0) 10068 continue; 10069 10070 /* 10071 * If the iocbq is already being aborted, don't take a second 10072 * action, but do count it. 10073 */ 10074 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 10075 continue; 10076 10077 /* issue ABTS for this IOCB based on iotag */ 10078 abtsiocb = lpfc_sli_get_iocbq(phba); 10079 if (abtsiocb == NULL) { 10080 errcnt++; 10081 continue; 10082 } 10083 10084 /* indicate the IO is being aborted by the driver. */ 10085 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 10086 10087 cmd = &iocbq->iocb; 10088 abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 10089 abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; 10090 if (phba->sli_rev == LPFC_SLI_REV4) 10091 abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; 10092 else 10093 abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; 10094 abtsiocb->iocb.ulpLe = 1; 10095 abtsiocb->iocb.ulpClass = cmd->ulpClass; 10096 abtsiocb->vport = vport; 10097 10098 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10099 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 10100 if (iocbq->iocb_flag & LPFC_IO_FCP) 10101 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 10102 10103 if (lpfc_is_link_up(phba)) 10104 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 10105 else 10106 abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 10107 10108 /* Setup callback routine and issue the command. */ 10109 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 10110 ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, 10111 abtsiocb, 0); 10112 if (ret_val == IOCB_ERROR) { 10113 lpfc_sli_release_iocbq(phba, abtsiocb); 10114 errcnt++; 10115 continue; 10116 } 10117 } 10118 10119 return errcnt; 10120 } 10121 10122 /** 10123 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN 10124 * @vport: Pointer to virtual port. 10125 * @pring: Pointer to driver SLI ring object. 10126 * @tgt_id: SCSI ID of the target. 10127 * @lun_id: LUN ID of the scsi device. 10128 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. 10129 * 10130 * This function sends an abort command for every SCSI command 10131 * associated with the given virtual port pending on the ring 10132 * filtered by lpfc_sli_validate_fcp_iocb function. 10133 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the 10134 * FCP iocbs associated with lun specified by tgt_id and lun_id 10135 * parameters 10136 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the 10137 * FCP iocbs associated with SCSI target specified by tgt_id parameter. 10138 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all 10139 * FCP iocbs associated with virtual port. 10140 * This function returns number of iocbs it aborted . 10141 * This function is called with no locks held right after a taskmgmt 10142 * command is sent. 10143 **/ 10144 int 10145 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, 10146 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 10147 { 10148 struct lpfc_hba *phba = vport->phba; 10149 struct lpfc_iocbq *abtsiocbq; 10150 struct lpfc_iocbq *iocbq; 10151 IOCB_t *icmd; 10152 int sum, i, ret_val; 10153 unsigned long iflags; 10154 struct lpfc_sli_ring *pring_s4; 10155 uint32_t ring_number; 10156 10157 spin_lock_irq(&phba->hbalock); 10158 10159 /* all I/Os are in process of being flushed */ 10160 if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { 10161 spin_unlock_irq(&phba->hbalock); 10162 return 0; 10163 } 10164 sum = 0; 10165 10166 for (i = 1; i <= phba->sli.last_iotag; i++) { 10167 iocbq = phba->sli.iocbq_lookup[i]; 10168 10169 if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, 10170 cmd) != 0) 10171 continue; 10172 10173 /* 10174 * If the iocbq is already being aborted, don't take a second 10175 * action, but do count it. 10176 */ 10177 if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) 10178 continue; 10179 10180 /* issue ABTS for this IOCB based on iotag */ 10181 abtsiocbq = __lpfc_sli_get_iocbq(phba); 10182 if (abtsiocbq == NULL) 10183 continue; 10184 10185 icmd = &iocbq->iocb; 10186 abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; 10187 abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; 10188 if (phba->sli_rev == LPFC_SLI_REV4) 10189 abtsiocbq->iocb.un.acxri.abortIoTag = 10190 iocbq->sli4_xritag; 10191 else 10192 abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; 10193 abtsiocbq->iocb.ulpLe = 1; 10194 abtsiocbq->iocb.ulpClass = icmd->ulpClass; 10195 abtsiocbq->vport = vport; 10196 10197 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 10198 abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; 10199 if (iocbq->iocb_flag & LPFC_IO_FCP) 10200 abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; 10201 10202 if (lpfc_is_link_up(phba)) 10203 abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; 10204 else 10205 abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; 10206 10207 /* Setup callback routine and issue the command. */ 10208 abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; 10209 10210 /* 10211 * Indicate the IO is being aborted by the driver and set 10212 * the caller's flag into the aborted IO. 10213 */ 10214 iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; 10215 10216 if (phba->sli_rev == LPFC_SLI_REV4) { 10217 ring_number = MAX_SLI3_CONFIGURED_RINGS + 10218 iocbq->fcp_wqidx; 10219 pring_s4 = &phba->sli.ring[ring_number]; 10220 /* Note: both hbalock and ring_lock must be set here */ 10221 spin_lock_irqsave(&pring_s4->ring_lock, iflags); 10222 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, 10223 abtsiocbq, 0); 10224 spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); 10225 } else { 10226 ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, 10227 abtsiocbq, 0); 10228 } 10229 10230 10231 if (ret_val == IOCB_ERROR) 10232 __lpfc_sli_release_iocbq(phba, abtsiocbq); 10233 else 10234 sum++; 10235 } 10236 spin_unlock_irq(&phba->hbalock); 10237 return sum; 10238 } 10239 10240 /** 10241 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler 10242 * @phba: Pointer to HBA context object. 10243 * @cmdiocbq: Pointer to command iocb. 10244 * @rspiocbq: Pointer to response iocb. 10245 * 10246 * This function is the completion handler for iocbs issued using 10247 * lpfc_sli_issue_iocb_wait function. This function is called by the 10248 * ring event handler function without any lock held. This function 10249 * can be called from both worker thread context and interrupt 10250 * context. This function also can be called from other thread which 10251 * cleans up the SLI layer objects. 10252 * This function copy the contents of the response iocb to the 10253 * response iocb memory object provided by the caller of 10254 * lpfc_sli_issue_iocb_wait and then wakes up the thread which 10255 * sleeps for the iocb completion. 10256 **/ 10257 static void 10258 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, 10259 struct lpfc_iocbq *cmdiocbq, 10260 struct lpfc_iocbq *rspiocbq) 10261 { 10262 wait_queue_head_t *pdone_q; 10263 unsigned long iflags; 10264 struct lpfc_scsi_buf *lpfc_cmd; 10265 10266 spin_lock_irqsave(&phba->hbalock, iflags); 10267 if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) { 10268 10269 /* 10270 * A time out has occurred for the iocb. If a time out 10271 * completion handler has been supplied, call it. Otherwise, 10272 * just free the iocbq. 10273 */ 10274 10275 spin_unlock_irqrestore(&phba->hbalock, iflags); 10276 cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl; 10277 cmdiocbq->wait_iocb_cmpl = NULL; 10278 if (cmdiocbq->iocb_cmpl) 10279 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL); 10280 else 10281 lpfc_sli_release_iocbq(phba, cmdiocbq); 10282 return; 10283 } 10284 10285 cmdiocbq->iocb_flag |= LPFC_IO_WAKE; 10286 if (cmdiocbq->context2 && rspiocbq) 10287 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, 10288 &rspiocbq->iocb, sizeof(IOCB_t)); 10289 10290 /* Set the exchange busy flag for task management commands */ 10291 if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) && 10292 !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { 10293 lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf, 10294 cur_iocbq); 10295 lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; 10296 } 10297 10298 pdone_q = cmdiocbq->context_un.wait_queue; 10299 if (pdone_q) 10300 wake_up(pdone_q); 10301 spin_unlock_irqrestore(&phba->hbalock, iflags); 10302 return; 10303 } 10304 10305 /** 10306 * lpfc_chk_iocb_flg - Test IOCB flag with lock held. 10307 * @phba: Pointer to HBA context object.. 10308 * @piocbq: Pointer to command iocb. 10309 * @flag: Flag to test. 10310 * 10311 * This routine grabs the hbalock and then test the iocb_flag to 10312 * see if the passed in flag is set. 10313 * Returns: 10314 * 1 if flag is set. 10315 * 0 if flag is not set. 10316 **/ 10317 static int 10318 lpfc_chk_iocb_flg(struct lpfc_hba *phba, 10319 struct lpfc_iocbq *piocbq, uint32_t flag) 10320 { 10321 unsigned long iflags; 10322 int ret; 10323 10324 spin_lock_irqsave(&phba->hbalock, iflags); 10325 ret = piocbq->iocb_flag & flag; 10326 spin_unlock_irqrestore(&phba->hbalock, iflags); 10327 return ret; 10328 10329 } 10330 10331 /** 10332 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands 10333 * @phba: Pointer to HBA context object.. 10334 * @pring: Pointer to sli ring. 10335 * @piocb: Pointer to command iocb. 10336 * @prspiocbq: Pointer to response iocb. 10337 * @timeout: Timeout in number of seconds. 10338 * 10339 * This function issues the iocb to firmware and waits for the 10340 * iocb to complete. The iocb_cmpl field of the shall be used 10341 * to handle iocbs which time out. If the field is NULL, the 10342 * function shall free the iocbq structure. If more clean up is 10343 * needed, the caller is expected to provide a completion function 10344 * that will provide the needed clean up. If the iocb command is 10345 * not completed within timeout seconds, the function will either 10346 * free the iocbq structure (if iocb_cmpl == NULL) or execute the 10347 * completion function set in the iocb_cmpl field and then return 10348 * a status of IOCB_TIMEDOUT. The caller should not free the iocb 10349 * resources if this function returns IOCB_TIMEDOUT. 10350 * The function waits for the iocb completion using an 10351 * non-interruptible wait. 10352 * This function will sleep while waiting for iocb completion. 10353 * So, this function should not be called from any context which 10354 * does not allow sleeping. Due to the same reason, this function 10355 * cannot be called with interrupt disabled. 10356 * This function assumes that the iocb completions occur while 10357 * this function sleep. So, this function cannot be called from 10358 * the thread which process iocb completion for this ring. 10359 * This function clears the iocb_flag of the iocb object before 10360 * issuing the iocb and the iocb completion handler sets this 10361 * flag and wakes this thread when the iocb completes. 10362 * The contents of the response iocb will be copied to prspiocbq 10363 * by the completion handler when the command completes. 10364 * This function returns IOCB_SUCCESS when success. 10365 * This function is called with no lock held. 10366 **/ 10367 int 10368 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, 10369 uint32_t ring_number, 10370 struct lpfc_iocbq *piocb, 10371 struct lpfc_iocbq *prspiocbq, 10372 uint32_t timeout) 10373 { 10374 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 10375 long timeleft, timeout_req = 0; 10376 int retval = IOCB_SUCCESS; 10377 uint32_t creg_val; 10378 struct lpfc_iocbq *iocb; 10379 int txq_cnt = 0; 10380 int txcmplq_cnt = 0; 10381 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 10382 unsigned long iflags; 10383 bool iocb_completed = true; 10384 10385 /* 10386 * If the caller has provided a response iocbq buffer, then context2 10387 * is NULL or its an error. 10388 */ 10389 if (prspiocbq) { 10390 if (piocb->context2) 10391 return IOCB_ERROR; 10392 piocb->context2 = prspiocbq; 10393 } 10394 10395 piocb->wait_iocb_cmpl = piocb->iocb_cmpl; 10396 piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait; 10397 piocb->context_un.wait_queue = &done_q; 10398 piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO); 10399 10400 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10401 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10402 return IOCB_ERROR; 10403 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING); 10404 writel(creg_val, phba->HCregaddr); 10405 readl(phba->HCregaddr); /* flush */ 10406 } 10407 10408 retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 10409 SLI_IOCB_RET_IOCB); 10410 if (retval == IOCB_SUCCESS) { 10411 timeout_req = msecs_to_jiffies(timeout * 1000); 10412 timeleft = wait_event_timeout(done_q, 10413 lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), 10414 timeout_req); 10415 spin_lock_irqsave(&phba->hbalock, iflags); 10416 if (!(piocb->iocb_flag & LPFC_IO_WAKE)) { 10417 10418 /* 10419 * IOCB timed out. Inform the wake iocb wait 10420 * completion function and set local status 10421 */ 10422 10423 iocb_completed = false; 10424 piocb->iocb_flag |= LPFC_IO_WAKE_TMO; 10425 } 10426 spin_unlock_irqrestore(&phba->hbalock, iflags); 10427 if (iocb_completed) { 10428 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10429 "0331 IOCB wake signaled\n"); 10430 /* Note: we are not indicating if the IOCB has a success 10431 * status or not - that's for the caller to check. 10432 * IOCB_SUCCESS means just that the command was sent and 10433 * completed. Not that it completed successfully. 10434 * */ 10435 } else if (timeleft == 0) { 10436 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10437 "0338 IOCB wait timeout error - no " 10438 "wake response Data x%x\n", timeout); 10439 retval = IOCB_TIMEDOUT; 10440 } else { 10441 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 10442 "0330 IOCB wake NOT set, " 10443 "Data x%x x%lx\n", 10444 timeout, (timeleft / jiffies)); 10445 retval = IOCB_TIMEDOUT; 10446 } 10447 } else if (retval == IOCB_BUSY) { 10448 if (phba->cfg_log_verbose & LOG_SLI) { 10449 list_for_each_entry(iocb, &pring->txq, list) { 10450 txq_cnt++; 10451 } 10452 list_for_each_entry(iocb, &pring->txcmplq, list) { 10453 txcmplq_cnt++; 10454 } 10455 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10456 "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n", 10457 phba->iocb_cnt, txq_cnt, txcmplq_cnt); 10458 } 10459 return retval; 10460 } else { 10461 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 10462 "0332 IOCB wait issue failed, Data x%x\n", 10463 retval); 10464 retval = IOCB_ERROR; 10465 } 10466 10467 if (phba->cfg_poll & DISABLE_FCP_RING_INT) { 10468 if (lpfc_readl(phba->HCregaddr, &creg_val)) 10469 return IOCB_ERROR; 10470 creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING); 10471 writel(creg_val, phba->HCregaddr); 10472 readl(phba->HCregaddr); /* flush */ 10473 } 10474 10475 if (prspiocbq) 10476 piocb->context2 = NULL; 10477 10478 piocb->context_un.wait_queue = NULL; 10479 piocb->iocb_cmpl = NULL; 10480 return retval; 10481 } 10482 10483 /** 10484 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox 10485 * @phba: Pointer to HBA context object. 10486 * @pmboxq: Pointer to driver mailbox object. 10487 * @timeout: Timeout in number of seconds. 10488 * 10489 * This function issues the mailbox to firmware and waits for the 10490 * mailbox command to complete. If the mailbox command is not 10491 * completed within timeout seconds, it returns MBX_TIMEOUT. 10492 * The function waits for the mailbox completion using an 10493 * interruptible wait. If the thread is woken up due to a 10494 * signal, MBX_TIMEOUT error is returned to the caller. Caller 10495 * should not free the mailbox resources, if this function returns 10496 * MBX_TIMEOUT. 10497 * This function will sleep while waiting for mailbox completion. 10498 * So, this function should not be called from any context which 10499 * does not allow sleeping. Due to the same reason, this function 10500 * cannot be called with interrupt disabled. 10501 * This function assumes that the mailbox completion occurs while 10502 * this function sleep. So, this function cannot be called from 10503 * the worker thread which processes mailbox completion. 10504 * This function is called in the context of HBA management 10505 * applications. 10506 * This function returns MBX_SUCCESS when successful. 10507 * This function is called with no lock held. 10508 **/ 10509 int 10510 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, 10511 uint32_t timeout) 10512 { 10513 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q); 10514 MAILBOX_t *mb = NULL; 10515 int retval; 10516 unsigned long flag; 10517 10518 /* The caller might set context1 for extended buffer */ 10519 if (pmboxq->context1) 10520 mb = (MAILBOX_t *)pmboxq->context1; 10521 10522 pmboxq->mbox_flag &= ~LPFC_MBX_WAKE; 10523 /* setup wake call as IOCB callback */ 10524 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait; 10525 /* setup context field to pass wait_queue pointer to wake function */ 10526 pmboxq->context1 = &done_q; 10527 10528 /* now issue the command */ 10529 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 10530 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 10531 wait_event_interruptible_timeout(done_q, 10532 pmboxq->mbox_flag & LPFC_MBX_WAKE, 10533 msecs_to_jiffies(timeout * 1000)); 10534 10535 spin_lock_irqsave(&phba->hbalock, flag); 10536 /* restore the possible extended buffer for free resource */ 10537 pmboxq->context1 = (uint8_t *)mb; 10538 /* 10539 * if LPFC_MBX_WAKE flag is set the mailbox is completed 10540 * else do not free the resources. 10541 */ 10542 if (pmboxq->mbox_flag & LPFC_MBX_WAKE) { 10543 retval = MBX_SUCCESS; 10544 } else { 10545 retval = MBX_TIMEOUT; 10546 pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 10547 } 10548 spin_unlock_irqrestore(&phba->hbalock, flag); 10549 } else { 10550 /* restore the possible extended buffer for free resource */ 10551 pmboxq->context1 = (uint8_t *)mb; 10552 } 10553 10554 return retval; 10555 } 10556 10557 /** 10558 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system 10559 * @phba: Pointer to HBA context. 10560 * 10561 * This function is called to shutdown the driver's mailbox sub-system. 10562 * It first marks the mailbox sub-system is in a block state to prevent 10563 * the asynchronous mailbox command from issued off the pending mailbox 10564 * command queue. If the mailbox command sub-system shutdown is due to 10565 * HBA error conditions such as EEH or ERATT, this routine shall invoke 10566 * the mailbox sub-system flush routine to forcefully bring down the 10567 * mailbox sub-system. Otherwise, if it is due to normal condition (such 10568 * as with offline or HBA function reset), this routine will wait for the 10569 * outstanding mailbox command to complete before invoking the mailbox 10570 * sub-system flush routine to gracefully bring down mailbox sub-system. 10571 **/ 10572 void 10573 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) 10574 { 10575 struct lpfc_sli *psli = &phba->sli; 10576 unsigned long timeout; 10577 10578 if (mbx_action == LPFC_MBX_NO_WAIT) { 10579 /* delay 100ms for port state */ 10580 msleep(100); 10581 lpfc_sli_mbox_sys_flush(phba); 10582 return; 10583 } 10584 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; 10585 10586 spin_lock_irq(&phba->hbalock); 10587 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 10588 10589 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 10590 /* Determine how long we might wait for the active mailbox 10591 * command to be gracefully completed by firmware. 10592 */ 10593 if (phba->sli.mbox_active) 10594 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 10595 phba->sli.mbox_active) * 10596 1000) + jiffies; 10597 spin_unlock_irq(&phba->hbalock); 10598 10599 while (phba->sli.mbox_active) { 10600 /* Check active mailbox complete status every 2ms */ 10601 msleep(2); 10602 if (time_after(jiffies, timeout)) 10603 /* Timeout, let the mailbox flush routine to 10604 * forcefully release active mailbox command 10605 */ 10606 break; 10607 } 10608 } else 10609 spin_unlock_irq(&phba->hbalock); 10610 10611 lpfc_sli_mbox_sys_flush(phba); 10612 } 10613 10614 /** 10615 * lpfc_sli_eratt_read - read sli-3 error attention events 10616 * @phba: Pointer to HBA context. 10617 * 10618 * This function is called to read the SLI3 device error attention registers 10619 * for possible error attention events. The caller must hold the hostlock 10620 * with spin_lock_irq(). 10621 * 10622 * This function returns 1 when there is Error Attention in the Host Attention 10623 * Register and returns 0 otherwise. 10624 **/ 10625 static int 10626 lpfc_sli_eratt_read(struct lpfc_hba *phba) 10627 { 10628 uint32_t ha_copy; 10629 10630 /* Read chip Host Attention (HA) register */ 10631 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10632 goto unplug_err; 10633 10634 if (ha_copy & HA_ERATT) { 10635 /* Read host status register to retrieve error event */ 10636 if (lpfc_sli_read_hs(phba)) 10637 goto unplug_err; 10638 10639 /* Check if there is a deferred error condition is active */ 10640 if ((HS_FFER1 & phba->work_hs) && 10641 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 10642 HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) { 10643 phba->hba_flag |= DEFER_ERATT; 10644 /* Clear all interrupt enable conditions */ 10645 writel(0, phba->HCregaddr); 10646 readl(phba->HCregaddr); 10647 } 10648 10649 /* Set the driver HA work bitmap */ 10650 phba->work_ha |= HA_ERATT; 10651 /* Indicate polling handles this ERATT */ 10652 phba->hba_flag |= HBA_ERATT_HANDLED; 10653 return 1; 10654 } 10655 return 0; 10656 10657 unplug_err: 10658 /* Set the driver HS work bitmap */ 10659 phba->work_hs |= UNPLUG_ERR; 10660 /* Set the driver HA work bitmap */ 10661 phba->work_ha |= HA_ERATT; 10662 /* Indicate polling handles this ERATT */ 10663 phba->hba_flag |= HBA_ERATT_HANDLED; 10664 return 1; 10665 } 10666 10667 /** 10668 * lpfc_sli4_eratt_read - read sli-4 error attention events 10669 * @phba: Pointer to HBA context. 10670 * 10671 * This function is called to read the SLI4 device error attention registers 10672 * for possible error attention events. The caller must hold the hostlock 10673 * with spin_lock_irq(). 10674 * 10675 * This function returns 1 when there is Error Attention in the Host Attention 10676 * Register and returns 0 otherwise. 10677 **/ 10678 static int 10679 lpfc_sli4_eratt_read(struct lpfc_hba *phba) 10680 { 10681 uint32_t uerr_sta_hi, uerr_sta_lo; 10682 uint32_t if_type, portsmphr; 10683 struct lpfc_register portstat_reg; 10684 10685 /* 10686 * For now, use the SLI4 device internal unrecoverable error 10687 * registers for error attention. This can be changed later. 10688 */ 10689 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10690 switch (if_type) { 10691 case LPFC_SLI_INTF_IF_TYPE_0: 10692 if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr, 10693 &uerr_sta_lo) || 10694 lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr, 10695 &uerr_sta_hi)) { 10696 phba->work_hs |= UNPLUG_ERR; 10697 phba->work_ha |= HA_ERATT; 10698 phba->hba_flag |= HBA_ERATT_HANDLED; 10699 return 1; 10700 } 10701 if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) || 10702 (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) { 10703 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10704 "1423 HBA Unrecoverable error: " 10705 "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " 10706 "ue_mask_lo_reg=0x%x, " 10707 "ue_mask_hi_reg=0x%x\n", 10708 uerr_sta_lo, uerr_sta_hi, 10709 phba->sli4_hba.ue_mask_lo, 10710 phba->sli4_hba.ue_mask_hi); 10711 phba->work_status[0] = uerr_sta_lo; 10712 phba->work_status[1] = uerr_sta_hi; 10713 phba->work_ha |= HA_ERATT; 10714 phba->hba_flag |= HBA_ERATT_HANDLED; 10715 return 1; 10716 } 10717 break; 10718 case LPFC_SLI_INTF_IF_TYPE_2: 10719 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 10720 &portstat_reg.word0) || 10721 lpfc_readl(phba->sli4_hba.PSMPHRregaddr, 10722 &portsmphr)){ 10723 phba->work_hs |= UNPLUG_ERR; 10724 phba->work_ha |= HA_ERATT; 10725 phba->hba_flag |= HBA_ERATT_HANDLED; 10726 return 1; 10727 } 10728 if (bf_get(lpfc_sliport_status_err, &portstat_reg)) { 10729 phba->work_status[0] = 10730 readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 10731 phba->work_status[1] = 10732 readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 10733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10734 "2885 Port Status Event: " 10735 "port status reg 0x%x, " 10736 "port smphr reg 0x%x, " 10737 "error 1=0x%x, error 2=0x%x\n", 10738 portstat_reg.word0, 10739 portsmphr, 10740 phba->work_status[0], 10741 phba->work_status[1]); 10742 phba->work_ha |= HA_ERATT; 10743 phba->hba_flag |= HBA_ERATT_HANDLED; 10744 return 1; 10745 } 10746 break; 10747 case LPFC_SLI_INTF_IF_TYPE_1: 10748 default: 10749 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10750 "2886 HBA Error Attention on unsupported " 10751 "if type %d.", if_type); 10752 return 1; 10753 } 10754 10755 return 0; 10756 } 10757 10758 /** 10759 * lpfc_sli_check_eratt - check error attention events 10760 * @phba: Pointer to HBA context. 10761 * 10762 * This function is called from timer soft interrupt context to check HBA's 10763 * error attention register bit for error attention events. 10764 * 10765 * This function returns 1 when there is Error Attention in the Host Attention 10766 * Register and returns 0 otherwise. 10767 **/ 10768 int 10769 lpfc_sli_check_eratt(struct lpfc_hba *phba) 10770 { 10771 uint32_t ha_copy; 10772 10773 /* If somebody is waiting to handle an eratt, don't process it 10774 * here. The brdkill function will do this. 10775 */ 10776 if (phba->link_flag & LS_IGNORE_ERATT) 10777 return 0; 10778 10779 /* Check if interrupt handler handles this ERATT */ 10780 spin_lock_irq(&phba->hbalock); 10781 if (phba->hba_flag & HBA_ERATT_HANDLED) { 10782 /* Interrupt handler has handled ERATT */ 10783 spin_unlock_irq(&phba->hbalock); 10784 return 0; 10785 } 10786 10787 /* 10788 * If there is deferred error attention, do not check for error 10789 * attention 10790 */ 10791 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10792 spin_unlock_irq(&phba->hbalock); 10793 return 0; 10794 } 10795 10796 /* If PCI channel is offline, don't process it */ 10797 if (unlikely(pci_channel_offline(phba->pcidev))) { 10798 spin_unlock_irq(&phba->hbalock); 10799 return 0; 10800 } 10801 10802 switch (phba->sli_rev) { 10803 case LPFC_SLI_REV2: 10804 case LPFC_SLI_REV3: 10805 /* Read chip Host Attention (HA) register */ 10806 ha_copy = lpfc_sli_eratt_read(phba); 10807 break; 10808 case LPFC_SLI_REV4: 10809 /* Read device Uncoverable Error (UERR) registers */ 10810 ha_copy = lpfc_sli4_eratt_read(phba); 10811 break; 10812 default: 10813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 10814 "0299 Invalid SLI revision (%d)\n", 10815 phba->sli_rev); 10816 ha_copy = 0; 10817 break; 10818 } 10819 spin_unlock_irq(&phba->hbalock); 10820 10821 return ha_copy; 10822 } 10823 10824 /** 10825 * lpfc_intr_state_check - Check device state for interrupt handling 10826 * @phba: Pointer to HBA context. 10827 * 10828 * This inline routine checks whether a device or its PCI slot is in a state 10829 * that the interrupt should be handled. 10830 * 10831 * This function returns 0 if the device or the PCI slot is in a state that 10832 * interrupt should be handled, otherwise -EIO. 10833 */ 10834 static inline int 10835 lpfc_intr_state_check(struct lpfc_hba *phba) 10836 { 10837 /* If the pci channel is offline, ignore all the interrupts */ 10838 if (unlikely(pci_channel_offline(phba->pcidev))) 10839 return -EIO; 10840 10841 /* Update device level interrupt statistics */ 10842 phba->sli.slistat.sli_intr++; 10843 10844 /* Ignore all interrupts during initialization. */ 10845 if (unlikely(phba->link_state < LPFC_LINK_DOWN)) 10846 return -EIO; 10847 10848 return 0; 10849 } 10850 10851 /** 10852 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device 10853 * @irq: Interrupt number. 10854 * @dev_id: The device context pointer. 10855 * 10856 * This function is directly called from the PCI layer as an interrupt 10857 * service routine when device with SLI-3 interface spec is enabled with 10858 * MSI-X multi-message interrupt mode and there are slow-path events in 10859 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ 10860 * interrupt mode, this function is called as part of the device-level 10861 * interrupt handler. When the PCI slot is in error recovery or the HBA 10862 * is undergoing initialization, the interrupt handler will not process 10863 * the interrupt. The link attention and ELS ring attention events are 10864 * handled by the worker thread. The interrupt handler signals the worker 10865 * thread and returns for these events. This function is called without 10866 * any lock held. It gets the hbalock to access and update SLI data 10867 * structures. 10868 * 10869 * This function returns IRQ_HANDLED when interrupt is handled else it 10870 * returns IRQ_NONE. 10871 **/ 10872 irqreturn_t 10873 lpfc_sli_sp_intr_handler(int irq, void *dev_id) 10874 { 10875 struct lpfc_hba *phba; 10876 uint32_t ha_copy, hc_copy; 10877 uint32_t work_ha_copy; 10878 unsigned long status; 10879 unsigned long iflag; 10880 uint32_t control; 10881 10882 MAILBOX_t *mbox, *pmbox; 10883 struct lpfc_vport *vport; 10884 struct lpfc_nodelist *ndlp; 10885 struct lpfc_dmabuf *mp; 10886 LPFC_MBOXQ_t *pmb; 10887 int rc; 10888 10889 /* 10890 * Get the driver's phba structure from the dev_id and 10891 * assume the HBA is not interrupting. 10892 */ 10893 phba = (struct lpfc_hba *)dev_id; 10894 10895 if (unlikely(!phba)) 10896 return IRQ_NONE; 10897 10898 /* 10899 * Stuff needs to be attented to when this function is invoked as an 10900 * individual interrupt handler in MSI-X multi-message interrupt mode 10901 */ 10902 if (phba->intr_type == MSIX) { 10903 /* Check device state for handling interrupt */ 10904 if (lpfc_intr_state_check(phba)) 10905 return IRQ_NONE; 10906 /* Need to read HA REG for slow-path events */ 10907 spin_lock_irqsave(&phba->hbalock, iflag); 10908 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 10909 goto unplug_error; 10910 /* If somebody is waiting to handle an eratt don't process it 10911 * here. The brdkill function will do this. 10912 */ 10913 if (phba->link_flag & LS_IGNORE_ERATT) 10914 ha_copy &= ~HA_ERATT; 10915 /* Check the need for handling ERATT in interrupt handler */ 10916 if (ha_copy & HA_ERATT) { 10917 if (phba->hba_flag & HBA_ERATT_HANDLED) 10918 /* ERATT polling has handled ERATT */ 10919 ha_copy &= ~HA_ERATT; 10920 else 10921 /* Indicate interrupt handler handles ERATT */ 10922 phba->hba_flag |= HBA_ERATT_HANDLED; 10923 } 10924 10925 /* 10926 * If there is deferred error attention, do not check for any 10927 * interrupt. 10928 */ 10929 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 10930 spin_unlock_irqrestore(&phba->hbalock, iflag); 10931 return IRQ_NONE; 10932 } 10933 10934 /* Clear up only attention source related to slow-path */ 10935 if (lpfc_readl(phba->HCregaddr, &hc_copy)) 10936 goto unplug_error; 10937 10938 writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA | 10939 HC_LAINT_ENA | HC_ERINT_ENA), 10940 phba->HCregaddr); 10941 writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)), 10942 phba->HAregaddr); 10943 writel(hc_copy, phba->HCregaddr); 10944 readl(phba->HAregaddr); /* flush */ 10945 spin_unlock_irqrestore(&phba->hbalock, iflag); 10946 } else 10947 ha_copy = phba->ha_copy; 10948 10949 work_ha_copy = ha_copy & phba->work_ha_mask; 10950 10951 if (work_ha_copy) { 10952 if (work_ha_copy & HA_LATT) { 10953 if (phba->sli.sli_flag & LPFC_PROCESS_LA) { 10954 /* 10955 * Turn off Link Attention interrupts 10956 * until CLEAR_LA done 10957 */ 10958 spin_lock_irqsave(&phba->hbalock, iflag); 10959 phba->sli.sli_flag &= ~LPFC_PROCESS_LA; 10960 if (lpfc_readl(phba->HCregaddr, &control)) 10961 goto unplug_error; 10962 control &= ~HC_LAINT_ENA; 10963 writel(control, phba->HCregaddr); 10964 readl(phba->HCregaddr); /* flush */ 10965 spin_unlock_irqrestore(&phba->hbalock, iflag); 10966 } 10967 else 10968 work_ha_copy &= ~HA_LATT; 10969 } 10970 10971 if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) { 10972 /* 10973 * Turn off Slow Rings interrupts, LPFC_ELS_RING is 10974 * the only slow ring. 10975 */ 10976 status = (work_ha_copy & 10977 (HA_RXMASK << (4*LPFC_ELS_RING))); 10978 status >>= (4*LPFC_ELS_RING); 10979 if (status & HA_RXMASK) { 10980 spin_lock_irqsave(&phba->hbalock, iflag); 10981 if (lpfc_readl(phba->HCregaddr, &control)) 10982 goto unplug_error; 10983 10984 lpfc_debugfs_slow_ring_trc(phba, 10985 "ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x", 10986 control, status, 10987 (uint32_t)phba->sli.slistat.sli_intr); 10988 10989 if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) { 10990 lpfc_debugfs_slow_ring_trc(phba, 10991 "ISR Disable ring:" 10992 "pwork:x%x hawork:x%x wait:x%x", 10993 phba->work_ha, work_ha_copy, 10994 (uint32_t)((unsigned long) 10995 &phba->work_waitq)); 10996 10997 control &= 10998 ~(HC_R0INT_ENA << LPFC_ELS_RING); 10999 writel(control, phba->HCregaddr); 11000 readl(phba->HCregaddr); /* flush */ 11001 } 11002 else { 11003 lpfc_debugfs_slow_ring_trc(phba, 11004 "ISR slow ring: pwork:" 11005 "x%x hawork:x%x wait:x%x", 11006 phba->work_ha, work_ha_copy, 11007 (uint32_t)((unsigned long) 11008 &phba->work_waitq)); 11009 } 11010 spin_unlock_irqrestore(&phba->hbalock, iflag); 11011 } 11012 } 11013 spin_lock_irqsave(&phba->hbalock, iflag); 11014 if (work_ha_copy & HA_ERATT) { 11015 if (lpfc_sli_read_hs(phba)) 11016 goto unplug_error; 11017 /* 11018 * Check if there is a deferred error condition 11019 * is active 11020 */ 11021 if ((HS_FFER1 & phba->work_hs) && 11022 ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | 11023 HS_FFER6 | HS_FFER7 | HS_FFER8) & 11024 phba->work_hs)) { 11025 phba->hba_flag |= DEFER_ERATT; 11026 /* Clear all interrupt enable conditions */ 11027 writel(0, phba->HCregaddr); 11028 readl(phba->HCregaddr); 11029 } 11030 } 11031 11032 if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { 11033 pmb = phba->sli.mbox_active; 11034 pmbox = &pmb->u.mb; 11035 mbox = phba->mbox; 11036 vport = pmb->vport; 11037 11038 /* First check out the status word */ 11039 lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t)); 11040 if (pmbox->mbxOwner != OWN_HOST) { 11041 spin_unlock_irqrestore(&phba->hbalock, iflag); 11042 /* 11043 * Stray Mailbox Interrupt, mbxCommand <cmd> 11044 * mbxStatus <status> 11045 */ 11046 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11047 LOG_SLI, 11048 "(%d):0304 Stray Mailbox " 11049 "Interrupt mbxCommand x%x " 11050 "mbxStatus x%x\n", 11051 (vport ? vport->vpi : 0), 11052 pmbox->mbxCommand, 11053 pmbox->mbxStatus); 11054 /* clear mailbox attention bit */ 11055 work_ha_copy &= ~HA_MBATT; 11056 } else { 11057 phba->sli.mbox_active = NULL; 11058 spin_unlock_irqrestore(&phba->hbalock, iflag); 11059 phba->last_completion_time = jiffies; 11060 del_timer(&phba->sli.mbox_tmo); 11061 if (pmb->mbox_cmpl) { 11062 lpfc_sli_pcimem_bcopy(mbox, pmbox, 11063 MAILBOX_CMD_SIZE); 11064 if (pmb->out_ext_byte_len && 11065 pmb->context2) 11066 lpfc_sli_pcimem_bcopy( 11067 phba->mbox_ext, 11068 pmb->context2, 11069 pmb->out_ext_byte_len); 11070 } 11071 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 11072 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 11073 11074 lpfc_debugfs_disc_trc(vport, 11075 LPFC_DISC_TRC_MBOX_VPORT, 11076 "MBOX dflt rpi: : " 11077 "status:x%x rpi:x%x", 11078 (uint32_t)pmbox->mbxStatus, 11079 pmbox->un.varWords[0], 0); 11080 11081 if (!pmbox->mbxStatus) { 11082 mp = (struct lpfc_dmabuf *) 11083 (pmb->context1); 11084 ndlp = (struct lpfc_nodelist *) 11085 pmb->context2; 11086 11087 /* Reg_LOGIN of dflt RPI was 11088 * successful. new lets get 11089 * rid of the RPI using the 11090 * same mbox buffer. 11091 */ 11092 lpfc_unreg_login(phba, 11093 vport->vpi, 11094 pmbox->un.varWords[0], 11095 pmb); 11096 pmb->mbox_cmpl = 11097 lpfc_mbx_cmpl_dflt_rpi; 11098 pmb->context1 = mp; 11099 pmb->context2 = ndlp; 11100 pmb->vport = vport; 11101 rc = lpfc_sli_issue_mbox(phba, 11102 pmb, 11103 MBX_NOWAIT); 11104 if (rc != MBX_BUSY) 11105 lpfc_printf_log(phba, 11106 KERN_ERR, 11107 LOG_MBOX | LOG_SLI, 11108 "0350 rc should have" 11109 "been MBX_BUSY\n"); 11110 if (rc != MBX_NOT_FINISHED) 11111 goto send_current_mbox; 11112 } 11113 } 11114 spin_lock_irqsave( 11115 &phba->pport->work_port_lock, 11116 iflag); 11117 phba->pport->work_port_events &= 11118 ~WORKER_MBOX_TMO; 11119 spin_unlock_irqrestore( 11120 &phba->pport->work_port_lock, 11121 iflag); 11122 lpfc_mbox_cmpl_put(phba, pmb); 11123 } 11124 } else 11125 spin_unlock_irqrestore(&phba->hbalock, iflag); 11126 11127 if ((work_ha_copy & HA_MBATT) && 11128 (phba->sli.mbox_active == NULL)) { 11129 send_current_mbox: 11130 /* Process next mailbox command if there is one */ 11131 do { 11132 rc = lpfc_sli_issue_mbox(phba, NULL, 11133 MBX_NOWAIT); 11134 } while (rc == MBX_NOT_FINISHED); 11135 if (rc != MBX_SUCCESS) 11136 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11137 LOG_SLI, "0349 rc should be " 11138 "MBX_SUCCESS\n"); 11139 } 11140 11141 spin_lock_irqsave(&phba->hbalock, iflag); 11142 phba->work_ha |= work_ha_copy; 11143 spin_unlock_irqrestore(&phba->hbalock, iflag); 11144 lpfc_worker_wake_up(phba); 11145 } 11146 return IRQ_HANDLED; 11147 unplug_error: 11148 spin_unlock_irqrestore(&phba->hbalock, iflag); 11149 return IRQ_HANDLED; 11150 11151 } /* lpfc_sli_sp_intr_handler */ 11152 11153 /** 11154 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. 11155 * @irq: Interrupt number. 11156 * @dev_id: The device context pointer. 11157 * 11158 * This function is directly called from the PCI layer as an interrupt 11159 * service routine when device with SLI-3 interface spec is enabled with 11160 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 11161 * ring event in the HBA. However, when the device is enabled with either 11162 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 11163 * device-level interrupt handler. When the PCI slot is in error recovery 11164 * or the HBA is undergoing initialization, the interrupt handler will not 11165 * process the interrupt. The SCSI FCP fast-path ring event are handled in 11166 * the intrrupt context. This function is called without any lock held. 11167 * It gets the hbalock to access and update SLI data structures. 11168 * 11169 * This function returns IRQ_HANDLED when interrupt is handled else it 11170 * returns IRQ_NONE. 11171 **/ 11172 irqreturn_t 11173 lpfc_sli_fp_intr_handler(int irq, void *dev_id) 11174 { 11175 struct lpfc_hba *phba; 11176 uint32_t ha_copy; 11177 unsigned long status; 11178 unsigned long iflag; 11179 11180 /* Get the driver's phba structure from the dev_id and 11181 * assume the HBA is not interrupting. 11182 */ 11183 phba = (struct lpfc_hba *) dev_id; 11184 11185 if (unlikely(!phba)) 11186 return IRQ_NONE; 11187 11188 /* 11189 * Stuff needs to be attented to when this function is invoked as an 11190 * individual interrupt handler in MSI-X multi-message interrupt mode 11191 */ 11192 if (phba->intr_type == MSIX) { 11193 /* Check device state for handling interrupt */ 11194 if (lpfc_intr_state_check(phba)) 11195 return IRQ_NONE; 11196 /* Need to read HA REG for FCP ring and other ring events */ 11197 if (lpfc_readl(phba->HAregaddr, &ha_copy)) 11198 return IRQ_HANDLED; 11199 /* Clear up only attention source related to fast-path */ 11200 spin_lock_irqsave(&phba->hbalock, iflag); 11201 /* 11202 * If there is deferred error attention, do not check for 11203 * any interrupt. 11204 */ 11205 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11206 spin_unlock_irqrestore(&phba->hbalock, iflag); 11207 return IRQ_NONE; 11208 } 11209 writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), 11210 phba->HAregaddr); 11211 readl(phba->HAregaddr); /* flush */ 11212 spin_unlock_irqrestore(&phba->hbalock, iflag); 11213 } else 11214 ha_copy = phba->ha_copy; 11215 11216 /* 11217 * Process all events on FCP ring. Take the optimized path for FCP IO. 11218 */ 11219 ha_copy &= ~(phba->work_ha_mask); 11220 11221 status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 11222 status >>= (4*LPFC_FCP_RING); 11223 if (status & HA_RXMASK) 11224 lpfc_sli_handle_fast_ring_event(phba, 11225 &phba->sli.ring[LPFC_FCP_RING], 11226 status); 11227 11228 if (phba->cfg_multi_ring_support == 2) { 11229 /* 11230 * Process all events on extra ring. Take the optimized path 11231 * for extra ring IO. 11232 */ 11233 status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 11234 status >>= (4*LPFC_EXTRA_RING); 11235 if (status & HA_RXMASK) { 11236 lpfc_sli_handle_fast_ring_event(phba, 11237 &phba->sli.ring[LPFC_EXTRA_RING], 11238 status); 11239 } 11240 } 11241 return IRQ_HANDLED; 11242 } /* lpfc_sli_fp_intr_handler */ 11243 11244 /** 11245 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device 11246 * @irq: Interrupt number. 11247 * @dev_id: The device context pointer. 11248 * 11249 * This function is the HBA device-level interrupt handler to device with 11250 * SLI-3 interface spec, called from the PCI layer when either MSI or 11251 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which 11252 * requires driver attention. This function invokes the slow-path interrupt 11253 * attention handling function and fast-path interrupt attention handling 11254 * function in turn to process the relevant HBA attention events. This 11255 * function is called without any lock held. It gets the hbalock to access 11256 * and update SLI data structures. 11257 * 11258 * This function returns IRQ_HANDLED when interrupt is handled, else it 11259 * returns IRQ_NONE. 11260 **/ 11261 irqreturn_t 11262 lpfc_sli_intr_handler(int irq, void *dev_id) 11263 { 11264 struct lpfc_hba *phba; 11265 irqreturn_t sp_irq_rc, fp_irq_rc; 11266 unsigned long status1, status2; 11267 uint32_t hc_copy; 11268 11269 /* 11270 * Get the driver's phba structure from the dev_id and 11271 * assume the HBA is not interrupting. 11272 */ 11273 phba = (struct lpfc_hba *) dev_id; 11274 11275 if (unlikely(!phba)) 11276 return IRQ_NONE; 11277 11278 /* Check device state for handling interrupt */ 11279 if (lpfc_intr_state_check(phba)) 11280 return IRQ_NONE; 11281 11282 spin_lock(&phba->hbalock); 11283 if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) { 11284 spin_unlock(&phba->hbalock); 11285 return IRQ_HANDLED; 11286 } 11287 11288 if (unlikely(!phba->ha_copy)) { 11289 spin_unlock(&phba->hbalock); 11290 return IRQ_NONE; 11291 } else if (phba->ha_copy & HA_ERATT) { 11292 if (phba->hba_flag & HBA_ERATT_HANDLED) 11293 /* ERATT polling has handled ERATT */ 11294 phba->ha_copy &= ~HA_ERATT; 11295 else 11296 /* Indicate interrupt handler handles ERATT */ 11297 phba->hba_flag |= HBA_ERATT_HANDLED; 11298 } 11299 11300 /* 11301 * If there is deferred error attention, do not check for any interrupt. 11302 */ 11303 if (unlikely(phba->hba_flag & DEFER_ERATT)) { 11304 spin_unlock(&phba->hbalock); 11305 return IRQ_NONE; 11306 } 11307 11308 /* Clear attention sources except link and error attentions */ 11309 if (lpfc_readl(phba->HCregaddr, &hc_copy)) { 11310 spin_unlock(&phba->hbalock); 11311 return IRQ_HANDLED; 11312 } 11313 writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA 11314 | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA), 11315 phba->HCregaddr); 11316 writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr); 11317 writel(hc_copy, phba->HCregaddr); 11318 readl(phba->HAregaddr); /* flush */ 11319 spin_unlock(&phba->hbalock); 11320 11321 /* 11322 * Invokes slow-path host attention interrupt handling as appropriate. 11323 */ 11324 11325 /* status of events with mailbox and link attention */ 11326 status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT); 11327 11328 /* status of events with ELS ring */ 11329 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 11330 status2 >>= (4*LPFC_ELS_RING); 11331 11332 if (status1 || (status2 & HA_RXMASK)) 11333 sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); 11334 else 11335 sp_irq_rc = IRQ_NONE; 11336 11337 /* 11338 * Invoke fast-path host attention interrupt handling as appropriate. 11339 */ 11340 11341 /* status of events with FCP ring */ 11342 status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING))); 11343 status1 >>= (4*LPFC_FCP_RING); 11344 11345 /* status of events with extra ring */ 11346 if (phba->cfg_multi_ring_support == 2) { 11347 status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING))); 11348 status2 >>= (4*LPFC_EXTRA_RING); 11349 } else 11350 status2 = 0; 11351 11352 if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) 11353 fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); 11354 else 11355 fp_irq_rc = IRQ_NONE; 11356 11357 /* Return device-level interrupt handling status */ 11358 return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; 11359 } /* lpfc_sli_intr_handler */ 11360 11361 /** 11362 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event 11363 * @phba: pointer to lpfc hba data structure. 11364 * 11365 * This routine is invoked by the worker thread to process all the pending 11366 * SLI4 FCP abort XRI events. 11367 **/ 11368 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) 11369 { 11370 struct lpfc_cq_event *cq_event; 11371 11372 /* First, declare the fcp xri abort event has been handled */ 11373 spin_lock_irq(&phba->hbalock); 11374 phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; 11375 spin_unlock_irq(&phba->hbalock); 11376 /* Now, handle all the fcp xri abort events */ 11377 while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { 11378 /* Get the first event from the head of the event queue */ 11379 spin_lock_irq(&phba->hbalock); 11380 list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, 11381 cq_event, struct lpfc_cq_event, list); 11382 spin_unlock_irq(&phba->hbalock); 11383 /* Notify aborted XRI for FCP work queue */ 11384 lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 11385 /* Free the event processed back to the free pool */ 11386 lpfc_sli4_cq_event_release(phba, cq_event); 11387 } 11388 } 11389 11390 /** 11391 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event 11392 * @phba: pointer to lpfc hba data structure. 11393 * 11394 * This routine is invoked by the worker thread to process all the pending 11395 * SLI4 els abort xri events. 11396 **/ 11397 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) 11398 { 11399 struct lpfc_cq_event *cq_event; 11400 11401 /* First, declare the els xri abort event has been handled */ 11402 spin_lock_irq(&phba->hbalock); 11403 phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; 11404 spin_unlock_irq(&phba->hbalock); 11405 /* Now, handle all the els xri abort events */ 11406 while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { 11407 /* Get the first event from the head of the event queue */ 11408 spin_lock_irq(&phba->hbalock); 11409 list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, 11410 cq_event, struct lpfc_cq_event, list); 11411 spin_unlock_irq(&phba->hbalock); 11412 /* Notify aborted XRI for ELS work queue */ 11413 lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); 11414 /* Free the event processed back to the free pool */ 11415 lpfc_sli4_cq_event_release(phba, cq_event); 11416 } 11417 } 11418 11419 /** 11420 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn 11421 * @phba: pointer to lpfc hba data structure 11422 * @pIocbIn: pointer to the rspiocbq 11423 * @pIocbOut: pointer to the cmdiocbq 11424 * @wcqe: pointer to the complete wcqe 11425 * 11426 * This routine transfers the fields of a command iocbq to a response iocbq 11427 * by copying all the IOCB fields from command iocbq and transferring the 11428 * completion status information from the complete wcqe. 11429 **/ 11430 static void 11431 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba, 11432 struct lpfc_iocbq *pIocbIn, 11433 struct lpfc_iocbq *pIocbOut, 11434 struct lpfc_wcqe_complete *wcqe) 11435 { 11436 int numBdes, i; 11437 unsigned long iflags; 11438 uint32_t status, max_response; 11439 struct lpfc_dmabuf *dmabuf; 11440 struct ulp_bde64 *bpl, bde; 11441 size_t offset = offsetof(struct lpfc_iocbq, iocb); 11442 11443 memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, 11444 sizeof(struct lpfc_iocbq) - offset); 11445 /* Map WCQE parameters into irspiocb parameters */ 11446 status = bf_get(lpfc_wcqe_c_status, wcqe); 11447 pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK); 11448 if (pIocbOut->iocb_flag & LPFC_IO_FCP) 11449 if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) 11450 pIocbIn->iocb.un.fcpi.fcpi_parm = 11451 pIocbOut->iocb.un.fcpi.fcpi_parm - 11452 wcqe->total_data_placed; 11453 else 11454 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11455 else { 11456 pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; 11457 switch (pIocbOut->iocb.ulpCommand) { 11458 case CMD_ELS_REQUEST64_CR: 11459 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11460 bpl = (struct ulp_bde64 *)dmabuf->virt; 11461 bde.tus.w = le32_to_cpu(bpl[1].tus.w); 11462 max_response = bde.tus.f.bdeSize; 11463 break; 11464 case CMD_GEN_REQUEST64_CR: 11465 max_response = 0; 11466 if (!pIocbOut->context3) 11467 break; 11468 numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ 11469 sizeof(struct ulp_bde64); 11470 dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; 11471 bpl = (struct ulp_bde64 *)dmabuf->virt; 11472 for (i = 0; i < numBdes; i++) { 11473 bde.tus.w = le32_to_cpu(bpl[i].tus.w); 11474 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) 11475 max_response += bde.tus.f.bdeSize; 11476 } 11477 break; 11478 default: 11479 max_response = wcqe->total_data_placed; 11480 break; 11481 } 11482 if (max_response < wcqe->total_data_placed) 11483 pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; 11484 else 11485 pIocbIn->iocb.un.genreq64.bdl.bdeSize = 11486 wcqe->total_data_placed; 11487 } 11488 11489 /* Convert BG errors for completion status */ 11490 if (status == CQE_STATUS_DI_ERROR) { 11491 pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT; 11492 11493 if (bf_get(lpfc_wcqe_c_bg_edir, wcqe)) 11494 pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED; 11495 else 11496 pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED; 11497 11498 pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0; 11499 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */ 11500 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11501 BGS_GUARD_ERR_MASK; 11502 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */ 11503 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11504 BGS_APPTAG_ERR_MASK; 11505 if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */ 11506 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11507 BGS_REFTAG_ERR_MASK; 11508 11509 /* Check to see if there was any good data before the error */ 11510 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) { 11511 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11512 BGS_HI_WATER_MARK_PRESENT_MASK; 11513 pIocbIn->iocb.unsli3.sli3_bg.bghm = 11514 wcqe->total_data_placed; 11515 } 11516 11517 /* 11518 * Set ALL the error bits to indicate we don't know what 11519 * type of error it is. 11520 */ 11521 if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat) 11522 pIocbIn->iocb.unsli3.sli3_bg.bgstat |= 11523 (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK | 11524 BGS_GUARD_ERR_MASK); 11525 } 11526 11527 /* Pick up HBA exchange busy condition */ 11528 if (bf_get(lpfc_wcqe_c_xb, wcqe)) { 11529 spin_lock_irqsave(&phba->hbalock, iflags); 11530 pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY; 11531 spin_unlock_irqrestore(&phba->hbalock, iflags); 11532 } 11533 } 11534 11535 /** 11536 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe 11537 * @phba: Pointer to HBA context object. 11538 * @wcqe: Pointer to work-queue completion queue entry. 11539 * 11540 * This routine handles an ELS work-queue completion event and construct 11541 * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common 11542 * discovery engine to handle. 11543 * 11544 * Return: Pointer to the receive IOCBQ, NULL otherwise. 11545 **/ 11546 static struct lpfc_iocbq * 11547 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba, 11548 struct lpfc_iocbq *irspiocbq) 11549 { 11550 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 11551 struct lpfc_iocbq *cmdiocbq; 11552 struct lpfc_wcqe_complete *wcqe; 11553 unsigned long iflags; 11554 11555 wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl; 11556 spin_lock_irqsave(&pring->ring_lock, iflags); 11557 pring->stats.iocb_event++; 11558 /* Look up the ELS command IOCB and create pseudo response IOCB */ 11559 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 11560 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11561 spin_unlock_irqrestore(&pring->ring_lock, iflags); 11562 11563 if (unlikely(!cmdiocbq)) { 11564 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11565 "0386 ELS complete with no corresponding " 11566 "cmdiocb: iotag (%d)\n", 11567 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 11568 lpfc_sli_release_iocbq(phba, irspiocbq); 11569 return NULL; 11570 } 11571 11572 /* Fake the irspiocbq and copy necessary response information */ 11573 lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe); 11574 11575 return irspiocbq; 11576 } 11577 11578 /** 11579 * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event 11580 * @phba: Pointer to HBA context object. 11581 * @cqe: Pointer to mailbox completion queue entry. 11582 * 11583 * This routine process a mailbox completion queue entry with asynchrous 11584 * event. 11585 * 11586 * Return: true if work posted to worker thread, otherwise false. 11587 **/ 11588 static bool 11589 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11590 { 11591 struct lpfc_cq_event *cq_event; 11592 unsigned long iflags; 11593 11594 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 11595 "0392 Async Event: word0:x%x, word1:x%x, " 11596 "word2:x%x, word3:x%x\n", mcqe->word0, 11597 mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); 11598 11599 /* Allocate a new internal CQ_EVENT entry */ 11600 cq_event = lpfc_sli4_cq_event_alloc(phba); 11601 if (!cq_event) { 11602 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11603 "0394 Failed to allocate CQ_EVENT entry\n"); 11604 return false; 11605 } 11606 11607 /* Move the CQE into an asynchronous event entry */ 11608 memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); 11609 spin_lock_irqsave(&phba->hbalock, iflags); 11610 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); 11611 /* Set the async event flag */ 11612 phba->hba_flag |= ASYNC_EVENT; 11613 spin_unlock_irqrestore(&phba->hbalock, iflags); 11614 11615 return true; 11616 } 11617 11618 /** 11619 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event 11620 * @phba: Pointer to HBA context object. 11621 * @cqe: Pointer to mailbox completion queue entry. 11622 * 11623 * This routine process a mailbox completion queue entry with mailbox 11624 * completion event. 11625 * 11626 * Return: true if work posted to worker thread, otherwise false. 11627 **/ 11628 static bool 11629 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) 11630 { 11631 uint32_t mcqe_status; 11632 MAILBOX_t *mbox, *pmbox; 11633 struct lpfc_mqe *mqe; 11634 struct lpfc_vport *vport; 11635 struct lpfc_nodelist *ndlp; 11636 struct lpfc_dmabuf *mp; 11637 unsigned long iflags; 11638 LPFC_MBOXQ_t *pmb; 11639 bool workposted = false; 11640 int rc; 11641 11642 /* If not a mailbox complete MCQE, out by checking mailbox consume */ 11643 if (!bf_get(lpfc_trailer_completed, mcqe)) 11644 goto out_no_mqe_complete; 11645 11646 /* Get the reference to the active mbox command */ 11647 spin_lock_irqsave(&phba->hbalock, iflags); 11648 pmb = phba->sli.mbox_active; 11649 if (unlikely(!pmb)) { 11650 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 11651 "1832 No pending MBOX command to handle\n"); 11652 spin_unlock_irqrestore(&phba->hbalock, iflags); 11653 goto out_no_mqe_complete; 11654 } 11655 spin_unlock_irqrestore(&phba->hbalock, iflags); 11656 mqe = &pmb->u.mqe; 11657 pmbox = (MAILBOX_t *)&pmb->u.mqe; 11658 mbox = phba->mbox; 11659 vport = pmb->vport; 11660 11661 /* Reset heartbeat timer */ 11662 phba->last_completion_time = jiffies; 11663 del_timer(&phba->sli.mbox_tmo); 11664 11665 /* Move mbox data to caller's mailbox region, do endian swapping */ 11666 if (pmb->mbox_cmpl && mbox) 11667 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 11668 11669 /* 11670 * For mcqe errors, conditionally move a modified error code to 11671 * the mbox so that the error will not be missed. 11672 */ 11673 mcqe_status = bf_get(lpfc_mcqe_status, mcqe); 11674 if (mcqe_status != MB_CQE_STATUS_SUCCESS) { 11675 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) 11676 bf_set(lpfc_mqe_status, mqe, 11677 (LPFC_MBX_ERROR_RANGE | mcqe_status)); 11678 } 11679 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 11680 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 11681 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 11682 "MBOX dflt rpi: status:x%x rpi:x%x", 11683 mcqe_status, 11684 pmbox->un.varWords[0], 0); 11685 if (mcqe_status == MB_CQE_STATUS_SUCCESS) { 11686 mp = (struct lpfc_dmabuf *)(pmb->context1); 11687 ndlp = (struct lpfc_nodelist *)pmb->context2; 11688 /* Reg_LOGIN of dflt RPI was successful. Now lets get 11689 * RID of the PPI using the same mbox buffer. 11690 */ 11691 lpfc_unreg_login(phba, vport->vpi, 11692 pmbox->un.varWords[0], pmb); 11693 pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; 11694 pmb->context1 = mp; 11695 pmb->context2 = ndlp; 11696 pmb->vport = vport; 11697 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 11698 if (rc != MBX_BUSY) 11699 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | 11700 LOG_SLI, "0385 rc should " 11701 "have been MBX_BUSY\n"); 11702 if (rc != MBX_NOT_FINISHED) 11703 goto send_current_mbox; 11704 } 11705 } 11706 spin_lock_irqsave(&phba->pport->work_port_lock, iflags); 11707 phba->pport->work_port_events &= ~WORKER_MBOX_TMO; 11708 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); 11709 11710 /* There is mailbox completion work to do */ 11711 spin_lock_irqsave(&phba->hbalock, iflags); 11712 __lpfc_mbox_cmpl_put(phba, pmb); 11713 phba->work_ha |= HA_MBATT; 11714 spin_unlock_irqrestore(&phba->hbalock, iflags); 11715 workposted = true; 11716 11717 send_current_mbox: 11718 spin_lock_irqsave(&phba->hbalock, iflags); 11719 /* Release the mailbox command posting token */ 11720 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; 11721 /* Setting active mailbox pointer need to be in sync to flag clear */ 11722 phba->sli.mbox_active = NULL; 11723 spin_unlock_irqrestore(&phba->hbalock, iflags); 11724 /* Wake up worker thread to post the next pending mailbox command */ 11725 lpfc_worker_wake_up(phba); 11726 out_no_mqe_complete: 11727 if (bf_get(lpfc_trailer_consumed, mcqe)) 11728 lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); 11729 return workposted; 11730 } 11731 11732 /** 11733 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry 11734 * @phba: Pointer to HBA context object. 11735 * @cqe: Pointer to mailbox completion queue entry. 11736 * 11737 * This routine process a mailbox completion queue entry, it invokes the 11738 * proper mailbox complete handling or asynchrous event handling routine 11739 * according to the MCQE's async bit. 11740 * 11741 * Return: true if work posted to worker thread, otherwise false. 11742 **/ 11743 static bool 11744 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) 11745 { 11746 struct lpfc_mcqe mcqe; 11747 bool workposted; 11748 11749 /* Copy the mailbox MCQE and convert endian order as needed */ 11750 lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); 11751 11752 /* Invoke the proper event handling routine */ 11753 if (!bf_get(lpfc_trailer_async, &mcqe)) 11754 workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); 11755 else 11756 workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); 11757 return workposted; 11758 } 11759 11760 /** 11761 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event 11762 * @phba: Pointer to HBA context object. 11763 * @cq: Pointer to associated CQ 11764 * @wcqe: Pointer to work-queue completion queue entry. 11765 * 11766 * This routine handles an ELS work-queue completion event. 11767 * 11768 * Return: true if work posted to worker thread, otherwise false. 11769 **/ 11770 static bool 11771 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11772 struct lpfc_wcqe_complete *wcqe) 11773 { 11774 struct lpfc_iocbq *irspiocbq; 11775 unsigned long iflags; 11776 struct lpfc_sli_ring *pring = cq->pring; 11777 int txq_cnt = 0; 11778 int txcmplq_cnt = 0; 11779 int fcp_txcmplq_cnt = 0; 11780 11781 /* Get an irspiocbq for later ELS response processing use */ 11782 irspiocbq = lpfc_sli_get_iocbq(phba); 11783 if (!irspiocbq) { 11784 if (!list_empty(&pring->txq)) 11785 txq_cnt++; 11786 if (!list_empty(&pring->txcmplq)) 11787 txcmplq_cnt++; 11788 if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq)) 11789 fcp_txcmplq_cnt++; 11790 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11791 "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " 11792 "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", 11793 txq_cnt, phba->iocb_cnt, 11794 fcp_txcmplq_cnt, 11795 txcmplq_cnt); 11796 return false; 11797 } 11798 11799 /* Save off the slow-path queue event for work thread to process */ 11800 memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe)); 11801 spin_lock_irqsave(&phba->hbalock, iflags); 11802 list_add_tail(&irspiocbq->cq_event.list, 11803 &phba->sli4_hba.sp_queue_event); 11804 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11805 spin_unlock_irqrestore(&phba->hbalock, iflags); 11806 11807 return true; 11808 } 11809 11810 /** 11811 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event 11812 * @phba: Pointer to HBA context object. 11813 * @wcqe: Pointer to work-queue completion queue entry. 11814 * 11815 * This routine handles slow-path WQ entry comsumed event by invoking the 11816 * proper WQ release routine to the slow-path WQ. 11817 **/ 11818 static void 11819 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, 11820 struct lpfc_wcqe_release *wcqe) 11821 { 11822 /* sanity check on queue memory */ 11823 if (unlikely(!phba->sli4_hba.els_wq)) 11824 return; 11825 /* Check for the slow-path ELS work queue */ 11826 if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) 11827 lpfc_sli4_wq_release(phba->sli4_hba.els_wq, 11828 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 11829 else 11830 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 11831 "2579 Slow-path wqe consume event carries " 11832 "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", 11833 bf_get(lpfc_wcqe_r_wqe_index, wcqe), 11834 phba->sli4_hba.els_wq->queue_id); 11835 } 11836 11837 /** 11838 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event 11839 * @phba: Pointer to HBA context object. 11840 * @cq: Pointer to a WQ completion queue. 11841 * @wcqe: Pointer to work-queue completion queue entry. 11842 * 11843 * This routine handles an XRI abort event. 11844 * 11845 * Return: true if work posted to worker thread, otherwise false. 11846 **/ 11847 static bool 11848 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, 11849 struct lpfc_queue *cq, 11850 struct sli4_wcqe_xri_aborted *wcqe) 11851 { 11852 bool workposted = false; 11853 struct lpfc_cq_event *cq_event; 11854 unsigned long iflags; 11855 11856 /* Allocate a new internal CQ_EVENT entry */ 11857 cq_event = lpfc_sli4_cq_event_alloc(phba); 11858 if (!cq_event) { 11859 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11860 "0602 Failed to allocate CQ_EVENT entry\n"); 11861 return false; 11862 } 11863 11864 /* Move the CQE into the proper xri abort event list */ 11865 memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); 11866 switch (cq->subtype) { 11867 case LPFC_FCP: 11868 spin_lock_irqsave(&phba->hbalock, iflags); 11869 list_add_tail(&cq_event->list, 11870 &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); 11871 /* Set the fcp xri abort event flag */ 11872 phba->hba_flag |= FCP_XRI_ABORT_EVENT; 11873 spin_unlock_irqrestore(&phba->hbalock, iflags); 11874 workposted = true; 11875 break; 11876 case LPFC_ELS: 11877 spin_lock_irqsave(&phba->hbalock, iflags); 11878 list_add_tail(&cq_event->list, 11879 &phba->sli4_hba.sp_els_xri_aborted_work_queue); 11880 /* Set the els xri abort event flag */ 11881 phba->hba_flag |= ELS_XRI_ABORT_EVENT; 11882 spin_unlock_irqrestore(&phba->hbalock, iflags); 11883 workposted = true; 11884 break; 11885 default: 11886 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11887 "0603 Invalid work queue CQE subtype (x%x)\n", 11888 cq->subtype); 11889 workposted = false; 11890 break; 11891 } 11892 return workposted; 11893 } 11894 11895 /** 11896 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry 11897 * @phba: Pointer to HBA context object. 11898 * @rcqe: Pointer to receive-queue completion queue entry. 11899 * 11900 * This routine process a receive-queue completion queue entry. 11901 * 11902 * Return: true if work posted to worker thread, otherwise false. 11903 **/ 11904 static bool 11905 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) 11906 { 11907 bool workposted = false; 11908 struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; 11909 struct lpfc_queue *drq = phba->sli4_hba.dat_rq; 11910 struct hbq_dmabuf *dma_buf; 11911 uint32_t status, rq_id; 11912 unsigned long iflags; 11913 11914 /* sanity check on queue memory */ 11915 if (unlikely(!hrq) || unlikely(!drq)) 11916 return workposted; 11917 11918 if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1) 11919 rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe); 11920 else 11921 rq_id = bf_get(lpfc_rcqe_rq_id, rcqe); 11922 if (rq_id != hrq->queue_id) 11923 goto out; 11924 11925 status = bf_get(lpfc_rcqe_status, rcqe); 11926 switch (status) { 11927 case FC_STATUS_RQ_BUF_LEN_EXCEEDED: 11928 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 11929 "2537 Receive Frame Truncated!!\n"); 11930 hrq->RQ_buf_trunc++; 11931 case FC_STATUS_RQ_SUCCESS: 11932 lpfc_sli4_rq_release(hrq, drq); 11933 spin_lock_irqsave(&phba->hbalock, iflags); 11934 dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); 11935 if (!dma_buf) { 11936 hrq->RQ_no_buf_found++; 11937 spin_unlock_irqrestore(&phba->hbalock, iflags); 11938 goto out; 11939 } 11940 hrq->RQ_rcv_buf++; 11941 memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe)); 11942 /* save off the frame for the word thread to process */ 11943 list_add_tail(&dma_buf->cq_event.list, 11944 &phba->sli4_hba.sp_queue_event); 11945 /* Frame received */ 11946 phba->hba_flag |= HBA_SP_QUEUE_EVT; 11947 spin_unlock_irqrestore(&phba->hbalock, iflags); 11948 workposted = true; 11949 break; 11950 case FC_STATUS_INSUFF_BUF_NEED_BUF: 11951 case FC_STATUS_INSUFF_BUF_FRM_DISC: 11952 hrq->RQ_no_posted_buf++; 11953 /* Post more buffers if possible */ 11954 spin_lock_irqsave(&phba->hbalock, iflags); 11955 phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; 11956 spin_unlock_irqrestore(&phba->hbalock, iflags); 11957 workposted = true; 11958 break; 11959 } 11960 out: 11961 return workposted; 11962 } 11963 11964 /** 11965 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry 11966 * @phba: Pointer to HBA context object. 11967 * @cq: Pointer to the completion queue. 11968 * @wcqe: Pointer to a completion queue entry. 11969 * 11970 * This routine process a slow-path work-queue or receive queue completion queue 11971 * entry. 11972 * 11973 * Return: true if work posted to worker thread, otherwise false. 11974 **/ 11975 static bool 11976 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 11977 struct lpfc_cqe *cqe) 11978 { 11979 struct lpfc_cqe cqevt; 11980 bool workposted = false; 11981 11982 /* Copy the work queue CQE and convert endian order if needed */ 11983 lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe)); 11984 11985 /* Check and process for different type of WCQE and dispatch */ 11986 switch (bf_get(lpfc_cqe_code, &cqevt)) { 11987 case CQE_CODE_COMPL_WQE: 11988 /* Process the WQ/RQ complete event */ 11989 phba->last_completion_time = jiffies; 11990 workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq, 11991 (struct lpfc_wcqe_complete *)&cqevt); 11992 break; 11993 case CQE_CODE_RELEASE_WQE: 11994 /* Process the WQ release event */ 11995 lpfc_sli4_sp_handle_rel_wcqe(phba, 11996 (struct lpfc_wcqe_release *)&cqevt); 11997 break; 11998 case CQE_CODE_XRI_ABORTED: 11999 /* Process the WQ XRI abort event */ 12000 phba->last_completion_time = jiffies; 12001 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 12002 (struct sli4_wcqe_xri_aborted *)&cqevt); 12003 break; 12004 case CQE_CODE_RECEIVE: 12005 case CQE_CODE_RECEIVE_V1: 12006 /* Process the RQ event */ 12007 phba->last_completion_time = jiffies; 12008 workposted = lpfc_sli4_sp_handle_rcqe(phba, 12009 (struct lpfc_rcqe *)&cqevt); 12010 break; 12011 default: 12012 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12013 "0388 Not a valid WCQE code: x%x\n", 12014 bf_get(lpfc_cqe_code, &cqevt)); 12015 break; 12016 } 12017 return workposted; 12018 } 12019 12020 /** 12021 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry 12022 * @phba: Pointer to HBA context object. 12023 * @eqe: Pointer to fast-path event queue entry. 12024 * 12025 * This routine process a event queue entry from the slow-path event queue. 12026 * It will check the MajorCode and MinorCode to determine this is for a 12027 * completion event on a completion queue, if not, an error shall be logged 12028 * and just return. Otherwise, it will get to the corresponding completion 12029 * queue and process all the entries on that completion queue, rearm the 12030 * completion queue, and then return. 12031 * 12032 **/ 12033 static void 12034 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 12035 struct lpfc_queue *speq) 12036 { 12037 struct lpfc_queue *cq = NULL, *childq; 12038 struct lpfc_cqe *cqe; 12039 bool workposted = false; 12040 int ecount = 0; 12041 uint16_t cqid; 12042 12043 /* Get the reference to the corresponding CQ */ 12044 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 12045 12046 list_for_each_entry(childq, &speq->child_list, list) { 12047 if (childq->queue_id == cqid) { 12048 cq = childq; 12049 break; 12050 } 12051 } 12052 if (unlikely(!cq)) { 12053 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 12054 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12055 "0365 Slow-path CQ identifier " 12056 "(%d) does not exist\n", cqid); 12057 return; 12058 } 12059 12060 /* Process all the entries to the CQ */ 12061 switch (cq->type) { 12062 case LPFC_MCQ: 12063 while ((cqe = lpfc_sli4_cq_get(cq))) { 12064 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 12065 if (!(++ecount % cq->entry_repost)) 12066 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12067 cq->CQ_mbox++; 12068 } 12069 break; 12070 case LPFC_WCQ: 12071 while ((cqe = lpfc_sli4_cq_get(cq))) { 12072 if (cq->subtype == LPFC_FCP) 12073 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, 12074 cqe); 12075 else 12076 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 12077 cqe); 12078 if (!(++ecount % cq->entry_repost)) 12079 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12080 } 12081 12082 /* Track the max number of CQEs processed in 1 EQ */ 12083 if (ecount > cq->CQ_max_cqe) 12084 cq->CQ_max_cqe = ecount; 12085 break; 12086 default: 12087 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12088 "0370 Invalid completion queue type (%d)\n", 12089 cq->type); 12090 return; 12091 } 12092 12093 /* Catch the no cq entry condition, log an error */ 12094 if (unlikely(ecount == 0)) 12095 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12096 "0371 No entry from the CQ: identifier " 12097 "(x%x), type (%d)\n", cq->queue_id, cq->type); 12098 12099 /* In any case, flash and re-arm the RCQ */ 12100 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 12101 12102 /* wake up worker thread if there are works to be done */ 12103 if (workposted) 12104 lpfc_worker_wake_up(phba); 12105 } 12106 12107 /** 12108 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry 12109 * @phba: Pointer to HBA context object. 12110 * @cq: Pointer to associated CQ 12111 * @wcqe: Pointer to work-queue completion queue entry. 12112 * 12113 * This routine process a fast-path work queue completion entry from fast-path 12114 * event queue for FCP command response completion. 12115 **/ 12116 static void 12117 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12118 struct lpfc_wcqe_complete *wcqe) 12119 { 12120 struct lpfc_sli_ring *pring = cq->pring; 12121 struct lpfc_iocbq *cmdiocbq; 12122 struct lpfc_iocbq irspiocbq; 12123 unsigned long iflags; 12124 12125 /* Check for response status */ 12126 if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { 12127 /* If resource errors reported from HBA, reduce queue 12128 * depth of the SCSI device. 12129 */ 12130 if (((bf_get(lpfc_wcqe_c_status, wcqe) == 12131 IOSTAT_LOCAL_REJECT)) && 12132 ((wcqe->parameter & IOERR_PARAM_MASK) == 12133 IOERR_NO_RESOURCES)) 12134 phba->lpfc_rampdown_queue_depth(phba); 12135 12136 /* Log the error status */ 12137 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12138 "0373 FCP complete error: status=x%x, " 12139 "hw_status=x%x, total_data_specified=%d, " 12140 "parameter=x%x, word3=x%x\n", 12141 bf_get(lpfc_wcqe_c_status, wcqe), 12142 bf_get(lpfc_wcqe_c_hw_status, wcqe), 12143 wcqe->total_data_placed, wcqe->parameter, 12144 wcqe->word3); 12145 } 12146 12147 /* Look up the FCP command IOCB and create pseudo response IOCB */ 12148 spin_lock_irqsave(&pring->ring_lock, iflags); 12149 pring->stats.iocb_event++; 12150 cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, 12151 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12152 spin_unlock_irqrestore(&pring->ring_lock, iflags); 12153 if (unlikely(!cmdiocbq)) { 12154 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12155 "0374 FCP complete with no corresponding " 12156 "cmdiocb: iotag (%d)\n", 12157 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12158 return; 12159 } 12160 if (unlikely(!cmdiocbq->iocb_cmpl)) { 12161 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12162 "0375 FCP cmdiocb not callback function " 12163 "iotag: (%d)\n", 12164 bf_get(lpfc_wcqe_c_request_tag, wcqe)); 12165 return; 12166 } 12167 12168 /* Fake the irspiocb and copy necessary response information */ 12169 lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe); 12170 12171 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) { 12172 spin_lock_irqsave(&phba->hbalock, iflags); 12173 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED; 12174 spin_unlock_irqrestore(&phba->hbalock, iflags); 12175 } 12176 12177 /* Pass the cmd_iocb and the rsp state to the upper layer */ 12178 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); 12179 } 12180 12181 /** 12182 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event 12183 * @phba: Pointer to HBA context object. 12184 * @cq: Pointer to completion queue. 12185 * @wcqe: Pointer to work-queue completion queue entry. 12186 * 12187 * This routine handles an fast-path WQ entry comsumed event by invoking the 12188 * proper WQ release routine to the slow-path WQ. 12189 **/ 12190 static void 12191 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12192 struct lpfc_wcqe_release *wcqe) 12193 { 12194 struct lpfc_queue *childwq; 12195 bool wqid_matched = false; 12196 uint16_t fcp_wqid; 12197 12198 /* Check for fast-path FCP work queue release */ 12199 fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); 12200 list_for_each_entry(childwq, &cq->child_list, list) { 12201 if (childwq->queue_id == fcp_wqid) { 12202 lpfc_sli4_wq_release(childwq, 12203 bf_get(lpfc_wcqe_r_wqe_index, wcqe)); 12204 wqid_matched = true; 12205 break; 12206 } 12207 } 12208 /* Report warning log message if no match found */ 12209 if (wqid_matched != true) 12210 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12211 "2580 Fast-path wqe consume event carries " 12212 "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); 12213 } 12214 12215 /** 12216 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry 12217 * @cq: Pointer to the completion queue. 12218 * @eqe: Pointer to fast-path completion queue entry. 12219 * 12220 * This routine process a fast-path work queue completion entry from fast-path 12221 * event queue for FCP command response completion. 12222 **/ 12223 static int 12224 lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, 12225 struct lpfc_cqe *cqe) 12226 { 12227 struct lpfc_wcqe_release wcqe; 12228 bool workposted = false; 12229 12230 /* Copy the work queue CQE and convert endian order if needed */ 12231 lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); 12232 12233 /* Check and process for different type of WCQE and dispatch */ 12234 switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { 12235 case CQE_CODE_COMPL_WQE: 12236 cq->CQ_wq++; 12237 /* Process the WQ complete event */ 12238 phba->last_completion_time = jiffies; 12239 lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, 12240 (struct lpfc_wcqe_complete *)&wcqe); 12241 break; 12242 case CQE_CODE_RELEASE_WQE: 12243 cq->CQ_release_wqe++; 12244 /* Process the WQ release event */ 12245 lpfc_sli4_fp_handle_rel_wcqe(phba, cq, 12246 (struct lpfc_wcqe_release *)&wcqe); 12247 break; 12248 case CQE_CODE_XRI_ABORTED: 12249 cq->CQ_xri_aborted++; 12250 /* Process the WQ XRI abort event */ 12251 phba->last_completion_time = jiffies; 12252 workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, 12253 (struct sli4_wcqe_xri_aborted *)&wcqe); 12254 break; 12255 default: 12256 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12257 "0144 Not a valid WCQE code: x%x\n", 12258 bf_get(lpfc_wcqe_c_code, &wcqe)); 12259 break; 12260 } 12261 return workposted; 12262 } 12263 12264 /** 12265 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry 12266 * @phba: Pointer to HBA context object. 12267 * @eqe: Pointer to fast-path event queue entry. 12268 * 12269 * This routine process a event queue entry from the fast-path event queue. 12270 * It will check the MajorCode and MinorCode to determine this is for a 12271 * completion event on a completion queue, if not, an error shall be logged 12272 * and just return. Otherwise, it will get to the corresponding completion 12273 * queue and process all the entries on the completion queue, rearm the 12274 * completion queue, and then return. 12275 **/ 12276 static void 12277 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, 12278 uint32_t qidx) 12279 { 12280 struct lpfc_queue *cq; 12281 struct lpfc_cqe *cqe; 12282 bool workposted = false; 12283 uint16_t cqid; 12284 int ecount = 0; 12285 12286 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 12287 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12288 "0366 Not a valid completion " 12289 "event: majorcode=x%x, minorcode=x%x\n", 12290 bf_get_le32(lpfc_eqe_major_code, eqe), 12291 bf_get_le32(lpfc_eqe_minor_code, eqe)); 12292 return; 12293 } 12294 12295 /* Get the reference to the corresponding CQ */ 12296 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 12297 12298 /* Check if this is a Slow path event */ 12299 if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) { 12300 lpfc_sli4_sp_handle_eqe(phba, eqe, 12301 phba->sli4_hba.hba_eq[qidx]); 12302 return; 12303 } 12304 12305 if (unlikely(!phba->sli4_hba.fcp_cq)) { 12306 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12307 "3146 Fast-path completion queues " 12308 "does not exist\n"); 12309 return; 12310 } 12311 cq = phba->sli4_hba.fcp_cq[qidx]; 12312 if (unlikely(!cq)) { 12313 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 12314 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12315 "0367 Fast-path completion queue " 12316 "(%d) does not exist\n", qidx); 12317 return; 12318 } 12319 12320 if (unlikely(cqid != cq->queue_id)) { 12321 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12322 "0368 Miss-matched fast-path completion " 12323 "queue identifier: eqcqid=%d, fcpcqid=%d\n", 12324 cqid, cq->queue_id); 12325 return; 12326 } 12327 12328 /* Process all the entries to the CQ */ 12329 while ((cqe = lpfc_sli4_cq_get(cq))) { 12330 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 12331 if (!(++ecount % cq->entry_repost)) 12332 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12333 } 12334 12335 /* Track the max number of CQEs processed in 1 EQ */ 12336 if (ecount > cq->CQ_max_cqe) 12337 cq->CQ_max_cqe = ecount; 12338 12339 /* Catch the no cq entry condition */ 12340 if (unlikely(ecount == 0)) 12341 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12342 "0369 No entry from fast-path completion " 12343 "queue fcpcqid=%d\n", cq->queue_id); 12344 12345 /* In any case, flash and re-arm the CQ */ 12346 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 12347 12348 /* wake up worker thread if there are works to be done */ 12349 if (workposted) 12350 lpfc_worker_wake_up(phba); 12351 } 12352 12353 static void 12354 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) 12355 { 12356 struct lpfc_eqe *eqe; 12357 12358 /* walk all the EQ entries and drop on the floor */ 12359 while ((eqe = lpfc_sli4_eq_get(eq))) 12360 ; 12361 12362 /* Clear and re-arm the EQ */ 12363 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12364 } 12365 12366 12367 /** 12368 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue 12369 * entry 12370 * @phba: Pointer to HBA context object. 12371 * @eqe: Pointer to fast-path event queue entry. 12372 * 12373 * This routine process a event queue entry from the Flash Optimized Fabric 12374 * event queue. It will check the MajorCode and MinorCode to determine this 12375 * is for a completion event on a completion queue, if not, an error shall be 12376 * logged and just return. Otherwise, it will get to the corresponding 12377 * completion queue and process all the entries on the completion queue, rearm 12378 * the completion queue, and then return. 12379 **/ 12380 static void 12381 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) 12382 { 12383 struct lpfc_queue *cq; 12384 struct lpfc_cqe *cqe; 12385 bool workposted = false; 12386 uint16_t cqid; 12387 int ecount = 0; 12388 12389 if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { 12390 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12391 "9147 Not a valid completion " 12392 "event: majorcode=x%x, minorcode=x%x\n", 12393 bf_get_le32(lpfc_eqe_major_code, eqe), 12394 bf_get_le32(lpfc_eqe_minor_code, eqe)); 12395 return; 12396 } 12397 12398 /* Get the reference to the corresponding CQ */ 12399 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); 12400 12401 /* Next check for OAS */ 12402 cq = phba->sli4_hba.oas_cq; 12403 if (unlikely(!cq)) { 12404 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) 12405 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12406 "9148 OAS completion queue " 12407 "does not exist\n"); 12408 return; 12409 } 12410 12411 if (unlikely(cqid != cq->queue_id)) { 12412 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12413 "9149 Miss-matched fast-path compl " 12414 "queue id: eqcqid=%d, fcpcqid=%d\n", 12415 cqid, cq->queue_id); 12416 return; 12417 } 12418 12419 /* Process all the entries to the OAS CQ */ 12420 while ((cqe = lpfc_sli4_cq_get(cq))) { 12421 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 12422 if (!(++ecount % cq->entry_repost)) 12423 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 12424 } 12425 12426 /* Track the max number of CQEs processed in 1 EQ */ 12427 if (ecount > cq->CQ_max_cqe) 12428 cq->CQ_max_cqe = ecount; 12429 12430 /* Catch the no cq entry condition */ 12431 if (unlikely(ecount == 0)) 12432 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12433 "9153 No entry from fast-path completion " 12434 "queue fcpcqid=%d\n", cq->queue_id); 12435 12436 /* In any case, flash and re-arm the CQ */ 12437 lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); 12438 12439 /* wake up worker thread if there are works to be done */ 12440 if (workposted) 12441 lpfc_worker_wake_up(phba); 12442 } 12443 12444 /** 12445 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device 12446 * @irq: Interrupt number. 12447 * @dev_id: The device context pointer. 12448 * 12449 * This function is directly called from the PCI layer as an interrupt 12450 * service routine when device with SLI-4 interface spec is enabled with 12451 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric 12452 * IOCB ring event in the HBA. However, when the device is enabled with either 12453 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12454 * device-level interrupt handler. When the PCI slot is in error recovery 12455 * or the HBA is undergoing initialization, the interrupt handler will not 12456 * process the interrupt. The Flash Optimized Fabric ring event are handled in 12457 * the intrrupt context. This function is called without any lock held. 12458 * It gets the hbalock to access and update SLI data structures. Note that, 12459 * the EQ to CQ are one-to-one map such that the EQ index is 12460 * equal to that of CQ index. 12461 * 12462 * This function returns IRQ_HANDLED when interrupt is handled else it 12463 * returns IRQ_NONE. 12464 **/ 12465 irqreturn_t 12466 lpfc_sli4_fof_intr_handler(int irq, void *dev_id) 12467 { 12468 struct lpfc_hba *phba; 12469 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 12470 struct lpfc_queue *eq; 12471 struct lpfc_eqe *eqe; 12472 unsigned long iflag; 12473 int ecount = 0; 12474 uint32_t eqidx; 12475 12476 /* Get the driver's phba structure from the dev_id */ 12477 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 12478 phba = fcp_eq_hdl->phba; 12479 eqidx = fcp_eq_hdl->idx; 12480 12481 if (unlikely(!phba)) 12482 return IRQ_NONE; 12483 12484 /* Get to the EQ struct associated with this vector */ 12485 eq = phba->sli4_hba.fof_eq; 12486 if (unlikely(!eq)) 12487 return IRQ_NONE; 12488 12489 /* Check device state for handling interrupt */ 12490 if (unlikely(lpfc_intr_state_check(phba))) { 12491 eq->EQ_badstate++; 12492 /* Check again for link_state with lock held */ 12493 spin_lock_irqsave(&phba->hbalock, iflag); 12494 if (phba->link_state < LPFC_LINK_DOWN) 12495 /* Flush, clear interrupt, and rearm the EQ */ 12496 lpfc_sli4_eq_flush(phba, eq); 12497 spin_unlock_irqrestore(&phba->hbalock, iflag); 12498 return IRQ_NONE; 12499 } 12500 12501 /* 12502 * Process all the event on FCP fast-path EQ 12503 */ 12504 while ((eqe = lpfc_sli4_eq_get(eq))) { 12505 lpfc_sli4_fof_handle_eqe(phba, eqe); 12506 if (!(++ecount % eq->entry_repost)) 12507 lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); 12508 eq->EQ_processed++; 12509 } 12510 12511 /* Track the max number of EQEs processed in 1 intr */ 12512 if (ecount > eq->EQ_max_eqe) 12513 eq->EQ_max_eqe = ecount; 12514 12515 12516 if (unlikely(ecount == 0)) { 12517 eq->EQ_no_entry++; 12518 12519 if (phba->intr_type == MSIX) 12520 /* MSI-X treated interrupt served as no EQ share INT */ 12521 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12522 "9145 MSI-X interrupt with no EQE\n"); 12523 else { 12524 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12525 "9146 ISR interrupt with no EQE\n"); 12526 /* Non MSI-X treated on interrupt as EQ share INT */ 12527 return IRQ_NONE; 12528 } 12529 } 12530 /* Always clear and re-arm the fast-path EQ */ 12531 lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); 12532 return IRQ_HANDLED; 12533 } 12534 12535 /** 12536 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device 12537 * @irq: Interrupt number. 12538 * @dev_id: The device context pointer. 12539 * 12540 * This function is directly called from the PCI layer as an interrupt 12541 * service routine when device with SLI-4 interface spec is enabled with 12542 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB 12543 * ring event in the HBA. However, when the device is enabled with either 12544 * MSI or Pin-IRQ interrupt mode, this function is called as part of the 12545 * device-level interrupt handler. When the PCI slot is in error recovery 12546 * or the HBA is undergoing initialization, the interrupt handler will not 12547 * process the interrupt. The SCSI FCP fast-path ring event are handled in 12548 * the intrrupt context. This function is called without any lock held. 12549 * It gets the hbalock to access and update SLI data structures. Note that, 12550 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is 12551 * equal to that of FCP CQ index. 12552 * 12553 * The link attention and ELS ring attention events are handled 12554 * by the worker thread. The interrupt handler signals the worker thread 12555 * and returns for these events. This function is called without any lock 12556 * held. It gets the hbalock to access and update SLI data structures. 12557 * 12558 * This function returns IRQ_HANDLED when interrupt is handled else it 12559 * returns IRQ_NONE. 12560 **/ 12561 irqreturn_t 12562 lpfc_sli4_hba_intr_handler(int irq, void *dev_id) 12563 { 12564 struct lpfc_hba *phba; 12565 struct lpfc_fcp_eq_hdl *fcp_eq_hdl; 12566 struct lpfc_queue *fpeq; 12567 struct lpfc_eqe *eqe; 12568 unsigned long iflag; 12569 int ecount = 0; 12570 int fcp_eqidx; 12571 12572 /* Get the driver's phba structure from the dev_id */ 12573 fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; 12574 phba = fcp_eq_hdl->phba; 12575 fcp_eqidx = fcp_eq_hdl->idx; 12576 12577 if (unlikely(!phba)) 12578 return IRQ_NONE; 12579 if (unlikely(!phba->sli4_hba.hba_eq)) 12580 return IRQ_NONE; 12581 12582 /* Get to the EQ struct associated with this vector */ 12583 fpeq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12584 if (unlikely(!fpeq)) 12585 return IRQ_NONE; 12586 12587 if (lpfc_fcp_look_ahead) { 12588 if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) 12589 lpfc_sli4_eq_clr_intr(fpeq); 12590 else { 12591 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12592 return IRQ_NONE; 12593 } 12594 } 12595 12596 /* Check device state for handling interrupt */ 12597 if (unlikely(lpfc_intr_state_check(phba))) { 12598 fpeq->EQ_badstate++; 12599 /* Check again for link_state with lock held */ 12600 spin_lock_irqsave(&phba->hbalock, iflag); 12601 if (phba->link_state < LPFC_LINK_DOWN) 12602 /* Flush, clear interrupt, and rearm the EQ */ 12603 lpfc_sli4_eq_flush(phba, fpeq); 12604 spin_unlock_irqrestore(&phba->hbalock, iflag); 12605 if (lpfc_fcp_look_ahead) 12606 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12607 return IRQ_NONE; 12608 } 12609 12610 /* 12611 * Process all the event on FCP fast-path EQ 12612 */ 12613 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 12614 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); 12615 if (!(++ecount % fpeq->entry_repost)) 12616 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 12617 fpeq->EQ_processed++; 12618 } 12619 12620 /* Track the max number of EQEs processed in 1 intr */ 12621 if (ecount > fpeq->EQ_max_eqe) 12622 fpeq->EQ_max_eqe = ecount; 12623 12624 /* Always clear and re-arm the fast-path EQ */ 12625 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); 12626 12627 if (unlikely(ecount == 0)) { 12628 fpeq->EQ_no_entry++; 12629 12630 if (lpfc_fcp_look_ahead) { 12631 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12632 return IRQ_NONE; 12633 } 12634 12635 if (phba->intr_type == MSIX) 12636 /* MSI-X treated interrupt served as no EQ share INT */ 12637 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 12638 "0358 MSI-X interrupt with no EQE\n"); 12639 else 12640 /* Non MSI-X treated on interrupt as EQ share INT */ 12641 return IRQ_NONE; 12642 } 12643 12644 if (lpfc_fcp_look_ahead) 12645 atomic_inc(&fcp_eq_hdl->fcp_eq_in_use); 12646 return IRQ_HANDLED; 12647 } /* lpfc_sli4_fp_intr_handler */ 12648 12649 /** 12650 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device 12651 * @irq: Interrupt number. 12652 * @dev_id: The device context pointer. 12653 * 12654 * This function is the device-level interrupt handler to device with SLI-4 12655 * interface spec, called from the PCI layer when either MSI or Pin-IRQ 12656 * interrupt mode is enabled and there is an event in the HBA which requires 12657 * driver attention. This function invokes the slow-path interrupt attention 12658 * handling function and fast-path interrupt attention handling function in 12659 * turn to process the relevant HBA attention events. This function is called 12660 * without any lock held. It gets the hbalock to access and update SLI data 12661 * structures. 12662 * 12663 * This function returns IRQ_HANDLED when interrupt is handled, else it 12664 * returns IRQ_NONE. 12665 **/ 12666 irqreturn_t 12667 lpfc_sli4_intr_handler(int irq, void *dev_id) 12668 { 12669 struct lpfc_hba *phba; 12670 irqreturn_t hba_irq_rc; 12671 bool hba_handled = false; 12672 int fcp_eqidx; 12673 12674 /* Get the driver's phba structure from the dev_id */ 12675 phba = (struct lpfc_hba *)dev_id; 12676 12677 if (unlikely(!phba)) 12678 return IRQ_NONE; 12679 12680 /* 12681 * Invoke fast-path host attention interrupt handling as appropriate. 12682 */ 12683 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) { 12684 hba_irq_rc = lpfc_sli4_hba_intr_handler(irq, 12685 &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); 12686 if (hba_irq_rc == IRQ_HANDLED) 12687 hba_handled |= true; 12688 } 12689 12690 if (phba->cfg_fof) { 12691 hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, 12692 &phba->sli4_hba.fcp_eq_hdl[0]); 12693 if (hba_irq_rc == IRQ_HANDLED) 12694 hba_handled |= true; 12695 } 12696 12697 return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; 12698 } /* lpfc_sli4_intr_handler */ 12699 12700 /** 12701 * lpfc_sli4_queue_free - free a queue structure and associated memory 12702 * @queue: The queue structure to free. 12703 * 12704 * This function frees a queue structure and the DMAable memory used for 12705 * the host resident queue. This function must be called after destroying the 12706 * queue on the HBA. 12707 **/ 12708 void 12709 lpfc_sli4_queue_free(struct lpfc_queue *queue) 12710 { 12711 struct lpfc_dmabuf *dmabuf; 12712 12713 if (!queue) 12714 return; 12715 12716 while (!list_empty(&queue->page_list)) { 12717 list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, 12718 list); 12719 dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE, 12720 dmabuf->virt, dmabuf->phys); 12721 kfree(dmabuf); 12722 } 12723 kfree(queue); 12724 return; 12725 } 12726 12727 /** 12728 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure 12729 * @phba: The HBA that this queue is being created on. 12730 * @entry_size: The size of each queue entry for this queue. 12731 * @entry count: The number of entries that this queue will handle. 12732 * 12733 * This function allocates a queue structure and the DMAable memory used for 12734 * the host resident queue. This function must be called before creating the 12735 * queue on the HBA. 12736 **/ 12737 struct lpfc_queue * 12738 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, 12739 uint32_t entry_count) 12740 { 12741 struct lpfc_queue *queue; 12742 struct lpfc_dmabuf *dmabuf; 12743 int x, total_qe_count; 12744 void *dma_pointer; 12745 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12746 12747 if (!phba->sli4_hba.pc_sli4_params.supported) 12748 hw_page_size = SLI4_PAGE_SIZE; 12749 12750 queue = kzalloc(sizeof(struct lpfc_queue) + 12751 (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); 12752 if (!queue) 12753 return NULL; 12754 queue->page_count = (ALIGN(entry_size * entry_count, 12755 hw_page_size))/hw_page_size; 12756 INIT_LIST_HEAD(&queue->list); 12757 INIT_LIST_HEAD(&queue->page_list); 12758 INIT_LIST_HEAD(&queue->child_list); 12759 for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { 12760 dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 12761 if (!dmabuf) 12762 goto out_fail; 12763 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 12764 hw_page_size, &dmabuf->phys, 12765 GFP_KERNEL); 12766 if (!dmabuf->virt) { 12767 kfree(dmabuf); 12768 goto out_fail; 12769 } 12770 memset(dmabuf->virt, 0, hw_page_size); 12771 dmabuf->buffer_tag = x; 12772 list_add_tail(&dmabuf->list, &queue->page_list); 12773 /* initialize queue's entry array */ 12774 dma_pointer = dmabuf->virt; 12775 for (; total_qe_count < entry_count && 12776 dma_pointer < (hw_page_size + dmabuf->virt); 12777 total_qe_count++, dma_pointer += entry_size) { 12778 queue->qe[total_qe_count].address = dma_pointer; 12779 } 12780 } 12781 queue->entry_size = entry_size; 12782 queue->entry_count = entry_count; 12783 12784 /* 12785 * entry_repost is calculated based on the number of entries in the 12786 * queue. This works out except for RQs. If buffers are NOT initially 12787 * posted for every RQE, entry_repost should be adjusted accordingly. 12788 */ 12789 queue->entry_repost = (entry_count >> 3); 12790 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) 12791 queue->entry_repost = LPFC_QUEUE_MIN_REPOST; 12792 queue->phba = phba; 12793 12794 return queue; 12795 out_fail: 12796 lpfc_sli4_queue_free(queue); 12797 return NULL; 12798 } 12799 12800 /** 12801 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory 12802 * @phba: HBA structure that indicates port to create a queue on. 12803 * @pci_barset: PCI BAR set flag. 12804 * 12805 * This function shall perform iomap of the specified PCI BAR address to host 12806 * memory address if not already done so and return it. The returned host 12807 * memory address can be NULL. 12808 */ 12809 static void __iomem * 12810 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) 12811 { 12812 struct pci_dev *pdev; 12813 12814 if (!phba->pcidev) 12815 return NULL; 12816 else 12817 pdev = phba->pcidev; 12818 12819 switch (pci_barset) { 12820 case WQ_PCI_BAR_0_AND_1: 12821 return phba->pci_bar0_memmap_p; 12822 case WQ_PCI_BAR_2_AND_3: 12823 return phba->pci_bar2_memmap_p; 12824 case WQ_PCI_BAR_4_AND_5: 12825 return phba->pci_bar4_memmap_p; 12826 default: 12827 break; 12828 } 12829 return NULL; 12830 } 12831 12832 /** 12833 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs 12834 * @phba: HBA structure that indicates port to create a queue on. 12835 * @startq: The starting FCP EQ to modify 12836 * 12837 * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA. 12838 * 12839 * The @phba struct is used to send mailbox command to HBA. The @startq 12840 * is used to get the starting FCP EQ to change. 12841 * This function is asynchronous and will wait for the mailbox 12842 * command to finish before continuing. 12843 * 12844 * On success this function will return a zero. If unable to allocate enough 12845 * memory this function will return -ENOMEM. If the queue create mailbox command 12846 * fails this function will return -ENXIO. 12847 **/ 12848 uint32_t 12849 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) 12850 { 12851 struct lpfc_mbx_modify_eq_delay *eq_delay; 12852 LPFC_MBOXQ_t *mbox; 12853 struct lpfc_queue *eq; 12854 int cnt, rc, length, status = 0; 12855 uint32_t shdr_status, shdr_add_status; 12856 uint32_t result; 12857 int fcp_eqidx; 12858 union lpfc_sli4_cfg_shdr *shdr; 12859 uint16_t dmult; 12860 12861 if (startq >= phba->cfg_fcp_io_channel) 12862 return 0; 12863 12864 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12865 if (!mbox) 12866 return -ENOMEM; 12867 length = (sizeof(struct lpfc_mbx_modify_eq_delay) - 12868 sizeof(struct lpfc_sli4_cfg_mhdr)); 12869 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12870 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY, 12871 length, LPFC_SLI4_MBX_EMBED); 12872 eq_delay = &mbox->u.mqe.un.eq_delay; 12873 12874 /* Calculate delay multiper from maximum interrupt per second */ 12875 result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel; 12876 if (result > LPFC_DMULT_CONST) 12877 dmult = 0; 12878 else 12879 dmult = LPFC_DMULT_CONST/result - 1; 12880 12881 cnt = 0; 12882 for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel; 12883 fcp_eqidx++) { 12884 eq = phba->sli4_hba.hba_eq[fcp_eqidx]; 12885 if (!eq) 12886 continue; 12887 eq_delay->u.request.eq[cnt].eq_id = eq->queue_id; 12888 eq_delay->u.request.eq[cnt].phase = 0; 12889 eq_delay->u.request.eq[cnt].delay_multi = dmult; 12890 cnt++; 12891 if (cnt >= LPFC_MAX_EQ_DELAY) 12892 break; 12893 } 12894 eq_delay->u.request.num_eq = cnt; 12895 12896 mbox->vport = phba->pport; 12897 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 12898 mbox->context1 = NULL; 12899 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12900 shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr; 12901 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 12902 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 12903 if (shdr_status || shdr_add_status || rc) { 12904 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 12905 "2512 MODIFY_EQ_DELAY mailbox failed with " 12906 "status x%x add_status x%x, mbx status x%x\n", 12907 shdr_status, shdr_add_status, rc); 12908 status = -ENXIO; 12909 } 12910 mempool_free(mbox, phba->mbox_mem_pool); 12911 return status; 12912 } 12913 12914 /** 12915 * lpfc_eq_create - Create an Event Queue on the HBA 12916 * @phba: HBA structure that indicates port to create a queue on. 12917 * @eq: The queue structure to use to create the event queue. 12918 * @imax: The maximum interrupt per second limit. 12919 * 12920 * This function creates an event queue, as detailed in @eq, on a port, 12921 * described by @phba by sending an EQ_CREATE mailbox command to the HBA. 12922 * 12923 * The @phba struct is used to send mailbox command to HBA. The @eq struct 12924 * is used to get the entry count and entry size that are necessary to 12925 * determine the number of pages to allocate and use for this queue. This 12926 * function will send the EQ_CREATE mailbox command to the HBA to setup the 12927 * event queue. This function is asynchronous and will wait for the mailbox 12928 * command to finish before continuing. 12929 * 12930 * On success this function will return a zero. If unable to allocate enough 12931 * memory this function will return -ENOMEM. If the queue create mailbox command 12932 * fails this function will return -ENXIO. 12933 **/ 12934 uint32_t 12935 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 12936 { 12937 struct lpfc_mbx_eq_create *eq_create; 12938 LPFC_MBOXQ_t *mbox; 12939 int rc, length, status = 0; 12940 struct lpfc_dmabuf *dmabuf; 12941 uint32_t shdr_status, shdr_add_status; 12942 union lpfc_sli4_cfg_shdr *shdr; 12943 uint16_t dmult; 12944 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 12945 12946 /* sanity check on queue memory */ 12947 if (!eq) 12948 return -ENODEV; 12949 if (!phba->sli4_hba.pc_sli4_params.supported) 12950 hw_page_size = SLI4_PAGE_SIZE; 12951 12952 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 12953 if (!mbox) 12954 return -ENOMEM; 12955 length = (sizeof(struct lpfc_mbx_eq_create) - 12956 sizeof(struct lpfc_sli4_cfg_mhdr)); 12957 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 12958 LPFC_MBOX_OPCODE_EQ_CREATE, 12959 length, LPFC_SLI4_MBX_EMBED); 12960 eq_create = &mbox->u.mqe.un.eq_create; 12961 bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, 12962 eq->page_count); 12963 bf_set(lpfc_eq_context_size, &eq_create->u.request.context, 12964 LPFC_EQE_SIZE); 12965 bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); 12966 /* Calculate delay multiper from maximum interrupt per second */ 12967 if (imax > LPFC_DMULT_CONST) 12968 dmult = 0; 12969 else 12970 dmult = LPFC_DMULT_CONST/imax - 1; 12971 bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, 12972 dmult); 12973 switch (eq->entry_count) { 12974 default: 12975 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 12976 "0360 Unsupported EQ count. (%d)\n", 12977 eq->entry_count); 12978 if (eq->entry_count < 256) 12979 return -EINVAL; 12980 /* otherwise default to smallest count (drop through) */ 12981 case 256: 12982 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12983 LPFC_EQ_CNT_256); 12984 break; 12985 case 512: 12986 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12987 LPFC_EQ_CNT_512); 12988 break; 12989 case 1024: 12990 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12991 LPFC_EQ_CNT_1024); 12992 break; 12993 case 2048: 12994 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12995 LPFC_EQ_CNT_2048); 12996 break; 12997 case 4096: 12998 bf_set(lpfc_eq_context_count, &eq_create->u.request.context, 12999 LPFC_EQ_CNT_4096); 13000 break; 13001 } 13002 list_for_each_entry(dmabuf, &eq->page_list, list) { 13003 memset(dmabuf->virt, 0, hw_page_size); 13004 eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13005 putPaddrLow(dmabuf->phys); 13006 eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13007 putPaddrHigh(dmabuf->phys); 13008 } 13009 mbox->vport = phba->pport; 13010 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13011 mbox->context1 = NULL; 13012 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13013 shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; 13014 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13015 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13016 if (shdr_status || shdr_add_status || rc) { 13017 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13018 "2500 EQ_CREATE mailbox failed with " 13019 "status x%x add_status x%x, mbx status x%x\n", 13020 shdr_status, shdr_add_status, rc); 13021 status = -ENXIO; 13022 } 13023 eq->type = LPFC_EQ; 13024 eq->subtype = LPFC_NONE; 13025 eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); 13026 if (eq->queue_id == 0xFFFF) 13027 status = -ENXIO; 13028 eq->host_index = 0; 13029 eq->hba_index = 0; 13030 13031 mempool_free(mbox, phba->mbox_mem_pool); 13032 return status; 13033 } 13034 13035 /** 13036 * lpfc_cq_create - Create a Completion Queue on the HBA 13037 * @phba: HBA structure that indicates port to create a queue on. 13038 * @cq: The queue structure to use to create the completion queue. 13039 * @eq: The event queue to bind this completion queue to. 13040 * 13041 * This function creates a completion queue, as detailed in @wq, on a port, 13042 * described by @phba by sending a CQ_CREATE mailbox command to the HBA. 13043 * 13044 * The @phba struct is used to send mailbox command to HBA. The @cq struct 13045 * is used to get the entry count and entry size that are necessary to 13046 * determine the number of pages to allocate and use for this queue. The @eq 13047 * is used to indicate which event queue to bind this completion queue to. This 13048 * function will send the CQ_CREATE mailbox command to the HBA to setup the 13049 * completion queue. This function is asynchronous and will wait for the mailbox 13050 * command to finish before continuing. 13051 * 13052 * On success this function will return a zero. If unable to allocate enough 13053 * memory this function will return -ENOMEM. If the queue create mailbox command 13054 * fails this function will return -ENXIO. 13055 **/ 13056 uint32_t 13057 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 13058 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 13059 { 13060 struct lpfc_mbx_cq_create *cq_create; 13061 struct lpfc_dmabuf *dmabuf; 13062 LPFC_MBOXQ_t *mbox; 13063 int rc, length, status = 0; 13064 uint32_t shdr_status, shdr_add_status; 13065 union lpfc_sli4_cfg_shdr *shdr; 13066 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13067 13068 /* sanity check on queue memory */ 13069 if (!cq || !eq) 13070 return -ENODEV; 13071 if (!phba->sli4_hba.pc_sli4_params.supported) 13072 hw_page_size = SLI4_PAGE_SIZE; 13073 13074 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13075 if (!mbox) 13076 return -ENOMEM; 13077 length = (sizeof(struct lpfc_mbx_cq_create) - 13078 sizeof(struct lpfc_sli4_cfg_mhdr)); 13079 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13080 LPFC_MBOX_OPCODE_CQ_CREATE, 13081 length, LPFC_SLI4_MBX_EMBED); 13082 cq_create = &mbox->u.mqe.un.cq_create; 13083 shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; 13084 bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, 13085 cq->page_count); 13086 bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); 13087 bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); 13088 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13089 phba->sli4_hba.pc_sli4_params.cqv); 13090 if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) { 13091 /* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */ 13092 bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1); 13093 bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context, 13094 eq->queue_id); 13095 } else { 13096 bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, 13097 eq->queue_id); 13098 } 13099 switch (cq->entry_count) { 13100 default: 13101 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13102 "0361 Unsupported CQ count. (%d)\n", 13103 cq->entry_count); 13104 if (cq->entry_count < 256) { 13105 status = -EINVAL; 13106 goto out; 13107 } 13108 /* otherwise default to smallest count (drop through) */ 13109 case 256: 13110 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 13111 LPFC_CQ_CNT_256); 13112 break; 13113 case 512: 13114 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 13115 LPFC_CQ_CNT_512); 13116 break; 13117 case 1024: 13118 bf_set(lpfc_cq_context_count, &cq_create->u.request.context, 13119 LPFC_CQ_CNT_1024); 13120 break; 13121 } 13122 list_for_each_entry(dmabuf, &cq->page_list, list) { 13123 memset(dmabuf->virt, 0, hw_page_size); 13124 cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13125 putPaddrLow(dmabuf->phys); 13126 cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13127 putPaddrHigh(dmabuf->phys); 13128 } 13129 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13130 13131 /* The IOCTL status is embedded in the mailbox subheader. */ 13132 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13133 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13134 if (shdr_status || shdr_add_status || rc) { 13135 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13136 "2501 CQ_CREATE mailbox failed with " 13137 "status x%x add_status x%x, mbx status x%x\n", 13138 shdr_status, shdr_add_status, rc); 13139 status = -ENXIO; 13140 goto out; 13141 } 13142 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 13143 if (cq->queue_id == 0xFFFF) { 13144 status = -ENXIO; 13145 goto out; 13146 } 13147 /* link the cq onto the parent eq child list */ 13148 list_add_tail(&cq->list, &eq->child_list); 13149 /* Set up completion queue's type and subtype */ 13150 cq->type = type; 13151 cq->subtype = subtype; 13152 cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); 13153 cq->assoc_qid = eq->queue_id; 13154 cq->host_index = 0; 13155 cq->hba_index = 0; 13156 13157 out: 13158 mempool_free(mbox, phba->mbox_mem_pool); 13159 return status; 13160 } 13161 13162 /** 13163 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration 13164 * @phba: HBA structure that indicates port to create a queue on. 13165 * @mq: The queue structure to use to create the mailbox queue. 13166 * @mbox: An allocated pointer to type LPFC_MBOXQ_t 13167 * @cq: The completion queue to associate with this cq. 13168 * 13169 * This function provides failback (fb) functionality when the 13170 * mq_create_ext fails on older FW generations. It's purpose is identical 13171 * to mq_create_ext otherwise. 13172 * 13173 * This routine cannot fail as all attributes were previously accessed and 13174 * initialized in mq_create_ext. 13175 **/ 13176 static void 13177 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, 13178 LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq) 13179 { 13180 struct lpfc_mbx_mq_create *mq_create; 13181 struct lpfc_dmabuf *dmabuf; 13182 int length; 13183 13184 length = (sizeof(struct lpfc_mbx_mq_create) - 13185 sizeof(struct lpfc_sli4_cfg_mhdr)); 13186 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13187 LPFC_MBOX_OPCODE_MQ_CREATE, 13188 length, LPFC_SLI4_MBX_EMBED); 13189 mq_create = &mbox->u.mqe.un.mq_create; 13190 bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, 13191 mq->page_count); 13192 bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, 13193 cq->queue_id); 13194 bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); 13195 switch (mq->entry_count) { 13196 case 16: 13197 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13198 LPFC_MQ_RING_SIZE_16); 13199 break; 13200 case 32: 13201 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13202 LPFC_MQ_RING_SIZE_32); 13203 break; 13204 case 64: 13205 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13206 LPFC_MQ_RING_SIZE_64); 13207 break; 13208 case 128: 13209 bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context, 13210 LPFC_MQ_RING_SIZE_128); 13211 break; 13212 } 13213 list_for_each_entry(dmabuf, &mq->page_list, list) { 13214 mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13215 putPaddrLow(dmabuf->phys); 13216 mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13217 putPaddrHigh(dmabuf->phys); 13218 } 13219 } 13220 13221 /** 13222 * lpfc_mq_create - Create a mailbox Queue on the HBA 13223 * @phba: HBA structure that indicates port to create a queue on. 13224 * @mq: The queue structure to use to create the mailbox queue. 13225 * @cq: The completion queue to associate with this cq. 13226 * @subtype: The queue's subtype. 13227 * 13228 * This function creates a mailbox queue, as detailed in @mq, on a port, 13229 * described by @phba by sending a MQ_CREATE mailbox command to the HBA. 13230 * 13231 * The @phba struct is used to send mailbox command to HBA. The @cq struct 13232 * is used to get the entry count and entry size that are necessary to 13233 * determine the number of pages to allocate and use for this queue. This 13234 * function will send the MQ_CREATE mailbox command to the HBA to setup the 13235 * mailbox queue. This function is asynchronous and will wait for the mailbox 13236 * command to finish before continuing. 13237 * 13238 * On success this function will return a zero. If unable to allocate enough 13239 * memory this function will return -ENOMEM. If the queue create mailbox command 13240 * fails this function will return -ENXIO. 13241 **/ 13242 int32_t 13243 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, 13244 struct lpfc_queue *cq, uint32_t subtype) 13245 { 13246 struct lpfc_mbx_mq_create *mq_create; 13247 struct lpfc_mbx_mq_create_ext *mq_create_ext; 13248 struct lpfc_dmabuf *dmabuf; 13249 LPFC_MBOXQ_t *mbox; 13250 int rc, length, status = 0; 13251 uint32_t shdr_status, shdr_add_status; 13252 union lpfc_sli4_cfg_shdr *shdr; 13253 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13254 13255 /* sanity check on queue memory */ 13256 if (!mq || !cq) 13257 return -ENODEV; 13258 if (!phba->sli4_hba.pc_sli4_params.supported) 13259 hw_page_size = SLI4_PAGE_SIZE; 13260 13261 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13262 if (!mbox) 13263 return -ENOMEM; 13264 length = (sizeof(struct lpfc_mbx_mq_create_ext) - 13265 sizeof(struct lpfc_sli4_cfg_mhdr)); 13266 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13267 LPFC_MBOX_OPCODE_MQ_CREATE_EXT, 13268 length, LPFC_SLI4_MBX_EMBED); 13269 13270 mq_create_ext = &mbox->u.mqe.un.mq_create_ext; 13271 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr; 13272 bf_set(lpfc_mbx_mq_create_ext_num_pages, 13273 &mq_create_ext->u.request, mq->page_count); 13274 bf_set(lpfc_mbx_mq_create_ext_async_evt_link, 13275 &mq_create_ext->u.request, 1); 13276 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip, 13277 &mq_create_ext->u.request, 1); 13278 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5, 13279 &mq_create_ext->u.request, 1); 13280 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc, 13281 &mq_create_ext->u.request, 1); 13282 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli, 13283 &mq_create_ext->u.request, 1); 13284 bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1); 13285 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13286 phba->sli4_hba.pc_sli4_params.mqv); 13287 if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1) 13288 bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request, 13289 cq->queue_id); 13290 else 13291 bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context, 13292 cq->queue_id); 13293 switch (mq->entry_count) { 13294 default: 13295 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13296 "0362 Unsupported MQ count. (%d)\n", 13297 mq->entry_count); 13298 if (mq->entry_count < 16) { 13299 status = -EINVAL; 13300 goto out; 13301 } 13302 /* otherwise default to smallest count (drop through) */ 13303 case 16: 13304 bf_set(lpfc_mq_context_ring_size, 13305 &mq_create_ext->u.request.context, 13306 LPFC_MQ_RING_SIZE_16); 13307 break; 13308 case 32: 13309 bf_set(lpfc_mq_context_ring_size, 13310 &mq_create_ext->u.request.context, 13311 LPFC_MQ_RING_SIZE_32); 13312 break; 13313 case 64: 13314 bf_set(lpfc_mq_context_ring_size, 13315 &mq_create_ext->u.request.context, 13316 LPFC_MQ_RING_SIZE_64); 13317 break; 13318 case 128: 13319 bf_set(lpfc_mq_context_ring_size, 13320 &mq_create_ext->u.request.context, 13321 LPFC_MQ_RING_SIZE_128); 13322 break; 13323 } 13324 list_for_each_entry(dmabuf, &mq->page_list, list) { 13325 memset(dmabuf->virt, 0, hw_page_size); 13326 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo = 13327 putPaddrLow(dmabuf->phys); 13328 mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi = 13329 putPaddrHigh(dmabuf->phys); 13330 } 13331 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13332 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 13333 &mq_create_ext->u.response); 13334 if (rc != MBX_SUCCESS) { 13335 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13336 "2795 MQ_CREATE_EXT failed with " 13337 "status x%x. Failback to MQ_CREATE.\n", 13338 rc); 13339 lpfc_mq_create_fb_init(phba, mq, mbox, cq); 13340 mq_create = &mbox->u.mqe.un.mq_create; 13341 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13342 shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; 13343 mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, 13344 &mq_create->u.response); 13345 } 13346 13347 /* The IOCTL status is embedded in the mailbox subheader. */ 13348 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13349 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13350 if (shdr_status || shdr_add_status || rc) { 13351 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13352 "2502 MQ_CREATE mailbox failed with " 13353 "status x%x add_status x%x, mbx status x%x\n", 13354 shdr_status, shdr_add_status, rc); 13355 status = -ENXIO; 13356 goto out; 13357 } 13358 if (mq->queue_id == 0xFFFF) { 13359 status = -ENXIO; 13360 goto out; 13361 } 13362 mq->type = LPFC_MQ; 13363 mq->assoc_qid = cq->queue_id; 13364 mq->subtype = subtype; 13365 mq->host_index = 0; 13366 mq->hba_index = 0; 13367 13368 /* link the mq onto the parent cq child list */ 13369 list_add_tail(&mq->list, &cq->child_list); 13370 out: 13371 mempool_free(mbox, phba->mbox_mem_pool); 13372 return status; 13373 } 13374 13375 /** 13376 * lpfc_wq_create - Create a Work Queue on the HBA 13377 * @phba: HBA structure that indicates port to create a queue on. 13378 * @wq: The queue structure to use to create the work queue. 13379 * @cq: The completion queue to bind this work queue to. 13380 * @subtype: The subtype of the work queue indicating its functionality. 13381 * 13382 * This function creates a work queue, as detailed in @wq, on a port, described 13383 * by @phba by sending a WQ_CREATE mailbox command to the HBA. 13384 * 13385 * The @phba struct is used to send mailbox command to HBA. The @wq struct 13386 * is used to get the entry count and entry size that are necessary to 13387 * determine the number of pages to allocate and use for this queue. The @cq 13388 * is used to indicate which completion queue to bind this work queue to. This 13389 * function will send the WQ_CREATE mailbox command to the HBA to setup the 13390 * work queue. This function is asynchronous and will wait for the mailbox 13391 * command to finish before continuing. 13392 * 13393 * On success this function will return a zero. If unable to allocate enough 13394 * memory this function will return -ENOMEM. If the queue create mailbox command 13395 * fails this function will return -ENXIO. 13396 **/ 13397 uint32_t 13398 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 13399 struct lpfc_queue *cq, uint32_t subtype) 13400 { 13401 struct lpfc_mbx_wq_create *wq_create; 13402 struct lpfc_dmabuf *dmabuf; 13403 LPFC_MBOXQ_t *mbox; 13404 int rc, length, status = 0; 13405 uint32_t shdr_status, shdr_add_status; 13406 union lpfc_sli4_cfg_shdr *shdr; 13407 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13408 struct dma_address *page; 13409 void __iomem *bar_memmap_p; 13410 uint32_t db_offset; 13411 uint16_t pci_barset; 13412 13413 /* sanity check on queue memory */ 13414 if (!wq || !cq) 13415 return -ENODEV; 13416 if (!phba->sli4_hba.pc_sli4_params.supported) 13417 hw_page_size = SLI4_PAGE_SIZE; 13418 13419 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13420 if (!mbox) 13421 return -ENOMEM; 13422 length = (sizeof(struct lpfc_mbx_wq_create) - 13423 sizeof(struct lpfc_sli4_cfg_mhdr)); 13424 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13425 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, 13426 length, LPFC_SLI4_MBX_EMBED); 13427 wq_create = &mbox->u.mqe.un.wq_create; 13428 shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; 13429 bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, 13430 wq->page_count); 13431 bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, 13432 cq->queue_id); 13433 13434 /* wqv is the earliest version supported, NOT the latest */ 13435 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13436 phba->sli4_hba.pc_sli4_params.wqv); 13437 13438 switch (phba->sli4_hba.pc_sli4_params.wqv) { 13439 case LPFC_Q_CREATE_VERSION_0: 13440 switch (wq->entry_size) { 13441 default: 13442 case 64: 13443 /* Nothing to do, version 0 ONLY supports 64 byte */ 13444 page = wq_create->u.request.page; 13445 break; 13446 case 128: 13447 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 13448 LPFC_WQ_SZ128_SUPPORT)) { 13449 status = -ERANGE; 13450 goto out; 13451 } 13452 /* If we get here the HBA MUST also support V1 and 13453 * we MUST use it 13454 */ 13455 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13456 LPFC_Q_CREATE_VERSION_1); 13457 13458 bf_set(lpfc_mbx_wq_create_wqe_count, 13459 &wq_create->u.request_1, wq->entry_count); 13460 bf_set(lpfc_mbx_wq_create_wqe_size, 13461 &wq_create->u.request_1, 13462 LPFC_WQ_WQE_SIZE_128); 13463 bf_set(lpfc_mbx_wq_create_page_size, 13464 &wq_create->u.request_1, 13465 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13466 page = wq_create->u.request_1.page; 13467 break; 13468 } 13469 break; 13470 case LPFC_Q_CREATE_VERSION_1: 13471 bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, 13472 wq->entry_count); 13473 switch (wq->entry_size) { 13474 default: 13475 case 64: 13476 bf_set(lpfc_mbx_wq_create_wqe_size, 13477 &wq_create->u.request_1, 13478 LPFC_WQ_WQE_SIZE_64); 13479 break; 13480 case 128: 13481 if (!(phba->sli4_hba.pc_sli4_params.wqsize & 13482 LPFC_WQ_SZ128_SUPPORT)) { 13483 status = -ERANGE; 13484 goto out; 13485 } 13486 bf_set(lpfc_mbx_wq_create_wqe_size, 13487 &wq_create->u.request_1, 13488 LPFC_WQ_WQE_SIZE_128); 13489 break; 13490 } 13491 bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1, 13492 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13493 page = wq_create->u.request_1.page; 13494 break; 13495 default: 13496 status = -ERANGE; 13497 goto out; 13498 } 13499 13500 list_for_each_entry(dmabuf, &wq->page_list, list) { 13501 memset(dmabuf->virt, 0, hw_page_size); 13502 page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys); 13503 page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys); 13504 } 13505 13506 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13507 bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1); 13508 13509 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13510 /* The IOCTL status is embedded in the mailbox subheader. */ 13511 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13512 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13513 if (shdr_status || shdr_add_status || rc) { 13514 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13515 "2503 WQ_CREATE mailbox failed with " 13516 "status x%x add_status x%x, mbx status x%x\n", 13517 shdr_status, shdr_add_status, rc); 13518 status = -ENXIO; 13519 goto out; 13520 } 13521 wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); 13522 if (wq->queue_id == 0xFFFF) { 13523 status = -ENXIO; 13524 goto out; 13525 } 13526 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 13527 wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, 13528 &wq_create->u.response); 13529 if ((wq->db_format != LPFC_DB_LIST_FORMAT) && 13530 (wq->db_format != LPFC_DB_RING_FORMAT)) { 13531 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13532 "3265 WQ[%d] doorbell format not " 13533 "supported: x%x\n", wq->queue_id, 13534 wq->db_format); 13535 status = -EINVAL; 13536 goto out; 13537 } 13538 pci_barset = bf_get(lpfc_mbx_wq_create_bar_set, 13539 &wq_create->u.response); 13540 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 13541 if (!bar_memmap_p) { 13542 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13543 "3263 WQ[%d] failed to memmap pci " 13544 "barset:x%x\n", wq->queue_id, 13545 pci_barset); 13546 status = -ENOMEM; 13547 goto out; 13548 } 13549 db_offset = wq_create->u.response.doorbell_offset; 13550 if ((db_offset != LPFC_ULP0_WQ_DOORBELL) && 13551 (db_offset != LPFC_ULP1_WQ_DOORBELL)) { 13552 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13553 "3252 WQ[%d] doorbell offset not " 13554 "supported: x%x\n", wq->queue_id, 13555 db_offset); 13556 status = -EINVAL; 13557 goto out; 13558 } 13559 wq->db_regaddr = bar_memmap_p + db_offset; 13560 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13561 "3264 WQ[%d]: barset:x%x, offset:x%x, " 13562 "format:x%x\n", wq->queue_id, pci_barset, 13563 db_offset, wq->db_format); 13564 } else { 13565 wq->db_format = LPFC_DB_LIST_FORMAT; 13566 wq->db_regaddr = phba->sli4_hba.WQDBregaddr; 13567 } 13568 wq->type = LPFC_WQ; 13569 wq->assoc_qid = cq->queue_id; 13570 wq->subtype = subtype; 13571 wq->host_index = 0; 13572 wq->hba_index = 0; 13573 wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL; 13574 13575 /* link the wq onto the parent cq child list */ 13576 list_add_tail(&wq->list, &cq->child_list); 13577 out: 13578 mempool_free(mbox, phba->mbox_mem_pool); 13579 return status; 13580 } 13581 13582 /** 13583 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ 13584 * @phba: HBA structure that indicates port to create a queue on. 13585 * @rq: The queue structure to use for the receive queue. 13586 * @qno: The associated HBQ number 13587 * 13588 * 13589 * For SLI4 we need to adjust the RQ repost value based on 13590 * the number of buffers that are initially posted to the RQ. 13591 */ 13592 void 13593 lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) 13594 { 13595 uint32_t cnt; 13596 13597 /* sanity check on queue memory */ 13598 if (!rq) 13599 return; 13600 cnt = lpfc_hbq_defs[qno]->entry_count; 13601 13602 /* Recalc repost for RQs based on buffers initially posted */ 13603 cnt = (cnt >> 3); 13604 if (cnt < LPFC_QUEUE_MIN_REPOST) 13605 cnt = LPFC_QUEUE_MIN_REPOST; 13606 13607 rq->entry_repost = cnt; 13608 } 13609 13610 /** 13611 * lpfc_rq_create - Create a Receive Queue on the HBA 13612 * @phba: HBA structure that indicates port to create a queue on. 13613 * @hrq: The queue structure to use to create the header receive queue. 13614 * @drq: The queue structure to use to create the data receive queue. 13615 * @cq: The completion queue to bind this work queue to. 13616 * 13617 * This function creates a receive buffer queue pair , as detailed in @hrq and 13618 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command 13619 * to the HBA. 13620 * 13621 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq 13622 * struct is used to get the entry count that is necessary to determine the 13623 * number of pages to use for this queue. The @cq is used to indicate which 13624 * completion queue to bind received buffers that are posted to these queues to. 13625 * This function will send the RQ_CREATE mailbox command to the HBA to setup the 13626 * receive queue pair. This function is asynchronous and will wait for the 13627 * mailbox command to finish before continuing. 13628 * 13629 * On success this function will return a zero. If unable to allocate enough 13630 * memory this function will return -ENOMEM. If the queue create mailbox command 13631 * fails this function will return -ENXIO. 13632 **/ 13633 uint32_t 13634 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13635 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 13636 { 13637 struct lpfc_mbx_rq_create *rq_create; 13638 struct lpfc_dmabuf *dmabuf; 13639 LPFC_MBOXQ_t *mbox; 13640 int rc, length, status = 0; 13641 uint32_t shdr_status, shdr_add_status; 13642 union lpfc_sli4_cfg_shdr *shdr; 13643 uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz; 13644 void __iomem *bar_memmap_p; 13645 uint32_t db_offset; 13646 uint16_t pci_barset; 13647 13648 /* sanity check on queue memory */ 13649 if (!hrq || !drq || !cq) 13650 return -ENODEV; 13651 if (!phba->sli4_hba.pc_sli4_params.supported) 13652 hw_page_size = SLI4_PAGE_SIZE; 13653 13654 if (hrq->entry_count != drq->entry_count) 13655 return -EINVAL; 13656 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 13657 if (!mbox) 13658 return -ENOMEM; 13659 length = (sizeof(struct lpfc_mbx_rq_create) - 13660 sizeof(struct lpfc_sli4_cfg_mhdr)); 13661 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13662 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 13663 length, LPFC_SLI4_MBX_EMBED); 13664 rq_create = &mbox->u.mqe.un.rq_create; 13665 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13666 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13667 phba->sli4_hba.pc_sli4_params.rqv); 13668 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 13669 bf_set(lpfc_rq_context_rqe_count_1, 13670 &rq_create->u.request.context, 13671 hrq->entry_count); 13672 rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE; 13673 bf_set(lpfc_rq_context_rqe_size, 13674 &rq_create->u.request.context, 13675 LPFC_RQE_SIZE_8); 13676 bf_set(lpfc_rq_context_page_size, 13677 &rq_create->u.request.context, 13678 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13679 } else { 13680 switch (hrq->entry_count) { 13681 default: 13682 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13683 "2535 Unsupported RQ count. (%d)\n", 13684 hrq->entry_count); 13685 if (hrq->entry_count < 512) { 13686 status = -EINVAL; 13687 goto out; 13688 } 13689 /* otherwise default to smallest count (drop through) */ 13690 case 512: 13691 bf_set(lpfc_rq_context_rqe_count, 13692 &rq_create->u.request.context, 13693 LPFC_RQ_RING_SIZE_512); 13694 break; 13695 case 1024: 13696 bf_set(lpfc_rq_context_rqe_count, 13697 &rq_create->u.request.context, 13698 LPFC_RQ_RING_SIZE_1024); 13699 break; 13700 case 2048: 13701 bf_set(lpfc_rq_context_rqe_count, 13702 &rq_create->u.request.context, 13703 LPFC_RQ_RING_SIZE_2048); 13704 break; 13705 case 4096: 13706 bf_set(lpfc_rq_context_rqe_count, 13707 &rq_create->u.request.context, 13708 LPFC_RQ_RING_SIZE_4096); 13709 break; 13710 } 13711 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 13712 LPFC_HDR_BUF_SIZE); 13713 } 13714 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 13715 cq->queue_id); 13716 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 13717 hrq->page_count); 13718 list_for_each_entry(dmabuf, &hrq->page_list, list) { 13719 memset(dmabuf->virt, 0, hw_page_size); 13720 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13721 putPaddrLow(dmabuf->phys); 13722 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13723 putPaddrHigh(dmabuf->phys); 13724 } 13725 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13726 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 13727 13728 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13729 /* The IOCTL status is embedded in the mailbox subheader. */ 13730 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13731 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13732 if (shdr_status || shdr_add_status || rc) { 13733 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13734 "2504 RQ_CREATE mailbox failed with " 13735 "status x%x add_status x%x, mbx status x%x\n", 13736 shdr_status, shdr_add_status, rc); 13737 status = -ENXIO; 13738 goto out; 13739 } 13740 hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 13741 if (hrq->queue_id == 0xFFFF) { 13742 status = -ENXIO; 13743 goto out; 13744 } 13745 13746 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) { 13747 hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format, 13748 &rq_create->u.response); 13749 if ((hrq->db_format != LPFC_DB_LIST_FORMAT) && 13750 (hrq->db_format != LPFC_DB_RING_FORMAT)) { 13751 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13752 "3262 RQ [%d] doorbell format not " 13753 "supported: x%x\n", hrq->queue_id, 13754 hrq->db_format); 13755 status = -EINVAL; 13756 goto out; 13757 } 13758 13759 pci_barset = bf_get(lpfc_mbx_rq_create_bar_set, 13760 &rq_create->u.response); 13761 bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset); 13762 if (!bar_memmap_p) { 13763 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13764 "3269 RQ[%d] failed to memmap pci " 13765 "barset:x%x\n", hrq->queue_id, 13766 pci_barset); 13767 status = -ENOMEM; 13768 goto out; 13769 } 13770 13771 db_offset = rq_create->u.response.doorbell_offset; 13772 if ((db_offset != LPFC_ULP0_RQ_DOORBELL) && 13773 (db_offset != LPFC_ULP1_RQ_DOORBELL)) { 13774 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13775 "3270 RQ[%d] doorbell offset not " 13776 "supported: x%x\n", hrq->queue_id, 13777 db_offset); 13778 status = -EINVAL; 13779 goto out; 13780 } 13781 hrq->db_regaddr = bar_memmap_p + db_offset; 13782 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 13783 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " 13784 "format:x%x\n", hrq->queue_id, pci_barset, 13785 db_offset, hrq->db_format); 13786 } else { 13787 hrq->db_format = LPFC_DB_RING_FORMAT; 13788 hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; 13789 } 13790 hrq->type = LPFC_HRQ; 13791 hrq->assoc_qid = cq->queue_id; 13792 hrq->subtype = subtype; 13793 hrq->host_index = 0; 13794 hrq->hba_index = 0; 13795 13796 /* now create the data queue */ 13797 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 13798 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, 13799 length, LPFC_SLI4_MBX_EMBED); 13800 bf_set(lpfc_mbox_hdr_version, &shdr->request, 13801 phba->sli4_hba.pc_sli4_params.rqv); 13802 if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) { 13803 bf_set(lpfc_rq_context_rqe_count_1, 13804 &rq_create->u.request.context, hrq->entry_count); 13805 rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE; 13806 bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context, 13807 LPFC_RQE_SIZE_8); 13808 bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context, 13809 (PAGE_SIZE/SLI4_PAGE_SIZE)); 13810 } else { 13811 switch (drq->entry_count) { 13812 default: 13813 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 13814 "2536 Unsupported RQ count. (%d)\n", 13815 drq->entry_count); 13816 if (drq->entry_count < 512) { 13817 status = -EINVAL; 13818 goto out; 13819 } 13820 /* otherwise default to smallest count (drop through) */ 13821 case 512: 13822 bf_set(lpfc_rq_context_rqe_count, 13823 &rq_create->u.request.context, 13824 LPFC_RQ_RING_SIZE_512); 13825 break; 13826 case 1024: 13827 bf_set(lpfc_rq_context_rqe_count, 13828 &rq_create->u.request.context, 13829 LPFC_RQ_RING_SIZE_1024); 13830 break; 13831 case 2048: 13832 bf_set(lpfc_rq_context_rqe_count, 13833 &rq_create->u.request.context, 13834 LPFC_RQ_RING_SIZE_2048); 13835 break; 13836 case 4096: 13837 bf_set(lpfc_rq_context_rqe_count, 13838 &rq_create->u.request.context, 13839 LPFC_RQ_RING_SIZE_4096); 13840 break; 13841 } 13842 bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, 13843 LPFC_DATA_BUF_SIZE); 13844 } 13845 bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, 13846 cq->queue_id); 13847 bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, 13848 drq->page_count); 13849 list_for_each_entry(dmabuf, &drq->page_list, list) { 13850 rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = 13851 putPaddrLow(dmabuf->phys); 13852 rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = 13853 putPaddrHigh(dmabuf->phys); 13854 } 13855 if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) 13856 bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1); 13857 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13858 /* The IOCTL status is embedded in the mailbox subheader. */ 13859 shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; 13860 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13861 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13862 if (shdr_status || shdr_add_status || rc) { 13863 status = -ENXIO; 13864 goto out; 13865 } 13866 drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); 13867 if (drq->queue_id == 0xFFFF) { 13868 status = -ENXIO; 13869 goto out; 13870 } 13871 drq->type = LPFC_DRQ; 13872 drq->assoc_qid = cq->queue_id; 13873 drq->subtype = subtype; 13874 drq->host_index = 0; 13875 drq->hba_index = 0; 13876 13877 /* link the header and data RQs onto the parent cq child list */ 13878 list_add_tail(&hrq->list, &cq->child_list); 13879 list_add_tail(&drq->list, &cq->child_list); 13880 13881 out: 13882 mempool_free(mbox, phba->mbox_mem_pool); 13883 return status; 13884 } 13885 13886 /** 13887 * lpfc_eq_destroy - Destroy an event Queue on the HBA 13888 * @eq: The queue structure associated with the queue to destroy. 13889 * 13890 * This function destroys a queue, as detailed in @eq by sending an mailbox 13891 * command, specific to the type of queue, to the HBA. 13892 * 13893 * The @eq struct is used to get the queue ID of the queue to destroy. 13894 * 13895 * On success this function will return a zero. If the queue destroy mailbox 13896 * command fails this function will return -ENXIO. 13897 **/ 13898 uint32_t 13899 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 13900 { 13901 LPFC_MBOXQ_t *mbox; 13902 int rc, length, status = 0; 13903 uint32_t shdr_status, shdr_add_status; 13904 union lpfc_sli4_cfg_shdr *shdr; 13905 13906 /* sanity check on queue memory */ 13907 if (!eq) 13908 return -ENODEV; 13909 mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); 13910 if (!mbox) 13911 return -ENOMEM; 13912 length = (sizeof(struct lpfc_mbx_eq_destroy) - 13913 sizeof(struct lpfc_sli4_cfg_mhdr)); 13914 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13915 LPFC_MBOX_OPCODE_EQ_DESTROY, 13916 length, LPFC_SLI4_MBX_EMBED); 13917 bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, 13918 eq->queue_id); 13919 mbox->vport = eq->phba->pport; 13920 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13921 13922 rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); 13923 /* The IOCTL status is embedded in the mailbox subheader. */ 13924 shdr = (union lpfc_sli4_cfg_shdr *) 13925 &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; 13926 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13927 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13928 if (shdr_status || shdr_add_status || rc) { 13929 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13930 "2505 EQ_DESTROY mailbox failed with " 13931 "status x%x add_status x%x, mbx status x%x\n", 13932 shdr_status, shdr_add_status, rc); 13933 status = -ENXIO; 13934 } 13935 13936 /* Remove eq from any list */ 13937 list_del_init(&eq->list); 13938 mempool_free(mbox, eq->phba->mbox_mem_pool); 13939 return status; 13940 } 13941 13942 /** 13943 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA 13944 * @cq: The queue structure associated with the queue to destroy. 13945 * 13946 * This function destroys a queue, as detailed in @cq by sending an mailbox 13947 * command, specific to the type of queue, to the HBA. 13948 * 13949 * The @cq struct is used to get the queue ID of the queue to destroy. 13950 * 13951 * On success this function will return a zero. If the queue destroy mailbox 13952 * command fails this function will return -ENXIO. 13953 **/ 13954 uint32_t 13955 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 13956 { 13957 LPFC_MBOXQ_t *mbox; 13958 int rc, length, status = 0; 13959 uint32_t shdr_status, shdr_add_status; 13960 union lpfc_sli4_cfg_shdr *shdr; 13961 13962 /* sanity check on queue memory */ 13963 if (!cq) 13964 return -ENODEV; 13965 mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); 13966 if (!mbox) 13967 return -ENOMEM; 13968 length = (sizeof(struct lpfc_mbx_cq_destroy) - 13969 sizeof(struct lpfc_sli4_cfg_mhdr)); 13970 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 13971 LPFC_MBOX_OPCODE_CQ_DESTROY, 13972 length, LPFC_SLI4_MBX_EMBED); 13973 bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, 13974 cq->queue_id); 13975 mbox->vport = cq->phba->pport; 13976 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 13977 rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); 13978 /* The IOCTL status is embedded in the mailbox subheader. */ 13979 shdr = (union lpfc_sli4_cfg_shdr *) 13980 &mbox->u.mqe.un.wq_create.header.cfg_shdr; 13981 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 13982 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 13983 if (shdr_status || shdr_add_status || rc) { 13984 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 13985 "2506 CQ_DESTROY mailbox failed with " 13986 "status x%x add_status x%x, mbx status x%x\n", 13987 shdr_status, shdr_add_status, rc); 13988 status = -ENXIO; 13989 } 13990 /* Remove cq from any list */ 13991 list_del_init(&cq->list); 13992 mempool_free(mbox, cq->phba->mbox_mem_pool); 13993 return status; 13994 } 13995 13996 /** 13997 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA 13998 * @qm: The queue structure associated with the queue to destroy. 13999 * 14000 * This function destroys a queue, as detailed in @mq by sending an mailbox 14001 * command, specific to the type of queue, to the HBA. 14002 * 14003 * The @mq struct is used to get the queue ID of the queue to destroy. 14004 * 14005 * On success this function will return a zero. If the queue destroy mailbox 14006 * command fails this function will return -ENXIO. 14007 **/ 14008 uint32_t 14009 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 14010 { 14011 LPFC_MBOXQ_t *mbox; 14012 int rc, length, status = 0; 14013 uint32_t shdr_status, shdr_add_status; 14014 union lpfc_sli4_cfg_shdr *shdr; 14015 14016 /* sanity check on queue memory */ 14017 if (!mq) 14018 return -ENODEV; 14019 mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); 14020 if (!mbox) 14021 return -ENOMEM; 14022 length = (sizeof(struct lpfc_mbx_mq_destroy) - 14023 sizeof(struct lpfc_sli4_cfg_mhdr)); 14024 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 14025 LPFC_MBOX_OPCODE_MQ_DESTROY, 14026 length, LPFC_SLI4_MBX_EMBED); 14027 bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, 14028 mq->queue_id); 14029 mbox->vport = mq->phba->pport; 14030 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14031 rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); 14032 /* The IOCTL status is embedded in the mailbox subheader. */ 14033 shdr = (union lpfc_sli4_cfg_shdr *) 14034 &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; 14035 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14036 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14037 if (shdr_status || shdr_add_status || rc) { 14038 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14039 "2507 MQ_DESTROY mailbox failed with " 14040 "status x%x add_status x%x, mbx status x%x\n", 14041 shdr_status, shdr_add_status, rc); 14042 status = -ENXIO; 14043 } 14044 /* Remove mq from any list */ 14045 list_del_init(&mq->list); 14046 mempool_free(mbox, mq->phba->mbox_mem_pool); 14047 return status; 14048 } 14049 14050 /** 14051 * lpfc_wq_destroy - Destroy a Work Queue on the HBA 14052 * @wq: The queue structure associated with the queue to destroy. 14053 * 14054 * This function destroys a queue, as detailed in @wq by sending an mailbox 14055 * command, specific to the type of queue, to the HBA. 14056 * 14057 * The @wq struct is used to get the queue ID of the queue to destroy. 14058 * 14059 * On success this function will return a zero. If the queue destroy mailbox 14060 * command fails this function will return -ENXIO. 14061 **/ 14062 uint32_t 14063 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 14064 { 14065 LPFC_MBOXQ_t *mbox; 14066 int rc, length, status = 0; 14067 uint32_t shdr_status, shdr_add_status; 14068 union lpfc_sli4_cfg_shdr *shdr; 14069 14070 /* sanity check on queue memory */ 14071 if (!wq) 14072 return -ENODEV; 14073 mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); 14074 if (!mbox) 14075 return -ENOMEM; 14076 length = (sizeof(struct lpfc_mbx_wq_destroy) - 14077 sizeof(struct lpfc_sli4_cfg_mhdr)); 14078 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14079 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, 14080 length, LPFC_SLI4_MBX_EMBED); 14081 bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, 14082 wq->queue_id); 14083 mbox->vport = wq->phba->pport; 14084 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14085 rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); 14086 shdr = (union lpfc_sli4_cfg_shdr *) 14087 &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; 14088 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14089 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14090 if (shdr_status || shdr_add_status || rc) { 14091 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14092 "2508 WQ_DESTROY mailbox failed with " 14093 "status x%x add_status x%x, mbx status x%x\n", 14094 shdr_status, shdr_add_status, rc); 14095 status = -ENXIO; 14096 } 14097 /* Remove wq from any list */ 14098 list_del_init(&wq->list); 14099 mempool_free(mbox, wq->phba->mbox_mem_pool); 14100 return status; 14101 } 14102 14103 /** 14104 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA 14105 * @rq: The queue structure associated with the queue to destroy. 14106 * 14107 * This function destroys a queue, as detailed in @rq by sending an mailbox 14108 * command, specific to the type of queue, to the HBA. 14109 * 14110 * The @rq struct is used to get the queue ID of the queue to destroy. 14111 * 14112 * On success this function will return a zero. If the queue destroy mailbox 14113 * command fails this function will return -ENXIO. 14114 **/ 14115 uint32_t 14116 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 14117 struct lpfc_queue *drq) 14118 { 14119 LPFC_MBOXQ_t *mbox; 14120 int rc, length, status = 0; 14121 uint32_t shdr_status, shdr_add_status; 14122 union lpfc_sli4_cfg_shdr *shdr; 14123 14124 /* sanity check on queue memory */ 14125 if (!hrq || !drq) 14126 return -ENODEV; 14127 mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); 14128 if (!mbox) 14129 return -ENOMEM; 14130 length = (sizeof(struct lpfc_mbx_rq_destroy) - 14131 sizeof(struct lpfc_sli4_cfg_mhdr)); 14132 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14133 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, 14134 length, LPFC_SLI4_MBX_EMBED); 14135 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 14136 hrq->queue_id); 14137 mbox->vport = hrq->phba->pport; 14138 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 14139 rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); 14140 /* The IOCTL status is embedded in the mailbox subheader. */ 14141 shdr = (union lpfc_sli4_cfg_shdr *) 14142 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 14143 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14144 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14145 if (shdr_status || shdr_add_status || rc) { 14146 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14147 "2509 RQ_DESTROY mailbox failed with " 14148 "status x%x add_status x%x, mbx status x%x\n", 14149 shdr_status, shdr_add_status, rc); 14150 if (rc != MBX_TIMEOUT) 14151 mempool_free(mbox, hrq->phba->mbox_mem_pool); 14152 return -ENXIO; 14153 } 14154 bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, 14155 drq->queue_id); 14156 rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); 14157 shdr = (union lpfc_sli4_cfg_shdr *) 14158 &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; 14159 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14160 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14161 if (shdr_status || shdr_add_status || rc) { 14162 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14163 "2510 RQ_DESTROY mailbox failed with " 14164 "status x%x add_status x%x, mbx status x%x\n", 14165 shdr_status, shdr_add_status, rc); 14166 status = -ENXIO; 14167 } 14168 list_del_init(&hrq->list); 14169 list_del_init(&drq->list); 14170 mempool_free(mbox, hrq->phba->mbox_mem_pool); 14171 return status; 14172 } 14173 14174 /** 14175 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA 14176 * @phba: The virtual port for which this call being executed. 14177 * @pdma_phys_addr0: Physical address of the 1st SGL page. 14178 * @pdma_phys_addr1: Physical address of the 2nd SGL page. 14179 * @xritag: the xritag that ties this io to the SGL pages. 14180 * 14181 * This routine will post the sgl pages for the IO that has the xritag 14182 * that is in the iocbq structure. The xritag is assigned during iocbq 14183 * creation and persists for as long as the driver is loaded. 14184 * if the caller has fewer than 256 scatter gather segments to map then 14185 * pdma_phys_addr1 should be 0. 14186 * If the caller needs to map more than 256 scatter gather segment then 14187 * pdma_phys_addr1 should be a valid physical address. 14188 * physical address for SGLs must be 64 byte aligned. 14189 * If you are going to map 2 SGL's then the first one must have 256 entries 14190 * the second sgl can have between 1 and 256 entries. 14191 * 14192 * Return codes: 14193 * 0 - Success 14194 * -ENXIO, -ENOMEM - Failure 14195 **/ 14196 int 14197 lpfc_sli4_post_sgl(struct lpfc_hba *phba, 14198 dma_addr_t pdma_phys_addr0, 14199 dma_addr_t pdma_phys_addr1, 14200 uint16_t xritag) 14201 { 14202 struct lpfc_mbx_post_sgl_pages *post_sgl_pages; 14203 LPFC_MBOXQ_t *mbox; 14204 int rc; 14205 uint32_t shdr_status, shdr_add_status; 14206 uint32_t mbox_tmo; 14207 union lpfc_sli4_cfg_shdr *shdr; 14208 14209 if (xritag == NO_XRI) { 14210 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14211 "0364 Invalid param:\n"); 14212 return -EINVAL; 14213 } 14214 14215 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14216 if (!mbox) 14217 return -ENOMEM; 14218 14219 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14220 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, 14221 sizeof(struct lpfc_mbx_post_sgl_pages) - 14222 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 14223 14224 post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) 14225 &mbox->u.mqe.un.post_sgl_pages; 14226 bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); 14227 bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); 14228 14229 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = 14230 cpu_to_le32(putPaddrLow(pdma_phys_addr0)); 14231 post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = 14232 cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); 14233 14234 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = 14235 cpu_to_le32(putPaddrLow(pdma_phys_addr1)); 14236 post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = 14237 cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); 14238 if (!phba->sli4_hba.intr_enable) 14239 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14240 else { 14241 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14242 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14243 } 14244 /* The IOCTL status is embedded in the mailbox subheader. */ 14245 shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; 14246 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14247 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14248 if (rc != MBX_TIMEOUT) 14249 mempool_free(mbox, phba->mbox_mem_pool); 14250 if (shdr_status || shdr_add_status || rc) { 14251 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14252 "2511 POST_SGL mailbox failed with " 14253 "status x%x add_status x%x, mbx status x%x\n", 14254 shdr_status, shdr_add_status, rc); 14255 rc = -ENXIO; 14256 } 14257 return 0; 14258 } 14259 14260 /** 14261 * lpfc_sli4_alloc_xri - Get an available rpi in the device's range 14262 * @phba: pointer to lpfc hba data structure. 14263 * 14264 * This routine is invoked to post rpi header templates to the 14265 * HBA consistent with the SLI-4 interface spec. This routine 14266 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 14267 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 14268 * 14269 * Returns 14270 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14271 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14272 **/ 14273 uint16_t 14274 lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 14275 { 14276 unsigned long xri; 14277 14278 /* 14279 * Fetch the next logical xri. Because this index is logical, 14280 * the driver starts at 0 each time. 14281 */ 14282 spin_lock_irq(&phba->hbalock); 14283 xri = find_next_zero_bit(phba->sli4_hba.xri_bmask, 14284 phba->sli4_hba.max_cfg_param.max_xri, 0); 14285 if (xri >= phba->sli4_hba.max_cfg_param.max_xri) { 14286 spin_unlock_irq(&phba->hbalock); 14287 return NO_XRI; 14288 } else { 14289 set_bit(xri, phba->sli4_hba.xri_bmask); 14290 phba->sli4_hba.max_cfg_param.xri_used++; 14291 } 14292 spin_unlock_irq(&phba->hbalock); 14293 return xri; 14294 } 14295 14296 /** 14297 * lpfc_sli4_free_xri - Release an xri for reuse. 14298 * @phba: pointer to lpfc hba data structure. 14299 * 14300 * This routine is invoked to release an xri to the pool of 14301 * available rpis maintained by the driver. 14302 **/ 14303 void 14304 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 14305 { 14306 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 14307 phba->sli4_hba.max_cfg_param.xri_used--; 14308 } 14309 } 14310 14311 /** 14312 * lpfc_sli4_free_xri - Release an xri for reuse. 14313 * @phba: pointer to lpfc hba data structure. 14314 * 14315 * This routine is invoked to release an xri to the pool of 14316 * available rpis maintained by the driver. 14317 **/ 14318 void 14319 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 14320 { 14321 spin_lock_irq(&phba->hbalock); 14322 __lpfc_sli4_free_xri(phba, xri); 14323 spin_unlock_irq(&phba->hbalock); 14324 } 14325 14326 /** 14327 * lpfc_sli4_next_xritag - Get an xritag for the io 14328 * @phba: Pointer to HBA context object. 14329 * 14330 * This function gets an xritag for the iocb. If there is no unused xritag 14331 * it will return 0xffff. 14332 * The function returns the allocated xritag if successful, else returns zero. 14333 * Zero is not a valid xritag. 14334 * The caller is not required to hold any lock. 14335 **/ 14336 uint16_t 14337 lpfc_sli4_next_xritag(struct lpfc_hba *phba) 14338 { 14339 uint16_t xri_index; 14340 14341 xri_index = lpfc_sli4_alloc_xri(phba); 14342 if (xri_index == NO_XRI) 14343 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, 14344 "2004 Failed to allocate XRI.last XRITAG is %d" 14345 " Max XRI is %d, Used XRI is %d\n", 14346 xri_index, 14347 phba->sli4_hba.max_cfg_param.max_xri, 14348 phba->sli4_hba.max_cfg_param.xri_used); 14349 return xri_index; 14350 } 14351 14352 /** 14353 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port. 14354 * @phba: pointer to lpfc hba data structure. 14355 * @post_sgl_list: pointer to els sgl entry list. 14356 * @count: number of els sgl entries on the list. 14357 * 14358 * This routine is invoked to post a block of driver's sgl pages to the 14359 * HBA using non-embedded mailbox command. No Lock is held. This routine 14360 * is only called when the driver is loading and after all IO has been 14361 * stopped. 14362 **/ 14363 static int 14364 lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba, 14365 struct list_head *post_sgl_list, 14366 int post_cnt) 14367 { 14368 struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; 14369 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 14370 struct sgl_page_pairs *sgl_pg_pairs; 14371 void *viraddr; 14372 LPFC_MBOXQ_t *mbox; 14373 uint32_t reqlen, alloclen, pg_pairs; 14374 uint32_t mbox_tmo; 14375 uint16_t xritag_start = 0; 14376 int rc = 0; 14377 uint32_t shdr_status, shdr_add_status; 14378 union lpfc_sli4_cfg_shdr *shdr; 14379 14380 reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) + 14381 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 14382 if (reqlen > SLI4_PAGE_SIZE) { 14383 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 14384 "2559 Block sgl registration required DMA " 14385 "size (%d) great than a page\n", reqlen); 14386 return -ENOMEM; 14387 } 14388 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14389 if (!mbox) 14390 return -ENOMEM; 14391 14392 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14393 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14394 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 14395 LPFC_SLI4_MBX_NEMBED); 14396 14397 if (alloclen < reqlen) { 14398 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14399 "0285 Allocated DMA memory size (%d) is " 14400 "less than the requested DMA memory " 14401 "size (%d)\n", alloclen, reqlen); 14402 lpfc_sli4_mbox_cmd_free(phba, mbox); 14403 return -ENOMEM; 14404 } 14405 /* Set up the SGL pages in the non-embedded DMA pages */ 14406 viraddr = mbox->sge_array->addr[0]; 14407 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 14408 sgl_pg_pairs = &sgl->sgl_pg_pairs; 14409 14410 pg_pairs = 0; 14411 list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) { 14412 /* Set up the sge entry */ 14413 sgl_pg_pairs->sgl_pg0_addr_lo = 14414 cpu_to_le32(putPaddrLow(sglq_entry->phys)); 14415 sgl_pg_pairs->sgl_pg0_addr_hi = 14416 cpu_to_le32(putPaddrHigh(sglq_entry->phys)); 14417 sgl_pg_pairs->sgl_pg1_addr_lo = 14418 cpu_to_le32(putPaddrLow(0)); 14419 sgl_pg_pairs->sgl_pg1_addr_hi = 14420 cpu_to_le32(putPaddrHigh(0)); 14421 14422 /* Keep the first xritag on the list */ 14423 if (pg_pairs == 0) 14424 xritag_start = sglq_entry->sli4_xritag; 14425 sgl_pg_pairs++; 14426 pg_pairs++; 14427 } 14428 14429 /* Complete initialization and perform endian conversion. */ 14430 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 14431 bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt); 14432 sgl->word0 = cpu_to_le32(sgl->word0); 14433 if (!phba->sli4_hba.intr_enable) 14434 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14435 else { 14436 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14437 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14438 } 14439 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 14440 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14441 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14442 if (rc != MBX_TIMEOUT) 14443 lpfc_sli4_mbox_cmd_free(phba, mbox); 14444 if (shdr_status || shdr_add_status || rc) { 14445 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14446 "2513 POST_SGL_BLOCK mailbox command failed " 14447 "status x%x add_status x%x mbx status x%x\n", 14448 shdr_status, shdr_add_status, rc); 14449 rc = -ENXIO; 14450 } 14451 return rc; 14452 } 14453 14454 /** 14455 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware 14456 * @phba: pointer to lpfc hba data structure. 14457 * @sblist: pointer to scsi buffer list. 14458 * @count: number of scsi buffers on the list. 14459 * 14460 * This routine is invoked to post a block of @count scsi sgl pages from a 14461 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. 14462 * No Lock is held. 14463 * 14464 **/ 14465 int 14466 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, 14467 struct list_head *sblist, 14468 int count) 14469 { 14470 struct lpfc_scsi_buf *psb; 14471 struct lpfc_mbx_post_uembed_sgl_page1 *sgl; 14472 struct sgl_page_pairs *sgl_pg_pairs; 14473 void *viraddr; 14474 LPFC_MBOXQ_t *mbox; 14475 uint32_t reqlen, alloclen, pg_pairs; 14476 uint32_t mbox_tmo; 14477 uint16_t xritag_start = 0; 14478 int rc = 0; 14479 uint32_t shdr_status, shdr_add_status; 14480 dma_addr_t pdma_phys_bpl1; 14481 union lpfc_sli4_cfg_shdr *shdr; 14482 14483 /* Calculate the requested length of the dma memory */ 14484 reqlen = count * sizeof(struct sgl_page_pairs) + 14485 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); 14486 if (reqlen > SLI4_PAGE_SIZE) { 14487 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 14488 "0217 Block sgl registration required DMA " 14489 "size (%d) great than a page\n", reqlen); 14490 return -ENOMEM; 14491 } 14492 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 14493 if (!mbox) { 14494 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14495 "0283 Failed to allocate mbox cmd memory\n"); 14496 return -ENOMEM; 14497 } 14498 14499 /* Allocate DMA memory and set up the non-embedded mailbox command */ 14500 alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 14501 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, 14502 LPFC_SLI4_MBX_NEMBED); 14503 14504 if (alloclen < reqlen) { 14505 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 14506 "2561 Allocated DMA memory size (%d) is " 14507 "less than the requested DMA memory " 14508 "size (%d)\n", alloclen, reqlen); 14509 lpfc_sli4_mbox_cmd_free(phba, mbox); 14510 return -ENOMEM; 14511 } 14512 14513 /* Get the first SGE entry from the non-embedded DMA memory */ 14514 viraddr = mbox->sge_array->addr[0]; 14515 14516 /* Set up the SGL pages in the non-embedded DMA pages */ 14517 sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; 14518 sgl_pg_pairs = &sgl->sgl_pg_pairs; 14519 14520 pg_pairs = 0; 14521 list_for_each_entry(psb, sblist, list) { 14522 /* Set up the sge entry */ 14523 sgl_pg_pairs->sgl_pg0_addr_lo = 14524 cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); 14525 sgl_pg_pairs->sgl_pg0_addr_hi = 14526 cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); 14527 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) 14528 pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; 14529 else 14530 pdma_phys_bpl1 = 0; 14531 sgl_pg_pairs->sgl_pg1_addr_lo = 14532 cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); 14533 sgl_pg_pairs->sgl_pg1_addr_hi = 14534 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); 14535 /* Keep the first xritag on the list */ 14536 if (pg_pairs == 0) 14537 xritag_start = psb->cur_iocbq.sli4_xritag; 14538 sgl_pg_pairs++; 14539 pg_pairs++; 14540 } 14541 bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); 14542 bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); 14543 /* Perform endian conversion if necessary */ 14544 sgl->word0 = cpu_to_le32(sgl->word0); 14545 14546 if (!phba->sli4_hba.intr_enable) 14547 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 14548 else { 14549 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 14550 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 14551 } 14552 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 14553 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 14554 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 14555 if (rc != MBX_TIMEOUT) 14556 lpfc_sli4_mbox_cmd_free(phba, mbox); 14557 if (shdr_status || shdr_add_status || rc) { 14558 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 14559 "2564 POST_SGL_BLOCK mailbox command failed " 14560 "status x%x add_status x%x mbx status x%x\n", 14561 shdr_status, shdr_add_status, rc); 14562 rc = -ENXIO; 14563 } 14564 return rc; 14565 } 14566 14567 /** 14568 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle 14569 * @phba: pointer to lpfc_hba struct that the frame was received on 14570 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14571 * 14572 * This function checks the fields in the @fc_hdr to see if the FC frame is a 14573 * valid type of frame that the LPFC driver will handle. This function will 14574 * return a zero if the frame is a valid frame or a non zero value when the 14575 * frame does not pass the check. 14576 **/ 14577 static int 14578 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) 14579 { 14580 /* make rctl_names static to save stack space */ 14581 static char *rctl_names[] = FC_RCTL_NAMES_INIT; 14582 char *type_names[] = FC_TYPE_NAMES_INIT; 14583 struct fc_vft_header *fc_vft_hdr; 14584 uint32_t *header = (uint32_t *) fc_hdr; 14585 14586 switch (fc_hdr->fh_r_ctl) { 14587 case FC_RCTL_DD_UNCAT: /* uncategorized information */ 14588 case FC_RCTL_DD_SOL_DATA: /* solicited data */ 14589 case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ 14590 case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ 14591 case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ 14592 case FC_RCTL_DD_DATA_DESC: /* data descriptor */ 14593 case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ 14594 case FC_RCTL_DD_CMD_STATUS: /* command status */ 14595 case FC_RCTL_ELS_REQ: /* extended link services request */ 14596 case FC_RCTL_ELS_REP: /* extended link services reply */ 14597 case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ 14598 case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ 14599 case FC_RCTL_BA_NOP: /* basic link service NOP */ 14600 case FC_RCTL_BA_ABTS: /* basic link service abort */ 14601 case FC_RCTL_BA_RMC: /* remove connection */ 14602 case FC_RCTL_BA_ACC: /* basic accept */ 14603 case FC_RCTL_BA_RJT: /* basic reject */ 14604 case FC_RCTL_BA_PRMT: 14605 case FC_RCTL_ACK_1: /* acknowledge_1 */ 14606 case FC_RCTL_ACK_0: /* acknowledge_0 */ 14607 case FC_RCTL_P_RJT: /* port reject */ 14608 case FC_RCTL_F_RJT: /* fabric reject */ 14609 case FC_RCTL_P_BSY: /* port busy */ 14610 case FC_RCTL_F_BSY: /* fabric busy to data frame */ 14611 case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ 14612 case FC_RCTL_LCR: /* link credit reset */ 14613 case FC_RCTL_END: /* end */ 14614 break; 14615 case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ 14616 fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 14617 fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; 14618 return lpfc_fc_frame_check(phba, fc_hdr); 14619 default: 14620 goto drop; 14621 } 14622 switch (fc_hdr->fh_type) { 14623 case FC_TYPE_BLS: 14624 case FC_TYPE_ELS: 14625 case FC_TYPE_FCP: 14626 case FC_TYPE_CT: 14627 break; 14628 case FC_TYPE_IP: 14629 case FC_TYPE_ILS: 14630 default: 14631 goto drop; 14632 } 14633 14634 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 14635 "2538 Received frame rctl:%s (x%x), type:%s (x%x), " 14636 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", 14637 rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, 14638 type_names[fc_hdr->fh_type], fc_hdr->fh_type, 14639 be32_to_cpu(header[0]), be32_to_cpu(header[1]), 14640 be32_to_cpu(header[2]), be32_to_cpu(header[3]), 14641 be32_to_cpu(header[4]), be32_to_cpu(header[5]), 14642 be32_to_cpu(header[6])); 14643 return 0; 14644 drop: 14645 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, 14646 "2539 Dropped frame rctl:%s type:%s\n", 14647 rctl_names[fc_hdr->fh_r_ctl], 14648 type_names[fc_hdr->fh_type]); 14649 return 1; 14650 } 14651 14652 /** 14653 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame 14654 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14655 * 14656 * This function processes the FC header to retrieve the VFI from the VF 14657 * header, if one exists. This function will return the VFI if one exists 14658 * or 0 if no VSAN Header exists. 14659 **/ 14660 static uint32_t 14661 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) 14662 { 14663 struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; 14664 14665 if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) 14666 return 0; 14667 return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); 14668 } 14669 14670 /** 14671 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to 14672 * @phba: Pointer to the HBA structure to search for the vport on 14673 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) 14674 * @fcfi: The FC Fabric ID that the frame came from 14675 * 14676 * This function searches the @phba for a vport that matches the content of the 14677 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the 14678 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function 14679 * returns the matching vport pointer or NULL if unable to match frame to a 14680 * vport. 14681 **/ 14682 static struct lpfc_vport * 14683 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, 14684 uint16_t fcfi) 14685 { 14686 struct lpfc_vport **vports; 14687 struct lpfc_vport *vport = NULL; 14688 int i; 14689 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 14690 fc_hdr->fh_d_id[1] << 8 | 14691 fc_hdr->fh_d_id[2]); 14692 14693 if (did == Fabric_DID) 14694 return phba->pport; 14695 if ((phba->pport->fc_flag & FC_PT2PT) && 14696 !(phba->link_state == LPFC_HBA_READY)) 14697 return phba->pport; 14698 14699 vports = lpfc_create_vport_work_array(phba); 14700 if (vports != NULL) 14701 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 14702 if (phba->fcf.fcfi == fcfi && 14703 vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && 14704 vports[i]->fc_myDID == did) { 14705 vport = vports[i]; 14706 break; 14707 } 14708 } 14709 lpfc_destroy_vport_work_array(phba, vports); 14710 return vport; 14711 } 14712 14713 /** 14714 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp 14715 * @vport: The vport to work on. 14716 * 14717 * This function updates the receive sequence time stamp for this vport. The 14718 * receive sequence time stamp indicates the time that the last frame of the 14719 * the sequence that has been idle for the longest amount of time was received. 14720 * the driver uses this time stamp to indicate if any received sequences have 14721 * timed out. 14722 **/ 14723 void 14724 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 14725 { 14726 struct lpfc_dmabuf *h_buf; 14727 struct hbq_dmabuf *dmabuf = NULL; 14728 14729 /* get the oldest sequence on the rcv list */ 14730 h_buf = list_get_first(&vport->rcv_buffer_list, 14731 struct lpfc_dmabuf, list); 14732 if (!h_buf) 14733 return; 14734 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14735 vport->rcv_buffer_time_stamp = dmabuf->time_stamp; 14736 } 14737 14738 /** 14739 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences. 14740 * @vport: The vport that the received sequences were sent to. 14741 * 14742 * This function cleans up all outstanding received sequences. This is called 14743 * by the driver when a link event or user action invalidates all the received 14744 * sequences. 14745 **/ 14746 void 14747 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport) 14748 { 14749 struct lpfc_dmabuf *h_buf, *hnext; 14750 struct lpfc_dmabuf *d_buf, *dnext; 14751 struct hbq_dmabuf *dmabuf = NULL; 14752 14753 /* start with the oldest sequence on the rcv list */ 14754 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 14755 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14756 list_del_init(&dmabuf->hbuf.list); 14757 list_for_each_entry_safe(d_buf, dnext, 14758 &dmabuf->dbuf.list, list) { 14759 list_del_init(&d_buf->list); 14760 lpfc_in_buf_free(vport->phba, d_buf); 14761 } 14762 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 14763 } 14764 } 14765 14766 /** 14767 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences. 14768 * @vport: The vport that the received sequences were sent to. 14769 * 14770 * This function determines whether any received sequences have timed out by 14771 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp 14772 * indicates that there is at least one timed out sequence this routine will 14773 * go through the received sequences one at a time from most inactive to most 14774 * active to determine which ones need to be cleaned up. Once it has determined 14775 * that a sequence needs to be cleaned up it will simply free up the resources 14776 * without sending an abort. 14777 **/ 14778 void 14779 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport) 14780 { 14781 struct lpfc_dmabuf *h_buf, *hnext; 14782 struct lpfc_dmabuf *d_buf, *dnext; 14783 struct hbq_dmabuf *dmabuf = NULL; 14784 unsigned long timeout; 14785 int abort_count = 0; 14786 14787 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 14788 vport->rcv_buffer_time_stamp); 14789 if (list_empty(&vport->rcv_buffer_list) || 14790 time_before(jiffies, timeout)) 14791 return; 14792 /* start with the oldest sequence on the rcv list */ 14793 list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) { 14794 dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14795 timeout = (msecs_to_jiffies(vport->phba->fc_edtov) + 14796 dmabuf->time_stamp); 14797 if (time_before(jiffies, timeout)) 14798 break; 14799 abort_count++; 14800 list_del_init(&dmabuf->hbuf.list); 14801 list_for_each_entry_safe(d_buf, dnext, 14802 &dmabuf->dbuf.list, list) { 14803 list_del_init(&d_buf->list); 14804 lpfc_in_buf_free(vport->phba, d_buf); 14805 } 14806 lpfc_in_buf_free(vport->phba, &dmabuf->dbuf); 14807 } 14808 if (abort_count) 14809 lpfc_update_rcv_time_stamp(vport); 14810 } 14811 14812 /** 14813 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences 14814 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame 14815 * 14816 * This function searches through the existing incomplete sequences that have 14817 * been sent to this @vport. If the frame matches one of the incomplete 14818 * sequences then the dbuf in the @dmabuf is added to the list of frames that 14819 * make up that sequence. If no sequence is found that matches this frame then 14820 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list 14821 * This function returns a pointer to the first dmabuf in the sequence list that 14822 * the frame was linked to. 14823 **/ 14824 static struct hbq_dmabuf * 14825 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 14826 { 14827 struct fc_frame_header *new_hdr; 14828 struct fc_frame_header *temp_hdr; 14829 struct lpfc_dmabuf *d_buf; 14830 struct lpfc_dmabuf *h_buf; 14831 struct hbq_dmabuf *seq_dmabuf = NULL; 14832 struct hbq_dmabuf *temp_dmabuf = NULL; 14833 14834 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14835 dmabuf->time_stamp = jiffies; 14836 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14837 /* Use the hdr_buf to find the sequence that this frame belongs to */ 14838 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14839 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14840 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14841 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14842 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14843 continue; 14844 /* found a pending sequence that matches this frame */ 14845 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14846 break; 14847 } 14848 if (!seq_dmabuf) { 14849 /* 14850 * This indicates first frame received for this sequence. 14851 * Queue the buffer on the vport's rcv_buffer_list. 14852 */ 14853 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 14854 lpfc_update_rcv_time_stamp(vport); 14855 return dmabuf; 14856 } 14857 temp_hdr = seq_dmabuf->hbuf.virt; 14858 if (be16_to_cpu(new_hdr->fh_seq_cnt) < 14859 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 14860 list_del_init(&seq_dmabuf->hbuf.list); 14861 list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); 14862 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 14863 lpfc_update_rcv_time_stamp(vport); 14864 return dmabuf; 14865 } 14866 /* move this sequence to the tail to indicate a young sequence */ 14867 list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list); 14868 seq_dmabuf->time_stamp = jiffies; 14869 lpfc_update_rcv_time_stamp(vport); 14870 if (list_empty(&seq_dmabuf->dbuf.list)) { 14871 temp_hdr = dmabuf->hbuf.virt; 14872 list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list); 14873 return seq_dmabuf; 14874 } 14875 /* find the correct place in the sequence to insert this frame */ 14876 list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { 14877 temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 14878 temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; 14879 /* 14880 * If the frame's sequence count is greater than the frame on 14881 * the list then insert the frame right after this frame 14882 */ 14883 if (be16_to_cpu(new_hdr->fh_seq_cnt) > 14884 be16_to_cpu(temp_hdr->fh_seq_cnt)) { 14885 list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); 14886 return seq_dmabuf; 14887 } 14888 } 14889 return NULL; 14890 } 14891 14892 /** 14893 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence 14894 * @vport: pointer to a vitural port 14895 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14896 * 14897 * This function tries to abort from the partially assembed sequence, described 14898 * by the information from basic abbort @dmabuf. It checks to see whether such 14899 * partially assembled sequence held by the driver. If so, it shall free up all 14900 * the frames from the partially assembled sequence. 14901 * 14902 * Return 14903 * true -- if there is matching partially assembled sequence present and all 14904 * the frames freed with the sequence; 14905 * false -- if there is no matching partially assembled sequence present so 14906 * nothing got aborted in the lower layer driver 14907 **/ 14908 static bool 14909 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport, 14910 struct hbq_dmabuf *dmabuf) 14911 { 14912 struct fc_frame_header *new_hdr; 14913 struct fc_frame_header *temp_hdr; 14914 struct lpfc_dmabuf *d_buf, *n_buf, *h_buf; 14915 struct hbq_dmabuf *seq_dmabuf = NULL; 14916 14917 /* Use the hdr_buf to find the sequence that matches this frame */ 14918 INIT_LIST_HEAD(&dmabuf->dbuf.list); 14919 INIT_LIST_HEAD(&dmabuf->hbuf.list); 14920 new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 14921 list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { 14922 temp_hdr = (struct fc_frame_header *)h_buf->virt; 14923 if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || 14924 (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || 14925 (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) 14926 continue; 14927 /* found a pending sequence that matches this frame */ 14928 seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); 14929 break; 14930 } 14931 14932 /* Free up all the frames from the partially assembled sequence */ 14933 if (seq_dmabuf) { 14934 list_for_each_entry_safe(d_buf, n_buf, 14935 &seq_dmabuf->dbuf.list, list) { 14936 list_del_init(&d_buf->list); 14937 lpfc_in_buf_free(vport->phba, d_buf); 14938 } 14939 return true; 14940 } 14941 return false; 14942 } 14943 14944 /** 14945 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp 14946 * @vport: pointer to a vitural port 14947 * @dmabuf: pointer to a dmabuf that describes the FC sequence 14948 * 14949 * This function tries to abort from the assembed sequence from upper level 14950 * protocol, described by the information from basic abbort @dmabuf. It 14951 * checks to see whether such pending context exists at upper level protocol. 14952 * If so, it shall clean up the pending context. 14953 * 14954 * Return 14955 * true -- if there is matching pending context of the sequence cleaned 14956 * at ulp; 14957 * false -- if there is no matching pending context of the sequence present 14958 * at ulp. 14959 **/ 14960 static bool 14961 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) 14962 { 14963 struct lpfc_hba *phba = vport->phba; 14964 int handled; 14965 14966 /* Accepting abort at ulp with SLI4 only */ 14967 if (phba->sli_rev < LPFC_SLI_REV4) 14968 return false; 14969 14970 /* Register all caring upper level protocols to attend abort */ 14971 handled = lpfc_ct_handle_unsol_abort(phba, dmabuf); 14972 if (handled) 14973 return true; 14974 14975 return false; 14976 } 14977 14978 /** 14979 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler 14980 * @phba: Pointer to HBA context object. 14981 * @cmd_iocbq: pointer to the command iocbq structure. 14982 * @rsp_iocbq: pointer to the response iocbq structure. 14983 * 14984 * This function handles the sequence abort response iocb command complete 14985 * event. It properly releases the memory allocated to the sequence abort 14986 * accept iocb. 14987 **/ 14988 static void 14989 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba, 14990 struct lpfc_iocbq *cmd_iocbq, 14991 struct lpfc_iocbq *rsp_iocbq) 14992 { 14993 struct lpfc_nodelist *ndlp; 14994 14995 if (cmd_iocbq) { 14996 ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1; 14997 lpfc_nlp_put(ndlp); 14998 lpfc_nlp_not_used(ndlp); 14999 lpfc_sli_release_iocbq(phba, cmd_iocbq); 15000 } 15001 15002 /* Failure means BLS ABORT RSP did not get delivered to remote node*/ 15003 if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus) 15004 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15005 "3154 BLS ABORT RSP failed, data: x%x/x%x\n", 15006 rsp_iocbq->iocb.ulpStatus, 15007 rsp_iocbq->iocb.un.ulpWord[4]); 15008 } 15009 15010 /** 15011 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver. 15012 * @phba: Pointer to HBA context object. 15013 * @xri: xri id in transaction. 15014 * 15015 * This function validates the xri maps to the known range of XRIs allocated an 15016 * used by the driver. 15017 **/ 15018 uint16_t 15019 lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 15020 uint16_t xri) 15021 { 15022 int i; 15023 15024 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 15025 if (xri == phba->sli4_hba.xri_ids[i]) 15026 return i; 15027 } 15028 return NO_XRI; 15029 } 15030 15031 /** 15032 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort 15033 * @phba: Pointer to HBA context object. 15034 * @fc_hdr: pointer to a FC frame header. 15035 * 15036 * This function sends a basic response to a previous unsol sequence abort 15037 * event after aborting the sequence handling. 15038 **/ 15039 static void 15040 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, 15041 struct fc_frame_header *fc_hdr, bool aborted) 15042 { 15043 struct lpfc_hba *phba = vport->phba; 15044 struct lpfc_iocbq *ctiocb = NULL; 15045 struct lpfc_nodelist *ndlp; 15046 uint16_t oxid, rxid, xri, lxri; 15047 uint32_t sid, fctl; 15048 IOCB_t *icmd; 15049 int rc; 15050 15051 if (!lpfc_is_link_up(phba)) 15052 return; 15053 15054 sid = sli4_sid_from_fc_hdr(fc_hdr); 15055 oxid = be16_to_cpu(fc_hdr->fh_ox_id); 15056 rxid = be16_to_cpu(fc_hdr->fh_rx_id); 15057 15058 ndlp = lpfc_findnode_did(vport, sid); 15059 if (!ndlp) { 15060 ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL); 15061 if (!ndlp) { 15062 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 15063 "1268 Failed to allocate ndlp for " 15064 "oxid:x%x SID:x%x\n", oxid, sid); 15065 return; 15066 } 15067 lpfc_nlp_init(vport, ndlp, sid); 15068 /* Put ndlp onto pport node list */ 15069 lpfc_enqueue_node(vport, ndlp); 15070 } else if (!NLP_CHK_NODE_ACT(ndlp)) { 15071 /* re-setup ndlp without removing from node list */ 15072 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE); 15073 if (!ndlp) { 15074 lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, 15075 "3275 Failed to active ndlp found " 15076 "for oxid:x%x SID:x%x\n", oxid, sid); 15077 return; 15078 } 15079 } 15080 15081 /* Allocate buffer for rsp iocb */ 15082 ctiocb = lpfc_sli_get_iocbq(phba); 15083 if (!ctiocb) 15084 return; 15085 15086 /* Extract the F_CTL field from FC_HDR */ 15087 fctl = sli4_fctl_from_fc_hdr(fc_hdr); 15088 15089 icmd = &ctiocb->iocb; 15090 icmd->un.xseq64.bdl.bdeSize = 0; 15091 icmd->un.xseq64.bdl.ulpIoTag32 = 0; 15092 icmd->un.xseq64.w5.hcsw.Dfctl = 0; 15093 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC; 15094 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS; 15095 15096 /* Fill in the rest of iocb fields */ 15097 icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX; 15098 icmd->ulpBdeCount = 0; 15099 icmd->ulpLe = 1; 15100 icmd->ulpClass = CLASS3; 15101 icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; 15102 ctiocb->context1 = lpfc_nlp_get(ndlp); 15103 15104 ctiocb->iocb_cmpl = NULL; 15105 ctiocb->vport = phba->pport; 15106 ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; 15107 ctiocb->sli4_lxritag = NO_XRI; 15108 ctiocb->sli4_xritag = NO_XRI; 15109 15110 if (fctl & FC_FC_EX_CTX) 15111 /* Exchange responder sent the abort so we 15112 * own the oxid. 15113 */ 15114 xri = oxid; 15115 else 15116 xri = rxid; 15117 lxri = lpfc_sli4_xri_inrange(phba, xri); 15118 if (lxri != NO_XRI) 15119 lpfc_set_rrq_active(phba, ndlp, lxri, 15120 (xri == oxid) ? rxid : oxid, 0); 15121 /* For BA_ABTS from exchange responder, if the logical xri with 15122 * the oxid maps to the FCP XRI range, the port no longer has 15123 * that exchange context, send a BLS_RJT. Override the IOCB for 15124 * a BA_RJT. 15125 */ 15126 if ((fctl & FC_FC_EX_CTX) && 15127 (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) { 15128 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 15129 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 15130 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 15131 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 15132 } 15133 15134 /* If BA_ABTS failed to abort a partially assembled receive sequence, 15135 * the driver no longer has that exchange, send a BLS_RJT. Override 15136 * the IOCB for a BA_RJT. 15137 */ 15138 if (aborted == false) { 15139 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT; 15140 bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0); 15141 bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID); 15142 bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE); 15143 } 15144 15145 if (fctl & FC_FC_EX_CTX) { 15146 /* ABTS sent by responder to CT exchange, construction 15147 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG 15148 * field and RX_ID from ABTS for RX_ID field. 15149 */ 15150 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP); 15151 } else { 15152 /* ABTS sent by initiator to CT exchange, construction 15153 * of BA_ACC will need to allocate a new XRI as for the 15154 * XRI_TAG field. 15155 */ 15156 bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT); 15157 } 15158 bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid); 15159 bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid); 15160 15161 /* Xmit CT abts response on exchange <xid> */ 15162 lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, 15163 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n", 15164 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state); 15165 15166 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); 15167 if (rc == IOCB_ERROR) { 15168 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 15169 "2925 Failed to issue CT ABTS RSP x%x on " 15170 "xri x%x, Data x%x\n", 15171 icmd->un.xseq64.w5.hcsw.Rctl, oxid, 15172 phba->link_state); 15173 lpfc_nlp_put(ndlp); 15174 ctiocb->context1 = NULL; 15175 lpfc_sli_release_iocbq(phba, ctiocb); 15176 } 15177 } 15178 15179 /** 15180 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event 15181 * @vport: Pointer to the vport on which this sequence was received 15182 * @dmabuf: pointer to a dmabuf that describes the FC sequence 15183 * 15184 * This function handles an SLI-4 unsolicited abort event. If the unsolicited 15185 * receive sequence is only partially assembed by the driver, it shall abort 15186 * the partially assembled frames for the sequence. Otherwise, if the 15187 * unsolicited receive sequence has been completely assembled and passed to 15188 * the Upper Layer Protocol (UPL), it then mark the per oxid status for the 15189 * unsolicited sequence has been aborted. After that, it will issue a basic 15190 * accept to accept the abort. 15191 **/ 15192 void 15193 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 15194 struct hbq_dmabuf *dmabuf) 15195 { 15196 struct lpfc_hba *phba = vport->phba; 15197 struct fc_frame_header fc_hdr; 15198 uint32_t fctl; 15199 bool aborted; 15200 15201 /* Make a copy of fc_hdr before the dmabuf being released */ 15202 memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header)); 15203 fctl = sli4_fctl_from_fc_hdr(&fc_hdr); 15204 15205 if (fctl & FC_FC_EX_CTX) { 15206 /* ABTS by responder to exchange, no cleanup needed */ 15207 aborted = true; 15208 } else { 15209 /* ABTS by initiator to exchange, need to do cleanup */ 15210 aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf); 15211 if (aborted == false) 15212 aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf); 15213 } 15214 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15215 15216 /* Respond with BA_ACC or BA_RJT accordingly */ 15217 lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted); 15218 } 15219 15220 /** 15221 * lpfc_seq_complete - Indicates if a sequence is complete 15222 * @dmabuf: pointer to a dmabuf that describes the FC sequence 15223 * 15224 * This function checks the sequence, starting with the frame described by 15225 * @dmabuf, to see if all the frames associated with this sequence are present. 15226 * the frames associated with this sequence are linked to the @dmabuf using the 15227 * dbuf list. This function looks for two major things. 1) That the first frame 15228 * has a sequence count of zero. 2) There is a frame with last frame of sequence 15229 * set. 3) That there are no holes in the sequence count. The function will 15230 * return 1 when the sequence is complete, otherwise it will return 0. 15231 **/ 15232 static int 15233 lpfc_seq_complete(struct hbq_dmabuf *dmabuf) 15234 { 15235 struct fc_frame_header *hdr; 15236 struct lpfc_dmabuf *d_buf; 15237 struct hbq_dmabuf *seq_dmabuf; 15238 uint32_t fctl; 15239 int seq_count = 0; 15240 15241 hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 15242 /* make sure first fame of sequence has a sequence count of zero */ 15243 if (hdr->fh_seq_cnt != seq_count) 15244 return 0; 15245 fctl = (hdr->fh_f_ctl[0] << 16 | 15246 hdr->fh_f_ctl[1] << 8 | 15247 hdr->fh_f_ctl[2]); 15248 /* If last frame of sequence we can return success. */ 15249 if (fctl & FC_FC_END_SEQ) 15250 return 1; 15251 list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { 15252 seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); 15253 hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 15254 /* If there is a hole in the sequence count then fail. */ 15255 if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt)) 15256 return 0; 15257 fctl = (hdr->fh_f_ctl[0] << 16 | 15258 hdr->fh_f_ctl[1] << 8 | 15259 hdr->fh_f_ctl[2]); 15260 /* If last frame of sequence we can return success. */ 15261 if (fctl & FC_FC_END_SEQ) 15262 return 1; 15263 } 15264 return 0; 15265 } 15266 15267 /** 15268 * lpfc_prep_seq - Prep sequence for ULP processing 15269 * @vport: Pointer to the vport on which this sequence was received 15270 * @dmabuf: pointer to a dmabuf that describes the FC sequence 15271 * 15272 * This function takes a sequence, described by a list of frames, and creates 15273 * a list of iocbq structures to describe the sequence. This iocbq list will be 15274 * used to issue to the generic unsolicited sequence handler. This routine 15275 * returns a pointer to the first iocbq in the list. If the function is unable 15276 * to allocate an iocbq then it throw out the received frames that were not 15277 * able to be described and return a pointer to the first iocbq. If unable to 15278 * allocate any iocbqs (including the first) this function will return NULL. 15279 **/ 15280 static struct lpfc_iocbq * 15281 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) 15282 { 15283 struct hbq_dmabuf *hbq_buf; 15284 struct lpfc_dmabuf *d_buf, *n_buf; 15285 struct lpfc_iocbq *first_iocbq, *iocbq; 15286 struct fc_frame_header *fc_hdr; 15287 uint32_t sid; 15288 uint32_t len, tot_len; 15289 struct ulp_bde64 *pbde; 15290 15291 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 15292 /* remove from receive buffer list */ 15293 list_del_init(&seq_dmabuf->hbuf.list); 15294 lpfc_update_rcv_time_stamp(vport); 15295 /* get the Remote Port's SID */ 15296 sid = sli4_sid_from_fc_hdr(fc_hdr); 15297 tot_len = 0; 15298 /* Get an iocbq struct to fill in. */ 15299 first_iocbq = lpfc_sli_get_iocbq(vport->phba); 15300 if (first_iocbq) { 15301 /* Initialize the first IOCB. */ 15302 first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0; 15303 first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; 15304 15305 /* Check FC Header to see what TYPE of frame we are rcv'ing */ 15306 if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) { 15307 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX; 15308 first_iocbq->iocb.un.rcvels.parmRo = 15309 sli4_did_from_fc_hdr(fc_hdr); 15310 first_iocbq->iocb.ulpPU = PARM_NPIV_DID; 15311 } else 15312 first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; 15313 first_iocbq->iocb.ulpContext = NO_XRI; 15314 first_iocbq->iocb.unsli3.rcvsli3.ox_id = 15315 be16_to_cpu(fc_hdr->fh_ox_id); 15316 /* iocbq is prepped for internal consumption. Physical vpi. */ 15317 first_iocbq->iocb.unsli3.rcvsli3.vpi = 15318 vport->phba->vpi_ids[vport->vpi]; 15319 /* put the first buffer into the first IOCBq */ 15320 tot_len = bf_get(lpfc_rcqe_length, 15321 &seq_dmabuf->cq_event.cqe.rcqe_cmpl); 15322 15323 first_iocbq->context2 = &seq_dmabuf->dbuf; 15324 first_iocbq->context3 = NULL; 15325 first_iocbq->iocb.ulpBdeCount = 1; 15326 if (tot_len > LPFC_DATA_BUF_SIZE) 15327 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = 15328 LPFC_DATA_BUF_SIZE; 15329 else 15330 first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len; 15331 15332 first_iocbq->iocb.un.rcvels.remoteID = sid; 15333 15334 first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 15335 } 15336 iocbq = first_iocbq; 15337 /* 15338 * Each IOCBq can have two Buffers assigned, so go through the list 15339 * of buffers for this sequence and save two buffers in each IOCBq 15340 */ 15341 list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { 15342 if (!iocbq) { 15343 lpfc_in_buf_free(vport->phba, d_buf); 15344 continue; 15345 } 15346 if (!iocbq->context3) { 15347 iocbq->context3 = d_buf; 15348 iocbq->iocb.ulpBdeCount++; 15349 /* We need to get the size out of the right CQE */ 15350 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 15351 len = bf_get(lpfc_rcqe_length, 15352 &hbq_buf->cq_event.cqe.rcqe_cmpl); 15353 pbde = (struct ulp_bde64 *) 15354 &iocbq->iocb.unsli3.sli3Words[4]; 15355 if (len > LPFC_DATA_BUF_SIZE) 15356 pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE; 15357 else 15358 pbde->tus.f.bdeSize = len; 15359 15360 iocbq->iocb.unsli3.rcvsli3.acc_len += len; 15361 tot_len += len; 15362 } else { 15363 iocbq = lpfc_sli_get_iocbq(vport->phba); 15364 if (!iocbq) { 15365 if (first_iocbq) { 15366 first_iocbq->iocb.ulpStatus = 15367 IOSTAT_FCP_RSP_ERROR; 15368 first_iocbq->iocb.un.ulpWord[4] = 15369 IOERR_NO_RESOURCES; 15370 } 15371 lpfc_in_buf_free(vport->phba, d_buf); 15372 continue; 15373 } 15374 /* We need to get the size out of the right CQE */ 15375 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); 15376 len = bf_get(lpfc_rcqe_length, 15377 &hbq_buf->cq_event.cqe.rcqe_cmpl); 15378 iocbq->context2 = d_buf; 15379 iocbq->context3 = NULL; 15380 iocbq->iocb.ulpBdeCount = 1; 15381 if (len > LPFC_DATA_BUF_SIZE) 15382 iocbq->iocb.un.cont64[0].tus.f.bdeSize = 15383 LPFC_DATA_BUF_SIZE; 15384 else 15385 iocbq->iocb.un.cont64[0].tus.f.bdeSize = len; 15386 15387 tot_len += len; 15388 iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len; 15389 15390 iocbq->iocb.un.rcvels.remoteID = sid; 15391 list_add_tail(&iocbq->list, &first_iocbq->list); 15392 } 15393 } 15394 return first_iocbq; 15395 } 15396 15397 static void 15398 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, 15399 struct hbq_dmabuf *seq_dmabuf) 15400 { 15401 struct fc_frame_header *fc_hdr; 15402 struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb; 15403 struct lpfc_hba *phba = vport->phba; 15404 15405 fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; 15406 iocbq = lpfc_prep_seq(vport, seq_dmabuf); 15407 if (!iocbq) { 15408 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15409 "2707 Ring %d handler: Failed to allocate " 15410 "iocb Rctl x%x Type x%x received\n", 15411 LPFC_ELS_RING, 15412 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 15413 return; 15414 } 15415 if (!lpfc_complete_unsol_iocb(phba, 15416 &phba->sli.ring[LPFC_ELS_RING], 15417 iocbq, fc_hdr->fh_r_ctl, 15418 fc_hdr->fh_type)) 15419 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15420 "2540 Ring %d handler: unexpected Rctl " 15421 "x%x Type x%x received\n", 15422 LPFC_ELS_RING, 15423 fc_hdr->fh_r_ctl, fc_hdr->fh_type); 15424 15425 /* Free iocb created in lpfc_prep_seq */ 15426 list_for_each_entry_safe(curr_iocb, next_iocb, 15427 &iocbq->list, list) { 15428 list_del_init(&curr_iocb->list); 15429 lpfc_sli_release_iocbq(phba, curr_iocb); 15430 } 15431 lpfc_sli_release_iocbq(phba, iocbq); 15432 } 15433 15434 /** 15435 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware 15436 * @phba: Pointer to HBA context object. 15437 * 15438 * This function is called with no lock held. This function processes all 15439 * the received buffers and gives it to upper layers when a received buffer 15440 * indicates that it is the final frame in the sequence. The interrupt 15441 * service routine processes received buffers at interrupt contexts and adds 15442 * received dma buffers to the rb_pend_list queue and signals the worker thread. 15443 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the 15444 * appropriate receive function when the final frame in a sequence is received. 15445 **/ 15446 void 15447 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, 15448 struct hbq_dmabuf *dmabuf) 15449 { 15450 struct hbq_dmabuf *seq_dmabuf; 15451 struct fc_frame_header *fc_hdr; 15452 struct lpfc_vport *vport; 15453 uint32_t fcfi; 15454 uint32_t did; 15455 15456 /* Process each received buffer */ 15457 fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; 15458 /* check to see if this a valid type of frame */ 15459 if (lpfc_fc_frame_check(phba, fc_hdr)) { 15460 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15461 return; 15462 } 15463 if ((bf_get(lpfc_cqe_code, 15464 &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1)) 15465 fcfi = bf_get(lpfc_rcqe_fcf_id_v1, 15466 &dmabuf->cq_event.cqe.rcqe_cmpl); 15467 else 15468 fcfi = bf_get(lpfc_rcqe_fcf_id, 15469 &dmabuf->cq_event.cqe.rcqe_cmpl); 15470 15471 vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); 15472 if (!vport) { 15473 /* throw out the frame */ 15474 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15475 return; 15476 } 15477 15478 /* d_id this frame is directed to */ 15479 did = sli4_did_from_fc_hdr(fc_hdr); 15480 15481 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */ 15482 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) && 15483 (did != Fabric_DID)) { 15484 /* 15485 * Throw out the frame if we are not pt2pt. 15486 * The pt2pt protocol allows for discovery frames 15487 * to be received without a registered VPI. 15488 */ 15489 if (!(vport->fc_flag & FC_PT2PT) || 15490 (phba->link_state == LPFC_HBA_READY)) { 15491 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15492 return; 15493 } 15494 } 15495 15496 /* Handle the basic abort sequence (BA_ABTS) event */ 15497 if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) { 15498 lpfc_sli4_handle_unsol_abort(vport, dmabuf); 15499 return; 15500 } 15501 15502 /* Link this frame */ 15503 seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); 15504 if (!seq_dmabuf) { 15505 /* unable to add frame to vport - throw it out */ 15506 lpfc_in_buf_free(phba, &dmabuf->dbuf); 15507 return; 15508 } 15509 /* If not last frame in sequence continue processing frames. */ 15510 if (!lpfc_seq_complete(seq_dmabuf)) 15511 return; 15512 15513 /* Send the complete sequence to the upper layer protocol */ 15514 lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); 15515 } 15516 15517 /** 15518 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port 15519 * @phba: pointer to lpfc hba data structure. 15520 * 15521 * This routine is invoked to post rpi header templates to the 15522 * HBA consistent with the SLI-4 interface spec. This routine 15523 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15524 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15525 * 15526 * This routine does not require any locks. It's usage is expected 15527 * to be driver load or reset recovery when the driver is 15528 * sequential. 15529 * 15530 * Return codes 15531 * 0 - successful 15532 * -EIO - The mailbox failed to complete successfully. 15533 * When this error occurs, the driver is not guaranteed 15534 * to have any rpi regions posted to the device and 15535 * must either attempt to repost the regions or take a 15536 * fatal error. 15537 **/ 15538 int 15539 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) 15540 { 15541 struct lpfc_rpi_hdr *rpi_page; 15542 uint32_t rc = 0; 15543 uint16_t lrpi = 0; 15544 15545 /* SLI4 ports that support extents do not require RPI headers. */ 15546 if (!phba->sli4_hba.rpi_hdrs_in_use) 15547 goto exit; 15548 if (phba->sli4_hba.extents_in_use) 15549 return -EIO; 15550 15551 list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { 15552 /* 15553 * Assign the rpi headers a physical rpi only if the driver 15554 * has not initialized those resources. A port reset only 15555 * needs the headers posted. 15556 */ 15557 if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) != 15558 LPFC_RPI_RSRC_RDY) 15559 rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 15560 15561 rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); 15562 if (rc != MBX_SUCCESS) { 15563 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15564 "2008 Error %d posting all rpi " 15565 "headers\n", rc); 15566 rc = -EIO; 15567 break; 15568 } 15569 } 15570 15571 exit: 15572 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 15573 LPFC_RPI_RSRC_RDY); 15574 return rc; 15575 } 15576 15577 /** 15578 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port 15579 * @phba: pointer to lpfc hba data structure. 15580 * @rpi_page: pointer to the rpi memory region. 15581 * 15582 * This routine is invoked to post a single rpi header to the 15583 * HBA consistent with the SLI-4 interface spec. This memory region 15584 * maps up to 64 rpi context regions. 15585 * 15586 * Return codes 15587 * 0 - successful 15588 * -ENOMEM - No available memory 15589 * -EIO - The mailbox failed to complete successfully. 15590 **/ 15591 int 15592 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page) 15593 { 15594 LPFC_MBOXQ_t *mboxq; 15595 struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl; 15596 uint32_t rc = 0; 15597 uint32_t shdr_status, shdr_add_status; 15598 union lpfc_sli4_cfg_shdr *shdr; 15599 15600 /* SLI4 ports that support extents do not require RPI headers. */ 15601 if (!phba->sli4_hba.rpi_hdrs_in_use) 15602 return rc; 15603 if (phba->sli4_hba.extents_in_use) 15604 return -EIO; 15605 15606 /* The port is notified of the header region via a mailbox command. */ 15607 mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15608 if (!mboxq) { 15609 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15610 "2001 Unable to allocate memory for issuing " 15611 "SLI_CONFIG_SPECIAL mailbox command\n"); 15612 return -ENOMEM; 15613 } 15614 15615 /* Post all rpi memory regions to the port. */ 15616 hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl; 15617 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 15618 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE, 15619 sizeof(struct lpfc_mbx_post_hdr_tmpl) - 15620 sizeof(struct lpfc_sli4_cfg_mhdr), 15621 LPFC_SLI4_MBX_EMBED); 15622 15623 15624 /* Post the physical rpi to the port for this rpi header. */ 15625 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 15626 rpi_page->start_rpi); 15627 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, 15628 hdr_tmpl, rpi_page->page_count); 15629 15630 hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys); 15631 hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys); 15632 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 15633 shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr; 15634 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15635 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15636 if (rc != MBX_TIMEOUT) 15637 mempool_free(mboxq, phba->mbox_mem_pool); 15638 if (shdr_status || shdr_add_status || rc) { 15639 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15640 "2514 POST_RPI_HDR mailbox failed with " 15641 "status x%x add_status x%x, mbx status x%x\n", 15642 shdr_status, shdr_add_status, rc); 15643 rc = -ENXIO; 15644 } 15645 return rc; 15646 } 15647 15648 /** 15649 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range 15650 * @phba: pointer to lpfc hba data structure. 15651 * 15652 * This routine is invoked to post rpi header templates to the 15653 * HBA consistent with the SLI-4 interface spec. This routine 15654 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to 15655 * SLI4_PAGE_SIZE modulo 64 rpi context headers. 15656 * 15657 * Returns 15658 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 15659 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 15660 **/ 15661 int 15662 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) 15663 { 15664 unsigned long rpi; 15665 uint16_t max_rpi, rpi_limit; 15666 uint16_t rpi_remaining, lrpi = 0; 15667 struct lpfc_rpi_hdr *rpi_hdr; 15668 unsigned long iflag; 15669 15670 max_rpi = phba->sli4_hba.max_cfg_param.max_rpi; 15671 rpi_limit = phba->sli4_hba.next_rpi; 15672 15673 /* 15674 * Fetch the next logical rpi. Because this index is logical, 15675 * the driver starts at 0 each time. 15676 */ 15677 spin_lock_irqsave(&phba->hbalock, iflag); 15678 rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0); 15679 if (rpi >= rpi_limit) 15680 rpi = LPFC_RPI_ALLOC_ERROR; 15681 else { 15682 set_bit(rpi, phba->sli4_hba.rpi_bmask); 15683 phba->sli4_hba.max_cfg_param.rpi_used++; 15684 phba->sli4_hba.rpi_count++; 15685 } 15686 15687 /* 15688 * Don't try to allocate more rpi header regions if the device limit 15689 * has been exhausted. 15690 */ 15691 if ((rpi == LPFC_RPI_ALLOC_ERROR) && 15692 (phba->sli4_hba.rpi_count >= max_rpi)) { 15693 spin_unlock_irqrestore(&phba->hbalock, iflag); 15694 return rpi; 15695 } 15696 15697 /* 15698 * RPI header postings are not required for SLI4 ports capable of 15699 * extents. 15700 */ 15701 if (!phba->sli4_hba.rpi_hdrs_in_use) { 15702 spin_unlock_irqrestore(&phba->hbalock, iflag); 15703 return rpi; 15704 } 15705 15706 /* 15707 * If the driver is running low on rpi resources, allocate another 15708 * page now. Note that the next_rpi value is used because 15709 * it represents how many are actually in use whereas max_rpi notes 15710 * how many are supported max by the device. 15711 */ 15712 rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; 15713 spin_unlock_irqrestore(&phba->hbalock, iflag); 15714 if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) { 15715 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); 15716 if (!rpi_hdr) { 15717 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15718 "2002 Error Could not grow rpi " 15719 "count\n"); 15720 } else { 15721 lrpi = rpi_hdr->start_rpi; 15722 rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi]; 15723 lpfc_sli4_post_rpi_hdr(phba, rpi_hdr); 15724 } 15725 } 15726 15727 return rpi; 15728 } 15729 15730 /** 15731 * lpfc_sli4_free_rpi - Release an rpi for reuse. 15732 * @phba: pointer to lpfc hba data structure. 15733 * 15734 * This routine is invoked to release an rpi to the pool of 15735 * available rpis maintained by the driver. 15736 **/ 15737 void 15738 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 15739 { 15740 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 15741 phba->sli4_hba.rpi_count--; 15742 phba->sli4_hba.max_cfg_param.rpi_used--; 15743 } 15744 } 15745 15746 /** 15747 * lpfc_sli4_free_rpi - Release an rpi for reuse. 15748 * @phba: pointer to lpfc hba data structure. 15749 * 15750 * This routine is invoked to release an rpi to the pool of 15751 * available rpis maintained by the driver. 15752 **/ 15753 void 15754 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 15755 { 15756 spin_lock_irq(&phba->hbalock); 15757 __lpfc_sli4_free_rpi(phba, rpi); 15758 spin_unlock_irq(&phba->hbalock); 15759 } 15760 15761 /** 15762 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region 15763 * @phba: pointer to lpfc hba data structure. 15764 * 15765 * This routine is invoked to remove the memory region that 15766 * provided rpi via a bitmask. 15767 **/ 15768 void 15769 lpfc_sli4_remove_rpis(struct lpfc_hba *phba) 15770 { 15771 kfree(phba->sli4_hba.rpi_bmask); 15772 kfree(phba->sli4_hba.rpi_ids); 15773 bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); 15774 } 15775 15776 /** 15777 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region 15778 * @phba: pointer to lpfc hba data structure. 15779 * 15780 * This routine is invoked to remove the memory region that 15781 * provided rpi via a bitmask. 15782 **/ 15783 int 15784 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp, 15785 void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg) 15786 { 15787 LPFC_MBOXQ_t *mboxq; 15788 struct lpfc_hba *phba = ndlp->phba; 15789 int rc; 15790 15791 /* The port is notified of the header region via a mailbox command. */ 15792 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15793 if (!mboxq) 15794 return -ENOMEM; 15795 15796 /* Post all rpi memory regions to the port. */ 15797 lpfc_resume_rpi(mboxq, ndlp); 15798 if (cmpl) { 15799 mboxq->mbox_cmpl = cmpl; 15800 mboxq->context1 = arg; 15801 mboxq->context2 = ndlp; 15802 } else 15803 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 15804 mboxq->vport = ndlp->vport; 15805 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15806 if (rc == MBX_NOT_FINISHED) { 15807 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 15808 "2010 Resume RPI Mailbox failed " 15809 "status %d, mbxStatus x%x\n", rc, 15810 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 15811 mempool_free(mboxq, phba->mbox_mem_pool); 15812 return -EIO; 15813 } 15814 return 0; 15815 } 15816 15817 /** 15818 * lpfc_sli4_init_vpi - Initialize a vpi with the port 15819 * @vport: Pointer to the vport for which the vpi is being initialized 15820 * 15821 * This routine is invoked to activate a vpi with the port. 15822 * 15823 * Returns: 15824 * 0 success 15825 * -Evalue otherwise 15826 **/ 15827 int 15828 lpfc_sli4_init_vpi(struct lpfc_vport *vport) 15829 { 15830 LPFC_MBOXQ_t *mboxq; 15831 int rc = 0; 15832 int retval = MBX_SUCCESS; 15833 uint32_t mbox_tmo; 15834 struct lpfc_hba *phba = vport->phba; 15835 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15836 if (!mboxq) 15837 return -ENOMEM; 15838 lpfc_init_vpi(phba, mboxq, vport->vpi); 15839 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); 15840 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 15841 if (rc != MBX_SUCCESS) { 15842 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 15843 "2022 INIT VPI Mailbox failed " 15844 "status %d, mbxStatus x%x\n", rc, 15845 bf_get(lpfc_mqe_status, &mboxq->u.mqe)); 15846 retval = -EIO; 15847 } 15848 if (rc != MBX_TIMEOUT) 15849 mempool_free(mboxq, vport->phba->mbox_mem_pool); 15850 15851 return retval; 15852 } 15853 15854 /** 15855 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. 15856 * @phba: pointer to lpfc hba data structure. 15857 * @mboxq: Pointer to mailbox object. 15858 * 15859 * This routine is invoked to manually add a single FCF record. The caller 15860 * must pass a completely initialized FCF_Record. This routine takes 15861 * care of the nonembedded mailbox operations. 15862 **/ 15863 static void 15864 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 15865 { 15866 void *virt_addr; 15867 union lpfc_sli4_cfg_shdr *shdr; 15868 uint32_t shdr_status, shdr_add_status; 15869 15870 virt_addr = mboxq->sge_array->addr[0]; 15871 /* The IOCTL status is embedded in the mailbox subheader. */ 15872 shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; 15873 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 15874 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 15875 15876 if ((shdr_status || shdr_add_status) && 15877 (shdr_status != STATUS_FCF_IN_USE)) 15878 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15879 "2558 ADD_FCF_RECORD mailbox failed with " 15880 "status x%x add_status x%x\n", 15881 shdr_status, shdr_add_status); 15882 15883 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15884 } 15885 15886 /** 15887 * lpfc_sli4_add_fcf_record - Manually add an FCF Record. 15888 * @phba: pointer to lpfc hba data structure. 15889 * @fcf_record: pointer to the initialized fcf record to add. 15890 * 15891 * This routine is invoked to manually add a single FCF record. The caller 15892 * must pass a completely initialized FCF_Record. This routine takes 15893 * care of the nonembedded mailbox operations. 15894 **/ 15895 int 15896 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) 15897 { 15898 int rc = 0; 15899 LPFC_MBOXQ_t *mboxq; 15900 uint8_t *bytep; 15901 void *virt_addr; 15902 dma_addr_t phys_addr; 15903 struct lpfc_mbx_sge sge; 15904 uint32_t alloc_len, req_len; 15905 uint32_t fcfindex; 15906 15907 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 15908 if (!mboxq) { 15909 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15910 "2009 Failed to allocate mbox for ADD_FCF cmd\n"); 15911 return -ENOMEM; 15912 } 15913 15914 req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + 15915 sizeof(uint32_t); 15916 15917 /* Allocate DMA memory and set up the non-embedded mailbox command */ 15918 alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, 15919 LPFC_MBOX_OPCODE_FCOE_ADD_FCF, 15920 req_len, LPFC_SLI4_MBX_NEMBED); 15921 if (alloc_len < req_len) { 15922 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15923 "2523 Allocated DMA memory size (x%x) is " 15924 "less than the requested DMA memory " 15925 "size (x%x)\n", alloc_len, req_len); 15926 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15927 return -ENOMEM; 15928 } 15929 15930 /* 15931 * Get the first SGE entry from the non-embedded DMA memory. This 15932 * routine only uses a single SGE. 15933 */ 15934 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 15935 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 15936 virt_addr = mboxq->sge_array->addr[0]; 15937 /* 15938 * Configure the FCF record for FCFI 0. This is the driver's 15939 * hardcoded default and gets used in nonFIP mode. 15940 */ 15941 fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); 15942 bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); 15943 lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); 15944 15945 /* 15946 * Copy the fcf_index and the FCF Record Data. The data starts after 15947 * the FCoE header plus word10. The data copy needs to be endian 15948 * correct. 15949 */ 15950 bytep += sizeof(uint32_t); 15951 lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); 15952 mboxq->vport = phba->pport; 15953 mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; 15954 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 15955 if (rc == MBX_NOT_FINISHED) { 15956 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 15957 "2515 ADD_FCF_RECORD mailbox failed with " 15958 "status 0x%x\n", rc); 15959 lpfc_sli4_mbox_cmd_free(phba, mboxq); 15960 rc = -EIO; 15961 } else 15962 rc = 0; 15963 15964 return rc; 15965 } 15966 15967 /** 15968 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. 15969 * @phba: pointer to lpfc hba data structure. 15970 * @fcf_record: pointer to the fcf record to write the default data. 15971 * @fcf_index: FCF table entry index. 15972 * 15973 * This routine is invoked to build the driver's default FCF record. The 15974 * values used are hardcoded. This routine handles memory initialization. 15975 * 15976 **/ 15977 void 15978 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, 15979 struct fcf_record *fcf_record, 15980 uint16_t fcf_index) 15981 { 15982 memset(fcf_record, 0, sizeof(struct fcf_record)); 15983 fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; 15984 fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; 15985 fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; 15986 bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); 15987 bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); 15988 bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); 15989 bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); 15990 bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); 15991 bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); 15992 bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); 15993 bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); 15994 bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); 15995 bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); 15996 bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1); 15997 bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); 15998 bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, 15999 LPFC_FCF_FPMA | LPFC_FCF_SPMA); 16000 /* Set the VLAN bit map */ 16001 if (phba->valid_vlan) { 16002 fcf_record->vlan_bitmap[phba->vlan_id / 8] 16003 = 1 << (phba->vlan_id % 8); 16004 } 16005 } 16006 16007 /** 16008 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan. 16009 * @phba: pointer to lpfc hba data structure. 16010 * @fcf_index: FCF table entry offset. 16011 * 16012 * This routine is invoked to scan the entire FCF table by reading FCF 16013 * record and processing it one at a time starting from the @fcf_index 16014 * for initial FCF discovery or fast FCF failover rediscovery. 16015 * 16016 * Return 0 if the mailbox command is submitted successfully, none 0 16017 * otherwise. 16018 **/ 16019 int 16020 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 16021 { 16022 int rc = 0, error; 16023 LPFC_MBOXQ_t *mboxq; 16024 16025 phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag; 16026 phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag; 16027 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16028 if (!mboxq) { 16029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16030 "2000 Failed to allocate mbox for " 16031 "READ_FCF cmd\n"); 16032 error = -ENOMEM; 16033 goto fail_fcf_scan; 16034 } 16035 /* Construct the read FCF record mailbox command */ 16036 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 16037 if (rc) { 16038 error = -EINVAL; 16039 goto fail_fcf_scan; 16040 } 16041 /* Issue the mailbox command asynchronously */ 16042 mboxq->vport = phba->pport; 16043 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; 16044 16045 spin_lock_irq(&phba->hbalock); 16046 phba->hba_flag |= FCF_TS_INPROG; 16047 spin_unlock_irq(&phba->hbalock); 16048 16049 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 16050 if (rc == MBX_NOT_FINISHED) 16051 error = -EIO; 16052 else { 16053 /* Reset eligible FCF count for new scan */ 16054 if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) 16055 phba->fcf.eligible_fcf_cnt = 0; 16056 error = 0; 16057 } 16058 fail_fcf_scan: 16059 if (error) { 16060 if (mboxq) 16061 lpfc_sli4_mbox_cmd_free(phba, mboxq); 16062 /* FCF scan failed, clear FCF_TS_INPROG flag */ 16063 spin_lock_irq(&phba->hbalock); 16064 phba->hba_flag &= ~FCF_TS_INPROG; 16065 spin_unlock_irq(&phba->hbalock); 16066 } 16067 return error; 16068 } 16069 16070 /** 16071 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. 16072 * @phba: pointer to lpfc hba data structure. 16073 * @fcf_index: FCF table entry offset. 16074 * 16075 * This routine is invoked to read an FCF record indicated by @fcf_index 16076 * and to use it for FLOGI roundrobin FCF failover. 16077 * 16078 * Return 0 if the mailbox command is submitted successfully, none 0 16079 * otherwise. 16080 **/ 16081 int 16082 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 16083 { 16084 int rc = 0, error; 16085 LPFC_MBOXQ_t *mboxq; 16086 16087 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16088 if (!mboxq) { 16089 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 16090 "2763 Failed to allocate mbox for " 16091 "READ_FCF cmd\n"); 16092 error = -ENOMEM; 16093 goto fail_fcf_read; 16094 } 16095 /* Construct the read FCF record mailbox command */ 16096 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 16097 if (rc) { 16098 error = -EINVAL; 16099 goto fail_fcf_read; 16100 } 16101 /* Issue the mailbox command asynchronously */ 16102 mboxq->vport = phba->pport; 16103 mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec; 16104 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 16105 if (rc == MBX_NOT_FINISHED) 16106 error = -EIO; 16107 else 16108 error = 0; 16109 16110 fail_fcf_read: 16111 if (error && mboxq) 16112 lpfc_sli4_mbox_cmd_free(phba, mboxq); 16113 return error; 16114 } 16115 16116 /** 16117 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask. 16118 * @phba: pointer to lpfc hba data structure. 16119 * @fcf_index: FCF table entry offset. 16120 * 16121 * This routine is invoked to read an FCF record indicated by @fcf_index to 16122 * determine whether it's eligible for FLOGI roundrobin failover list. 16123 * 16124 * Return 0 if the mailbox command is submitted successfully, none 0 16125 * otherwise. 16126 **/ 16127 int 16128 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) 16129 { 16130 int rc = 0, error; 16131 LPFC_MBOXQ_t *mboxq; 16132 16133 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16134 if (!mboxq) { 16135 lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT, 16136 "2758 Failed to allocate mbox for " 16137 "READ_FCF cmd\n"); 16138 error = -ENOMEM; 16139 goto fail_fcf_read; 16140 } 16141 /* Construct the read FCF record mailbox command */ 16142 rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index); 16143 if (rc) { 16144 error = -EINVAL; 16145 goto fail_fcf_read; 16146 } 16147 /* Issue the mailbox command asynchronously */ 16148 mboxq->vport = phba->pport; 16149 mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec; 16150 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 16151 if (rc == MBX_NOT_FINISHED) 16152 error = -EIO; 16153 else 16154 error = 0; 16155 16156 fail_fcf_read: 16157 if (error && mboxq) 16158 lpfc_sli4_mbox_cmd_free(phba, mboxq); 16159 return error; 16160 } 16161 16162 /** 16163 * lpfc_check_next_fcf_pri 16164 * phba pointer to the lpfc_hba struct for this port. 16165 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get 16166 * routine when the rr_bmask is empty. The FCF indecies are put into the 16167 * rr_bmask based on their priority level. Starting from the highest priority 16168 * to the lowest. The most likely FCF candidate will be in the highest 16169 * priority group. When this routine is called it searches the fcf_pri list for 16170 * next lowest priority group and repopulates the rr_bmask with only those 16171 * fcf_indexes. 16172 * returns: 16173 * 1=success 0=failure 16174 **/ 16175 int 16176 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 16177 { 16178 uint16_t next_fcf_pri; 16179 uint16_t last_index; 16180 struct lpfc_fcf_pri *fcf_pri; 16181 int rc; 16182 int ret = 0; 16183 16184 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 16185 LPFC_SLI4_FCF_TBL_INDX_MAX); 16186 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16187 "3060 Last IDX %d\n", last_index); 16188 16189 /* Verify the priority list has 2 or more entries */ 16190 spin_lock_irq(&phba->hbalock); 16191 if (list_empty(&phba->fcf.fcf_pri_list) || 16192 list_is_singular(&phba->fcf.fcf_pri_list)) { 16193 spin_unlock_irq(&phba->hbalock); 16194 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16195 "3061 Last IDX %d\n", last_index); 16196 return 0; /* Empty rr list */ 16197 } 16198 spin_unlock_irq(&phba->hbalock); 16199 16200 next_fcf_pri = 0; 16201 /* 16202 * Clear the rr_bmask and set all of the bits that are at this 16203 * priority. 16204 */ 16205 memset(phba->fcf.fcf_rr_bmask, 0, 16206 sizeof(*phba->fcf.fcf_rr_bmask)); 16207 spin_lock_irq(&phba->hbalock); 16208 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 16209 if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED) 16210 continue; 16211 /* 16212 * the 1st priority that has not FLOGI failed 16213 * will be the highest. 16214 */ 16215 if (!next_fcf_pri) 16216 next_fcf_pri = fcf_pri->fcf_rec.priority; 16217 spin_unlock_irq(&phba->hbalock); 16218 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 16219 rc = lpfc_sli4_fcf_rr_index_set(phba, 16220 fcf_pri->fcf_rec.fcf_index); 16221 if (rc) 16222 return 0; 16223 } 16224 spin_lock_irq(&phba->hbalock); 16225 } 16226 /* 16227 * if next_fcf_pri was not set above and the list is not empty then 16228 * we have failed flogis on all of them. So reset flogi failed 16229 * and start at the beginning. 16230 */ 16231 if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { 16232 list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { 16233 fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED; 16234 /* 16235 * the 1st priority that has not FLOGI failed 16236 * will be the highest. 16237 */ 16238 if (!next_fcf_pri) 16239 next_fcf_pri = fcf_pri->fcf_rec.priority; 16240 spin_unlock_irq(&phba->hbalock); 16241 if (fcf_pri->fcf_rec.priority == next_fcf_pri) { 16242 rc = lpfc_sli4_fcf_rr_index_set(phba, 16243 fcf_pri->fcf_rec.fcf_index); 16244 if (rc) 16245 return 0; 16246 } 16247 spin_lock_irq(&phba->hbalock); 16248 } 16249 } else 16250 ret = 1; 16251 spin_unlock_irq(&phba->hbalock); 16252 16253 return ret; 16254 } 16255 /** 16256 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index 16257 * @phba: pointer to lpfc hba data structure. 16258 * 16259 * This routine is to get the next eligible FCF record index in a round 16260 * robin fashion. If the next eligible FCF record index equals to the 16261 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) 16262 * shall be returned, otherwise, the next eligible FCF record's index 16263 * shall be returned. 16264 **/ 16265 uint16_t 16266 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) 16267 { 16268 uint16_t next_fcf_index; 16269 16270 initial_priority: 16271 /* Search start from next bit of currently registered FCF index */ 16272 next_fcf_index = phba->fcf.current_rec.fcf_indx; 16273 16274 next_priority: 16275 /* Determine the next fcf index to check */ 16276 next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX; 16277 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 16278 LPFC_SLI4_FCF_TBL_INDX_MAX, 16279 next_fcf_index); 16280 16281 /* Wrap around condition on phba->fcf.fcf_rr_bmask */ 16282 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 16283 /* 16284 * If we have wrapped then we need to clear the bits that 16285 * have been tested so that we can detect when we should 16286 * change the priority level. 16287 */ 16288 next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask, 16289 LPFC_SLI4_FCF_TBL_INDX_MAX, 0); 16290 } 16291 16292 16293 /* Check roundrobin failover list empty condition */ 16294 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX || 16295 next_fcf_index == phba->fcf.current_rec.fcf_indx) { 16296 /* 16297 * If next fcf index is not found check if there are lower 16298 * Priority level fcf's in the fcf_priority list. 16299 * Set up the rr_bmask with all of the avaiable fcf bits 16300 * at that level and continue the selection process. 16301 */ 16302 if (lpfc_check_next_fcf_pri_level(phba)) 16303 goto initial_priority; 16304 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 16305 "2844 No roundrobin failover FCF available\n"); 16306 if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) 16307 return LPFC_FCOE_FCF_NEXT_NONE; 16308 else { 16309 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 16310 "3063 Only FCF available idx %d, flag %x\n", 16311 next_fcf_index, 16312 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag); 16313 return next_fcf_index; 16314 } 16315 } 16316 16317 if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX && 16318 phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag & 16319 LPFC_FCF_FLOGI_FAILED) 16320 goto next_priority; 16321 16322 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16323 "2845 Get next roundrobin failover FCF (x%x)\n", 16324 next_fcf_index); 16325 16326 return next_fcf_index; 16327 } 16328 16329 /** 16330 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index 16331 * @phba: pointer to lpfc hba data structure. 16332 * 16333 * This routine sets the FCF record index in to the eligible bmask for 16334 * roundrobin failover search. It checks to make sure that the index 16335 * does not go beyond the range of the driver allocated bmask dimension 16336 * before setting the bit. 16337 * 16338 * Returns 0 if the index bit successfully set, otherwise, it returns 16339 * -EINVAL. 16340 **/ 16341 int 16342 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) 16343 { 16344 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 16345 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16346 "2610 FCF (x%x) reached driver's book " 16347 "keeping dimension:x%x\n", 16348 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 16349 return -EINVAL; 16350 } 16351 /* Set the eligible FCF record index bmask */ 16352 set_bit(fcf_index, phba->fcf.fcf_rr_bmask); 16353 16354 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16355 "2790 Set FCF (x%x) to roundrobin FCF failover " 16356 "bmask\n", fcf_index); 16357 16358 return 0; 16359 } 16360 16361 /** 16362 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index 16363 * @phba: pointer to lpfc hba data structure. 16364 * 16365 * This routine clears the FCF record index from the eligible bmask for 16366 * roundrobin failover search. It checks to make sure that the index 16367 * does not go beyond the range of the driver allocated bmask dimension 16368 * before clearing the bit. 16369 **/ 16370 void 16371 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) 16372 { 16373 struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; 16374 if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 16375 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16376 "2762 FCF (x%x) reached driver's book " 16377 "keeping dimension:x%x\n", 16378 fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); 16379 return; 16380 } 16381 /* Clear the eligible FCF record index bmask */ 16382 spin_lock_irq(&phba->hbalock); 16383 list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, 16384 list) { 16385 if (fcf_pri->fcf_rec.fcf_index == fcf_index) { 16386 list_del_init(&fcf_pri->list); 16387 break; 16388 } 16389 } 16390 spin_unlock_irq(&phba->hbalock); 16391 clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); 16392 16393 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16394 "2791 Clear FCF (x%x) from roundrobin failover " 16395 "bmask\n", fcf_index); 16396 } 16397 16398 /** 16399 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table 16400 * @phba: pointer to lpfc hba data structure. 16401 * 16402 * This routine is the completion routine for the rediscover FCF table mailbox 16403 * command. If the mailbox command returned failure, it will try to stop the 16404 * FCF rediscover wait timer. 16405 **/ 16406 void 16407 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 16408 { 16409 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16410 uint32_t shdr_status, shdr_add_status; 16411 16412 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 16413 16414 shdr_status = bf_get(lpfc_mbox_hdr_status, 16415 &redisc_fcf->header.cfg_shdr.response); 16416 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, 16417 &redisc_fcf->header.cfg_shdr.response); 16418 if (shdr_status || shdr_add_status) { 16419 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 16420 "2746 Requesting for FCF rediscovery failed " 16421 "status x%x add_status x%x\n", 16422 shdr_status, shdr_add_status); 16423 if (phba->fcf.fcf_flag & FCF_ACVL_DISC) { 16424 spin_lock_irq(&phba->hbalock); 16425 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC; 16426 spin_unlock_irq(&phba->hbalock); 16427 /* 16428 * CVL event triggered FCF rediscover request failed, 16429 * last resort to re-try current registered FCF entry. 16430 */ 16431 lpfc_retry_pport_discovery(phba); 16432 } else { 16433 spin_lock_irq(&phba->hbalock); 16434 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC; 16435 spin_unlock_irq(&phba->hbalock); 16436 /* 16437 * DEAD FCF event triggered FCF rediscover request 16438 * failed, last resort to fail over as a link down 16439 * to FCF registration. 16440 */ 16441 lpfc_sli4_fcf_dead_failthrough(phba); 16442 } 16443 } else { 16444 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 16445 "2775 Start FCF rediscover quiescent timer\n"); 16446 /* 16447 * Start FCF rediscovery wait timer for pending FCF 16448 * before rescan FCF record table. 16449 */ 16450 lpfc_fcf_redisc_wait_start_timer(phba); 16451 } 16452 16453 mempool_free(mbox, phba->mbox_mem_pool); 16454 } 16455 16456 /** 16457 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port. 16458 * @phba: pointer to lpfc hba data structure. 16459 * 16460 * This routine is invoked to request for rediscovery of the entire FCF table 16461 * by the port. 16462 **/ 16463 int 16464 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba) 16465 { 16466 LPFC_MBOXQ_t *mbox; 16467 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16468 int rc, length; 16469 16470 /* Cancel retry delay timers to all vports before FCF rediscover */ 16471 lpfc_cancel_all_vport_retry_delay_timer(phba); 16472 16473 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16474 if (!mbox) { 16475 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16476 "2745 Failed to allocate mbox for " 16477 "requesting FCF rediscover.\n"); 16478 return -ENOMEM; 16479 } 16480 16481 length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) - 16482 sizeof(struct lpfc_sli4_cfg_mhdr)); 16483 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, 16484 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF, 16485 length, LPFC_SLI4_MBX_EMBED); 16486 16487 redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl; 16488 /* Set count to 0 for invalidating the entire FCF database */ 16489 bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0); 16490 16491 /* Issue the mailbox command asynchronously */ 16492 mbox->vport = phba->pport; 16493 mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table; 16494 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 16495 16496 if (rc == MBX_NOT_FINISHED) { 16497 mempool_free(mbox, phba->mbox_mem_pool); 16498 return -EIO; 16499 } 16500 return 0; 16501 } 16502 16503 /** 16504 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event 16505 * @phba: pointer to lpfc hba data structure. 16506 * 16507 * This function is the failover routine as a last resort to the FCF DEAD 16508 * event when driver failed to perform fast FCF failover. 16509 **/ 16510 void 16511 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba) 16512 { 16513 uint32_t link_state; 16514 16515 /* 16516 * Last resort as FCF DEAD event failover will treat this as 16517 * a link down, but save the link state because we don't want 16518 * it to be changed to Link Down unless it is already down. 16519 */ 16520 link_state = phba->link_state; 16521 lpfc_linkdown(phba); 16522 phba->link_state = link_state; 16523 16524 /* Unregister FCF if no devices connected to it */ 16525 lpfc_unregister_unused_fcf(phba); 16526 } 16527 16528 /** 16529 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data. 16530 * @phba: pointer to lpfc hba data structure. 16531 * @rgn23_data: pointer to configure region 23 data. 16532 * 16533 * This function gets SLI3 port configure region 23 data through memory dump 16534 * mailbox command. When it successfully retrieves data, the size of the data 16535 * will be returned, otherwise, 0 will be returned. 16536 **/ 16537 static uint32_t 16538 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 16539 { 16540 LPFC_MBOXQ_t *pmb = NULL; 16541 MAILBOX_t *mb; 16542 uint32_t offset = 0; 16543 int rc; 16544 16545 if (!rgn23_data) 16546 return 0; 16547 16548 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16549 if (!pmb) { 16550 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16551 "2600 failed to allocate mailbox memory\n"); 16552 return 0; 16553 } 16554 mb = &pmb->u.mb; 16555 16556 do { 16557 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23); 16558 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); 16559 16560 if (rc != MBX_SUCCESS) { 16561 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 16562 "2601 failed to read config " 16563 "region 23, rc 0x%x Status 0x%x\n", 16564 rc, mb->mbxStatus); 16565 mb->un.varDmp.word_cnt = 0; 16566 } 16567 /* 16568 * dump mem may return a zero when finished or we got a 16569 * mailbox error, either way we are done. 16570 */ 16571 if (mb->un.varDmp.word_cnt == 0) 16572 break; 16573 if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset) 16574 mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset; 16575 16576 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 16577 rgn23_data + offset, 16578 mb->un.varDmp.word_cnt); 16579 offset += mb->un.varDmp.word_cnt; 16580 } while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE); 16581 16582 mempool_free(pmb, phba->mbox_mem_pool); 16583 return offset; 16584 } 16585 16586 /** 16587 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data. 16588 * @phba: pointer to lpfc hba data structure. 16589 * @rgn23_data: pointer to configure region 23 data. 16590 * 16591 * This function gets SLI4 port configure region 23 data through memory dump 16592 * mailbox command. When it successfully retrieves data, the size of the data 16593 * will be returned, otherwise, 0 will be returned. 16594 **/ 16595 static uint32_t 16596 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data) 16597 { 16598 LPFC_MBOXQ_t *mboxq = NULL; 16599 struct lpfc_dmabuf *mp = NULL; 16600 struct lpfc_mqe *mqe; 16601 uint32_t data_length = 0; 16602 int rc; 16603 16604 if (!rgn23_data) 16605 return 0; 16606 16607 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16608 if (!mboxq) { 16609 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16610 "3105 failed to allocate mailbox memory\n"); 16611 return 0; 16612 } 16613 16614 if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) 16615 goto out; 16616 mqe = &mboxq->u.mqe; 16617 mp = (struct lpfc_dmabuf *) mboxq->context1; 16618 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 16619 if (rc) 16620 goto out; 16621 data_length = mqe->un.mb_words[5]; 16622 if (data_length == 0) 16623 goto out; 16624 if (data_length > DMP_RGN23_SIZE) { 16625 data_length = 0; 16626 goto out; 16627 } 16628 lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length); 16629 out: 16630 mempool_free(mboxq, phba->mbox_mem_pool); 16631 if (mp) { 16632 lpfc_mbuf_free(phba, mp->virt, mp->phys); 16633 kfree(mp); 16634 } 16635 return data_length; 16636 } 16637 16638 /** 16639 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled. 16640 * @phba: pointer to lpfc hba data structure. 16641 * 16642 * This function read region 23 and parse TLV for port status to 16643 * decide if the user disaled the port. If the TLV indicates the 16644 * port is disabled, the hba_flag is set accordingly. 16645 **/ 16646 void 16647 lpfc_sli_read_link_ste(struct lpfc_hba *phba) 16648 { 16649 uint8_t *rgn23_data = NULL; 16650 uint32_t if_type, data_size, sub_tlv_len, tlv_offset; 16651 uint32_t offset = 0; 16652 16653 /* Get adapter Region 23 data */ 16654 rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL); 16655 if (!rgn23_data) 16656 goto out; 16657 16658 if (phba->sli_rev < LPFC_SLI_REV4) 16659 data_size = lpfc_sli_get_config_region23(phba, rgn23_data); 16660 else { 16661 if_type = bf_get(lpfc_sli_intf_if_type, 16662 &phba->sli4_hba.sli_intf); 16663 if (if_type == LPFC_SLI_INTF_IF_TYPE_0) 16664 goto out; 16665 data_size = lpfc_sli4_get_config_region23(phba, rgn23_data); 16666 } 16667 16668 if (!data_size) 16669 goto out; 16670 16671 /* Check the region signature first */ 16672 if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) { 16673 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16674 "2619 Config region 23 has bad signature\n"); 16675 goto out; 16676 } 16677 offset += 4; 16678 16679 /* Check the data structure version */ 16680 if (rgn23_data[offset] != LPFC_REGION23_VERSION) { 16681 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16682 "2620 Config region 23 has bad version\n"); 16683 goto out; 16684 } 16685 offset += 4; 16686 16687 /* Parse TLV entries in the region */ 16688 while (offset < data_size) { 16689 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) 16690 break; 16691 /* 16692 * If the TLV is not driver specific TLV or driver id is 16693 * not linux driver id, skip the record. 16694 */ 16695 if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) || 16696 (rgn23_data[offset + 2] != LINUX_DRIVER_ID) || 16697 (rgn23_data[offset + 3] != 0)) { 16698 offset += rgn23_data[offset + 1] * 4 + 4; 16699 continue; 16700 } 16701 16702 /* Driver found a driver specific TLV in the config region */ 16703 sub_tlv_len = rgn23_data[offset + 1] * 4; 16704 offset += 4; 16705 tlv_offset = 0; 16706 16707 /* 16708 * Search for configured port state sub-TLV. 16709 */ 16710 while ((offset < data_size) && 16711 (tlv_offset < sub_tlv_len)) { 16712 if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) { 16713 offset += 4; 16714 tlv_offset += 4; 16715 break; 16716 } 16717 if (rgn23_data[offset] != PORT_STE_TYPE) { 16718 offset += rgn23_data[offset + 1] * 4 + 4; 16719 tlv_offset += rgn23_data[offset + 1] * 4 + 4; 16720 continue; 16721 } 16722 16723 /* This HBA contains PORT_STE configured */ 16724 if (!rgn23_data[offset + 2]) 16725 phba->hba_flag |= LINK_DISABLED; 16726 16727 goto out; 16728 } 16729 } 16730 16731 out: 16732 kfree(rgn23_data); 16733 return; 16734 } 16735 16736 /** 16737 * lpfc_wr_object - write an object to the firmware 16738 * @phba: HBA structure that indicates port to create a queue on. 16739 * @dmabuf_list: list of dmabufs to write to the port. 16740 * @size: the total byte value of the objects to write to the port. 16741 * @offset: the current offset to be used to start the transfer. 16742 * 16743 * This routine will create a wr_object mailbox command to send to the port. 16744 * the mailbox command will be constructed using the dma buffers described in 16745 * @dmabuf_list to create a list of BDEs. This routine will fill in as many 16746 * BDEs that the imbedded mailbox can support. The @offset variable will be 16747 * used to indicate the starting offset of the transfer and will also return 16748 * the offset after the write object mailbox has completed. @size is used to 16749 * determine the end of the object and whether the eof bit should be set. 16750 * 16751 * Return 0 is successful and offset will contain the the new offset to use 16752 * for the next write. 16753 * Return negative value for error cases. 16754 **/ 16755 int 16756 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, 16757 uint32_t size, uint32_t *offset) 16758 { 16759 struct lpfc_mbx_wr_object *wr_object; 16760 LPFC_MBOXQ_t *mbox; 16761 int rc = 0, i = 0; 16762 uint32_t shdr_status, shdr_add_status; 16763 uint32_t mbox_tmo; 16764 union lpfc_sli4_cfg_shdr *shdr; 16765 struct lpfc_dmabuf *dmabuf; 16766 uint32_t written = 0; 16767 16768 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 16769 if (!mbox) 16770 return -ENOMEM; 16771 16772 lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, 16773 LPFC_MBOX_OPCODE_WRITE_OBJECT, 16774 sizeof(struct lpfc_mbx_wr_object) - 16775 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED); 16776 16777 wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object; 16778 wr_object->u.request.write_offset = *offset; 16779 sprintf((uint8_t *)wr_object->u.request.object_name, "/"); 16780 wr_object->u.request.object_name[0] = 16781 cpu_to_le32(wr_object->u.request.object_name[0]); 16782 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0); 16783 list_for_each_entry(dmabuf, dmabuf_list, list) { 16784 if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size) 16785 break; 16786 wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys); 16787 wr_object->u.request.bde[i].addrHigh = 16788 putPaddrHigh(dmabuf->phys); 16789 if (written + SLI4_PAGE_SIZE >= size) { 16790 wr_object->u.request.bde[i].tus.f.bdeSize = 16791 (size - written); 16792 written += (size - written); 16793 bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1); 16794 } else { 16795 wr_object->u.request.bde[i].tus.f.bdeSize = 16796 SLI4_PAGE_SIZE; 16797 written += SLI4_PAGE_SIZE; 16798 } 16799 i++; 16800 } 16801 wr_object->u.request.bde_count = i; 16802 bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written); 16803 if (!phba->sli4_hba.intr_enable) 16804 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 16805 else { 16806 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); 16807 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 16808 } 16809 /* The IOCTL status is embedded in the mailbox subheader. */ 16810 shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr; 16811 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 16812 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 16813 if (rc != MBX_TIMEOUT) 16814 mempool_free(mbox, phba->mbox_mem_pool); 16815 if (shdr_status || shdr_add_status || rc) { 16816 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 16817 "3025 Write Object mailbox failed with " 16818 "status x%x add_status x%x, mbx status x%x\n", 16819 shdr_status, shdr_add_status, rc); 16820 rc = -ENXIO; 16821 } else 16822 *offset += wr_object->u.response.actual_write_length; 16823 return rc; 16824 } 16825 16826 /** 16827 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands. 16828 * @vport: pointer to vport data structure. 16829 * 16830 * This function iterate through the mailboxq and clean up all REG_LOGIN 16831 * and REG_VPI mailbox commands associated with the vport. This function 16832 * is called when driver want to restart discovery of the vport due to 16833 * a Clear Virtual Link event. 16834 **/ 16835 void 16836 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) 16837 { 16838 struct lpfc_hba *phba = vport->phba; 16839 LPFC_MBOXQ_t *mb, *nextmb; 16840 struct lpfc_dmabuf *mp; 16841 struct lpfc_nodelist *ndlp; 16842 struct lpfc_nodelist *act_mbx_ndlp = NULL; 16843 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 16844 LIST_HEAD(mbox_cmd_list); 16845 uint8_t restart_loop; 16846 16847 /* Clean up internally queued mailbox commands with the vport */ 16848 spin_lock_irq(&phba->hbalock); 16849 list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { 16850 if (mb->vport != vport) 16851 continue; 16852 16853 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 16854 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 16855 continue; 16856 16857 list_del(&mb->list); 16858 list_add_tail(&mb->list, &mbox_cmd_list); 16859 } 16860 /* Clean up active mailbox command with the vport */ 16861 mb = phba->sli.mbox_active; 16862 if (mb && (mb->vport == vport)) { 16863 if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) || 16864 (mb->u.mb.mbxCommand == MBX_REG_VPI)) 16865 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16866 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16867 act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2; 16868 /* Put reference count for delayed processing */ 16869 act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp); 16870 /* Unregister the RPI when mailbox complete */ 16871 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 16872 } 16873 } 16874 /* Cleanup any mailbox completions which are not yet processed */ 16875 do { 16876 restart_loop = 0; 16877 list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { 16878 /* 16879 * If this mailox is already processed or it is 16880 * for another vport ignore it. 16881 */ 16882 if ((mb->vport != vport) || 16883 (mb->mbox_flag & LPFC_MBX_IMED_UNREG)) 16884 continue; 16885 16886 if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) && 16887 (mb->u.mb.mbxCommand != MBX_REG_VPI)) 16888 continue; 16889 16890 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 16891 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16892 ndlp = (struct lpfc_nodelist *)mb->context2; 16893 /* Unregister the RPI when mailbox complete */ 16894 mb->mbox_flag |= LPFC_MBX_IMED_UNREG; 16895 restart_loop = 1; 16896 spin_unlock_irq(&phba->hbalock); 16897 spin_lock(shost->host_lock); 16898 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16899 spin_unlock(shost->host_lock); 16900 spin_lock_irq(&phba->hbalock); 16901 break; 16902 } 16903 } 16904 } while (restart_loop); 16905 16906 spin_unlock_irq(&phba->hbalock); 16907 16908 /* Release the cleaned-up mailbox commands */ 16909 while (!list_empty(&mbox_cmd_list)) { 16910 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list); 16911 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) { 16912 mp = (struct lpfc_dmabuf *) (mb->context1); 16913 if (mp) { 16914 __lpfc_mbuf_free(phba, mp->virt, mp->phys); 16915 kfree(mp); 16916 } 16917 ndlp = (struct lpfc_nodelist *) mb->context2; 16918 mb->context2 = NULL; 16919 if (ndlp) { 16920 spin_lock(shost->host_lock); 16921 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16922 spin_unlock(shost->host_lock); 16923 lpfc_nlp_put(ndlp); 16924 } 16925 } 16926 mempool_free(mb, phba->mbox_mem_pool); 16927 } 16928 16929 /* Release the ndlp with the cleaned-up active mailbox command */ 16930 if (act_mbx_ndlp) { 16931 spin_lock(shost->host_lock); 16932 act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 16933 spin_unlock(shost->host_lock); 16934 lpfc_nlp_put(act_mbx_ndlp); 16935 } 16936 } 16937 16938 /** 16939 * lpfc_drain_txq - Drain the txq 16940 * @phba: Pointer to HBA context object. 16941 * 16942 * This function attempt to submit IOCBs on the txq 16943 * to the adapter. For SLI4 adapters, the txq contains 16944 * ELS IOCBs that have been deferred because the there 16945 * are no SGLs. This congestion can occur with large 16946 * vport counts during node discovery. 16947 **/ 16948 16949 uint32_t 16950 lpfc_drain_txq(struct lpfc_hba *phba) 16951 { 16952 LIST_HEAD(completions); 16953 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; 16954 struct lpfc_iocbq *piocbq = NULL; 16955 unsigned long iflags = 0; 16956 char *fail_msg = NULL; 16957 struct lpfc_sglq *sglq; 16958 union lpfc_wqe wqe; 16959 int txq_cnt = 0; 16960 16961 spin_lock_irqsave(&pring->ring_lock, iflags); 16962 list_for_each_entry(piocbq, &pring->txq, list) { 16963 txq_cnt++; 16964 } 16965 16966 if (txq_cnt > pring->txq_max) 16967 pring->txq_max = txq_cnt; 16968 16969 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16970 16971 while (!list_empty(&pring->txq)) { 16972 spin_lock_irqsave(&pring->ring_lock, iflags); 16973 16974 piocbq = lpfc_sli_ringtx_get(phba, pring); 16975 if (!piocbq) { 16976 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16977 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 16978 "2823 txq empty and txq_cnt is %d\n ", 16979 txq_cnt); 16980 break; 16981 } 16982 sglq = __lpfc_sli_get_sglq(phba, piocbq); 16983 if (!sglq) { 16984 __lpfc_sli_ringtx_put(phba, pring, piocbq); 16985 spin_unlock_irqrestore(&pring->ring_lock, iflags); 16986 break; 16987 } 16988 txq_cnt--; 16989 16990 /* The xri and iocb resources secured, 16991 * attempt to issue request 16992 */ 16993 piocbq->sli4_lxritag = sglq->sli4_lxritag; 16994 piocbq->sli4_xritag = sglq->sli4_xritag; 16995 if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq)) 16996 fail_msg = "to convert bpl to sgl"; 16997 else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe)) 16998 fail_msg = "to convert iocb to wqe"; 16999 else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) 17000 fail_msg = " - Wq is full"; 17001 else 17002 lpfc_sli_ringtxcmpl_put(phba, pring, piocbq); 17003 17004 if (fail_msg) { 17005 /* Failed means we can't issue and need to cancel */ 17006 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 17007 "2822 IOCB failed %s iotag 0x%x " 17008 "xri 0x%x\n", 17009 fail_msg, 17010 piocbq->iotag, piocbq->sli4_xritag); 17011 list_add_tail(&piocbq->list, &completions); 17012 } 17013 spin_unlock_irqrestore(&pring->ring_lock, iflags); 17014 } 17015 17016 /* Cancel all the IOCBs that cannot be issued */ 17017 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, 17018 IOERR_SLI_ABORTED); 17019 17020 return txq_cnt; 17021 } 17022